diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..29288c6efe --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,60 @@ +# SALTSTACK CODE OWNERS + +# See https://help.github.com/articles/about-codeowners/ +# for more info about CODEOWNERS file + +# Lines starting with '#' are comments. +# Each line is a file pattern followed by one or more owners. + +# See https://help.github.com/articles/about-codeowners/ +# for more info about the CODEOWNERS file + +# Team Boto +salt/**/*boto* @saltstack/team-boto + +# Team Core +salt/auth/ @saltstack/team-core +salt/cache/ @saltstack/team-core +salt/cli/ @saltstack/team-core +salt/client/* @saltstack/team-core +salt/config/* @saltstack/team-core +salt/daemons/ @saltstack/team-core +salt/pillar/ @saltstack/team-core +salt/loader.py @saltstack/team-core +salt/payload.py @saltstack/team-core +salt/**/master* @saltstack/team-core +salt/**/minion* @saltstack/team-core + +# Team Cloud +salt/cloud/ @saltstack/team-cloud +salt/utils/openstack/ @saltstack/team-cloud +salt/utils/aws.py @saltstack/team-cloud +salt/**/*cloud* @saltstack/team-cloud + +# Team NetAPI +salt/cli/api.py @saltstack/team-netapi +salt/client/netapi.py @saltstack/team-netapi +salt/netapi/ @saltstack/team-netapi + +# Team Network +salt/proxy/ @saltstack/team-proxy + +# Team SPM +salt/cli/spm.py @saltstack/team-spm +salt/spm/ @saltstack/team-spm + +# Team SSH +salt/cli/ssh.py @saltstack/team-ssh +salt/client/ssh/ @saltstack/team-ssh +salt/runners/ssh.py @saltstack/team-ssh +salt/**/thin.py @saltstack/team-ssh + +# Team State +salt/state.py @saltstack/team-state + +# Team Transport +salt/transport/ @saltstack/team-transport +salt/utils/zeromq.py @saltstack/team-transport + +# Team Windows +salt/**/*win* @saltstack/team-windows diff --git a/conf/master b/conf/master index e0711d0db6..abfc1fa808 100644 --- a/conf/master +++ b/conf/master @@ -59,15 +59,14 @@ # Directory for custom modules. 
This directory can contain subdirectories for # each of Salt's module types such as "runners", "output", "wheel", "modules", -# "states", "returners", etc. -#extension_modules: +# "states", "returners", "engines", "utils", etc. +#extension_modules: /var/cache/salt/master/extmods # Directory for custom modules. This directory can contain subdirectories for # each of Salt's module types such as "runners", "output", "wheel", "modules", -# "states", "returners", "engines", etc. +# "states", "returners", "engines", "utils", etc. # Like 'extension_modules' but can take an array of paths -#module_dirs: -# - /var/cache/salt/minion/extmods +#module_dirs: [] # Verify and set permissions on configuration directories at startup: #verify_env: True @@ -302,6 +301,9 @@ # public keys from the minions. Note that this is insecure. #auto_accept: False +# The size of key that should be generated when creating new keys. +#keysize: 2048 + # Time in minutes that an incoming public key with a matching name found in # pki_dir/minion_autosign/keyid is automatically accepted. Expired autosign keys # are removed when the master checks the minion_autosign directory. @@ -959,6 +961,21 @@ #pillar_cache_backend: disk +###### Reactor Settings ##### +########################################### +# Define a salt reactor. See https://docs.saltstack.com/en/latest/topics/reactor/ +#reactor: [] + +#Set the TTL for the cache of the reactor configuration. +#reactor_refresh_interval: 60 + +#Configure the number of workers for the runner/wheel in the reactor. +#reactor_worker_threads: 10 + +#Define the queue size for workers in the reactor. +#reactor_worker_hwm: 10000 + + ##### Syndic settings ##### ########################################## # The Salt syndic is used to pass commands through a master from a higher diff --git a/conf/minion b/conf/minion index b1122c9e52..6cae043295 100644 --- a/conf/minion +++ b/conf/minion @@ -620,6 +620,9 @@ # you do so at your own risk! 
#open_mode: False +# The size of key that should be generated when creating new keys. +#keysize: 2048 + # Enable permissive access to the salt keys. This allows you to run the # master or minion as root, but have a non-root group be given access to # your pki_dir. To make the access explicit, root must belong to the group @@ -661,6 +664,21 @@ # ssl_version: PROTOCOL_TLSv1_2 +###### Reactor Settings ##### +########################################### +# Define a salt reactor. See https://docs.saltstack.com/en/latest/topics/reactor/ +#reactor: [] + +#Set the TTL for the cache of the reactor configuration. +#reactor_refresh_interval: 60 + +#Configure the number of workers for the runner/wheel in the reactor. +#reactor_worker_threads: 10 + +#Define the queue size for workers in the reactor. +#reactor_worker_hwm: 10000 + + ###### Thread settings ##### ########################################### # Disable multiprocessing support, by default when a minion receives a diff --git a/doc/conf.py b/doc/conf.py index 3e147f4e0b..de8db8ea90 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -245,9 +245,9 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ project = 'Salt' version = salt.version.__version__ -latest_release = '2016.11.6' # latest release -previous_release = '2016.3.6' # latest release from previous branch -previous_release_dir = '2016.3' # path on web server for previous branch +latest_release = '2017.7.1' # latest release +previous_release = '2016.11.7' # latest release from previous branch +previous_release_dir = '2016.11' # path on web server for previous branch next_release = '' # next release next_release_dir = '' # path on web server for next release branch @@ -258,8 +258,8 @@ if on_saltstack: copyright = time.strftime("%Y") # < --- START do not merge these settings to other branches START ---> # -build_type = 'develop' # latest, previous, develop, next -release = version # version, latest_release, previous_release +build_type = 'latest' # latest, previous, 
{%- if grains['os'] != 'Windows' %}
code-block:: jinja diff --git a/doc/ref/cli/_includes/output-options.rst b/doc/ref/cli/_includes/output-options.rst index 0128a1cfb0..c679269e76 100644 --- a/doc/ref/cli/_includes/output-options.rst +++ b/doc/ref/cli/_includes/output-options.rst @@ -33,6 +33,10 @@ Output Options Write the output to the specified file. +.. option:: --out-file-append, --output-file-append + + Append the output to the specified file. + .. option:: --no-color Disable all colored output @@ -46,3 +50,14 @@ Output Options ``green`` denotes success, ``red`` denotes failure, ``blue`` denotes changes and success and ``yellow`` denotes a expected future change in configuration. + +.. option:: --state-output=STATE_OUTPUT, --state_output=STATE_OUTPUT + + Override the configured state_output value for minion + output. One of 'full', 'terse', 'mixed', 'changes' or + 'filter'. Default: 'none'. + +.. option:: --state-verbose=STATE_VERBOSE, --state_verbose=STATE_VERBOSE + + Override the configured state_verbose value for minion + output. Set to True or False. Default: none. diff --git a/doc/ref/cli/salt-cp.rst b/doc/ref/cli/salt-cp.rst index 3922163b32..c2087c022a 100644 --- a/doc/ref/cli/salt-cp.rst +++ b/doc/ref/cli/salt-cp.rst @@ -39,6 +39,13 @@ specified target expression. desitination will be assumed to be a directory. Finally, recursion is now supported, allowing for entire directories to be copied. +.. versionchanged:: 2016.11.7,2017.7.2 + Reverted back to the old copy mode to preserve backward compatibility. The + new functionality added in 2016.6.6 and 2017.7.0 is now available using the + ``-C`` or ``--chunked`` CLI arguments. Note that compression, recursive + copying, and support for copying large files is only available in chunked + mode. + Options ======= @@ -56,9 +63,16 @@ Options .. include:: _includes/target-selection.rst +.. option:: -C, --chunked + + Use new chunked mode to copy files. This mode supports large files, recursive + directories copying and compression. + + .. 
versionadded:: 2016.11.7,2017.7.2 + .. option:: -n, --no-compression - Disable gzip compression. + Disable gzip compression in chunked mode. .. versionadded:: 2016.3.7,2016.11.6,2017.7.0 diff --git a/doc/ref/configuration/master.rst b/doc/ref/configuration/master.rst index 61511f39b7..976919a343 100644 --- a/doc/ref/configuration/master.rst +++ b/doc/ref/configuration/master.rst @@ -94,64 +94,6 @@ The user to run the Salt processes user: root -.. conf_master:: max_open_files - -``max_open_files`` ------------------- - -Default: ``100000`` - -Each minion connecting to the master uses AT LEAST one file descriptor, the -master subscription connection. If enough minions connect you might start -seeing on the console(and then salt-master crashes): - -.. code-block:: bash - - Too many open files (tcp_listener.cpp:335) - Aborted (core dumped) - -.. code-block:: yaml - - max_open_files: 100000 - -By default this value will be the one of `ulimit -Hn`, i.e., the hard limit for -max open files. - -To set a different value than the default one, uncomment, and configure this -setting. Remember that this value CANNOT be higher than the hard limit. Raising -the hard limit depends on the OS and/or distribution, a good way to find the -limit is to search the internet for something like this: - -.. code-block:: text - - raise max open files hard limit debian - -.. conf_master:: worker_threads - -``worker_threads`` ------------------- - -Default: ``5`` - -The number of threads to start for receiving commands and replies from minions. -If minions are stalling on replies because you have many minions, raise the -worker_threads value. - -Worker threads should not be put below 3 when using the peer system, but can -drop down to 1 worker otherwise. - -.. note:: - When the master daemon starts, it is expected behaviour to see - multiple salt-master processes, even if 'worker_threads' is set to '1'. 
At - a minimum, a controlling process will start along with a Publisher, an - EventPublisher, and a number of MWorker processes will be started. The - number of MWorker processes is tuneable by the 'worker_threads' - configuration value while the others are not. - -.. code-block:: yaml - - worker_threads: 5 - .. conf_master:: ret_port ``ret_port`` @@ -241,8 +183,8 @@ The directory to store the pki authentication keys. Directory for custom modules. This directory can contain subdirectories for each of Salt's module types such as ``runners``, ``output``, ``wheel``, -``modules``, ``states``, ``returners``, ``engines``, etc. This path is appended to -:conf_master:`root_dir`. +``modules``, ``states``, ``returners``, ``engines``, ``utils``, etc. +This path is appended to :conf_master:`root_dir`. .. code-block:: yaml @@ -946,6 +888,74 @@ to socket concurrently. sock_pool_size: 15 +.. conf_master:: ipc_mode + +``ipc_mode`` +------------ + +Default: ``ipc`` + +The ipc strategy. (i.e., sockets versus tcp, etc.) Windows platforms lack +POSIX IPC and must rely on TCP based inter-process communications. ``ipc_mode`` +is set to ``tcp`` by default on Windows. + +.. code-block:: yaml + + ipc_mode: ipc + +.. conf_master:: + +``tcp_master_pub_port`` +----------------------- + +Default: ``4512`` + +The TCP port on which events for the master should be published if ``ipc_mode`` is TCP. + +.. code-block:: yaml + + tcp_master_pub_port: 4512 + +.. conf_master:: tcp_master_pull_port + +``tcp_master_pull_port`` +------------------------ + +Default: ``4513`` + +The TCP port on which events for the master should be pulled if ``ipc_mode`` is TCP. + +.. code-block:: yaml + + tcp_master_pull_port: 4513 + +.. conf_master:: tcp_master_publish_pull + +``tcp_master_publish_pull`` +--------------------------- + +Default: ``4514`` + +The TCP port on which events for the master should be pulled fom and then republished onto +the event bus on the master. + +.. 
code-block:: yaml + + tcp_master_publish_pull: 4514 + +.. conf_master:: tcp_master_workers + +``tcp_master_workers`` +---------------------- + +Default: ``4515`` + +The TCP port for ``mworkers`` to connect to on the master. + +.. code-block:: yaml + + tcp_master_workers: 4515 + .. _salt-ssh-configuration: @@ -1192,6 +1202,19 @@ public keys from minions. auto_accept: False +.. conf_master:: keysize + +``keysize`` +----------- + +Default: ``2048`` + +The size of key that should be generated when creating new keys. + +.. code-block:: yaml + + keysize: 2048 + .. conf_master:: autosign_timeout ``autosign_timeout`` @@ -1236,6 +1259,24 @@ minion IDs for which keys will automatically be rejected. Will override both membership in the :conf_master:`autosign_file` and the :conf_master:`auto_accept` setting. +.. conf_master:: permissive_pki_access + +``permissive_pki_access`` +------------------------- + +Default: ``False`` + +Enable permissive access to the salt keys. This allows you to run the +master or minion as root, but have a non-root group be given access to +your pki_dir. To make the access explicit, root must belong to the group +you've given access to. This is potentially quite insecure. If an autosign_file +is specified, enabling permissive_pki_access will allow group access to that +specific file. + +.. code-block:: yaml + + permissive_pki_access: False + .. conf_master:: publisher_acl ``publisher_acl`` @@ -1278,6 +1319,20 @@ This is completely disabled by default. - cmd.* - test.echo +.. conf_master:: sudo_acl + +``sudo_acl`` +------------ + +Default: ``False`` + +Enforce ``publisher_acl`` and ``publisher_acl_blacklist`` when users have sudo +access to the salt command. + +.. code-block:: yaml + + sudo_acl: False + .. conf_master:: external_auth ``external_auth`` @@ -1462,6 +1517,19 @@ Do not disable this unless it is absolutely clear what this does. rotate_aes_key: True +.. 
publish_session: 86400
Raising +the hard limit depends on the OS and/or distribution, a good way to find the +limit is to search the internet for something like this: + +.. code-block:: text + + raise max open files hard limit debian + +.. conf_master:: worker_threads + +``worker_threads`` +------------------ + +Default: ``5`` + +The number of threads to start for receiving commands and replies from minions. +If minions are stalling on replies because you have many minions, raise the +worker_threads value. + +Worker threads should not be put below 3 when using the peer system, but can +drop down to 1 worker otherwise. + +.. note:: + When the master daemon starts, it is expected behaviour to see + multiple salt-master processes, even if 'worker_threads' is set to '1'. At + a minimum, a controlling process will start along with a Publisher, an + EventPublisher, and a number of MWorker processes will be started. The + number of MWorker processes is tuneable by the 'worker_threads' + configuration value while the others are not. + +.. code-block:: yaml + + worker_threads: 5 + +.. conf_master:: pub_hwm + +``pub_hwm`` +----------- + +Default: ``1000`` + +The zeromq high water mark on the publisher interface. + +.. code-block:: yaml + + pub_hwm: 1000 + +.. conf_master:: zmq_backlog + +``zmq_backlog`` +--------------- + +Default: ``1000`` + +The listen queue size of the ZeroMQ backlog. + +.. code-block:: yaml + + zmq_backlog: 1000 + +.. conf_master:: salt_event_pub_hwm +.. conf_master:: event_publisher_pub_hwm + +``salt_event_pub_hwm`` and ``event_publisher_pub_hwm`` +------------------------------------------------------ + +These two ZeroMQ High Water Mark settings, ``salt_event_pub_hwm`` and +``event_publisher_pub_hwm`` are significant for masters with thousands of +minions. When these are insufficiently high it will manifest in random +responses missing in the CLI and even missing from the job cache. 
Masters +that have fast CPUs and many cores with appropriate ``worker_threads`` +will not need these set as high. + +The ZeroMQ high-water-mark for the ``SaltEvent`` pub socket default is: + +.. code-block:: yaml + + salt_event_pub_hwm: 20000 + +The ZeroMQ high-water-mark for the ``EventPublisher`` pub socket default is: + +.. code-block:: yaml + + event_publisher_pub_hwm: 10000 + +As an example, on single master deployment with 8,000 minions, 2.4GHz CPUs, +24 cores, and 32GiB memory has these settings: + +.. code-block:: yaml + + salt_event_pub_hwm: 128000 + event_publisher_pub_hwm: 64000 + + .. _master-module-management: Master Module Management @@ -3179,6 +3386,26 @@ configuration. pillar_opts: False +.. conf_master:: pillar_safe_render_error + +``pillar_safe_render_error`` +---------------------------- + +Default: ``True`` + +The pillar_safe_render_error option prevents the master from passing pillar +render errors to the minion. This is set on by default because the error could +contain templating data which would give that minion information it shouldn't +have, like a password! When set ``True`` the error message will only show: + +.. code-block:: shell + + Rendering SLS 'my.sls' failed. Please see master log for details. + +.. code-block:: yaml + + pillar_safe_render_error: True + .. _master-configuration-ext-pillar: .. conf_master:: ext_pillar @@ -3849,6 +4076,62 @@ can be utilized: pillar_cache_backend: disk +Master Reactor Settings +======================= + +.. conf_master:: reactor + +``reactor`` +----------- + +Default: ``[]`` + +Defines a salt reactor. See the :ref:`Reactor ` documentation for more +information. + +.. code-block:: yaml + + reactor: [] + +.. conf_master:: reactor_refresh_interval + +``reactor_refresh_interval`` +---------------------------- + +Default: ``60`` + +The TTL for the cache of the reactor configuration. + +.. code-block:: yaml + + reactor_refresh_interval: 60 + +.. 
tcp_keepalive_intvl: -1
Specifying a commit diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst index cd554268c1..e0f349931c 100644 --- a/doc/ref/configuration/minion.rst +++ b/doc/ref/configuration/minion.rst @@ -750,6 +750,20 @@ seconds each iteration. acceptance_wait_time_max: 0 +.. conf_minion:: rejected_retry + +``rejected_retry`` +------------------ + +Default: ``False`` + +If the master rejects the minion's public key, retry instead of exiting. +Rejected keys will be handled the same as waiting on acceptance. + +.. code-block:: yaml + + rejected_retry: False + .. conf_minion:: random_reauth_delay ``random_reauth_delay`` @@ -1180,7 +1194,7 @@ If certain returners should be disabled, this is the place .. conf_minion:: enable_whitelist_modules ``whitelist_modules`` ----------------------------- +--------------------- Default: ``[]`` (Module whitelisting is disabled. Adding anything to the config option will cause only the listed modules to be enabled. Modules not in the list will @@ -1272,6 +1286,20 @@ A list of extra directories to search for Salt renderers render_dirs: - /var/lib/salt/renderers +.. conf_minion:: utils_dirs + +``utils_dirs`` +-------------- + +Default: ``[]`` + +A list of extra directories to search for Salt utilities + +.. code-block:: yaml + + utils_dirs: + - /var/lib/salt/utils + .. conf_minion:: cython_enable ``cython_enable`` @@ -1320,6 +1348,20 @@ below. providers: service: systemd +.. conf_minion:: modules_max_memory + +``modules_max_memory`` +---------------------- + +Default: ``-1`` + +Specify a max size (in bytes) for modules on import. This feature is currently +only supported on *nix operating systems and requires psutil. + +.. code-block:: yaml + + modules_max_memory: -1 + .. conf_minion:: extmod_whitelist .. conf_minion:: extmod_blacklist @@ -1345,8 +1387,8 @@ whitelist an empty list. modules: - specific_module - Valid options: + - beacons - clouds - sdb @@ -1492,6 +1534,52 @@ environment lacks one. default_top: dev +.. 
conf_minion:: startup_states + +``startup_states`` +------------------ + +Default: ``''`` + +States to run when the minion daemon starts. To enable, set ``startup_states`` to: + +- ``highstate``: Execute state.highstate +- ``sls``: Read in the sls_list option and execute the named sls files +- ``top``: Read top_file option and execute based on that file on the Master + +.. code-block:: yaml + + startup_states: '' + +.. conf_minion:: sls_list + +``sls_list`` +------------ + +Default: ``[]`` + +List of states to run when the minion starts up if ``startup_states`` is set to ``sls``. + +.. code-block:: yaml + + sls_list: + - edit.vim + - hyper + +.. conf_minion:: top_file + +``top_file`` +------------ + +Default: ``''`` + +Top file to execute if ``startup_states`` is set to ``top``. + +.. code-block:: yaml + + top_file: '' + + State Management Settings ========================= @@ -1508,7 +1596,7 @@ The default renderer used for local state executions renderer: yaml_jinja -.. conf_master:: test +.. conf_minion:: test ``test`` -------- @@ -2026,6 +2114,35 @@ before the initial key exchange. The master fingerprint can be found by running master_finger: 'ba:30:65:2a:d6:9e:20:4f:d8:b2:f3:a7:d4:65:11:13' +.. conf_minion:: keysize + +``keysize`` +----------- + +Default: ``2048`` + +The size of key that should be generated when creating new keys. + +.. code-block:: yaml + + keysize: 2048 + +.. conf_minion:: permissive_pki_access + +``permissive_pki_access`` +------------------------- + +Default: ``False`` + +Enable permissive access to the salt keys. This allows you to run the +master or minion as root, but have a non-root group be given access to +your pki_dir. To make the access explicit, root must belong to the group +you've given access to. This is potentially quite insecure. + +.. code-block:: yaml + + permissive_pki_access: False + .. conf_minion:: verify_master_pubkey_sign ``verify_master_pubkey_sign`` @@ -2133,7 +2250,7 @@ blocked. 
If `cmd_whitelist_glob` is NOT SET, then all shell commands are permitt - 'cat /etc/fstab' -.. conf_master:: ssl +.. conf_minion:: ssl ``ssl`` ------- @@ -2159,6 +2276,62 @@ constant names without ssl module prefix: ``CERT_REQUIRED`` or ``PROTOCOL_SSLv23 ssl_version: PROTOCOL_TLSv1_2 +Reactor Settings +================ + +.. conf_minion:: reactor + +``reactor`` +----------- + +Default: ``[]`` + +Defines a salt reactor. See the :ref:`Reactor ` documentation for more +information. + +.. code-block:: yaml + + reactor: [] + +.. conf_minion:: reactor_refresh_interval + +``reactor_refresh_interval`` +---------------------------- + +Default: ``60`` + +The TTL for the cache of the reactor configuration. + +.. code-block:: yaml + + reactor_refresh_interval: 60 + +.. conf_minion:: reactor_worker_threads + +``reactor_worker_threads`` +-------------------------- + +Default: ``10`` + +The number of workers for the runner/wheel in the reactor. + +.. code-block:: yaml + reactor_worker_threads: 10 + +.. conf_minion:: reactor_worker_hwm + +``reactor_worker_hwm`` +---------------------- + +Default: ``10000`` + +The queue size for workers in the reactor. + +.. code-block:: yaml + + reactor_worker_hwm: 10000 + + Thread Settings =============== @@ -2429,6 +2602,62 @@ option then the minion will log a warning message. - /etc/roles/webserver +Keepalive Settings +================== + +.. conf_minion:: tcp_keepalive + +``tcp_keepalive`` +----------------- + +Default: ``True`` + +The tcp keepalive interval to set on TCP ports. This setting can be used to tune Salt +connectivity issues in messy network environments with misbehaving firewalls. + +.. code-block:: yaml + + tcp_keepalive: True + +.. conf_minion:: tcp_keepalive_cnt + +``tcp_keepalive_cnt`` +--------------------- + +Default: ``-1`` + +Sets the ZeroMQ TCP keepalive count. May be used to tune issues with minion disconnects. + +.. code-block:: yaml + + tcp_keepalive_cnt: -1 + +.. 
tcp_keepalive_intvl: -1
code-block:: yaml + + winrepo_remotes_ng: + - https://github.com/saltstack/salt-winrepo-ng.git + +To specify a specific revision of the repository, prepend a commit ID to the +URL of the repository: + +.. code-block:: yaml + + winrepo_remotes_ng: + - ' https://github.com/saltstack/salt-winrepo-ng.git' + +Replace ```` with the SHA1 hash of a commit ID. Specifying a commit +ID is useful in that it allows one to revert back to a previous version in the +event that an error is introduced in the latest revision of the repo. diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst index e0b0198791..f325f8f375 100644 --- a/doc/ref/modules/all/index.rst +++ b/doc/ref/modules/all/index.rst @@ -195,6 +195,7 @@ execution modules keyboard keystone kmod + kubernetes launchctl layman ldap3 diff --git a/doc/ref/modules/all/salt.modules.kubernetes.rst b/doc/ref/modules/all/salt.modules.kubernetes.rst new file mode 100644 index 0000000000..a0f715d617 --- /dev/null +++ b/doc/ref/modules/all/salt.modules.kubernetes.rst @@ -0,0 +1,6 @@ +======================= +salt.modules.kubernetes +======================= + +.. automodule:: salt.modules.kubernetes + :members: diff --git a/doc/ref/modules/index.rst b/doc/ref/modules/index.rst index 9f81170fe4..3bd62b3c41 100644 --- a/doc/ref/modules/index.rst +++ b/doc/ref/modules/index.rst @@ -405,6 +405,29 @@ similar to the following: return __virtualname__ return False +The ``__virtual__()`` function can return a ``True`` or ``False`` boolean, a tuple, +or a string. If it returns a ``True`` value, this ``__virtualname__`` module-level +attribute can be set as seen in the above example. This is the string that the module +should be referred to as. + +When ``__virtual__()`` returns a tuple, the first item should be a boolean and the +second should be a string. This is typically done when the module should not load. 
The +first value of the tuple is ``False`` and the second is the error message to display +for why the module did not load. + +For example: + +.. code-block:: python + + def __virtual__(): + ''' + Only load if git exists on the system + ''' + if salt.utils.which('git') is None: + return (False, + 'The git execution module cannot be loaded: git unavailable.') + else: + return True Documentation ============= diff --git a/doc/ref/states/all/index.rst b/doc/ref/states/all/index.rst index 03de35190e..61e38a2b52 100644 --- a/doc/ref/states/all/index.rst +++ b/doc/ref/states/all/index.rst @@ -135,6 +135,7 @@ state modules keyboard keystone kmod + kubernetes layman ldap libcloud_dns diff --git a/doc/ref/states/all/salt.states.kubernetes.rst b/doc/ref/states/all/salt.states.kubernetes.rst new file mode 100644 index 0000000000..55bd416492 --- /dev/null +++ b/doc/ref/states/all/salt.states.kubernetes.rst @@ -0,0 +1,6 @@ +====================== +salt.states.kubernetes +====================== + +.. automodule:: salt.states.kubernetes + :members: diff --git a/doc/ref/states/requisites.rst b/doc/ref/states/requisites.rst index 6b00b3e856..3e3d29213c 100644 --- a/doc/ref/states/requisites.rst +++ b/doc/ref/states/requisites.rst @@ -519,7 +519,8 @@ runas .. versionadded:: 2017.7.0 -The ``runas`` global option is used to set the user which will be used to run the command in the ``cmd.run`` module. +The ``runas`` global option is used to set the user which will be used to run +the command in the ``cmd.run`` module. .. code-block:: yaml @@ -532,6 +533,26 @@ The ``runas`` global option is used to set the user which will be used to run th In the above state, the pip command run by ``cmd.run`` will be run by the daniel user. +runas_password +~~~~~~~~~~~~~~ + +.. versionadded:: 2017.7.2 + +The ``runas_password`` global option is used to set the password used by the +runas global option. This is required by ``cmd.run`` on Windows when ``runas`` +is specified. 
It will be set when ``runas_password`` is defined in the state. + +.. code-block:: yaml + + run_script: + cmd.run: + - name: Powershell -NonInteractive -ExecutionPolicy Bypass -File C:\\Temp\\script.ps1 + - runas: frank + - runas_password: supersecret + +In the above state, the Powershell script run by ``cmd.run`` will be run by the +frank user with the password ``supersecret``. + .. _requisites-require-in: .. _requisites-watch-in: .. _requisites-onchanges-in: diff --git a/doc/topics/cloud/action.rst b/doc/topics/cloud/action.rst index 0e10a95653..f36211d01a 100644 --- a/doc/topics/cloud/action.rst +++ b/doc/topics/cloud/action.rst @@ -21,7 +21,7 @@ Or you may specify a map which includes all VMs to perform the action on: $ salt-cloud -a reboot -m /path/to/mapfile -The following is a list of actions currently supported by salt-cloud: +The following is an example list of actions currently supported by ``salt-cloud``: .. code-block:: yaml @@ -36,5 +36,5 @@ The following is a list of actions currently supported by salt-cloud: - start - stop -Another useful reference for viewing more salt-cloud actions is the -:ref:Salt Cloud Feature Matrix +Another useful reference for viewing more ``salt-cloud`` actions is the +:ref:`Salt Cloud Feature Matrix `. diff --git a/doc/topics/cloud/aws.rst b/doc/topics/cloud/aws.rst index 7b809bc6f2..26b06c976a 100644 --- a/doc/topics/cloud/aws.rst +++ b/doc/topics/cloud/aws.rst @@ -78,6 +78,7 @@ parameters are discussed in more detail below. # RHEL -> ec2-user # CentOS -> ec2-user # Ubuntu -> ubuntu + # Debian -> admin # ssh_username: ec2-user diff --git a/doc/topics/cloud/config.rst b/doc/topics/cloud/config.rst index 18a4afb41b..d5986340d4 100644 --- a/doc/topics/cloud/config.rst +++ b/doc/topics/cloud/config.rst @@ -371,7 +371,6 @@ both. 
compute_name: cloudServersOpenStack protocol: ipv4 compute_region: DFW - protocol: ipv4 user: myuser tenant: 5555555 password: mypass diff --git a/doc/topics/cloud/function.rst b/doc/topics/cloud/function.rst index 3368cd820b..c83487d36b 100644 --- a/doc/topics/cloud/function.rst +++ b/doc/topics/cloud/function.rst @@ -26,5 +26,5 @@ gathering information about instances on a provider basis: $ salt-cloud -f list_nodes_full linode $ salt-cloud -f list_nodes_select linode -Another useful reference for viewing salt-cloud functions is the +Another useful reference for viewing ``salt-cloud`` functions is the :ref:`Salt Cloud Feature Matrix `. diff --git a/doc/topics/cloud/index.rst b/doc/topics/cloud/index.rst index 62ac596493..f7297b2aef 100644 --- a/doc/topics/cloud/index.rst +++ b/doc/topics/cloud/index.rst @@ -64,7 +64,9 @@ automatically installed salt-cloud for you. Use your distribution's package manager to install the ``salt-cloud`` package from the same repo that you used to install Salt. These repos will automatically be setup by Salt Bootstrap. -If there is no salt-cloud package, install with ``pip install salt-cloud``. +Alternatively, the ``-L`` option can be passed to the `Salt Bootstrap`_ script when +installing Salt. The ``-L`` option will install ``salt-cloud`` and the required +``libcloud`` package. .. _`Salt Bootstrap`: https://github.com/saltstack/salt-bootstrap diff --git a/doc/topics/cloud/joyent.rst b/doc/topics/cloud/joyent.rst index 8d4ca2c100..a56d533840 100644 --- a/doc/topics/cloud/joyent.rst +++ b/doc/topics/cloud/joyent.rst @@ -49,7 +49,7 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or in the .. code-block:: yaml - joyent_512 + joyent_512: provider: my-joyent-config size: g4-highcpu-512M image: ubuntu-16.04 diff --git a/doc/topics/cloud/qs.rst b/doc/topics/cloud/qs.rst index 18434e249b..9b37c3ea16 100644 --- a/doc/topics/cloud/qs.rst +++ b/doc/topics/cloud/qs.rst @@ -12,7 +12,9 @@ automatically installed salt-cloud for you. 
Use your distribution's package manager to install the ``salt-cloud`` package from the same repo that you used to install Salt. These repos will automatically be setup by Salt Bootstrap. -If there is no salt-cloud package, install with ``pip install salt-cloud``. +Alternatively, the ``-L`` option can be passed to the `Salt Bootstrap`_ script when +installing Salt. The ``-L`` option will install ``salt-cloud`` and the required +``libcloud`` package. .. _`Salt Bootstrap`: https://github.com/saltstack/salt-bootstrap diff --git a/doc/topics/development/contributing.rst b/doc/topics/development/contributing.rst index 57e3b3c677..fd21d86a23 100644 --- a/doc/topics/development/contributing.rst +++ b/doc/topics/development/contributing.rst @@ -260,6 +260,13 @@ The Salt development team will back-port bug fixes made to ``develop`` to the current release branch if the contributor cannot create the pull request against that branch. +Release Branches +---------------- + +For each release a branch will be created when we are ready to tag. The branch will be the same name as the tag minus the v. For example, the v2017.7.1 release was created from the 2017.7.1 branch. This branching strategy will allow for more stability when there is a need for a re-tag during the testing phase of our releases. + +Once the branch is created, the fixes required for a given release, as determined by the SaltStack release team, will be added to this branch. All commits in this branch will be merged forward into the parent branch as well. 
+ Keeping Salt Forks in Sync ========================== diff --git a/doc/topics/installation/eos.rst b/doc/topics/installation/eos.rst new file mode 100644 index 0000000000..c8c7787be7 --- /dev/null +++ b/doc/topics/installation/eos.rst @@ -0,0 +1,154 @@ +========================================= +Arista EOS Salt minion installation guide +========================================= + +The Salt minion for Arista EOS is distributed as a SWIX extension and can be installed directly on the switch. The EOS network operating system is based on old Fedora distributions and the installation of the ``salt-minion`` requires backports. This SWIX extension contains the necessary backports, together with the Salt basecode. + +.. note:: + + This SWIX extension has been tested on Arista DCS-7280SE-68-R, running EOS 4.17.5M and vEOS 4.18.3F. + +Important Notes +=============== + +This package is in beta, make sure to test it carefully before running it in production. + +If confirmed working correctly, please report and add a note on this page with the platform model and EOS version. + +If you want to uninstall this package, please refer to the uninstalling_ section. + +Installation from the Official SaltStack Repository +=================================================== + +Download the swix package and save it to flash. + +.. code-block:: bash + + veos#copy https://salt-eos.netops.life/salt-eos-latest.swix flash: + veos#copy https://salt-eos.netops.life/startup.sh flash: + +Install the Extension +===================== + +Copy the Salt package to extension + +.. code-block:: bash + + veos#copy flash:salt-eos-latest.swix extension: + +Install the SWIX + +.. code-block:: bash + + veos#extension salt-eos-latest.swix force + +Verify the installation + +.. code-block:: bash + + veos#show extensions | include salt-eos + salt-eos-2017-07-19.swix 1.0.11/1.fc25 A, F 27 + +Change the Salt master IP address or FQDN, by edit the variable (SALT_MASTER) + +.. 
code-block:: bash + + veos#bash vi /mnt/flash/startup.sh + +Make sure you enable the eAPI with unix-socket + +.. code-block:: bash + + veos(config)#management api http-commands + protocol unix-socket + no shutdown + +Post-installation tasks +======================= + +Generate Keys and host record and start Salt minion + +.. code-block:: bash + + veos#bash + #sudo /mnt/flash/startup.sh + +``salt-minion`` should be running + +Copy the installed extensions to boot-extensions + +.. code-block:: bash + + veos#copy installed-extensions boot-extensions + +Apply event-handler to let EOS start salt-minion during boot-up + +.. code-block:: bash + + veos(config)#event-handler boot-up-script + trigger on-boot + action bash sudo /mnt/flash/startup.sh + +For more specific installation details of the ``salt-minion``, please refer to :ref:`Configuring Salt`. + +.. _uninstalling: + +Uninstalling +============ + +If you decide to uninstall this package, the following steps are recommended for safety: + +1. Remove the extension from boot-extensions + +.. code-block:: bash + + veos#bash rm /mnt/flash/boot-extensions + +2. Remove the extension from extensions folder + +.. code-block:: bash + + veos#bash rm /mnt/flash/.extensions/salt-eos-latest.swix + +3. Remove boot-up script + +.. code-block:: bash + + veos(config)#no event-handler boot-up-script + +Additional Information +====================== + +This SWIX extension contains the following RPM packages: + +..
code-block:: text + + libsodium-1.0.11-1.fc25.i686.rpm + libstdc++-6.2.1-2.fc25.i686.rpm + openpgm-5.2.122-6.fc24.i686.rpm + python-Jinja2-2.8-0.i686.rpm + python-PyYAML-3.12-0.i686.rpm + python-babel-0.9.6-5.fc18.noarch.rpm + python-backports-1.0-3.fc18.i686.rpm + python-backports-ssl_match_hostname-3.4.0.2-1.fc18.noarch.rpm + python-backports_abc-0.5-0.i686.rpm + python-certifi-2016.9.26-0.i686.rpm + python-chardet-2.0.1-5.fc18.noarch.rpm + python-crypto-1.4.1-1.noarch.rpm + python-crypto-2.6.1-1.fc18.i686.rpm + python-futures-3.1.1-1.noarch.rpm + python-jtextfsm-0.3.1-0.noarch.rpm + python-kitchen-1.1.1-2.fc18.noarch.rpm + python-markupsafe-0.18-1.fc18.i686.rpm + python-msgpack-python-0.4.8-0.i686.rpm + python-napalm-base-0.24.3-1.noarch.rpm + python-napalm-eos-0.6.0-1.noarch.rpm + python-netaddr-0.7.18-0.noarch.rpm + python-pyeapi-0.7.0-0.noarch.rpm + python-salt-2017.7.0_1414_g2fb986f-1.noarch.rpm + python-singledispatch-3.4.0.3-0.i686.rpm + python-six-1.10.0-0.i686.rpm + python-tornado-4.4.2-0.i686.rpm + python-urllib3-1.5-7.fc18.noarch.rpm + python2-zmq-15.3.0-2.fc25.i686.rpm + zeromq-4.1.4-5.fc25.i686.rpm diff --git a/doc/topics/installation/index.rst b/doc/topics/installation/index.rst index 0ee4b38d7a..2f96830c4e 100644 --- a/doc/topics/installation/index.rst +++ b/doc/topics/installation/index.rst @@ -46,6 +46,7 @@ These guides go into detail how to install Salt on a given platform. arch debian + eos fedora freebsd gentoo diff --git a/doc/topics/jinja/index.rst b/doc/topics/jinja/index.rst index 5548b29646..fd37e9587f 100644 --- a/doc/topics/jinja/index.rst +++ b/doc/topics/jinja/index.rst @@ -335,7 +335,7 @@ Returns: .. versionadded:: 2017.7.0 -Wraps a text around quoutes. +This text will be wrapped in quotes. .. jinja_ref:: regex_search @@ -750,19 +750,43 @@ Returns: Check a whitelist and/or blacklist to see if the value matches it. 
-Example: +This filter can be used with either a whitelist or a blacklist individually, +or a whitelist and a blacklist can be passed simultaneously. + +If whitelist is used alone, value membership is checked against the +whitelist only. If the value is found, the function returns ``True``. +Otherwise, it returns ``False``. + +If blacklist is used alone, value membership is checked against the +blacklist only. If the value is found, the function returns ``False``. +Otherwise, it returns ``True``. + +If both a whitelist and a blacklist are provided, value membership in the +blacklist will be examined first. If the value is not found in the blacklist, +then the whitelist is checked. If the value isn't found in the whitelist, +the function returns ``False``. + +Whitelist Example: .. code-block:: jinja - {{ 5 | check_whitelist_blacklist(whitelist=[5, 6, 7]) }} - {{ 5 | check_whitelist_blacklist(blacklist=[5, 6, 7]) }} + {{ 5 | check_whitelist_blacklist(whitelist=[5, 6, 7]) }} Returns: .. code-block:: python - True + True +Blacklist Example: + +.. code-block:: jinja + + {{ 5 | check_whitelist_blacklist(blacklist=[5, 6, 7]) }} + +.. code-block:: python + + False .. jinja_ref:: date_format @@ -825,6 +849,13 @@ Example: {{ 'wall of text' | to_bytes }} +.. note:: + + This option may have adverse effects when using the default renderer, ``yaml_jinja``. + This is due to the fact that YAML requires proper handling in regard to special + characters. Please see the section on :ref:`YAML ASCII support ` + in the :ref:`YAML Idiosyncracies ` documentation for more + information. .. jinja_ref:: json_decode_list @@ -876,16 +907,22 @@ Returns: ------------ .. versionadded:: 2017.7.0 +.. versionadded:: Oxygen + Renamed from ``rand_str`` to ``random_hash`` to more accurately describe + what the filter does. -Generate a random string and applies a hash. Default hashing: md5. +Generates a random number between 1 and the number passed to the filter, and +then hashes it. 
The default hash type is the one specified by the minion's +:conf_minion:`hash_type` config option, but an alternate hash type can be +passed to the filter as an argument. Example: .. code-block:: jinja - {% set passwd_length = 17 %} - {{ passwd_length | rand_str }} - {{ passwd_length | rand_str('sha512') }} + {% set num_range = 99999999 %} + {{ num_range | rand_str }} + {{ num_range | rand_str('sha512') }} Returns: @@ -1186,7 +1223,7 @@ Example: .. code-block:: jinja - {{ ['192.168.0.1', 'foo', 'bar', 'fe80::'] | ipv4 }} + {{ ['192.168.0.1', 'foo', 'bar', 'fe80::'] | ipv6 }} Returns: @@ -1202,7 +1239,12 @@ Returns: .. versionadded:: 2017.7.0 -Return the list of hosts within a networks. +Return the list of hosts within a network. This utility works for both IPv4 and IPv6. + +.. note:: + + When running this command with a large IPv6 network, the command will + take a long time to gather all of the hosts. Example: @@ -1224,7 +1266,7 @@ Returns: .. versionadded:: 2017.7.0 -Return the size of the network. +Return the size of the network. This utility works for both IPv4 and IPv6. Example: @@ -1284,6 +1326,13 @@ Example: {{ '00:11:22:33:44:55' | mac_str_to_bytes }} +.. note:: + + This option may have adverse effects when using the default renderer, ``yaml_jinja``. + This is due to the fact that YAML requires proper handling in regard to special + characters. Please see the section on :ref:`YAML ASCII support ` + in the :ref:`YAML Idiosyncracies ` documentation for more + information. .. jinja_ref:: dns_check diff --git a/doc/topics/releases/2016.11.7.rst b/doc/topics/releases/2016.11.7.rst new file mode 100644 index 0000000000..2ad821ff22 --- /dev/null +++ b/doc/topics/releases/2016.11.7.rst @@ -0,0 +1,15 @@ +============================ +Salt 2016.11.7 Release Notes +============================ + +Version 2016.11.7 is a bugfix release for :ref:`2016.11.0 `.
+ +Changes for v2016.11.6..v2016.11.7 +---------------------------------- + +Security Fix +============ + +CVE-2017-12791 Maliciously crafted minion IDs can cause unwanted directory traversals on the Salt-master + +Correct a flaw in minion id validation which could allow certain minions to authenticate to a master despite not having the correct credentials. To exploit the vulnerability, an attacker must create a salt-minion with an ID containing characters that will cause a directory traversal. Credit for discovering the security flaw goes to: Vernhk@qq.com diff --git a/doc/topics/releases/2016.3.7.rst b/doc/topics/releases/2016.3.7.rst index 3c47df7eda..0fca807661 100644 --- a/doc/topics/releases/2016.3.7.rst +++ b/doc/topics/releases/2016.3.7.rst @@ -4,23 +4,12 @@ Salt 2016.3.7 Release Notes Version 2016.3.7 is a bugfix release for :ref:`2016.3.0 `. -New master configuration option `allow_minion_key_revoke`, defaults to True. This option -controls whether a minion can request that the master revoke its key. When True, a minion -can request a key revocation and the master will comply. If it is False, the key will not -be revoked by the msater. +Changes for v2016.3.6..v2016.3.7 +-------------------------------- -New master configuration option `require_minion_sign_messages` -This requires that minions cryptographically sign the messages they -publish to the master. If minions are not signing, then log this information -at loglevel 'INFO' and drop the message without acting on it. +Security Fix +============ -New master configuration option `drop_messages_signature_fail` -Drop messages from minions when their signatures do not validate. -Note that when this option is False but `require_minion_sign_messages` is True -minions MUST sign their messages but the validity of their signatures -is ignored. 
+CVE-2017-12791 Maliciously crafted minion IDs can cause unwanted directory traversals on the Salt-master -New minion configuration option `minion_sign_messages` -Causes the minion to cryptographically sign the payload of messages it places -on the event bus for the master. The payloads are signed with the minion's -private key so the master can verify the signature with its public key. +Correct a flaw in minion id validation which could allow certain minions to authenticate to a master despite not having the correct credentials. To exploit the vulnerability, an attacker must create a salt-minion with an ID containing characters that will cause a directory traversal. Credit for discovering the security flaw goes to: Vernhk@qq.com diff --git a/doc/topics/releases/2016.3.8.rst b/doc/topics/releases/2016.3.8.rst new file mode 100644 index 0000000000..c5f0c01da8 --- /dev/null +++ b/doc/topics/releases/2016.3.8.rst @@ -0,0 +1,29 @@ +=========================== +Salt 2016.3.8 Release Notes +=========================== + +Version 2016.3.8 is a bugfix release for :ref:`2016.3.0 `. + +Changes for v2016.3.7..v2016.3.8 +-------------------------------- + +New master configuration option `allow_minion_key_revoke`, defaults to True. This option +controls whether a minion can request that the master revoke its key. When True, a minion +can request a key revocation and the master will comply. If it is False, the key will not +be revoked by the master. + +New master configuration option `require_minion_sign_messages` +This requires that minions cryptographically sign the messages they +publish to the master. If minions are not signing, then log this information +at loglevel 'INFO' and drop the message without acting on it. + +New master configuration option `drop_messages_signature_fail` +Drop messages from minions when their signatures do not validate.
+Note that when this option is False but `require_minion_sign_messages` is True +minions MUST sign their messages but the validity of their signatures +is ignored. + +New minion configuration option `minion_sign_messages` +Causes the minion to cryptographically sign the payload of messages it places +on the event bus for the master. The payloads are signed with the minion's +private key so the master can verify the signature with its public key. diff --git a/doc/topics/releases/2017.7.0.rst b/doc/topics/releases/2017.7.0.rst index 52e026e490..a0e257b347 100644 --- a/doc/topics/releases/2017.7.0.rst +++ b/doc/topics/releases/2017.7.0.rst @@ -28,8 +28,6 @@ The following salt-cloud drivers have known issues running with Python 3. These - Joyent -- Any driver that relies on the `apache-libcloud` library such as cloudstack, dimenstiondata, gce, nova, and openstack - - When running under Python 3, users who require Unicode support should ensure that a locale is set on their machines. Users using the `C` locale are advised to switch to a UTF-aware locale to ensure proper functionality with Salt with Python 3. @@ -124,13 +122,12 @@ State Module Changes # After run_something: module.run: - mymodule.something: + - mymodule.something: - name: some name - first_arg: one - second_arg: two - do_stuff: True - Since a lot of users are already using :py:func:`module.run ` states, this new behavior must currently be explicitly turned on, to allow users to take their time updating their SLS @@ -138,6 +135,36 @@ State Module Changes the next feature release of Salt (Oxygen) and the old usage will no longer be supported at that time. + Another feature of the new :py:func:`module.run ` is that + it allows calling many functions in a single batch, such as: + + .. 
code-block:: yaml + + run_something: + module.run: + - mymodule.function_without_parameters: + - mymodule.another_function: + - myparam + - my_other_param + + In a rare case that you have a function that needs to be called several times but + with the different parameters, an additional feature of "tagging" comes to the + rescue. In order to tag a function, use a colon delimiter. For example: + + .. code-block:: yaml + + run_something: + module.run: + - mymodule.same_function:1: + - mymodule.same_function:2: + - myparam + - my_other_param + - mymodule.same_function:3: + - foo: bar + + The example above will run `mymodule.same_function` three times with the + different parameters. + To enable the new behavior for :py:func:`module.run `, add the following to the minion config file: @@ -145,6 +172,7 @@ State Module Changes use_superseded: - module.run + - The default for the ``fingerprint_hash_type`` option used in the ``present`` function in the :mod:`ssh ` state changed from ``md5`` to ``sha256``. @@ -678,6 +706,7 @@ Execution modules - :mod:`salt.modules.grafana4 ` - :mod:`salt.modules.heat ` - :mod:`salt.modules.icinga2 ` +- :mod:`salt.modules.kubernetes ` - :mod:`salt.modules.logmod ` - :mod:`salt.modules.mattermost ` - :mod:`salt.modules.namecheap_dns ` @@ -756,6 +785,7 @@ States - :mod:`salt.states.icinga2 ` - :mod:`salt.states.influxdb_continuous_query ` - :mod:`salt.states.influxdb_retention_policy ` +- :mod:`salt.states.kubernetes ` - :mod:`salt.states.logadm ` - :mod:`salt.states.logrotate ` - :mod:`salt.states.msteams ` @@ -945,3 +975,13 @@ The ``glusterfs`` state had the following function removed: The ``openvswitch_port`` state had the following change: - The ``type`` option was removed from the ``present`` function. Please use ``tunnel_type`` instead. + +Build Notes +=========== + +Windows Installer Packages +-------------------------- + +Windows Installer packages have been patched with the following PR: 42347_ + +..
_42347: https://github.com/saltstack/salt/pull/42347 diff --git a/doc/topics/releases/releasecandidate.rst b/doc/topics/releases/releasecandidate.rst index 0b042b86da..60918d6ac3 100644 --- a/doc/topics/releases/releasecandidate.rst +++ b/doc/topics/releases/releasecandidate.rst @@ -8,7 +8,7 @@ Installing/Testing a Salt Release Candidate It's time for a new feature release of Salt! Follow the instructions below to install the latest release candidate of Salt, and try :ref:`all the shiny new -features `! Be sure to report any bugs you find on `Github +features `! Be sure to report any bugs you find on `Github `_. Installing Using Packages @@ -32,32 +32,12 @@ Builds for a few platforms are available as part of the RC at https://repo.salts Available builds: -- Amazon Linux -- Debian 8 -- macOS -- RHEL 7 -- SmartOS (see below) -- Ubuntu 16.04 +- Ubuntu16 +- Redhat7 - Windows .. FreeBSD -SmartOS -------- -Release candidate builds for SmartOS are available at http://pkg.blackdot.be/extras/salt-2016.11rc/. - -On a base64 2015Q4-x86_64 based native zone the package can be installed by the following: - -.. code-block:: bash - - pfexec pkg_add -U https://pkg.blackdot.be/extras/salt-2016.11rc/salt-2016.11.0rc2_2015Q4_x86_64.tgz - -When using the 2016Q2-tools release on the global zone by the following: - -.. code-block:: bash - - pfexec pkg_add -U https://pkg.blackdot.be/extras/salt-2016.11rc/salt-2016.11.0rc2_2016Q2_TOOLS.tgz - Installing Using Bootstrap ========================== @@ -67,14 +47,14 @@ You can install a release candidate of Salt using `Salt Bootstrap .. code-block:: bash curl -o install_salt.sh -L https://bootstrap.saltstack.com - sudo sh install_salt.sh -P git v2016.11.0rc2 + sudo sh install_salt.sh -P git v2017.7.0rc1 If you want to also install a master using Salt Bootstrap, use the ``-M`` flag: .. 
code-block:: bash curl -o install_salt.sh -L https://bootstrap.saltstack.com - sudo sh install_salt.sh -P -M git v2016.11.0rc2 + sudo sh install_salt.sh -P -M git v2017.7.0rc1 If you want to install only a master and not a minion using Salt Bootstrap, use the ``-M`` and ``-N`` flags: @@ -82,13 +62,13 @@ the ``-M`` and ``-N`` flags: .. code-block:: bash curl -o install_salt.sh -L https://bootstrap.saltstack.com - sudo sh install_salt.sh -P -M -N git v2016.11.0rc2 + sudo sh install_salt.sh -P -M -N git v2017.7.0rc1 Installing Using PyPI ===================== Installing from the `source archive -`_ on +`_ on `PyPI `_ is fairly straightforward. .. note:: @@ -126,4 +106,4 @@ Then install salt using the following command: .. code-block:: bash - sudo pip install salt==2016.11.0rc2 + sudo pip install salt==2017.7.0rc1 diff --git a/doc/topics/ssh/index.rst b/doc/topics/ssh/index.rst index 8920e9e648..95a2bdecdb 100644 --- a/doc/topics/ssh/index.rst +++ b/doc/topics/ssh/index.rst @@ -64,7 +64,8 @@ Deploy ssh key for salt-ssh =========================== By default, salt-ssh will generate key pairs for ssh, the default path will be -/etc/salt/pki/master/ssh/salt-ssh.rsa +``/etc/salt/pki/master/ssh/salt-ssh.rsa``. The key generation happens when you run +``salt-ssh`` for the first time. You can use ssh-copy-id, (the OpenSSH key deployment tool) to deploy keys to your servers. diff --git a/doc/topics/troubleshooting/yaml_idiosyncrasies.rst b/doc/topics/troubleshooting/yaml_idiosyncrasies.rst index e1c6383459..f5a9fed916 100644 --- a/doc/topics/troubleshooting/yaml_idiosyncrasies.rst +++ b/doc/topics/troubleshooting/yaml_idiosyncrasies.rst @@ -28,6 +28,7 @@ hit `Enter`. Also, you can convert tabs to 2 spaces by these commands in Vim: Indentation =========== + The suggested syntax for YAML files is to use 2 spaces for indentation, but YAML will follow whatever indentation system that the individual file uses. 
Indentation of two spaces works very well for SLS files given the @@ -112,8 +113,24 @@ PyYAML will load these values as boolean ``True`` or ``False``. Un-capitalized versions will also be loaded as booleans (``true``, ``false``, ``yes``, ``no``, ``on``, and ``off``). This can be especially problematic when constructing Pillar data. Make sure that your Pillars which need to use the string versions -of these values are enclosed in quotes. Pillars will be parsed twice by salt, -so you'll need to wrap your values in multiple quotes, for example '"false"'. +of these values are enclosed in quotes. Pillars will be parsed twice by salt, +so you'll need to wrap your values in multiple quotes, including double quotation +marks (``" "``) and single quotation marks (``' '``). Note that spaces are included +in the quotation type examples for clarity. + +Multiple quoting examples looks like this: + +.. code-block:: yaml + + - '"false"' + - "'True'" + - "'YES'" + - '"No"' + +.. note:: + + When using multiple quotes in this manner, they must be different. Using ``"" ""`` + or ``'' ''`` won't work in this case (spaces are included in examples for clarity). The '%' Sign ============ @@ -248,8 +265,10 @@ Alternatively, they can be defined the "old way", or with multiple - require: - user: fred -YAML support only plain ASCII -============================= +.. _yaml_plain_ascii: + +YAML supports only plain ASCII +============================== According to YAML specification, only ASCII characters can be used. 
diff --git a/doc/topics/tutorials/gitfs.rst b/doc/topics/tutorials/gitfs.rst index a9e6fff0b2..86dfae879d 100644 --- a/doc/topics/tutorials/gitfs.rst +++ b/doc/topics/tutorials/gitfs.rst @@ -166,13 +166,15 @@ Ubuntu 14.04 LTS and Debian Wheezy (7.x) also have a compatible version packaged # apt-get install python-git -If your master is running an older version (such as Ubuntu 12.04 LTS or Debian -Squeeze), then you will need to install GitPython using either pip_ or -easy_install (it is recommended to use pip). Version 0.3.2.RC1 is now marked as -the stable release in PyPI, so it should be a simple matter of running ``pip -install GitPython`` (or ``easy_install GitPython``) as root. +GitPython_ requires the ``git`` CLI utility to work. If installed from a system +package, then git should already be installed, but if installed via pip_ then +it may still be necessary to install git separately. For MacOS users, +GitPython_ comes bundled in with the Salt installer, but git must still be +installed for it to work properly. Git can be installed in several ways, +including by installing XCode_. -.. _`pip`: http://www.pip-installer.org/ +.. _pip: http://www.pip-installer.org/ +.. _XCode: https://developer.apple.com/xcode/ .. 
warning:: diff --git a/doc/topics/tutorials/http.rst b/doc/topics/tutorials/http.rst index 1eaee62071..e6b20c62a2 100644 --- a/doc/topics/tutorials/http.rst +++ b/doc/topics/tutorials/http.rst @@ -110,7 +110,7 @@ To pass through a file that contains jinja + yaml templating (the default): method='POST', data_file='/srv/salt/somefile.jinja', data_render=True, - template_data={'key1': 'value1', 'key2': 'value2'} + template_dict={'key1': 'value1', 'key2': 'value2'} ) To pass through a file that contains mako templating: @@ -123,7 +123,7 @@ To pass through a file that contains mako templating: data_file='/srv/salt/somefile.mako', data_render=True, data_renderer='mako', - template_data={'key1': 'value1', 'key2': 'value2'} + template_dict={'key1': 'value1', 'key2': 'value2'} ) Because this function uses Salt's own rendering system, any Salt renderer can @@ -140,7 +140,7 @@ However, this can be changed to ``master`` if necessary. method='POST', data_file='/srv/salt/somefile.jinja', data_render=True, - template_data={'key1': 'value1', 'key2': 'value2'}, + template_dict={'key1': 'value1', 'key2': 'value2'}, opts=__opts__ ) @@ -149,7 +149,7 @@ However, this can be changed to ``master`` if necessary. method='POST', data_file='/srv/salt/somefile.jinja', data_render=True, - template_data={'key1': 'value1', 'key2': 'value2'}, + template_dict={'key1': 'value1', 'key2': 'value2'}, node='master' ) @@ -170,11 +170,11 @@ a Python dict. header_file='/srv/salt/headers.jinja', header_render=True, header_renderer='jinja', - template_data={'key1': 'value1', 'key2': 'value2'} + template_dict={'key1': 'value1', 'key2': 'value2'} ) Because much of the data that would be templated between headers and data may be -the same, the ``template_data`` is the same for both. Correcting possible +the same, the ``template_dict`` is the same for both. Correcting possible variable name collisions is up to the user. 
Authentication diff --git a/doc/topics/tutorials/pillar.rst b/doc/topics/tutorials/pillar.rst index e0c97f26bd..3ec2c1ddea 100644 --- a/doc/topics/tutorials/pillar.rst +++ b/doc/topics/tutorials/pillar.rst @@ -75,7 +75,7 @@ The default location for the pillar is in /srv/pillar. .. note:: - The pillar location can be configured via the `pillar_roots` option inside + The pillar location can be configured via the ``pillar_roots`` option inside the master configuration file. It must not be in a subdirectory of the state tree or file_roots. If the pillar is under file_roots, any pillar targeting can be bypassed by minions. @@ -242,7 +242,7 @@ set in the minion's pillar, then the default of ``httpd`` will be used. .. note:: Under the hood, pillar is just a Python dict, so Python dict methods such - as `get` and `items` can be used. + as ``get`` and ``items`` can be used. Pillar Makes Simple States Grow Easily ====================================== @@ -303,6 +303,18 @@ Where the vimrc source location can now be changed via pillar: Ensuring that the right vimrc is sent out to the correct minions. +The pillar top file must include a reference to the new sls pillar file: + +``/srv/pillar/top.sls``: + +.. code-block:: yaml + + base: + '*': + - pkg + - edit.vim + + Setting Pillar Data on the Command Line ======================================= diff --git a/doc/topics/utils/index.rst b/doc/topics/utils/index.rst index 48728174f5..19a0974d29 100644 --- a/doc/topics/utils/index.rst +++ b/doc/topics/utils/index.rst @@ -54,7 +54,7 @@ types like so: salt '*' mymodule.observe_the_awesomeness ''' - print __utils__['foo.bar']() + return __utils__['foo.bar']() Utility modules, like any other kind of Salt extension, support using a :ref:`__virtual__ function ` to conditionally load them, @@ -81,11 +81,56 @@ the ``foo`` utility module with a ``__virtual__`` function. def bar(): return 'baz' +Also you could even write your utility modules in object oriented fashion: + +.. 
code-block:: python + + # -*- coding: utf-8 -*- + ''' + My OOP-style utils module + ------------------------- + + This module contains common functions for use in my other custom types. + ''' + + class Foo(object): + + def __init__(self): + pass + + def bar(self): + return 'baz' + +And import them into other custom modules: + +.. code-block:: python + + # -*- coding: utf-8 -*- + ''' + My awesome execution module + --------------------------- + ''' + + import mymodule + + def observe_the_awesomeness(): + ''' + Prints information from my utility module + + CLI Example: + + .. code-block:: bash + + salt '*' mymodule.observe_the_awesomeness + ''' + foo = mymodule.Foo() + return foo.bar() + These are, of course, contrived examples, but they should serve to show some of the possibilities opened up by writing utility modules. Keep in mind though -that States still have access to all of the execution modules, so it is not +that states still have access to all of the execution modules, so it is not necessary to write a utility module to make a function available to both a -state and an execution module. One good use case for utililty modules is one +state and an execution module. One good use case for utility modules is one where it is necessary to invoke the same function from a custom :ref:`outputter `/returner, as well as an execution module. 
diff --git a/pkg/osx/build.sh b/pkg/osx/build.sh index c411840a91..7850d48cd8 100755 --- a/pkg/osx/build.sh +++ b/pkg/osx/build.sh @@ -86,9 +86,9 @@ sudo $PKGRESOURCES/build_env.sh $PYVER # Install Salt ############################################################################ echo -n -e "\033]0;Build: Install Salt\007" -sudo rm -rm $SRCDIR/build -sudo rm -rm $SRCDIR/dist -sudo $PYTHON $SRCDIR/setup.py install +sudo rm -rf $SRCDIR/build +sudo rm -rf $SRCDIR/dist +sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s" install ############################################################################ # Build Package diff --git a/pkg/osx/pkg-scripts/postinstall b/pkg/osx/pkg-scripts/postinstall index 89404e2158..f521666a6f 100755 --- a/pkg/osx/pkg-scripts/postinstall +++ b/pkg/osx/pkg-scripts/postinstall @@ -15,91 +15,119 @@ # This script is run as a part of the macOS Salt Installation # ############################################################################### -echo "Post install started on:" > /tmp/postinstall.txt -date >> /tmp/postinstall.txt + +############################################################################### +# Define Variables +############################################################################### +# Get Minor Version +OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]') +MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.) 
+# Path Variables +INSTALL_DIR="/opt/salt" +BIN_DIR="$INSTALL_DIR/bin" +CONFIG_DIR="/etc/salt" +TEMP_DIR="/tmp" +SBIN_DIR="/usr/local/sbin" + +############################################################################### +# Set up logging and error handling +############################################################################### +echo "Post install script started on:" > "$TEMP_DIR/postinstall.txt" +date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/postinstall.txt" trap 'quit_on_error $LINENO $BASH_COMMAND' ERR quit_on_error() { - echo "$(basename $0) caught error on line : $1 command was: $2" >> /tmp/postinstall.txt + echo "$(basename $0) caught error on line : $1 command was: $2" >> "$TEMP_DIR/postinstall.txt" exit -1 } ############################################################################### # Check for existing minion config, copy if it doesn't exist ############################################################################### -if [ ! -f /etc/salt/minion ]; then - echo "Config copy: Started..." >> /tmp/postinstall.txt - cp /etc/salt/minion.dist /etc/salt/minion - echo "Config copy: Successful" >> /tmp/postinstall.txt +if [ ! -f "$CONFIG_DIR/minion" ]; then + echo "Config: Copy Started..." >> "$TEMP_DIR/postinstall.txt" + cp "$CONFIG_DIR/minion.dist" "$CONFIG_DIR/minion" + echo "Config: Copied Successfully" >> "$TEMP_DIR/postinstall.txt" fi ############################################################################### # Create symlink to salt-config.sh ############################################################################### -# echo "Symlink: Creating symlink for salt-config..." >> /tmp/postinstall.txt -if [ ! -d "/usr/local/sbin" ]; then - mkdir /usr/local/sbin +if [ ! -d "$SBIN_DIR" ]; then + echo "Symlink: Creating $SBIN_DIR..." 
>> "$TEMP_DIR/postinstall.txt" + mkdir "$SBIN_DIR" + echo "Symlink: Created Successfully" >> "$TEMP_DIR/postinstall.txt" fi -ln -sf /opt/salt/bin/salt-config.sh /usr/local/sbin/salt-config +echo "Symlink: Creating symlink for salt-config..." >> "$TEMP_DIR/postinstall.txt" +ln -sf "$BIN_DIR/salt-config.sh" "$SBIN_DIR/salt-config" +echo "Symlink: Created Successfully" >> "$TEMP_DIR/postinstall.txt" ############################################################################### # Add salt to paths.d ############################################################################### -# echo "Path: Adding salt to the path..." >> /tmp/postinstall.txt if [ ! -d "/etc/paths.d" ]; then + echo "Path: Creating paths.d directory..." >> "$TEMP_DIR/postinstall.txt" mkdir /etc/paths.d + echo "Path: Created Successfully" >> "$TEMP_DIR/postinstall.txt" fi -sh -c 'echo "/opt/salt/bin" > /etc/paths.d/salt' -sh -c 'echo "/usr/local/sbin" >> /etc/paths.d/salt' +echo "Path: Adding salt to the path..." >> "$TEMP_DIR/postinstall.txt" +sh -c "echo \"$BIN_DIR\" > /etc/paths.d/salt" +sh -c "echo \"$SBIN_DIR\" >> /etc/paths.d/salt" +echo "Path: Added Successfully" >> "$TEMP_DIR/postinstall.txt" ############################################################################### # Register Salt as a service ############################################################################### setup_services_maverick() { - echo "Using old (< 10.10) launchctl interface" >> /tmp/postinstall.txt + echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/postinstall.txt" if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then - echo "Stop running service..." >> /tmp/postinstall.txt + echo "Service: Stopping salt-minion..." >> "$TEMP_DIR/postinstall.txt" launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist + echo "Service: Stopped Successfully" >> "$TEMP_DIR/postinstall.txt" fi; + echo "Service: Starting salt-minion..." 
>> "$TEMP_DIR/postinstall.txt" launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 + echo "Service: Started Successfully" >> "$TEMP_DIR/postinstall.txt" - echo "Service start: Successful" >> /tmp/postinstall.txt - - echo "Service disable: Disabling Master, Syndic, and API" >> /tmp/postinstall.txt - + echo "Service: Disabling Master, Syndic, and API services..." >> "$TEMP_DIR/postinstall.txt" launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist + echo "Service: Disabled Successfully" >> "$TEMP_DIR/postinstall.txt" return 0 } setup_services_yosemite_and_later() { - echo "Using new (>= 10.10) launchctl interface" >> /tmp/postinstall.txt + echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/postinstall.txt" + echo "Service: Enabling salt-minion..." >> "$TEMP_DIR/postinstall.txt" launchctl enable system/com.saltstack.salt.minion - echo "Service start: Bootstrapping service..." >> /tmp/postinstall.txt + echo "Service: Enabled Successfully" >> "$TEMP_DIR/postinstall.txt" + + echo "Service: Bootstrapping salt-minion..." >> "$TEMP_DIR/postinstall.txt" launchctl bootstrap system /Library/LaunchDaemons/com.saltstack.salt.minion.plist + echo "Service: Bootstrapped Successfully" >> "$TEMP_DIR/postinstall.txt" if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then - echo "Service is running" >> /tmp/postinstall.txt + echo "Service: Service Running" >> "$TEMP_DIR/postinstall.txt" else - echo "Service start: Kickstarting service..." >> /tmp/postinstall.txt + echo "Service: Kickstarting Service..." 
>> "$TEMP_DIR/postinstall.txt" launchctl kickstart -kp system/com.saltstack.salt.minion + echo "Service: Kickstarted Successfully" >> "$TEMP_DIR/postinstall.txt" fi - echo "Service start: Successful" >> /tmp/postinstall.txt - - echo "Service disable: Disabling Master, Syndic, and API" >> /tmp/postinstall.txt + echo "Service: Started Successfully" >> "$TEMP_DIR/postinstall.txt" + echo "Service: Disabling Master, Syndic, and API services" >> "$TEMP_DIR/postinstall.txt" launchctl disable system/com.saltstack.salt.master launchctl disable system/com.saltstack.salt.syndic launchctl disable system/com.saltstack.salt.api + echo "Service: Disabled Successfully" >> "$TEMP_DIR/postinstall.txt" + + return 0 } -OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]') -MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.) - -echo "Service start: Enabling service..." >> /tmp/postinstall.txt +echo "Service: Configuring..." >> "$TEMP_DIR/postinstall.txt" case $MINOR in 9 ) setup_services_maverick; @@ -108,7 +136,9 @@ case $MINOR in setup_services_yosemite_and_later; ;; esac +echo "Service: Configured Successfully" >> "$TEMP_DIR/postinstall.txt" -echo "Post install completed successfully" >> /tmp/postinstall.txt +echo "Post install completed successfully on:" >> "$TEMP_DIR/postinstall.txt" +date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/postinstall.txt" exit 0 diff --git a/pkg/osx/pkg-scripts/preinstall b/pkg/osx/pkg-scripts/preinstall index e9a12c7c9a..3eb4235107 100755 --- a/pkg/osx/pkg-scripts/preinstall +++ b/pkg/osx/pkg-scripts/preinstall @@ -6,7 +6,8 @@ # Date: December 2015 # # Description: This script stops the salt minion service before attempting to -# install Salt on macOS +# install Salt on macOS. It also removes the /opt/salt/bin +# directory, symlink to salt-config, and salt from paths.d. 
# # Requirements: # - None @@ -15,12 +16,29 @@ # This script is run as a part of the macOS Salt Installation # ############################################################################### -echo "Preinstall started on:" > /tmp/preinstall.txt -date >> /tmp/preinstall.txt + +############################################################################### +# Define Variables +############################################################################### +# Get Minor Version +OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]') +MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.) +# Path Variables +INSTALL_DIR="/opt/salt" +BIN_DIR="$INSTALL_DIR/bin" +CONFIG_DIR="/etc/salt" +TEMP_DIR="/tmp" +SBIN_DIR="/usr/local/sbin" + +############################################################################### +# Set up logging and error handling +############################################################################### +echo "Preinstall started on:" > "$TEMP_DIR/preinstall.txt" +date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt" trap 'quit_on_error $LINENO $BASH_COMMAND' ERR quit_on_error() { - echo "$(basename $0) caught error on line : $1 command was: $2" >> /tmp/preinstall.txt + echo "$(basename $0) caught error on line : $1 command was: $2" >> "$TEMP_DIR/preinstall.txt" exit -1 } @@ -31,24 +49,58 @@ MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.) # Stop the service ############################################################################### stop_service_maverick() { - echo "Using old (< 10.10) launchctl interface" >> /tmp/preinstall.txt + echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt" if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then - echo "Stop service: Started..." >> /tmp/preinstall.txt + echo "Service: Unloading minion..." 
>> "$TEMP_DIR/preinstall.txt" launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist - echo "Stop service: Successful" >> /tmp/preinstall.txt + echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt" + fi + if /bin/launchctl list "com.saltstack.salt.master" &> /dev/null; then + echo "Service: Unloading master..." >> "$TEMP_DIR/preinstall.txt" + launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist + echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt" + fi + if /bin/launchctl list "com.saltstack.salt.syndic" &> /dev/null; then + echo "Service: Unloading syndic..." >> "$TEMP_DIR/preinstall.txt" + launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist + echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt" + fi + if /bin/launchctl list "com.saltstack.salt.api" &> /dev/null; then + echo "Service: Unloading api..." >> "$TEMP_DIR/preinstall.txt" + launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist + echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt" fi } stop_service_yosemite_and_later() { - echo "Using new (>= 10.10) launchctl interface" >> /tmp/preinstall.txt + echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt" if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then - echo "Stop service: Started..." >> /tmp/preinstall.txt + echo "Service: Stopping minion..." >> "$TEMP_DIR/preinstall.txt" launchctl disable system/com.saltstack.salt.minion launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.minion.plist - echo "Stop service: Successful" >> /tmp/preinstall.txt + echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt" + fi + if /bin/launchctl list "com.saltstack.salt.master" &> /dev/null; then + echo "Service: Stopping master..." 
>> "$TEMP_DIR/preinstall.txt" + launchctl disable system/com.saltstack.salt.master + launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.master.plist + echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt" + fi + if /bin/launchctl list "com.saltstack.salt.syndic" &> /dev/null; then + echo "Service: Stopping syndic..." >> "$TEMP_DIR/preinstall.txt" + launchctl disable system/com.saltstack.salt.syndic + launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.syndic.plist + echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt" + fi + if /bin/launchctl list "com.saltstack.salt.api" &> /dev/null; then + echo "Service: Stopping api..." >> "$TEMP_DIR/preinstall.txt" + launchctl disable system/com.saltstack.salt.api + launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.api.plist + echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt" fi } +echo "Service: Configuring..." >> "$TEMP_DIR/preinstall.txt" case $MINOR in 9 ) stop_service_maverick; @@ -57,6 +109,36 @@ case $MINOR in stop_service_yosemite_and_later; ;; esac -echo "Preinstall Completed Successfully" >> /tmp/preinstall.txt +echo "Service: Configured Successfully" >> "$TEMP_DIR/preinstall.txt" + +############################################################################### +# Remove the Symlink to salt-config.sh +############################################################################### +if [ -L "$SBIN_DIR/salt-config" ]; then + echo "Cleanup: Removing Symlink $BIN_DIR/salt-config" >> "$TEMP_DIR/preinstall.txt" + rm "$SBIN_DIR/salt-config" + echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt" +fi + +############################################################################### +# Remove the $INSTALL_DIR directory +############################################################################### +if [ -d "$INSTALL_DIR" ]; then + echo "Cleanup: Removing $INSTALL_DIR" >> "$TEMP_DIR/preinstall.txt" + rm -rf "$INSTALL_DIR" 
+ echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt" +fi + +############################################################################### +# Remove the salt from the paths.d +############################################################################### +if [ ! -f "/etc/paths.d/salt" ]; then + echo "Path: Removing salt from the path..." >> "$TEMP_DIR/preinstall.txt" + rm "/etc/paths.d/salt" + echo "Path: Removed Successfully" >> "$TEMP_DIR/preinstall.txt" +fi + +echo "Preinstall Completed Successfully on:" >> "$TEMP_DIR/preinstall.txt" +date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt" exit 0 diff --git a/pkg/osx/req.txt b/pkg/osx/req.txt index 2224830a5a..338454f456 100644 --- a/pkg/osx/req.txt +++ b/pkg/osx/req.txt @@ -7,7 +7,7 @@ CherryPy==11.0.0 click==6.7 enum34==1.1.6 gitdb==0.6.4 -GitPython==2.1.5 +GitPython==2.1.1 idna==2.5 ipaddress==1.0.18 Jinja2==2.9.6 diff --git a/pkg/windows/build.bat b/pkg/windows/build.bat index 9dde8b2e72..59fafde137 100644 --- a/pkg/windows/build.bat +++ b/pkg/windows/build.bat @@ -89,7 +89,7 @@ if Defined x ( if %Python%==2 ( Set "PyDir=C:\Python27" ) else ( - Set "PyDir=C:\Program Files\Python35" + Set "PyDir=C:\Python35" ) Set "PATH=%PATH%;%PyDir%;%PyDir%\Scripts" @@ -110,6 +110,13 @@ if not %errorLevel%==0 ( ) @echo. +:: Remove build and dist directories +@echo %0 :: Remove build and dist directories... +@echo --------------------------------------------------------------------- +rd /s /q "%SrcDir%\build" +rd /s /q "%SrcDir%\dist" +@echo. + :: Install Current Version of salt @echo %0 :: Install Current Version of salt... 
@echo --------------------------------------------------------------------- diff --git a/pkg/windows/build_env_2.ps1 b/pkg/windows/build_env_2.ps1 index 98a922ca3d..b186517812 100644 --- a/pkg/windows/build_env_2.ps1 +++ b/pkg/windows/build_env_2.ps1 @@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python2Dir'])\python.exe") { DownloadFileWithProgress $url $file Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python2']) . . ." - $p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=$($ini['Settings']['Python2Dir'])" -Wait -NoNewWindow -PassThru + $p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=`"$($ini['Settings']['Python2Dir'])`"" -Wait -NoNewWindow -PassThru } #------------------------------------------------------------------------------ @@ -191,7 +191,7 @@ If (!($Path.ToLower().Contains("$($ini['Settings']['Scripts2Dir'])".ToLower()))) #============================================================================== # Update PIP and SetupTools -# caching depends on environmant variable SALT_PIP_LOCAL_CACHE +# caching depends on environment variable SALT_PIP_LOCAL_CACHE #============================================================================== Write-Output " ----------------------------------------------------------------" Write-Output " - $script_name :: Updating PIP and SetupTools . . ." @@ -212,7 +212,7 @@ if ( ! 
[bool]$Env:SALT_PIP_LOCAL_CACHE) { #============================================================================== # Install pypi resources using pip -# caching depends on environmant variable SALT_REQ_LOCAL_CACHE +# caching depends on environment variable SALT_REQ_LOCAL_CACHE #============================================================================== Write-Output " ----------------------------------------------------------------" Write-Output " - $script_name :: Installing pypi resources using pip . . ." @@ -230,6 +230,24 @@ if ( ! [bool]$Env:SALT_REQ_LOCAL_CACHE) { Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_2.txt" "pip install" } +#============================================================================== +# Move PyWin32 DLL's to site-packages\win32 +#============================================================================== +Write-Output " - $script_name :: Moving PyWin32 DLLs . . ." +Move-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs2Dir'])\win32" -Force + +# Remove pywin32_system32 directory +Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ." +Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32" + +# Remove pythonwin directory +Write-Output " - $script_name :: Removing pythonwin Directory . . ." +Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pythonwin" -Force -Recurse + +# Remove PyWin32 PostInstall and testall Scripts +Write-Output " - $script_name :: Removing PyWin32 scripts . . ." 
+Remove-Item "$($ini['Settings']['Scripts2Dir'])\pywin32_*" -Force -Recurse + #============================================================================== # Install PyYAML with CLoader # This has to be a compiled binary to get the CLoader diff --git a/pkg/windows/build_env_3.ps1 b/pkg/windows/build_env_3.ps1 index 33f95871ae..0dcbafd996 100644 --- a/pkg/windows/build_env_3.ps1 +++ b/pkg/windows/build_env_3.ps1 @@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python3Dir'])\python.exe") { DownloadFileWithProgress $url $file Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python3']) . . ." - $p = Start-Process $file -ArgumentList '/passive InstallAllUsers=1 TargetDir="C:\Program Files\Python35" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0' -Wait -NoNewWindow -PassThru + $p = Start-Process $file -ArgumentList "/passive InstallAllUsers=1 TargetDir=`"$($ini['Settings']['Python3Dir'])`" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0" -Wait -NoNewWindow -PassThru } #------------------------------------------------------------------------------ @@ -247,7 +247,7 @@ Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "i # Move DLL's to Python Root Write-Output " - $script_name :: Moving PyWin32 DLLs . . ." -Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['Python3Dir'])" -Force +Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs3Dir'])\win32" -Force # Remove pywin32_system32 directory Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ." @@ -257,6 +257,10 @@ Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32" Write-Output " - $script_name :: Removing pythonwin Directory . . ." 
Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pythonwin" -Force -Recurse +# Remove PyWin32 PostInstall and testall Scripts +Write-Output " - $script_name :: Removing PyWin32 scripts . . ." +Remove-Item "$($ini['Settings']['Scripts3Dir'])\pywin32_*" -Force -Recurse + #============================================================================== # Fix PyCrypto #============================================================================== diff --git a/pkg/windows/build_pkg.bat b/pkg/windows/build_pkg.bat index b6b52f3bda..95b185bfa7 100644 --- a/pkg/windows/build_pkg.bat +++ b/pkg/windows/build_pkg.bat @@ -56,7 +56,7 @@ if %Python%==2 ( Set "PyVerMajor=2" Set "PyVerMinor=7" ) else ( - Set "PyDir=C:\Program Files\Python35" + Set "PyDir=C:\Python35" Set "PyVerMajor=3" Set "PyVerMinor=5" ) @@ -108,9 +108,9 @@ xcopy /E /Q "%PyDir%" "%BinDir%\" @echo Copying configs to buildenv\conf... @echo ---------------------------------------------------------------------- @echo xcopy /E /Q "%SrcDir%\conf\master" "%CnfDir%\" -xcopy /Q "%SrcDir%\conf\master" "%CnfDir%\" +xcopy /Q /Y "%SrcDir%\conf\master" "%CnfDir%\" @echo xcopy /E /Q "%SrcDir%\conf\minion" "%CnfDir%\" -xcopy /Q "%SrcDir%\conf\minion" "%CnfDir%\" +xcopy /Q /Y "%SrcDir%\conf\minion" "%CnfDir%\" @echo. @echo Copying VCRedist to Prerequisites @@ -582,6 +582,10 @@ If Exist "%BinDir%\Scripts\salt-run*"^ If Exist "%BldDir%\salt-run.bat"^ del /Q "%BldDir%\salt-run.bat" 1>nul +:: Remove the master config file +if Exist "%CnfDir%\master"^ + del /Q "%CnfDir%\master" 1>nul + :: Make the Salt Minion Installer makensis.exe /DSaltVersion=%Version% /DPythonVersion=%Python% "%InsDir%\Salt-Minion-Setup.nsi" @echo. 
diff --git a/pkg/windows/buildenv/salt-call.bat b/pkg/windows/buildenv/salt-call.bat index 095f51e4c1..55f7cfac3b 100644 --- a/pkg/windows/buildenv/salt-call.bat +++ b/pkg/windows/buildenv/salt-call.bat @@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe Set Script=%SaltDir%\bin\Scripts\salt-call :: Launch Script -"%Python%" "%Script%" %* - +"%Python%" -E -s "%Script%" %* diff --git a/pkg/windows/buildenv/salt-cp.bat b/pkg/windows/buildenv/salt-cp.bat index 29274320b1..61fd1ce444 100644 --- a/pkg/windows/buildenv/salt-cp.bat +++ b/pkg/windows/buildenv/salt-cp.bat @@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe Set Script=%SaltDir%\bin\Scripts\salt-cp :: Launch Script -"%Python%" "%Script%" %* - +"%Python%" -E -s "%Script%" %* diff --git a/pkg/windows/buildenv/salt-key.bat b/pkg/windows/buildenv/salt-key.bat index 471e3626b2..4928b696d5 100644 --- a/pkg/windows/buildenv/salt-key.bat +++ b/pkg/windows/buildenv/salt-key.bat @@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe Set Script=%SaltDir%\bin\Scripts\salt-key :: Launch Script -"%Python%" "%Script%" %* - +"%Python%" -E -s "%Script%" %* diff --git a/pkg/windows/buildenv/salt-master.bat b/pkg/windows/buildenv/salt-master.bat index 9a124ffd46..134875c072 100644 --- a/pkg/windows/buildenv/salt-master.bat +++ b/pkg/windows/buildenv/salt-master.bat @@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe Set Script=%SaltDir%\bin\Scripts\salt-master :: Launch Script -"%Python%" "%Script%" %* - +"%Python%" -E -s "%Script%" %* diff --git a/pkg/windows/buildenv/salt-minion-debug.bat b/pkg/windows/buildenv/salt-minion-debug.bat index ad0ebafee0..2e6b5c5bcf 100644 --- a/pkg/windows/buildenv/salt-minion-debug.bat +++ b/pkg/windows/buildenv/salt-minion-debug.bat @@ -12,5 +12,4 @@ Set Script=%SaltDir%\bin\Scripts\salt-minion net stop salt-minion :: Launch Script -"%Python%" "%Script%" -l debug - +"%Python%" -E -s "%Script%" -l debug diff --git a/pkg/windows/buildenv/salt-minion.bat b/pkg/windows/buildenv/salt-minion.bat index 
0a1aafa0c0..0eb041219c 100644 --- a/pkg/windows/buildenv/salt-minion.bat +++ b/pkg/windows/buildenv/salt-minion.bat @@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe Set Script=%SaltDir%\bin\Scripts\salt-minion :: Launch Script -"%Python%" "%Script%" %* - +"%Python%" -E -s "%Script%" %* diff --git a/pkg/windows/buildenv/salt-run.bat b/pkg/windows/buildenv/salt-run.bat index 5d62775d5e..a8766fc9b0 100644 --- a/pkg/windows/buildenv/salt-run.bat +++ b/pkg/windows/buildenv/salt-run.bat @@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe Set Script=%SaltDir%\bin\Scripts\salt-run :: Launch Script -"%Python%" "%Script%" %* - +"%Python%" -E -s "%Script%" %* diff --git a/pkg/windows/buildenv/salt.bat b/pkg/windows/buildenv/salt.bat index 125005b23e..5b07c73281 100644 --- a/pkg/windows/buildenv/salt.bat +++ b/pkg/windows/buildenv/salt.bat @@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe Set Script=%SaltDir%\bin\Scripts\salt :: Launch Script -"%Python%" "%Script%" %* - +"%Python%" -E -s "%Script%" %* diff --git a/pkg/windows/clean_env.bat b/pkg/windows/clean_env.bat index fb6e63a661..7c5e497802 100644 --- a/pkg/windows/clean_env.bat +++ b/pkg/windows/clean_env.bat @@ -16,9 +16,10 @@ if %errorLevel%==0 ( ) echo. +:CheckPython2 if exist "\Python27" goto RemovePython2 -if exist "\Program Files\Python35" goto RemovePython3 -goto eof + +goto CheckPython3 :RemovePython2 rem Uninstall Python 2.7 @@ -47,25 +48,30 @@ goto eof goto eof +:CheckPython3 +if exist "\Python35" goto RemovePython3 + +goto eof + :RemovePython3 echo %0 :: Uninstalling Python 3 ... 
echo --------------------------------------------------------------------- :: 64 bit if exist "%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}" ( echo %0 :: - 3.5.3 64bit - "%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall + "%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall /passive ) :: 32 bit if exist "%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}" ( echo %0 :: - 3.5.3 32bit - "%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall + "%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall /passive ) rem wipe the Python directory - echo %0 :: Removing the C:\Program Files\Python35 Directory ... + echo %0 :: Removing the C:\Python35 Directory ... echo --------------------------------------------------------------------- - rd /s /q "C:\Program Files\Python35" + rd /s /q "C:\Python35" if %errorLevel%==0 ( echo Successful ) else ( diff --git a/pkg/windows/installer/Salt-Minion-Setup.nsi b/pkg/windows/installer/Salt-Minion-Setup.nsi index 39f55852c0..46fb821fb8 100644 --- a/pkg/windows/installer/Salt-Minion-Setup.nsi +++ b/pkg/windows/installer/Salt-Minion-Setup.nsi @@ -379,13 +379,12 @@ Section -Post WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "Path" "$INSTDIR\bin\" ; Register the Salt-Minion Service - nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet" - nsExec::Exec "nssm.exe set salt-minion AppEnvironmentExtra PYTHONHOME=" + nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe -E -s $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet" nsExec::Exec "nssm.exe set salt-minion Description Salt Minion from saltstack.com" nsExec::Exec "nssm.exe set salt-minion Start SERVICE_AUTO_START" nsExec::Exec "nssm.exe set salt-minion AppNoConsole 1" - - RMDir /R 
"$INSTDIR\var\cache\salt" ; removing cache from old version + nsExec::Exec "nssm.exe set salt-minion AppStopMethodConsole 24000" + nsExec::Exec "nssm.exe set salt-minion AppStopMethodWindow 2000" Call updateMinionConfig diff --git a/pkg/windows/modules/get-settings.psm1 b/pkg/windows/modules/get-settings.psm1 index 292732cb83..5c57738fd3 100644 --- a/pkg/windows/modules/get-settings.psm1 +++ b/pkg/windows/modules/get-settings.psm1 @@ -19,9 +19,9 @@ Function Get-Settings { "Python2Dir" = "C:\Python27" "Scripts2Dir" = "C:\Python27\Scripts" "SitePkgs2Dir" = "C:\Python27\Lib\site-packages" - "Python3Dir" = "C:\Program Files\Python35" - "Scripts3Dir" = "C:\Program Files\Python35\Scripts" - "SitePkgs3Dir" = "C:\Program Files\Python35\Lib\site-packages" + "Python3Dir" = "C:\Python35" + "Scripts3Dir" = "C:\Python35\Scripts" + "SitePkgs3Dir" = "C:\Python35\Lib\site-packages" "DownloadDir" = "$env:Temp\DevSalt" } # The script deletes the DownLoadDir (above) for each install. diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py index f90488e153..73e4c98f8a 100644 --- a/salt/auth/__init__.py +++ b/salt/auth/__init__.py @@ -200,7 +200,7 @@ class LoadAuth(object): ''' if not self.authenticate_eauth(load): return {} - fstr = '{0}.auth'.format(load['eauth']) + hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5')) tok = str(hash_type(os.urandom(512)).hexdigest()) t_path = os.path.join(self.opts['token_dir'], tok) @@ -224,8 +224,9 @@ class LoadAuth(object): acl_ret = self.__get_acl(load) tdata['auth_list'] = acl_ret - if 'groups' in load: - tdata['groups'] = load['groups'] + groups = self.get_groups(load) + if groups: + tdata['groups'] = groups try: with salt.utils.files.set_umask(0o177): @@ -345,7 +346,7 @@ class LoadAuth(object): return False return True - def get_auth_list(self, load): + def get_auth_list(self, load, token=None): ''' Retrieve access list for the user specified in load. The list is built by eauth module or from master eauth configuration. 
@@ -353,30 +354,37 @@ class LoadAuth(object): list if the user has no rights to execute anything on this master and returns non-empty list if user is allowed to execute particular functions. ''' + # Get auth list from token + if token and self.opts['keep_acl_in_token'] and 'auth_list' in token: + return token['auth_list'] # Get acl from eauth module. auth_list = self.__get_acl(load) if auth_list is not None: return auth_list - if load['eauth'] not in self.opts['external_auth']: + eauth = token['eauth'] if token else load['eauth'] + if eauth not in self.opts['external_auth']: # No matching module is allowed in config log.warning('Authorization failure occurred.') return None - name = self.load_name(load) # The username we are attempting to auth with - groups = self.get_groups(load) # The groups this user belongs to - eauth_config = self.opts['external_auth'][load['eauth']] - if groups is None or groups is False: + if token: + name = token['name'] + groups = token.get('groups') + else: + name = self.load_name(load) # The username we are attempting to auth with + groups = self.get_groups(load) # The groups this user belongs to + eauth_config = self.opts['external_auth'][eauth] + if not groups: groups = [] group_perm_keys = [item for item in eauth_config if item.endswith('%')] # The configured auth groups # First we need to know if the user is allowed to proceed via any of their group memberships. group_auth_match = False for group_config in group_perm_keys: - group_config = group_config.rstrip('%') - for group in groups: - if group == group_config: - group_auth_match = True + if group_config.rstrip('%') in groups: + group_auth_match = True + break # If a group_auth_match is set it means only that we have a # user which matches at least one or more of the groups defined # in the configuration file. 
diff --git a/salt/auth/ldap.py b/salt/auth/ldap.py index 396c1d00a2..3065429815 100644 --- a/salt/auth/ldap.py +++ b/salt/auth/ldap.py @@ -306,7 +306,7 @@ def groups(username, **kwargs): ''' group_list = [] - bind = _bind(username, kwargs['password'], + bind = _bind(username, kwargs.get('password'), anonymous=_config('anonymous', mandatory=False)) if bind: log.debug('ldap bind to determine group membership succeeded!') @@ -371,7 +371,7 @@ def groups(username, **kwargs): search_results = bind.search_s(search_base, ldap.SCOPE_SUBTREE, search_string, - [_config('accountattributename'), 'cn']) + [_config('accountattributename'), 'cn', _config('groupattribute')]) for _, entry in search_results: if username in entry[_config('accountattributename')]: group_list.append(entry['cn'][0]) diff --git a/salt/cache/__init__.py b/salt/cache/__init__.py index 4c66dd3d64..94d7a36f1e 100644 --- a/salt/cache/__init__.py +++ b/salt/cache/__init__.py @@ -224,7 +224,7 @@ class Cache(object): fun = '{0}.flush'.format(self.driver) return self.modules[fun](bank, key=key, **self._kwargs) - def ls(self, bank): + def list(self, bank): ''' Lists entries stored in the specified bank. @@ -240,11 +240,9 @@ class Cache(object): Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' - fun = '{0}.ls'.format(self.driver) + fun = '{0}.list'.format(self.driver) return self.modules[fun](bank, **self._kwargs) - list = ls - def contains(self, bank, key=None): ''' Checks if the specified bank contains the specified key. 
diff --git a/salt/cache/consul.py b/salt/cache/consul.py index b545c96ead..d8f1b32a74 100644 --- a/salt/cache/consul.py +++ b/salt/cache/consul.py @@ -61,7 +61,7 @@ api = None # Define the module's virtual name __virtualname__ = 'consul' -__func_alias__ = {'list': 'ls'} +__func_alias__ = {'list_': 'list'} def __virtual__(): @@ -139,7 +139,7 @@ def flush(bank, key=None): ) -def ls(bank): +def list_(bank): ''' Return an iterable object containing all entries stored in the specified bank. ''' diff --git a/salt/cache/localfs.py b/salt/cache/localfs.py index 68cd5aee5c..45dfa6d086 100644 --- a/salt/cache/localfs.py +++ b/salt/cache/localfs.py @@ -23,7 +23,7 @@ import salt.utils.atomicfile log = logging.getLogger(__name__) -__func_alias__ = {'list': 'ls'} +__func_alias__ = {'list_': 'list'} def __cachedir(kwargs=None): @@ -143,7 +143,7 @@ def flush(bank, key=None, cachedir=None): return True -def ls(bank, cachedir): +def list_(bank, cachedir): ''' Return an iterable object containing all entries stored in the specified bank. 
''' diff --git a/salt/cache/redis_cache.py b/salt/cache/redis_cache.py index fba83e9792..b02a0851e5 100644 --- a/salt/cache/redis_cache.py +++ b/salt/cache/redis_cache.py @@ -114,9 +114,7 @@ from salt.exceptions import SaltCacheError # ----------------------------------------------------------------------------- __virtualname__ = 'redis' -__func_alias__ = { - 'list_': 'list' -} +__func_alias__ = {'list_': 'list'} log = logging.getLogger(__file__) @@ -145,6 +143,9 @@ def __virtual__(): # helper functions -- will not be exported # ----------------------------------------------------------------------------- +def init_kwargs(kwargs): + return {} + def _get_redis_cache_opts(): ''' diff --git a/salt/cli/cp.py b/salt/cli/cp.py index dd1d17e5e8..489b5fa09a 100644 --- a/salt/cli/cp.py +++ b/salt/cli/cp.py @@ -21,7 +21,7 @@ import salt.client import salt.utils.gzip_util import salt.utils.itertools import salt.utils.minions -from salt.utils import parsers, to_bytes +from salt.utils import parsers, to_bytes, print_cli from salt.utils.verify import verify_log import salt.output @@ -101,10 +101,69 @@ class SaltCP(object): empty_dirs.update(empty_dirs_) return files, sorted(empty_dirs) + def _file_dict(self, fn_): + ''' + Take a path and return the contents of the file as a string + ''' + if not os.path.isfile(fn_): + err = 'The referenced file, {0} is not available.'.format(fn_) + sys.stderr.write(err + '\n') + sys.exit(42) + with salt.utils.fopen(fn_, 'r') as fp_: + data = fp_.read() + return {fn_: data} + + def _load_files(self): + ''' + Parse the files indicated in opts['src'] and load them into a python + object for transport + ''' + files = {} + for fn_ in self.opts['src']: + if os.path.isfile(fn_): + files.update(self._file_dict(fn_)) + elif os.path.isdir(fn_): + print_cli(fn_ + ' is a directory, only files are supported in non-chunked mode. 
' + 'Use "--chunked" command line argument.') + sys.exit(1) + return files + def run(self): ''' Make the salt client call ''' + if self.opts['chunked']: + ret = self.run_chunked() + else: + ret = self.run_oldstyle() + + salt.output.display_output( + ret, + self.opts.get('output', 'nested'), + self.opts) + + def run_oldstyle(self): + ''' + Make the salt client call in old-style all-in-one call method + ''' + arg = [self._load_files(), self.opts['dest']] + local = salt.client.get_local_client(self.opts['conf_file']) + args = [self.opts['tgt'], + 'cp.recv', + arg, + self.opts['timeout'], + ] + + selected_target_option = self.opts.get('selected_target_option', None) + if selected_target_option is not None: + args.append(selected_target_option) + + return local.cmd(*args) + + def run_chunked(self): + ''' + Make the salt client call in the new fasion chunked multi-call way + ''' files, empty_dirs = self._list_files() dest = self.opts['dest'] gzip = self.opts['gzip'] @@ -166,7 +225,7 @@ class SaltCP(object): ) args = [ tgt, - 'cp.recv', + 'cp.recv_chunked', [remote_path, chunk, append, gzip, mode], timeout, ] @@ -212,14 +271,11 @@ class SaltCP(object): else '', tgt, ) - args = [tgt, 'cp.recv', [remote_path, None], timeout] + args = [tgt, 'cp.recv_chunked', [remote_path, None], timeout] if selected_target_option is not None: args.append(selected_target_option) for minion_id, minion_ret in six.iteritems(local.cmd(*args)): ret.setdefault(minion_id, {})[remote_path] = minion_ret - salt.output.display_output( - ret, - self.opts.get('output', 'nested'), - self.opts) + return ret diff --git a/salt/client/__init__.py b/salt/client/__init__.py index 1073223ae1..9ec5fc3666 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -544,6 +544,7 @@ class LocalClient(object): {'stewart': {...}} ''' if 'expr_form' in kwargs: + import salt salt.utils.warn_until( 'Fluorine', 'The target type should be passed using the \'tgt_type\' ' @@ -738,7 +739,7 @@ class 
LocalClient(object): ret[mid] = (data if full_return else data.get('ret', {})) - for failed in list(set(pub_data['minions']) ^ set(ret)): + for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: diff --git a/salt/client/mixins.py b/salt/client/mixins.py index 09680a0561..f5a29a9cbf 100644 --- a/salt/client/mixins.py +++ b/salt/client/mixins.py @@ -405,8 +405,6 @@ class SyncClientMixin(object): ) data['success'] = False - namespaced_event.fire_event(data, 'ret') - if self.store_job: try: salt.utils.job.store_job( @@ -424,6 +422,9 @@ class SyncClientMixin(object): log.error('Could not store job cache info. ' 'Job details for this run may be unavailable.') + # Outputters _can_ mutate data so write to the job cache first! + namespaced_event.fire_event(data, 'ret') + # if we fired an event, make sure to delete the event object. # This will ensure that we call destroy, which will do the 0MQ linger log.info('Runner completed: {0}'.format(data['jid'])) diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py index 22c770ea16..a25e8838b5 100644 --- a/salt/client/ssh/__init__.py +++ b/salt/client/ssh/__init__.py @@ -467,6 +467,8 @@ class SSH(object): for default in self.defaults: if default not in self.targets[host]: self.targets[host][default] = self.defaults[default] + if 'host' not in self.targets[host]: + self.targets[host]['host'] = host args = ( que, self.opts, diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py index d7f1be2fd4..73327724ef 100644 --- a/salt/cloud/__init__.py +++ b/salt/cloud/__init__.py @@ -730,18 +730,9 @@ class Cloud(object): continue for vm_name, details in six.iteritems(vms): - # If VM was created with use_fqdn with either of the softlayer drivers, - # we need to strip the VM name and only search for the short hostname. 
- if driver == 'softlayer' or driver == 'softlayer_hw': - ret = [] - for name in names: - name = name.split('.')[0] - ret.append(name) - if vm_name not in ret: - continue # XXX: The logic below can be removed once the aws driver # is removed - elif vm_name not in names: + if vm_name not in names: continue elif driver == 'ec2' and 'aws' in handled_drivers and \ diff --git a/salt/cloud/clouds/azurearm.py b/salt/cloud/clouds/azurearm.py index 8d346c9a0e..6184a89c49 100644 --- a/salt/cloud/clouds/azurearm.py +++ b/salt/cloud/clouds/azurearm.py @@ -407,13 +407,14 @@ def list_nodes_full(conn=None, call=None): # pylint: disable=unused-argument for group in list_resource_groups(): nodes = compconn.virtual_machines.list(group) for node in nodes: + private_ips, public_ips = __get_ips_from_node(group, node) ret[node.name] = object_to_dict(node) ret[node.name]['id'] = node.id ret[node.name]['name'] = node.name ret[node.name]['size'] = node.hardware_profile.vm_size ret[node.name]['state'] = node.provisioning_state - ret[node.name]['private_ips'] = node.network_profile.network_interfaces - ret[node.name]['public_ips'] = node.network_profile.network_interfaces + ret[node.name]['private_ips'] = private_ips + ret[node.name]['public_ips'] = public_ips ret[node.name]['storage_profile']['data_disks'] = [] ret[node.name]['resource_group'] = group for disk in node.storage_profile.data_disks: @@ -433,6 +434,30 @@ def list_nodes_full(conn=None, call=None): # pylint: disable=unused-argument return ret +def __get_ips_from_node(resource_group, node): + ''' + List private and public IPs from a VM interface + ''' + global netconn # pylint: disable=global-statement,invalid-name + if not netconn: + netconn = get_conn(NetworkManagementClient) + + private_ips = [] + public_ips = [] + for node_iface in node.network_profile.network_interfaces: + node_iface_name = node_iface.id.split('/')[-1] + network_interface = netconn.network_interfaces.get(resource_group, node_iface_name) + for ip_configuration 
in network_interface.ip_configurations: + if ip_configuration.private_ip_address: + private_ips.append(ip_configuration.private_ip_address) + if ip_configuration.public_ip_address and ip_configuration.public_ip_address.id: + public_iface_name = ip_configuration.public_ip_address.id.split('/')[-1] + public_iface = netconn.public_ip_addresses.get(resource_group, public_iface_name) + public_ips.append(public_iface.ip_address) + + return private_ips, public_ips + + def list_resource_groups(conn=None, call=None): # pylint: disable=unused-argument ''' List resource groups associated with the account diff --git a/salt/cloud/clouds/ec2.py b/salt/cloud/clouds/ec2.py index 0f52e3d1fc..865806eeba 100644 --- a/salt/cloud/clouds/ec2.py +++ b/salt/cloud/clouds/ec2.py @@ -1030,10 +1030,18 @@ def ssh_interface(vm_): Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' - return config.get_cloud_config_value( + ret = config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) + if ret not in ('public_ips', 'private_ips'): + log.warning(( + 'Invalid ssh_interface: {0}. ' + 'Allowed options are ("public_ips", "private_ips"). ' + 'Defaulting to "public_ips".' + ).format(ret)) + ret = 'public_ips' + return ret def get_ssh_gateway_config(vm_): @@ -3420,34 +3428,7 @@ def list_nodes_full(location=None, call=None): 'or --function.' ) - if not location: - ret = {} - locations = set( - get_location(vm_) for vm_ in six.itervalues(__opts__['profiles']) - if _vm_provider_driver(vm_) - ) - - # If there aren't any profiles defined for EC2, check - # the provider config file, or use the default location. 
- if not locations: - locations = [get_location()] - - for loc in locations: - ret.update(_list_nodes_full(loc)) - return ret - - return _list_nodes_full(location) - - -def _vm_provider_driver(vm_): - alias, driver = vm_['driver'].split(':') - if alias not in __opts__['providers']: - return None - - if driver not in __opts__['providers'][alias]: - return None - - return driver == 'ec2' + return _list_nodes_full(location or get_location()) def _extract_name_tag(item): diff --git a/salt/cloud/clouds/joyent.py b/salt/cloud/clouds/joyent.py index 3a2f119f9f..5602e24e8c 100644 --- a/salt/cloud/clouds/joyent.py +++ b/salt/cloud/clouds/joyent.py @@ -1071,10 +1071,10 @@ def query(action=None, timenow = datetime.datetime.utcnow() timestamp = timenow.strftime('%a, %d %b %Y %H:%M:%S %Z').strip() with salt.utils.fopen(ssh_keyfile, 'r') as kh_: - rsa_key = RSA.importKey(kh_) + rsa_key = RSA.importKey(kh_.read()) rsa_ = PKCS1_v1_5.new(rsa_key) hash_ = SHA256.new() - hash_.update(timestamp) + hash_.update(timestamp.encode(__salt_system_encoding__)) signed = base64.b64encode(rsa_.sign(hash_)) keyid = '/{0}/keys/{1}'.format(user.split('/')[0], ssh_keyname) @@ -1085,7 +1085,7 @@ def query(action=None, 'Date': timestamp, 'Authorization': 'Signature keyId="{0}",algorithm="rsa-sha256" {1}'.format( keyid, - signed + signed.decode(__salt_system_encoding__) ), } diff --git a/salt/cloud/clouds/nova.py b/salt/cloud/clouds/nova.py index 6cf43eb0af..25cc7eac41 100644 --- a/salt/cloud/clouds/nova.py +++ b/salt/cloud/clouds/nova.py @@ -728,12 +728,18 @@ def request_instance(vm_=None, call=None): else: pool = floating_ip_conf.get('pool', 'public') - for fl_ip, opts in six.iteritems(conn.floating_ip_list()): - if opts['fixed_ip'] is None and opts['pool'] == pool: - floating_ip = fl_ip - break - if floating_ip is None: + try: floating_ip = conn.floating_ip_create(pool)['ip'] + except Exception: + log.info('A new IP address was unable to be allocated. 
' + 'An IP address will be pulled from the already allocated list, ' + 'This will cause a race condition when building in parallel.') + for fl_ip, opts in six.iteritems(conn.floating_ip_list()): + if opts['fixed_ip'] is None and opts['pool'] == pool: + floating_ip = fl_ip + break + if floating_ip is None: + log.error('No IP addresses available to allocate for this server: {0}'.format(vm_['name'])) def __query_node_data(vm_): try: diff --git a/salt/cloud/clouds/openstack.py b/salt/cloud/clouds/openstack.py index 96c1902cfb..690ad2037b 100644 --- a/salt/cloud/clouds/openstack.py +++ b/salt/cloud/clouds/openstack.py @@ -135,6 +135,14 @@ Alternatively, one could use the private IP to connect by specifying: ssh_interface: private_ips +.. note:: + + When using floating ips from networks, if the OpenStack driver is unable to + allocate a new ip address for the server, it will check that for + unassociated ip addresses in the floating ip pool. If SaltCloud is running + in parallel mode, it is possible that more than one server will attempt to + use the same ip address. + ''' # Import python libs @@ -855,40 +863,43 @@ def _assign_floating_ips(vm_, conn, kwargs): pool = OpenStack_1_1_FloatingIpPool( net['floating'], conn.connection ) - for idx in pool.list_floating_ips(): - if idx.node_id is None: - floating.append(idx) + try: + floating.append(pool.create_floating_ip()) + except Exception as e: + log.debug('Cannot allocate IP from floating pool \'%s\'. 
Checking for unassociated ips.', + net['floating']) + for idx in pool.list_floating_ips(): + if idx.node_id is None: + floating.append(idx) + break if not floating: - try: - floating.append(pool.create_floating_ip()) - except Exception as e: - raise SaltCloudSystemExit( - 'Floating pool \'{0}\' does not have any more ' - 'please create some more or use a different ' - 'pool.'.format(net['floating']) - ) + raise SaltCloudSystemExit( + 'There are no more floating IP addresses ' + 'available, please create some more' + ) # otherwise, attempt to obtain list without specifying pool # this is the same as 'nova floating-ip-list' elif ssh_interface(vm_) != 'private_ips': try: # This try/except is here because it appears some - # *cough* Rackspace *cough* # OpenStack providers return a 404 Not Found for the # floating ip pool URL if there are no pools setup pool = OpenStack_1_1_FloatingIpPool( '', conn.connection ) - for idx in pool.list_floating_ips(): - if idx.node_id is None: - floating.append(idx) + try: + floating.append(pool.create_floating_ip()) + except Exception as e: + log.debug('Cannot allocate IP from the default floating pool. 
Checking for unassociated ips.') + for idx in pool.list_floating_ips(): + if idx.node_id is None: + floating.append(idx) + break if not floating: - try: - floating.append(pool.create_floating_ip()) - except Exception as e: - raise SaltCloudSystemExit( - 'There are no more floating IP addresses ' - 'available, please create some more' - ) + log.warning( + 'There are no more floating IP addresses ' + 'available, please create some more if necessary' + ) except Exception as e: if str(e).startswith('404'): pass diff --git a/salt/cloud/clouds/softlayer.py b/salt/cloud/clouds/softlayer.py index 457447b431..9b9343a1e0 100644 --- a/salt/cloud/clouds/softlayer.py +++ b/salt/cloud/clouds/softlayer.py @@ -508,7 +508,7 @@ def list_nodes_full(mask='mask[id]', call=None): conn = get_conn(service='SoftLayer_Account') response = conn.getVirtualGuests() for node_id in response: - hostname = node_id['hostname'].split('.')[0] + hostname = node_id['hostname'] ret[hostname] = node_id __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret @@ -594,9 +594,6 @@ def destroy(name, call=None): transport=__opts__['transport'] ) - # If the VM was created with use_fqdn, the short hostname will be used instead. - name = name.split('.')[0] - node = show_instance(name, call='action') conn = get_conn() response = conn.deleteObject(id=node['id']) diff --git a/salt/cloud/clouds/softlayer_hw.py b/salt/cloud/clouds/softlayer_hw.py index 34dae95cca..030391fe6d 100644 --- a/salt/cloud/clouds/softlayer_hw.py +++ b/salt/cloud/clouds/softlayer_hw.py @@ -526,9 +526,6 @@ def destroy(name, call=None): transport=__opts__['transport'] ) - # If the VM was created with use_fqdn, the short hostname will be used instead. 
- name = name.split('.')[0] - node = show_instance(name, call='action') conn = get_conn(service='SoftLayer_Ticket') response = conn.createCancelServerTicket( diff --git a/salt/cloud/clouds/virtualbox.py b/salt/cloud/clouds/virtualbox.py index 903722fd39..4266ce5f0e 100644 --- a/salt/cloud/clouds/virtualbox.py +++ b/salt/cloud/clouds/virtualbox.py @@ -24,7 +24,6 @@ import logging # Import salt libs from salt.exceptions import SaltCloudSystemExit import salt.config as config -import salt.utils.cloud as cloud # Import Third Party Libs try: @@ -136,7 +135,7 @@ def create(vm_info): ) log.debug("Going to fire event: starting create") - cloud.fire_event( + __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_info['name']), @@ -151,7 +150,7 @@ def create(vm_info): 'clone_from': vm_info['clonefrom'] } - cloud.fire_event( + __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_info['name']), @@ -174,10 +173,10 @@ def create(vm_info): vm_info['key_filename'] = key_filename vm_info['ssh_host'] = ip - res = cloud.bootstrap(vm_info, __opts__) + res = __utils__['cloud.bootstrap'](vm_info) vm_result.update(res) - cloud.fire_event( + __utils__['cloud.fire_event']( 'event', 'created machine', 'salt/cloud/{0}/created'.format(vm_info['name']), @@ -269,7 +268,7 @@ def list_nodes(kwargs=None, call=None): "private_ips", "public_ips", ] - return cloud.list_nodes_select( + return __utils__['cloud.list_nodes_select']( list_nodes_full('function'), attributes, call, ) @@ -278,7 +277,7 @@ def list_nodes_select(call=None): """ Return a list of the VMs that are on the provider, with select fields """ - return cloud.list_nodes_select( + return __utils__['cloud.list_nodes_select']( list_nodes_full('function'), __opts__['query.selection'], call, ) @@ -306,7 +305,7 @@ def destroy(name, call=None): if not vb_machine_exists(name): return "{0} doesn't exist and can't be deleted".format(name) - cloud.fire_event( + 
__utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), @@ -317,7 +316,7 @@ def destroy(name, call=None): vb_destroy_machine(name) - cloud.fire_event( + __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), diff --git a/salt/cloud/deploy/bootstrap-salt.sh b/salt/cloud/deploy/bootstrap-salt.sh index ededd58508..ddda8e6398 100755 --- a/salt/cloud/deploy/bootstrap-salt.sh +++ b/salt/cloud/deploy/bootstrap-salt.sh @@ -18,7 +18,7 @@ #====================================================================================================================== set -o nounset # Treat unset variables as an error -__ScriptVersion="2017.05.24" +__ScriptVersion="2017.08.17" __ScriptName="bootstrap-salt.sh" __ScriptFullName="$0" @@ -121,8 +121,7 @@ __check_command_exists() { #--- FUNCTION ------------------------------------------------------------------------------------------------------- # NAME: __check_pip_allowed -# DESCRIPTION: Simple function to let the users know that -P needs to be -# used. +# DESCRIPTION: Simple function to let the users know that -P needs to be used. #---------------------------------------------------------------------------------------------------------------------- __check_pip_allowed() { if [ $# -eq 1 ]; then @@ -155,10 +154,16 @@ __check_config_dir() { __fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}" CC_DIR_NAME="/tmp/${CC_DIR_BASE}" ;; + *://*) + echoerror "Unsupported URI scheme for $CC_DIR_NAME" + echo "null" + return + ;; *) if [ ! -e "${CC_DIR_NAME}" ]; then + echoerror "The configuration directory or archive $CC_DIR_NAME does not exist." 
echo "null" - return 0 + return fi ;; esac @@ -187,6 +192,28 @@ __check_config_dir() { echo "${CC_DIR_NAME}" } +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_unparsed_options +# DESCRIPTION: Checks the placed after the install arguments +#---------------------------------------------------------------------------------------------------------------------- +__check_unparsed_options() { + shellopts="$1" + # grep alternative for SunOS + if [ -f /usr/xpg4/bin/grep ]; then + grep='/usr/xpg4/bin/grep' + else + grep='grep' + fi + unparsed_options=$( echo "$shellopts" | ${grep} -E '(^|[[:space:]])[-]+[[:alnum:]]' ) + if [ "$unparsed_options" != "" ]; then + __usage + echo + echoerror "options are only allowed before install arguments" + echo + exit 1 + fi +} + #---------------------------------------------------------------------------------------------------------------------- # Handle command line arguments @@ -243,6 +270,10 @@ _REPO_URL="repo.saltstack.com" _PY_EXE="" _INSTALL_PY="$BS_FALSE" +# Defaults for install arguments +ITYPE="stable" + + #--- FUNCTION ------------------------------------------------------------------------------------------------------- # NAME: __usage # DESCRIPTION: Display usage information. @@ -380,19 +411,7 @@ do v ) echo "$0 -- Version $__ScriptVersion"; exit 0 ;; n ) _COLORS=0; __detect_color_support ;; D ) _ECHO_DEBUG=$BS_TRUE ;; - - c ) _TEMP_CONFIG_DIR=$(__check_config_dir "$OPTARG") - # If the configuration directory does not exist, error out - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - echoerror "Unsupported URI scheme for $OPTARG" - exit 1 - fi - if [ ! -d "$_TEMP_CONFIG_DIR" ]; then - echoerror "The configuration directory ${_TEMP_CONFIG_DIR} does not exist." - exit 1 - fi - ;; - + c ) _TEMP_CONFIG_DIR="$OPTARG" ;; g ) _SALT_REPO_URL=$OPTARG ;; G ) echowarn "The '-G' option is DEPRECATED and will be removed in the future stable release!" 
@@ -401,14 +420,7 @@ do ;; w ) _DOWNSTREAM_PKG_REPO=$BS_TRUE ;; - - k ) _TEMP_KEYS_DIR="$OPTARG" - # If the configuration directory does not exist, error out - if [ ! -d "$_TEMP_KEYS_DIR" ]; then - echoerror "The pre-seed keys directory ${_TEMP_KEYS_DIR} does not exist." - exit 1 - fi - ;; + k ) _TEMP_KEYS_DIR="$OPTARG" ;; s ) _SLEEP=$OPTARG ;; M ) _INSTALL_MASTER=$BS_TRUE ;; S ) _INSTALL_SYNDIC=$BS_TRUE ;; @@ -451,205 +463,28 @@ done shift $((OPTIND-1)) -__check_unparsed_options() { - shellopts="$1" - # grep alternative for SunOS - if [ -f /usr/xpg4/bin/grep ]; then - grep='/usr/xpg4/bin/grep' - else - grep='grep' - fi - unparsed_options=$( echo "$shellopts" | ${grep} -E '(^|[[:space:]])[-]+[[:alnum:]]' ) - if [ "$unparsed_options" != "" ]; then - __usage - echo - echoerror "options are only allowed before install arguments" - echo - exit 1 - fi -} +# Define our logging file and pipe paths +LOGFILE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.log/g )" +LOGPIPE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.logpipe/g )" - -# Check that we're actually installing one of minion/master/syndic -if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then - echowarn "Nothing to install or configure" - exit 0 -fi - -# Check that we're installing a minion if we're being passed a master address -if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_SALT_MASTER_ADDRESS" != "null" ]; then - echoerror "Don't pass a master address (-A) if no minion is going to be bootstrapped." +# Create our logging pipe +# On FreeBSD we have to use mkfifo instead of mknod +mknod "$LOGPIPE" p >/dev/null 2>&1 || mkfifo "$LOGPIPE" >/dev/null 2>&1 +if [ $? 
-ne 0 ]; then + echoerror "Failed to create the named pipe required to log" exit 1 fi -# Check that we're installing a minion if we're being passed a minion id -if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_SALT_MINION_ID" != "null" ]; then - echoerror "Don't pass a minion id (-i) if no minion is going to be bootstrapped." - exit 1 -fi +# What ever is written to the logpipe gets written to the logfile +tee < "$LOGPIPE" "$LOGFILE" & -# Check that we're installing or configuring a master if we're being passed a master config json dict -if [ "$_CUSTOM_MASTER_CONFIG" != "null" ]; then - if [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then - echoerror "Don't pass a master config JSON dict (-J) if no master is going to be bootstrapped or configured." - exit 1 - fi -fi +# Close STDOUT, reopen it directing it to the logpipe +exec 1>&- +exec 1>"$LOGPIPE" +# Close STDERR, reopen it directing it to the logpipe +exec 2>&- +exec 2>"$LOGPIPE" -# Check that we're installing or configuring a minion if we're being passed a minion config json dict -if [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then - if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then - echoerror "Don't pass a minion config JSON dict (-j) if no minion is going to be bootstrapped or configured." - exit 1 - fi -fi - -# Define installation type -if [ "$#" -eq 0 ];then - ITYPE="stable" -else - __check_unparsed_options "$*" - ITYPE=$1 - shift -fi - -# Check installation type -if [ "$(echo "$ITYPE" | egrep '(stable|testing|daily|git)')" = "" ]; then - echoerror "Installation type \"$ITYPE\" is not known..." 
- exit 1 -fi - -# If doing a git install, check what branch/tag/sha will be checked out -if [ "$ITYPE" = "git" ]; then - if [ "$#" -eq 0 ];then - GIT_REV="develop" - else - __check_unparsed_options "$*" - GIT_REV="$1" - shift - fi - - # Disable shell warning about unbound variable during git install - STABLE_REV="latest" - -# If doing stable install, check if version specified -elif [ "$ITYPE" = "stable" ]; then - if [ "$#" -eq 0 ];then - STABLE_REV="latest" - else - __check_unparsed_options "$*" - - if [ "$(echo "$1" | egrep '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11)$')" != "" ]; then - STABLE_REV="$1" - shift - elif [ "$(echo "$1" | egrep '^([0-9]*\.[0-9]*\.[0-9]*)$')" != "" ]; then - STABLE_REV="archive/$1" - shift - else - echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, latest, \$MAJOR.\$MINOR.\$PATCH)" - exit 1 - fi - fi -fi - -# -a and -V only work from git -if [ "$ITYPE" != "git" ]; then - if [ $_PIP_ALL -eq $BS_TRUE ]; then - echoerror "Pip installing all python packages with -a is only possible when installing Salt via git" - exit 1 - fi - if [ "$_VIRTUALENV_DIR" != "null" ]; then - echoerror "Virtualenv installs via -V is only possible when installing Salt via git" - exit 1 - fi -fi - -# Set the _REPO_URL value based on if -R was passed or not. Defaults to repo.saltstack.com. -if [ "$_CUSTOM_REPO_URL" != "null" ]; then - _REPO_URL="$_CUSTOM_REPO_URL" - - # Check for -r since -R is being passed. Set -r with a warning. - if [ "$_DISABLE_REPOS" -eq $BS_FALSE ]; then - echowarn "Detected -R option. No other repositories will be configured when -R is used. Setting -r option to True." - _DISABLE_REPOS=$BS_TRUE - fi -fi - -# Check for any unparsed arguments. Should be an error. -if [ "$#" -gt 0 ]; then - __check_unparsed_options "$*" - __usage - echo - echoerror "Too many arguments." - exit 1 -fi - -# Check the _DISABLE_SSL value and set HTTP or HTTPS. 
-if [ "$_DISABLE_SSL" -eq $BS_TRUE ]; then - HTTP_VAL="http" -else - HTTP_VAL="https" -fi - -# Check the _QUIET_GIT_INSTALLATION value and set SETUP_PY_INSTALL_ARGS. -if [ "$_QUIET_GIT_INSTALLATION" -eq $BS_TRUE ]; then - SETUP_PY_INSTALL_ARGS="-q" -else - SETUP_PY_INSTALL_ARGS="" -fi - -# whoami alternative for SunOS -if [ -f /usr/xpg4/bin/id ]; then - whoami='/usr/xpg4/bin/id -un' -else - whoami='whoami' -fi - -# Root permissions are required to run this script -if [ "$($whoami)" != "root" ]; then - echoerror "Salt requires root privileges to install. Please re-run this script as root." - exit 1 -fi - -# Export the http_proxy configuration to our current environment -if [ "${_HTTP_PROXY}" != "" ]; then - export http_proxy="$_HTTP_PROXY" - export https_proxy="$_HTTP_PROXY" -fi - -# Let's discover how we're being called -# shellcheck disable=SC2009 -CALLER=$(ps -a -o pid,args | grep $$ | grep -v grep | tr -s ' ' | cut -d ' ' -f 3) - -if [ "${CALLER}x" = "${0}x" ]; then - CALLER="shell pipe" -fi - -# Work around for 'Docker + salt-bootstrap failure' https://github.com/saltstack/salt-bootstrap/issues/394 -if [ "${_DISABLE_SALT_CHECKS}" -eq $BS_FALSE ] && [ -f /tmp/disable_salt_checks ]; then - # shellcheck disable=SC2016 - echowarn 'Found file: /tmp/disable_salt_checks, setting _DISABLE_SALT_CHECKS=$BS_TRUE' - _DISABLE_SALT_CHECKS=$BS_TRUE -fi - -# Because -a can only be installed into virtualenv -if [ "${_PIP_ALL}" -eq $BS_TRUE ] && [ "${_VIRTUALENV_DIR}" = "null" ]; then - usage - # Could possibly set up a default virtualenv location when -a flag is passed - echoerror "Using -a requires -V because pip pkgs should be siloed from python system pkgs" - exit 1 -fi - -# Make sure virtualenv directory does not already exist -if [ -d "${_VIRTUALENV_DIR}" ]; then - echoerror "The directory ${_VIRTUALENV_DIR} for virtualenv already exists" - exit 1 -fi - -echoinfo "Running version: ${__ScriptVersion}" -echoinfo "Executed by: ${CALLER}" -echoinfo "Command line: 
'${__ScriptFullName} ${__ScriptArgs}'" -#echowarn "Running the unstable version of ${__ScriptName}" #--- FUNCTION ------------------------------------------------------------------------------------------------------- # NAME: __exit_cleanup @@ -684,8 +519,10 @@ __exit_cleanup() { fi # Remove the logging pipe when the script exits - echodebug "Removing the logging pipe $LOGPIPE" - rm -f "$LOGPIPE" + if [ -p "$LOGPIPE" ]; then + echodebug "Removing the logging pipe $LOGPIPE" + rm -f "$LOGPIPE" + fi # Kill tee when exiting, CentOS, at least requires this # shellcheck disable=SC2009 @@ -713,28 +550,192 @@ __exit_cleanup() { trap "__exit_cleanup" EXIT INT -# Define our logging file and pipe paths -LOGFILE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.log/g )" -LOGPIPE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.logpipe/g )" +# Let's discover how we're being called +# shellcheck disable=SC2009 +CALLER=$(ps -a -o pid,args | grep $$ | grep -v grep | tr -s ' ' | cut -d ' ' -f 3) -# Create our logging pipe -# On FreeBSD we have to use mkfifo instead of mknod -mknod "$LOGPIPE" p >/dev/null 2>&1 || mkfifo "$LOGPIPE" >/dev/null 2>&1 -if [ $? -ne 0 ]; then - echoerror "Failed to create the named pipe required to log" +if [ "${CALLER}x" = "${0}x" ]; then + CALLER="shell pipe" +fi + +echoinfo "Running version: ${__ScriptVersion}" +echoinfo "Executed by: ${CALLER}" +echoinfo "Command line: '${__ScriptFullName} ${__ScriptArgs}'" +#echowarn "Running the unstable version of ${__ScriptName}" + +# Define installation type +if [ "$#" -gt 0 ];then + __check_unparsed_options "$*" + ITYPE=$1 + shift +fi + +# Check installation type +if [ "$(echo "$ITYPE" | egrep '(stable|testing|daily|git)')" = "" ]; then + echoerror "Installation type \"$ITYPE\" is not known..." 
exit 1 fi -# What ever is written to the logpipe gets written to the logfile -tee < "$LOGPIPE" "$LOGFILE" & +# If doing a git install, check what branch/tag/sha will be checked out +if [ "$ITYPE" = "git" ]; then + if [ "$#" -eq 0 ];then + GIT_REV="develop" + else + GIT_REV="$1" + shift + fi -# Close STDOUT, reopen it directing it to the logpipe -exec 1>&- -exec 1>"$LOGPIPE" -# Close STDERR, reopen it directing it to the logpipe -exec 2>&- -exec 2>"$LOGPIPE" + # Disable shell warning about unbound variable during git install + STABLE_REV="latest" +# If doing stable install, check if version specified +elif [ "$ITYPE" = "stable" ]; then + if [ "$#" -eq 0 ];then + STABLE_REV="latest" + else + if [ "$(echo "$1" | egrep '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7)$')" != "" ]; then + STABLE_REV="$1" + shift + elif [ "$(echo "$1" | egrep '^([0-9]*\.[0-9]*\.[0-9]*)$')" != "" ]; then + STABLE_REV="archive/$1" + shift + else + echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, latest, \$MAJOR.\$MINOR.\$PATCH)" + exit 1 + fi + fi +fi + +# Check for any unparsed arguments. Should be an error. +if [ "$#" -gt 0 ]; then + __usage + echo + echoerror "Too many arguments." + exit 1 +fi + +# whoami alternative for SunOS +if [ -f /usr/xpg4/bin/id ]; then + whoami='/usr/xpg4/bin/id -un' +else + whoami='whoami' +fi + +# Root permissions are required to run this script +if [ "$($whoami)" != "root" ]; then + echoerror "Salt requires root privileges to install. Please re-run this script as root." 
+ exit 1 +fi + +# Check that we're actually installing one of minion/master/syndic +if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echowarn "Nothing to install or configure" + exit 1 +fi + +# Check that we're installing a minion if we're being passed a master address +if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_SALT_MASTER_ADDRESS" != "null" ]; then + echoerror "Don't pass a master address (-A) if no minion is going to be bootstrapped." + exit 1 +fi + +# Check that we're installing a minion if we're being passed a minion id +if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_SALT_MINION_ID" != "null" ]; then + echoerror "Don't pass a minion id (-i) if no minion is going to be bootstrapped." + exit 1 +fi + +# Check that we're installing or configuring a master if we're being passed a master config json dict +if [ "$_CUSTOM_MASTER_CONFIG" != "null" ]; then + if [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoerror "Don't pass a master config JSON dict (-J) if no master is going to be bootstrapped or configured." + exit 1 + fi +fi + +# Check that we're installing or configuring a minion if we're being passed a minion config json dict +if [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then + if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoerror "Don't pass a minion config JSON dict (-j) if no minion is going to be bootstrapped or configured." + exit 1 + fi +fi + +# If the configuration directory or archive does not exist, error out +if [ "$_TEMP_CONFIG_DIR" != "null" ]; then + _TEMP_CONFIG_DIR="$(__check_config_dir "$_TEMP_CONFIG_DIR")" + [ "$_TEMP_CONFIG_DIR" = "null" ] && exit 1 +fi + +# If the pre-seed keys directory does not exist, error out +if [ "$_TEMP_KEYS_DIR" != "null" ] && [ ! 
-d "$_TEMP_KEYS_DIR" ]; then + echoerror "The pre-seed keys directory ${_TEMP_KEYS_DIR} does not exist." + exit 1 +fi + +# -a and -V only work from git +if [ "$ITYPE" != "git" ]; then + if [ $_PIP_ALL -eq $BS_TRUE ]; then + echoerror "Pip installing all python packages with -a is only possible when installing Salt via git" + exit 1 + fi + if [ "$_VIRTUALENV_DIR" != "null" ]; then + echoerror "Virtualenv installs via -V is only possible when installing Salt via git" + exit 1 + fi +fi + +# Set the _REPO_URL value based on if -R was passed or not. Defaults to repo.saltstack.com. +if [ "$_CUSTOM_REPO_URL" != "null" ]; then + _REPO_URL="$_CUSTOM_REPO_URL" + + # Check for -r since -R is being passed. Set -r with a warning. + if [ "$_DISABLE_REPOS" -eq $BS_FALSE ]; then + echowarn "Detected -R option. No other repositories will be configured when -R is used. Setting -r option to True." + _DISABLE_REPOS=$BS_TRUE + fi +fi + +# Check the _DISABLE_SSL value and set HTTP or HTTPS. +if [ "$_DISABLE_SSL" -eq $BS_TRUE ]; then + HTTP_VAL="http" +else + HTTP_VAL="https" +fi + +# Check the _QUIET_GIT_INSTALLATION value and set SETUP_PY_INSTALL_ARGS. 
+if [ "$_QUIET_GIT_INSTALLATION" -eq $BS_TRUE ]; then + SETUP_PY_INSTALL_ARGS="-q" +else + SETUP_PY_INSTALL_ARGS="" +fi + +# Export the http_proxy configuration to our current environment +if [ "${_HTTP_PROXY}" != "" ]; then + export http_proxy="$_HTTP_PROXY" + export https_proxy="$_HTTP_PROXY" +fi + +# Work around for 'Docker + salt-bootstrap failure' https://github.com/saltstack/salt-bootstrap/issues/394 +if [ "${_DISABLE_SALT_CHECKS}" -eq $BS_FALSE ] && [ -f /tmp/disable_salt_checks ]; then + # shellcheck disable=SC2016 + echowarn 'Found file: /tmp/disable_salt_checks, setting _DISABLE_SALT_CHECKS=$BS_TRUE' + _DISABLE_SALT_CHECKS=$BS_TRUE +fi + +# Because -a can only be installed into virtualenv +if [ "${_PIP_ALL}" -eq $BS_TRUE ] && [ "${_VIRTUALENV_DIR}" = "null" ]; then + usage + # Could possibly set up a default virtualenv location when -a flag is passed + echoerror "Using -a requires -V because pip pkgs should be siloed from python system pkgs" + exit 1 +fi + +# Make sure virtualenv directory does not already exist +if [ -d "${_VIRTUALENV_DIR}" ]; then + echoerror "The directory ${_VIRTUALENV_DIR} for virtualenv already exists" + exit 1 +fi # Handle the insecure flags if [ "$_INSECURE_DL" -eq $BS_TRUE ]; then @@ -856,8 +857,10 @@ __derive_debian_numeric_version() { elif [ "$INPUT_VERSION" = "jessie/sid" ]; then NUMERIC_VERSION=$(__parse_version_string "8.0") elif [ "$INPUT_VERSION" = "stretch/sid" ]; then - # Let's start detecting the upcoming Debian 9 (Stretch) NUMERIC_VERSION=$(__parse_version_string "9.0") + elif [ "$INPUT_VERSION" = "buster/sid" ]; then + # Let's start detecting the upcoming Debian 10 (Buster) release + NUMERIC_VERSION=$(__parse_version_string "10.0") else echowarn "Unable to parse the Debian Version (codename: '$INPUT_VERSION')" fi @@ -1089,11 +1092,11 @@ __gather_linux_system_info() { #--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: 
__install_python_and_deps() -# DESCRIPTION: Install a different version of python and its dependencies on a host. Currently this has only been -# tested on Centos 6 and is considered experimental. +# NAME: __install_python() +# DESCRIPTION: Install a different version of python on a host. Currently this has only been tested on CentOS 6 and +# is considered experimental. #---------------------------------------------------------------------------------------------------------------------- -__install_python_and_deps() { +__install_python() { if [ "$_PY_EXE" = "" ]; then echoerror "Must specify -x with -y to install a specific python version" exit 1 @@ -1103,7 +1106,7 @@ __install_python_and_deps() { __PACKAGES="${PY_PKG_V}" - if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then + if [ ${_DISABLE_REPOS} -eq ${BS_FALSE} ]; then echoinfo "Attempting to install a repo to help provide a separate python package" echoinfo "$DISTRO_NAME_L" case "$DISTRO_NAME_L" in @@ -1112,7 +1115,7 @@ __install_python_and_deps() { ;; *) echoerror "Installing a repo to provide a python package is only supported on Redhat/CentOS. - If a repo is already available please try running script with -r" + If a repo is already available, please try running script with -r." 
exit 1 ;; esac @@ -1123,13 +1126,6 @@ __install_python_and_deps() { echoinfo "Installing ${__PACKAGES}" __yum_install_noinput "${__PACKAGES}" || return 1 - - _PIP_PACKAGES="tornado PyYAML msgpack-python jinja2 pycrypto zmq" - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - _PIP_PACKAGES="${_PIP_PACKAGES} apache-libcloud" - fi - - __install_pip_pkgs "${_PIP_PACKAGES}" "${_PY_EXE}" || return 1 } @@ -1243,22 +1239,6 @@ __gather_system_info() { } -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __get_dpkg_architecture -# DESCRIPTION: Determine primary architecture for packages to install on Debian and derivatives -#---------------------------------------------------------------------------------------------------------------------- -__get_dpkg_architecture() { - if __check_command_exists dpkg; then - DPKG_ARCHITECTURE="$(dpkg --print-architecture)" - else - echoerror "dpkg: command not found." - return 1 - fi - - return 0 -} - - #--- FUNCTION ------------------------------------------------------------------------------------------------------- # NAME: __ubuntu_derivatives_translation # DESCRIPTION: Map Ubuntu derivatives to their Ubuntu base versions. @@ -1305,6 +1285,63 @@ __ubuntu_derivatives_translation() { } +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_dpkg_architecture +# DESCRIPTION: Determine the primary architecture for packages to install on Debian and derivatives +# and issue all necessary error messages. +#---------------------------------------------------------------------------------------------------------------------- +__check_dpkg_architecture() { + if __check_command_exists dpkg; then + DPKG_ARCHITECTURE="$(dpkg --print-architecture)" + else + echoerror "dpkg: command not found." 
+ return 1 + fi + + __REPO_ARCH="$DPKG_ARCHITECTURE" + __return_code=0 + + case $DPKG_ARCHITECTURE in + "i386") + error_msg="$_REPO_URL likely doesn't have all required 32-bit packages for $DISTRO_NAME $DISTRO_MAJOR_VERSION." + # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location + __REPO_ARCH="amd64" + ;; + "amd64") + error_msg="" + ;; + "armhf") + if [ "$DISTRO_NAME_L" = "ubuntu" ] || [ "$DISTRO_MAJOR_VERSION" -lt 8 ]; then + error_msg="Support for armhf packages at $_REPO_URL is limited to Debian/Raspbian 8 platforms." + __return_code=1 + else + error_msg="" + fi + ;; + *) + error_msg="$_REPO_URL doesn't have packages for your system architecture: $DPKG_ARCHITECTURE." + __return_code=1 + ;; + esac + + if [ "${error_msg}" != "" ]; then + echoerror "${error_msg}" + if [ "$ITYPE" != "git" ]; then + echoerror "You can try git installation mode, i.e.: sh ${__ScriptName} git v2016.11.5." + echoerror "It may be necessary to use git installation mode with pip and disable the SaltStack apt repository." + echoerror "For example:" + echoerror " sh ${__ScriptName} -r -P git v2016.11.5" + fi + fi + + if [ "${__return_code}" -eq 0 ]; then + return 0 + else + return 1 + fi +} + + #--- FUNCTION ------------------------------------------------------------------------------------------------------- # NAME: __ubuntu_codename_translation # DESCRIPTION: Map Ubuntu major versions to their corresponding codenames @@ -1403,18 +1440,172 @@ __debian_derivatives_translation() { #--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_and_refresh_suse_pkg_repo -# DESCRIPTION: Check if zypper knows about systemsmanagement_saltstack repo yet. -# If it doesn't, add the repo and refresh with the SUSE_PKG_URL. 
+# NAME: __debian_codename_translation +# DESCRIPTION: Map Debian major versions to their corresponding code names #---------------------------------------------------------------------------------------------------------------------- -__check_and_refresh_suse_pkg_repo() { - # Check to see if systemsmanagement_saltstack exists - __zypper repos | grep systemsmanagement_saltstack >/dev/null 2>&1 +# shellcheck disable=SC2034 +__debian_codename_translation() { - if [ $? -eq 1 ]; then - # zypper does not yet know anything about systemsmanagement_saltstack - __zypper addrepo --refresh "${SUSE_PKG_URL}" || return 1 - fi + case $DISTRO_MAJOR_VERSION in + "7") + DISTRO_CODENAME="wheezy" + ;; + "8") + DISTRO_CODENAME="jessie" + ;; + "9") + DISTRO_CODENAME="stretch" + ;; + "10") + DISTRO_CODENAME="buster" + ;; + *) + DISTRO_CODENAME="jessie" + ;; + esac +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_end_of_life_versions +# DESCRIPTION: Check for end of life distribution versions +#---------------------------------------------------------------------------------------------------------------------- +__check_end_of_life_versions() { + case "${DISTRO_NAME_L}" in + debian) + # Debian versions below 7 are not supported + if [ "$DISTRO_MAJOR_VERSION" -lt 7 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://wiki.debian.org/DebianReleases" + exit 1 + fi + ;; + + ubuntu) + # Ubuntu versions not supported + # + # < 14.04 + # = 14.10 + # = 15.04, 15.10 + if [ "$DISTRO_MAJOR_VERSION" -lt 14 ] || \ + [ "$DISTRO_MAJOR_VERSION" -eq 15 ] || \ + ([ "$DISTRO_MAJOR_VERSION" -lt 16 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]); then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. 
See:" + echoerror " https://wiki.ubuntu.com/Releases" + exit 1 + fi + ;; + + opensuse) + # openSUSE versions not supported + # + # <= 12.1 + if ([ "$DISTRO_MAJOR_VERSION" -eq 12 ] && [ "$DISTRO_MINOR_VERSION" -eq 1 ]) || [ "$DISTRO_MAJOR_VERSION" -lt 12 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " http://en.opensuse.org/Lifetime" + exit 1 + fi + ;; + + suse) + # SuSE versions not supported + # + # < 11 SP2 + SUSE_PATCHLEVEL=$(awk '/PATCHLEVEL/ {print $3}' /etc/SuSE-release ) + if [ "${SUSE_PATCHLEVEL}" = "" ]; then + SUSE_PATCHLEVEL="00" + fi + if ([ "$DISTRO_MAJOR_VERSION" -eq 11 ] && [ "$SUSE_PATCHLEVEL" -lt 02 ]) || [ "$DISTRO_MAJOR_VERSION" -lt 11 ]; then + echoerror "Versions lower than SuSE 11 SP2 are not supported." + echoerror "Please consider upgrading to the next stable" + exit 1 + fi + ;; + + fedora) + # Fedora lower than 24 are no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 24 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://fedoraproject.org/wiki/Releases" + exit 1 + fi + ;; + + centos) + # CentOS versions lower than 6 are no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " http://wiki.centos.org/Download" + exit 1 + fi + ;; + + red_hat*linux) + # Red Hat (Enterprise) Linux versions lower than 6 are no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. 
See:" + echoerror " https://access.redhat.com/support/policy/updates/errata/" + exit 1 + fi + ;; + + oracle*linux) + # Oracle Linux versions lower than 6 are no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " http://www.oracle.com/us/support/library/elsp-lifetime-069338.pdf" + exit 1 + fi + ;; + + scientific*linux) + # Scientific Linux versions lower than 6 are no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://www.scientificlinux.org/downloads/sl-versions/" + exit 1 + fi + ;; + + cloud*linux) + # Cloud Linux versions lower than 6 are no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://docs.cloudlinux.com/index.html?cloudlinux_life-cycle.html" + exit 1 + fi + ;; + + amazon*linux*ami) + # Amazon Linux versions lower than 2012.0X no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 2012 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://aws.amazon.com/amazon-linux-ami/" + exit 1 + fi + ;; + + freebsd) + # FreeBSD versions lower than 9.1 are not supported. + if ([ "$DISTRO_MAJOR_VERSION" -eq 9 ] && [ "$DISTRO_MINOR_VERSION" -lt 01 ]) || [ "$DISTRO_MAJOR_VERSION" -lt 9 ]; then + echoerror "Versions lower than FreeBSD 9.1 are not supported." 
+ exit 1 + fi + ;; + + *) + ;; + esac } @@ -1429,6 +1620,37 @@ echoinfo " OS Version: ${OS_VERSION}" echoinfo " Distribution: ${DISTRO_NAME} ${DISTRO_VERSION}" echo +# Simplify distro name naming on functions +DISTRO_NAME_L=$(echo "$DISTRO_NAME" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-zA-Z0-9_ ]//g' | sed -re 's/([[:space:]])+/_/g') + +# Simplify version naming on functions +if [ "$DISTRO_VERSION" = "" ] || [ ${_SIMPLIFY_VERSION} -eq $BS_FALSE ]; then + DISTRO_MAJOR_VERSION="" + DISTRO_MINOR_VERSION="" + PREFIXED_DISTRO_MAJOR_VERSION="" + PREFIXED_DISTRO_MINOR_VERSION="" +else + DISTRO_MAJOR_VERSION=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') + DISTRO_MINOR_VERSION=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).\([0-9]*\).*/\2/g') + PREFIXED_DISTRO_MAJOR_VERSION="_${DISTRO_MAJOR_VERSION}" + if [ "${PREFIXED_DISTRO_MAJOR_VERSION}" = "_" ]; then + PREFIXED_DISTRO_MAJOR_VERSION="" + fi + PREFIXED_DISTRO_MINOR_VERSION="_${DISTRO_MINOR_VERSION}" + if [ "${PREFIXED_DISTRO_MINOR_VERSION}" = "_" ]; then + PREFIXED_DISTRO_MINOR_VERSION="" + fi +fi + +# For Ubuntu derivatives, pretend to be their Ubuntu base version +__ubuntu_derivatives_translation + +# For Debian derivates, pretend to be their Debian base version +__debian_derivatives_translation + +# Fail soon for end of life versions +__check_end_of_life_versions + echodebug "Binaries will be searched using the following \$PATH: ${PATH}" # Let users know that we'll use a proxy @@ -1469,37 +1691,14 @@ if [ $_START_DAEMONS -eq $BS_FALSE ]; then echoinfo "Daemons will not be started" fi -# Simplify distro name naming on functions -DISTRO_NAME_L=$(echo "$DISTRO_NAME" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-zA-Z0-9_ ]//g' | sed -re 's/([[:space:]])+/_/g') - -# For Ubuntu derivatives, pretend to be their Ubuntu base version -__ubuntu_derivatives_translation - -# For Debian derivates, pretend to be their Debian base version -__debian_derivatives_translation - -# Simplify version naming on functions -if [ 
"$DISTRO_VERSION" = "" ] || [ ${_SIMPLIFY_VERSION} -eq $BS_FALSE ]; then - DISTRO_MAJOR_VERSION="" - DISTRO_MINOR_VERSION="" - PREFIXED_DISTRO_MAJOR_VERSION="" - PREFIXED_DISTRO_MINOR_VERSION="" -else - DISTRO_MAJOR_VERSION=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') - DISTRO_MINOR_VERSION=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).\([0-9]*\).*/\2/g') - PREFIXED_DISTRO_MAJOR_VERSION="_${DISTRO_MAJOR_VERSION}" - if [ "${PREFIXED_DISTRO_MAJOR_VERSION}" = "_" ]; then - PREFIXED_DISTRO_MAJOR_VERSION="" - fi - PREFIXED_DISTRO_MINOR_VERSION="_${DISTRO_MINOR_VERSION}" - if [ "${PREFIXED_DISTRO_MINOR_VERSION}" = "_" ]; then - PREFIXED_DISTRO_MINOR_VERSION="" - fi +if [ "${DISTRO_NAME_L}" = "ubuntu" ]; then + # For ubuntu versions, obtain the codename from the release version + __ubuntu_codename_translation +elif [ "${DISTRO_NAME_L}" = "debian" ]; then + # For debian versions, obtain the codename from the release version + __debian_codename_translation fi -# For ubuntu versions, obtain the codename from the release version -__ubuntu_codename_translation - # Only Ubuntu has daily packages, let's let users know about that if ([ "${DISTRO_NAME_L}" != "ubuntu" ] && [ "$ITYPE" = "daily" ]); then echoerror "${DISTRO_NAME} does not have daily packages support" @@ -1525,14 +1724,18 @@ if ([ "${DISTRO_NAME_L}" != "ubuntu" ] && [ "$_VIRTUALENV_DIR" != "null" ]); the fi # Only Ubuntu has support for pip installing all packages -if ([ "${DISTRO_NAME_L}" != "ubuntu" ] && [ $_PIP_ALL -eq $BS_TRUE ]);then +if ([ "${DISTRO_NAME_L}" != "ubuntu" ] && [ $_PIP_ALL -eq $BS_TRUE ]); then echoerror "${DISTRO_NAME} does not have -a support" exit 1 fi -# Starting from Ubuntu 16.10, gnupg-curl has been renamed to gnupg1-curl. +# Starting from Debian 9 and Ubuntu 16.10, gnupg-curl has been renamed to gnupg1-curl. 
GNUPG_CURL="gnupg-curl" -if [ "${DISTRO_NAME_L}" = "ubuntu" ]; then +if [ "$DISTRO_NAME_L" = "debian" ]; then + if [ "$DISTRO_MAJOR_VERSION" -gt 8 ]; then + GNUPG_CURL="gnupg1-curl" + fi +elif [ "$DISTRO_NAME_L" = "ubuntu" ]; then if [ "${DISTRO_VERSION}" = "16.10" ] || [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then GNUPG_CURL="gnupg1-curl" fi @@ -1624,7 +1827,9 @@ __rpm_import_gpg() { __yum_install_noinput() { ENABLE_EPEL_CMD="" - if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then + # Skip Amazon Linux for the first round, since EPEL is no longer required. + # See issue #724 + if [ $_DISABLE_REPOS -eq $BS_FALSE ] && [ "$DISTRO_NAME_L" != "amazon_linux_ami" ]; then ENABLE_EPEL_CMD="--enablerepo=${_EPEL_REPO}" fi @@ -1768,152 +1973,6 @@ __git_clone_and_checkout() { } -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_end_of_life_versions -# DESCRIPTION: Check for end of life distribution versions -#---------------------------------------------------------------------------------------------------------------------- -__check_end_of_life_versions() { - case "${DISTRO_NAME_L}" in - debian) - # Debian versions bellow 6 are not supported - if [ "$DISTRO_MAJOR_VERSION" -lt 7 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " https://wiki.debian.org/DebianReleases" - exit 1 - fi - ;; - - ubuntu) - # Ubuntu versions not supported - # - # < 12.04 - # 13.x, 15.x - # 12.10, 14.10 - if [ "$DISTRO_MAJOR_VERSION" -lt 12 ] || \ - [ "$DISTRO_MAJOR_VERSION" -eq 13 ] || \ - [ "$DISTRO_MAJOR_VERSION" -eq 15 ] || \ - ([ "$DISTRO_MAJOR_VERSION" -lt 16 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]); then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. 
See:" - echoerror " https://wiki.ubuntu.com/Releases" - exit 1 - fi - ;; - - opensuse) - # openSUSE versions not supported - # - # <= 12.1 - if ([ "$DISTRO_MAJOR_VERSION" -eq 12 ] && [ "$DISTRO_MINOR_VERSION" -eq 1 ]) || [ "$DISTRO_MAJOR_VERSION" -lt 12 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " http://en.opensuse.org/Lifetime" - exit 1 - fi - ;; - - suse) - # SuSE versions not supported - # - # < 11 SP2 - SUSE_PATCHLEVEL=$(awk '/PATCHLEVEL/ {print $3}' /etc/SuSE-release ) - if [ "${SUSE_PATCHLEVEL}" = "" ]; then - SUSE_PATCHLEVEL="00" - fi - if ([ "$DISTRO_MAJOR_VERSION" -eq 11 ] && [ "$SUSE_PATCHLEVEL" -lt 02 ]) || [ "$DISTRO_MAJOR_VERSION" -lt 11 ]; then - echoerror "Versions lower than SuSE 11 SP2 are not supported." - echoerror "Please consider upgrading to the next stable" - exit 1 - fi - ;; - - fedora) - # Fedora lower than 18 are no longer supported - if [ "$DISTRO_MAJOR_VERSION" -lt 23 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " https://fedoraproject.org/wiki/Releases" - exit 1 - fi - ;; - - centos) - # CentOS versions lower than 5 are no longer supported - if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " http://wiki.centos.org/Download" - exit 1 - fi - ;; - - red_hat*linux) - # Red Hat (Enterprise) Linux versions lower than 5 are no longer supported - if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. 
See:" - echoerror " https://access.redhat.com/support/policy/updates/errata/" - exit 1 - fi - ;; - - oracle*linux) - # Oracle Linux versions lower than 5 are no longer supported - if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " http://www.oracle.com/us/support/library/elsp-lifetime-069338.pdf" - exit 1 - fi - ;; - - scientific*linux) - # Scientific Linux versions lower than 5 are no longer supported - if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " https://www.scientificlinux.org/downloads/sl-versions/" - exit 1 - fi - ;; - - cloud*linux) - # Cloud Linux versions lower than 5 are no longer supported - if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " https://docs.cloudlinux.com/index.html?cloudlinux_life-cycle.html" - exit 1 - fi - ;; - - amazon*linux*ami) - # Amazon Linux versions lower than 2012.0X no longer supported - if [ "$DISTRO_MAJOR_VERSION" -lt 2012 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " https://aws.amazon.com/amazon-linux-ami/" - exit 1 - fi - ;; - - freebsd) - # FreeBSD versions lower than 9.1 are not supported. - if ([ "$DISTRO_MAJOR_VERSION" -eq 9 ] && [ "$DISTRO_MINOR_VERSION" -lt 01 ]) || [ "$DISTRO_MAJOR_VERSION" -lt 9 ]; then - echoerror "Versions lower than FreeBSD 9.1 are not supported." 
- exit 1 - fi - ;; - - *) - ;; - esac -} -# Fail soon for end of life versions -__check_end_of_life_versions - - #--- FUNCTION ------------------------------------------------------------------------------------------------------- # NAME: __copyfile # DESCRIPTION: Simple function to copy files. Overrides if asked. @@ -2479,38 +2538,55 @@ __enable_universe_repository() { return 0 } -install_ubuntu_deps() { - if [ "$DISTRO_MAJOR_VERSION" -gt 12 ]; then - # Above Ubuntu 12.04 add-apt-repository is in a different package - __apt_get_install_noinput software-properties-common || return 1 +__install_saltstack_ubuntu_repository() { + + # Workaround for latest non-LTS ubuntu + if [ "$DISTRO_VERSION" = "16.10" ] || [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then + echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages from latest LTS release. You may experience problems." + UBUNTU_VERSION=16.04 + UBUNTU_CODENAME="xenial" else - __apt_get_install_noinput python-software-properties || return 1 + UBUNTU_VERSION=$DISTRO_VERSION + UBUNTU_CODENAME=$DISTRO_CODENAME fi + # SaltStack's stable Ubuntu repository: + SALTSTACK_UBUNTU_URL="${HTTP_VAL}://${_REPO_URL}/apt/ubuntu/${UBUNTU_VERSION}/${__REPO_ARCH}/${STABLE_REV}" + echo "deb $SALTSTACK_UBUNTU_URL $UBUNTU_CODENAME main" > /etc/apt/sources.list.d/saltstack.list + + # Make sure https transport is available + if [ "$HTTP_VAL" = "https" ] ; then + __apt_get_install_noinput apt-transport-https ca-certificates || return 1 + fi + + __apt_key_fetch "$SALTSTACK_UBUNTU_URL/SALTSTACK-GPG-KEY.pub" || return 1 + + apt-get update +} + +install_ubuntu_deps() { if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then - __enable_universe_repository || return 1 - - # Versions starting with 2015.5.6 and 2015.8.1 are hosted at repo.saltstack.com - if [ "$(echo "$STABLE_REV" | egrep '^(2015\.5|2015\.8|2016\.3|2016\.11|latest|archive\/)')" = "" ]; then - if [ "$DISTRO_MAJOR_VERSION" -lt 14 ]; then - echoinfo "Installing Python Requests/Chardet 
from Chris Lea's PPA repository" - add-apt-repository -y "ppa:chris-lea/python-requests" || return 1 - add-apt-repository -y "ppa:chris-lea/python-chardet" || return 1 - add-apt-repository -y "ppa:chris-lea/python-urllib3" || return 1 - add-apt-repository -y "ppa:chris-lea/python-crypto" || return 1 - fi - + # Install add-apt-repository + if ! __check_command_exists add-apt-repository; then + __apt_get_install_noinput software-properties-common || return 1 fi + __enable_universe_repository || return 1 + apt-get update fi - # Minimal systems might not have upstart installed, install it - __PACKAGES="upstart" + __PACKAGES='' + + if [ "$DISTRO_MAJOR_VERSION" -lt 16 ]; then + # Minimal systems might not have upstart installed, install it + __PACKAGES="upstart" + fi if [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then __PACKAGES="${__PACKAGES} python2.7" fi + if [ "$_VIRTUALENV_DIR" != "null" ]; then __PACKAGES="${__PACKAGES} python-virtualenv" fi @@ -2564,59 +2640,10 @@ install_ubuntu_stable_deps() { __apt_get_upgrade_noinput || return 1 fi - if [ ${_DISABLE_REPOS} -eq $BS_FALSE ]; then - __get_dpkg_architecture || return 1 - __REPO_ARCH="$DPKG_ARCHITECTURE" + __check_dpkg_architecture || return 1 - if [ "$DPKG_ARCHITECTURE" = "i386" ]; then - echoerror "$_REPO_URL likely doesn't have all required 32-bit packages for Ubuntu $DISTRO_MAJOR_VERSION (yet?)." - - # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location - __REPO_ARCH="amd64" - elif [ "$DPKG_ARCHITECTURE" != "amd64" ]; then - echoerror "$_REPO_URL doesn't have packages for your system architecture: $DPKG_ARCHITECTURE." 
- if [ "$ITYPE" != "git" ]; then - echoerror "You can try git installation mode, i.e.: sh ${__ScriptName} git v2016.3.1" - exit 1 - fi - fi - - # Versions starting with 2015.5.6, 2015.8.1 and 2016.3.0 are hosted at repo.saltstack.com - if [ "$(echo "$STABLE_REV" | egrep '^(2015\.5|2015\.8|2016\.3|2016\.11|latest|archive\/)')" != "" ]; then - # Workaround for latest non-LTS ubuntu - if [ "$DISTRO_VERSION" = "16.10" ] || [ "$DISTRO_MAJOR_VERSION" -gt 16 ]; then - echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages from latest LTS release. You may experience problems." - UBUNTU_VERSION=16.04 - UBUNTU_CODENAME="xenial" - else - UBUNTU_VERSION=$DISTRO_VERSION - UBUNTU_CODENAME=$DISTRO_CODENAME - fi - - # SaltStack's stable Ubuntu repository: - SALTSTACK_UBUNTU_URL="${HTTP_VAL}://${_REPO_URL}/apt/ubuntu/${UBUNTU_VERSION}/${__REPO_ARCH}/${STABLE_REV}" - echo "deb $SALTSTACK_UBUNTU_URL $UBUNTU_CODENAME main" > /etc/apt/sources.list.d/saltstack.list - - # Make sure https transport is available - if [ "$HTTP_VAL" = "https" ] ; then - __apt_get_install_noinput apt-transport-https ca-certificates || return 1 - fi - - __apt_key_fetch "$SALTSTACK_UBUNTU_URL/SALTSTACK-GPG-KEY.pub" || return 1 - else - # Alternate PPAs: salt16, salt17, salt2014-1, salt2014-7 - if [ ! "$(echo "$STABLE_REV" | egrep '^(1\.6|1\.7)$')" = "" ]; then - STABLE_PPA="saltstack/salt$(echo "$STABLE_REV" | tr -d .)" - elif [ ! "$(echo "$STABLE_REV" | egrep '^(2014\.1|2014\.7)$')" = "" ]; then - STABLE_PPA="saltstack/salt$(echo "$STABLE_REV" | tr . 
-)" - else - STABLE_PPA="saltstack/salt" - fi - - add-apt-repository -y "ppa:$STABLE_PPA" || return 1 - fi - - apt-get update + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __install_saltstack_ubuntu_repository || return 1 fi install_ubuntu_deps || return 1 @@ -2625,13 +2652,6 @@ install_ubuntu_stable_deps() { install_ubuntu_daily_deps() { install_ubuntu_stable_deps || return 1 - if [ "$DISTRO_MAJOR_VERSION" -gt 12 ]; then - __apt_get_install_noinput software-properties-common || return 1 - else - # Ubuntu 12.04 needs python-software-properties to get add-apt-repository binary - __apt_get_install_noinput python-software-properties || return 1 - fi - if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then __enable_universe_repository || return 1 @@ -2716,11 +2736,13 @@ install_ubuntu_stable() { # shellcheck disable=SC2086 __apt_get_install_noinput ${__PACKAGES} || return 1 + return 0 } install_ubuntu_daily() { install_ubuntu_stable || return 1 + return 0 } @@ -2735,6 +2757,7 @@ install_ubuntu_git() { else python setup.py ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 fi + return 0 } @@ -2760,6 +2783,8 @@ install_ubuntu_stable_post() { update-rc.d salt-$fname defaults fi done + + return 0 } install_ubuntu_git_post() { @@ -2772,7 +2797,7 @@ install_ubuntu_git_post() { [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" # Skip salt-api since the service should be opt-in and not necessarily started on boot [ $fname = "api" ] && continue @@ -2796,10 +2821,10 @@ install_ubuntu_git_post() { /sbin/initctl reload-configuration || return 1 fi # No upstart support in Ubuntu!? 
- elif [ -f "${_SALT_GIT_CHECKOUT_DIR}/debian/salt-${fname}.init" ]; then + elif [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init" ]; then echodebug "There's NO upstart support!?" - echodebug "Copying ${_SALT_GIT_CHECKOUT_DIR}/debian/salt-${fname}.init to /etc/init.d/salt-$fname" - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/debian/salt-${fname}.init" "/etc/init.d/salt-$fname" + echodebug "Copying ${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init to /etc/init.d/salt-$fname" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init" "/etc/init.d/salt-$fname" chmod +x /etc/init.d/salt-$fname # Skip salt-api since the service should be opt-in and not necessarily started on boot @@ -2810,6 +2835,8 @@ install_ubuntu_git_post() { echoerror "Neither upstart nor init.d was setup for salt-$fname" fi done + + return 0 } install_ubuntu_restart_daemons() { @@ -2862,6 +2889,7 @@ install_ubuntu_restart_daemons() { /etc/init.d/salt-$fname stop > /dev/null 2>&1 /etc/init.d/salt-$fname start done + return 0 } @@ -2895,6 +2923,33 @@ install_ubuntu_check_services() { # # Debian Install Functions # +__install_saltstack_debian_repository() { + if [ "$DISTRO_MAJOR_VERSION" -eq 10 ]; then + # Packages for Debian 10 at repo.saltstack.com are not yet available + # Set up repository for Debian 9 for Debian 10 for now until support + # is available at repo.saltstack.com for Debian 10. + echowarn "Debian 10 distribution detected, but stable packages requested. Trying packages from Debian 9. You may experience problems." 
+ DEBIAN_RELEASE="9" + DEBIAN_CODENAME="stretch" + else + DEBIAN_RELEASE="$DISTRO_MAJOR_VERSION" + DEBIAN_CODENAME="$DISTRO_CODENAME" + fi + + # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location + SALTSTACK_DEBIAN_URL="${HTTP_VAL}://${_REPO_URL}/apt/debian/${DEBIAN_RELEASE}/${__REPO_ARCH}/${STABLE_REV}" + echo "deb $SALTSTACK_DEBIAN_URL $DEBIAN_CODENAME main" > "/etc/apt/sources.list.d/saltstack.list" + + if [ "$HTTP_VAL" = "https" ] ; then + __apt_get_install_noinput apt-transport-https ca-certificates || return 1 + fi + + __apt_key_fetch "$SALTSTACK_DEBIAN_URL/SALTSTACK-GPG-KEY.pub" || return 1 + + apt-get update + +} + install_debian_deps() { if [ $_START_DAEMONS -eq $BS_FALSE ]; then echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." @@ -2915,95 +2970,7 @@ install_debian_deps() { __apt_get_upgrade_noinput || return 1 fi - # Install procps and pciutils which allows for Docker bootstraps. 
See #366#issuecomment-39666813 - __PACKAGES="procps pciutils" - __PIP_PACKAGES="" - - # YAML module is used for generating custom master/minion configs - __PACKAGES="${__PACKAGES} python-yaml" - - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __apt_get_install_noinput ${_EXTRA_PACKAGES} || return 1 - fi - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - # shellcheck disable=SC2089 - __PIP_PACKAGES="${__PIP_PACKAGES} 'apache-libcloud>=$_LIBCLOUD_MIN_VERSION'" - fi - - if [ "${__PIP_PACKAGES}" != "" ]; then - # shellcheck disable=SC2086,SC2090 - pip install -U ${__PIP_PACKAGES} || return 1 - fi - - return 0 -} - -install_debian_7_deps() { - if [ $_START_DAEMONS -eq $BS_FALSE ]; then - echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." - fi - - # No user interaction, libc6 restart services for example - export DEBIAN_FRONTEND=noninteractive - - apt-get update - - if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then - # Try to update GPG keys first if allowed - if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then - __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && - apt-key update && apt-get update - fi - - __apt_get_upgrade_noinput || return 1 - fi - - if [ "${_DISABLE_REPOS}" -eq $BS_FALSE ]; then - __get_dpkg_architecture || return 1 - - __REPO_ARCH="$DPKG_ARCHITECTURE" - - if [ "$DPKG_ARCHITECTURE" = "i386" ]; then - echoerror "$_REPO_URL likely doesn't have all required 32-bit packages for Debian $DISTRO_MAJOR_VERSION (yet?)." 
- - if [ "$ITYPE" != "git" ]; then - echoerror "You can try git installation mode, i.e.: sh ${__ScriptName} git v2016.3.1" - fi - - # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location - __REPO_ARCH="amd64" - elif [ "$DPKG_ARCHITECTURE" != "amd64" ]; then - echoerror "$_REPO_URL doesn't have packages for your system architecture: $DPKG_ARCHITECTURE." - exit 1 - fi - - # Versions starting with 2015.8.7 and 2016.3.0 are hosted at repo.saltstack.com - if [ "$(echo "$STABLE_REV" | egrep '^(2015\.8|2016\.3|2016\.11|latest|archive\/201[5-6]\.)')" != "" ]; then - # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location - SALTSTACK_DEBIAN_URL="${HTTP_VAL}://${_REPO_URL}/apt/debian/${DISTRO_MAJOR_VERSION}/${__REPO_ARCH}/${STABLE_REV}" - echo "deb $SALTSTACK_DEBIAN_URL wheezy main" > "/etc/apt/sources.list.d/saltstack.list" - - if [ "$HTTP_VAL" = "https" ] ; then - __apt_get_install_noinput apt-transport-https ca-certificates || return 1 - fi - - __apt_key_fetch "$SALTSTACK_DEBIAN_URL/SALTSTACK-GPG-KEY.pub" || return 1 - elif [ -n "$STABLE_REV" ]; then - echoerror "Installation of Salt ${STABLE_REV#*/} packages not supported by ${__ScriptName} ${__ScriptVersion} on Debian $DISTRO_MAJOR_VERSION." - - return 1 - fi - - apt-get update - else - echowarn "Packages from $_REPO_URL are required to install Salt version 2015.8 or higher on Debian $DISTRO_MAJOR_VERSION." - fi + __check_dpkg_architecture || return 1 # Additionally install procps and pciutils which allows for Docker bootstraps. 
See 366#issuecomment-39666813 __PACKAGES='procps pciutils' @@ -3011,87 +2978,17 @@ install_debian_7_deps() { # YAML module is used for generating custom master/minion configs __PACKAGES="${__PACKAGES} python-yaml" - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __apt_get_install_noinput ${_EXTRA_PACKAGES} || return 1 + # Debian 9 needs the dirmgr package in order to import the GPG key later + if [ "$DISTRO_MAJOR_VERSION" -ge 9 ]; then + __PACKAGES="${__PACKAGES} dirmngr" fi - return 0 -} - -install_debian_8_deps() { - if [ $_START_DAEMONS -eq $BS_FALSE ]; then - echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." - fi - - # No user interaction, libc6 restart services for example - export DEBIAN_FRONTEND=noninteractive - - apt-get update - - if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then - # Try to update GPG keys first if allowed - if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then - __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && - apt-key update && apt-get update - fi - - __apt_get_upgrade_noinput || return 1 - fi - - if [ ${_DISABLE_REPOS} -eq $BS_FALSE ]; then - __get_dpkg_architecture || return 1 - - __REPO_ARCH="$DPKG_ARCHITECTURE" - - if [ "$DPKG_ARCHITECTURE" = "i386" ]; then - echoerror "$_REPO_URL likely doesn't have all required 32-bit packages for Debian $DISTRO_MAJOR_VERSION (yet?)." 
- - if [ "$ITYPE" != "git" ]; then - echoerror "You can try git installation mode, i.e.: sh ${__ScriptName} git v2016.3.1" - fi - - # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location - __REPO_ARCH="amd64" - elif [ "$DPKG_ARCHITECTURE" != "amd64" ] && [ "$DPKG_ARCHITECTURE" != "armhf" ]; then - echoerror "$_REPO_URL doesn't have packages for your system architecture: $DPKG_ARCHITECTURE." - echoerror "Try git installation mode with pip and disable SaltStack apt repository, for example:" - echoerror " sh ${__ScriptName} -r -P git v2016.3.1" - - exit 1 - fi - - # Versions starting with 2015.5.6, 2015.8.1 and 2016.3.0 are hosted at repo.saltstack.com - if [ "$(echo "$STABLE_REV" | egrep '^(2015\.5|2015\.8|2016\.3|2016\.11|latest|archive\/201[5-6]\.)')" != "" ]; then - SALTSTACK_DEBIAN_URL="${HTTP_VAL}://${_REPO_URL}/apt/debian/${DISTRO_MAJOR_VERSION}/${__REPO_ARCH}/${STABLE_REV}" - echo "deb $SALTSTACK_DEBIAN_URL jessie main" > "/etc/apt/sources.list.d/saltstack.list" - - if [ "$HTTP_VAL" = "https" ] ; then - __apt_get_install_noinput apt-transport-https ca-certificates || return 1 - fi - - __apt_key_fetch "$SALTSTACK_DEBIAN_URL/SALTSTACK-GPG-KEY.pub" || return 1 - elif [ -n "$STABLE_REV" ]; then - echoerror "Installation of Salt ${STABLE_REV#*/} packages not supported by ${__ScriptName} ${__ScriptVersion} on Debian $DISTRO_MAJOR_VERSION." - - return 1 - fi - - apt-get update - fi - - # Additionally install procps and pciutils which allows for Docker bootstraps. 
See 366#issuecomment-39666813 - __PACKAGES='procps pciutils' - # shellcheck disable=SC2086 __apt_get_install_noinput ${__PACKAGES} || return 1 - # YAML module is used for generating custom master/minion configs - __PACKAGES="${__PACKAGES} python-yaml" + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __install_saltstack_debian_repository || return 1 + fi if [ "${_EXTRA_PACKAGES}" != "" ]; then echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" @@ -3135,14 +3032,14 @@ install_debian_git_deps() { } install_debian_7_git_deps() { - install_debian_7_deps || return 1 + install_debian_deps || return 1 install_debian_git_deps || return 1 return 0 } install_debian_8_git_deps() { - install_debian_8_deps || return 1 + install_debian_deps || return 1 if ! __check_command_exists git; then __apt_get_install_noinput git || return 1 @@ -3204,6 +3101,45 @@ install_debian_8_git_deps() { return 0 } +install_debian_9_git_deps() { + install_debian_deps || return 1 + + if ! 
__check_command_exists git; then + __apt_get_install_noinput git || return 1 + fi + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __apt_get_install_noinput ca-certificates + fi + + __git_clone_and_checkout || return 1 + + __PACKAGES="libzmq5 lsb-release python-apt python-backports-abc python-crypto" + __PACKAGES="${__PACKAGES} python-jinja2 python-msgpack python-requests python-systemd" + __PACKAGES="${__PACKAGES} python-tornado python-yaml python-zmq" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # Install python-libcloud if asked to + __PACKAGES="${__PACKAGES} python-libcloud" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_debian_10_git_deps() { + install_debian_9_git_deps || return 1 + return 0 +} + install_debian_stable() { __PACKAGES="" @@ -3236,9 +3172,14 @@ install_debian_8_stable() { return 0 } +install_debian_9_stable() { + install_debian_stable || return 1 + return 0 +} + install_debian_git() { if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then - python setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 + python setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 else python setup.py ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 fi @@ -3254,6 +3195,11 @@ install_debian_8_git() { return 0 } +install_debian_9_git() { + install_debian_git || return 1 + return 0 +} + install_debian_git_post() { for fname in api master minion syndic; do # Skip if not meant to be installed @@ -3267,9 +3213,9 @@ install_debian_git_post() { if [ -f /bin/systemctl ]; then if [ ! 
-f /lib/systemd/system/salt-${fname}.service ] || \ ([ -f /lib/systemd/system/salt-${fname}.service ] && [ $_FORCE_OVERWRITE -eq $BS_TRUE ]); then - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-${fname}.service" ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-${fname}.service" /lib/systemd/system - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-${fname}.environment" "/etc/default/salt-${fname}" + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.environment" "/etc/default/salt-${fname}" else # workaround before adding Debian-specific unit files to the Salt main repo __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system @@ -3286,9 +3232,9 @@ install_debian_git_post() { # Install initscripts for Debian 7 "Wheezy" elif [ ! -f "/etc/init.d/salt-$fname" ] || \ ([ -f "/etc/init.d/salt-$fname" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]); then - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-$fname.init" ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-${fname}.init" "/etc/init.d/salt-${fname}" - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-${fname}.environment" "/etc/default/salt-${fname}" + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-$fname.init" ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init" "/etc/init.d/salt-${fname}" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.environment" "/etc/default/salt-${fname}" else # Make sure wget is available __check_command_exists wget || __apt_get_install_noinput wget || return 1 @@ -3442,23 +3388,7 @@ install_fedora_git_deps() { __git_clone_and_checkout || return 1 - __PACKAGES="systemd-python" - __PIP_PACKAGES="" - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then - # We're on the develop branch, install whichever tornado is on the requirements file - 
__REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" - if [ "${__REQUIRED_TORNADO}" != "" ]; then - __check_pip_allowed "You need to allow pip based installations (-P) in order to install tornado" - - # Install pip and pip dependencies - if ! __check_command_exists pip; then - __PACKAGES="${__PACKAGES} python-setuptools python-pip gcc python-devel" - fi - - __PIP_PACKAGES="${__PIP_PACKAGES} tornado" - fi - fi + __PACKAGES="python2-tornado systemd-python" if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then __PACKAGES="${__PACKAGES} python-libcloud python-netaddr" @@ -3467,11 +3397,6 @@ install_fedora_git_deps() { # shellcheck disable=SC2086 dnf install -y ${__PACKAGES} || return 1 - if [ "${__PIP_PACKAGES}" != "" ]; then - # shellcheck disable=SC2086,SC2090 - pip install -U ${__PIP_PACKAGES} || return 1 - fi - # Let's trigger config_salt() if [ "$_TEMP_CONFIG_DIR" = "null" ]; then _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" @@ -3563,13 +3488,6 @@ __install_epel_repository() { return 0 fi - # Check if epel-release is already installed and flag it accordingly - rpm --nodigest --nosignature -q epel-release > /dev/null 2>&1 - if [ $? -eq 0 ]; then - _EPEL_REPOS_INSTALLED=$BS_TRUE - return 0 - fi - # Download latest 'epel-release' package for the distro version directly epel_repo_url="${HTTP_VAL}://dl.fedoraproject.org/pub/epel/epel-release-latest-${DISTRO_MAJOR_VERSION}.noarch.rpm" rpm -Uvh --force "$epel_repo_url" || return 1 @@ -3754,8 +3672,29 @@ install_centos_git_deps() { __PACKAGES="${__PACKAGES} python-libcloud" fi - if [ "${_INSTALL_PY}" = "${BS_TRUE}" ]; then - __install_python_and_deps || return 1 + if [ "${_INSTALL_PY}" -eq "${BS_TRUE}" ]; then + # Install Python if "-y" was passed in. + __install_python || return 1 + fi + + if [ "${_PY_EXE}" != "" ]; then + # If "-x" is defined, install dependencies with pip based on the Python version given. 
+ _PIP_PACKAGES="jinja2 msgpack-python pycrypto PyYAML tornado zmq" + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + for SINGLE_PACKAGE in $_PIP_PACKAGES; do + __REQUIRED_VERSION="$(grep "${SINGLE_PACKAGE}" "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_VERSION}" != "" ]; then + _PIP_PACKAGES=$(echo "$_PIP_PACKAGES" | sed "s/${SINGLE_PACKAGE}/${__REQUIRED_VERSION}/") + fi + done + fi + + if [ "$_INSTALL_CLOUD" -eq "${BS_TRUE}" ]; then + _PIP_PACKAGES="${_PIP_PACKAGES} apache-libcloud" + fi + + __install_pip_pkgs "${_PIP_PACKAGES}" "${_PY_EXE}" || return 1 else # shellcheck disable=SC2086 __yum_install_noinput ${__PACKAGES} || return 1 @@ -4460,44 +4399,37 @@ daemons_running_alpine_linux() { install_amazon_linux_ami_deps() { # Shim to figure out if we're using old (rhel) or new (aws) rpms. _USEAWS=$BS_FALSE + pkg_append="python" repo_rev="$(echo "${STABLE_REV}" | sed 's|.*\/||g')" - if echo "$repo_rev" | egrep -q '^(latest|2016\.11)$'; then - _USEAWS=$BS_TRUE - elif echo "$repo_rev" | egrep -q '^[0-9]+$' && [ "$(echo "$repo_rev" | cut -c1-4)" -gt 2016 ]; then + if echo "$repo_rev" | egrep -q '^(latest|2016\.11)$' || \ + ( echo "$repo_rev" | egrep -q '^[0-9]+$' && [ "$(echo "$repo_rev" | cut -c1-4)" -gt 2016 ] ); then _USEAWS=$BS_TRUE + pkg_append="python27" fi # We need to install yum-utils before doing anything else when installing on # Amazon Linux ECS-optimized images. See issue #974. 
- yum -y install yum-utils + __yum_install_noinput yum-utils - ENABLE_EPEL_CMD="" - if [ $_DISABLE_REPOS -eq $BS_TRUE ]; then - ENABLE_EPEL_CMD="--enablerepo=${_EPEL_REPO}" + # Do upgrade early + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 fi - if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then - # enable the EPEL repo - /usr/bin/yum-config-manager --enable epel || return 1 - - # exclude Salt and ZeroMQ packages from EPEL - /usr/bin/yum-config-manager epel --setopt "epel.exclude=zeromq* salt* python-zmq*" --save || return 1 - + if [ $_DISABLE_REPOS -eq $BS_FALSE ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then __REPO_FILENAME="saltstack-repo.repo" # Set a few vars to make life easier. if [ $_USEAWS -eq $BS_TRUE ]; then - base_url="$HTTP_VAL://repo.saltstack.com/yum/amazon/latest/\$basearch/$repo_rev/" + base_url="$HTTP_VAL://${_REPO_URL}/yum/amazon/latest/\$basearch/$repo_rev/" gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" repo_name="SaltStack repo for Amazon Linux" - pkg_append="python27" else - base_url="$HTTP_VAL://repo.saltstack.com/yum/redhat/6/\$basearch/$repo_rev/" + base_url="$HTTP_VAL://${_REPO_URL}/yum/redhat/6/\$basearch/$repo_rev/" gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" repo_name="SaltStack repo for RHEL/CentOS 6" - pkg_append="python" fi # This should prob be refactored to use __install_saltstack_rhel_repository() @@ -4515,21 +4447,19 @@ baseurl=$base_url _eof fi - if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then - yum -y update || return 1 - fi fi - #ordereddict removed. 
- #Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64 which is already installed + + # Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64 + # which is already installed __PACKAGES="${pkg_append}-PyYAML ${pkg_append}-crypto ${pkg_append}-msgpack ${pkg_append}-zmq ${pkg_append}-jinja2 ${pkg_append}-requests" # shellcheck disable=SC2086 - yum -y install ${__PACKAGES} ${ENABLE_EPEL_CMD} || return 1 + __yum_install_noinput ${__PACKAGES} || return 1 if [ "${_EXTRA_PACKAGES}" != "" ]; then echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" # shellcheck disable=SC2086 - yum install -y ${_EXTRA_PACKAGES} ${ENABLE_EPEL_CMD} || return 1 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 fi } @@ -4549,13 +4479,8 @@ install_amazon_linux_ami_git_deps() { install_amazon_linux_ami_deps || return 1 - ENABLE_EPEL_CMD="" - if [ $_DISABLE_REPOS -eq $BS_TRUE ]; then - ENABLE_EPEL_CMD="--enablerepo=${_EPEL_REPO}" - fi - if ! 
__check_command_exists git; then - yum -y install git ${ENABLE_EPEL_CMD} || return 1 + __yum_install_noinput git || return 1 fi __git_clone_and_checkout || return 1 @@ -4579,7 +4504,7 @@ install_amazon_linux_ami_git_deps() { if [ "${__PACKAGES}" != "" ]; then # shellcheck disable=SC2086 - yum -y install ${__PACKAGES} ${ENABLE_EPEL_CMD} || return 1 + __yum_install_noinput ${__PACKAGES} || return 1 fi if [ "${__PIP_PACKAGES}" != "" ]; then @@ -4660,7 +4585,7 @@ install_arch_linux_stable_deps() { pacman -Su --noconfirm --needed python2-yaml if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - pacman -Su --noconfirm --needed apache-libcloud || return 1 + pacman -Su --noconfirm --needed python2-apache-libcloud || return 1 fi if [ "${_EXTRA_PACKAGES}" != "" ]; then @@ -5458,6 +5383,16 @@ install_smartos_restart_daemons() { # __ZYPPER_REQUIRES_REPLACE_FILES=-1 +__check_and_refresh_suse_pkg_repo() { + # Check to see if systemsmanagement_saltstack exists + __zypper repos | grep systemsmanagement_saltstack >/dev/null 2>&1 + + if [ $? -eq 1 ]; then + # zypper does not yet know anything about systemsmanagement_saltstack + __zypper addrepo --refresh "${SUSE_PKG_URL}" || return 1 + fi +} + __set_suse_pkg_repo() { suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack.repo" if [ "$_DOWNSTREAM_PKG_REPO" -eq $BS_TRUE ]; then @@ -6151,11 +6086,15 @@ install_suse_check_services() { # # Gentoo Install Functions. # +__autounmask() { + emerge --autounmask-write --autounmask-only "${@}"; return $? +} + __emerge() { if [ "$_GENTOO_USE_BINHOST" -eq $BS_TRUE ]; then - emerge --autounmask-write --getbinpkg "${@}"; return $? + emerge --getbinpkg "${@}"; return $? fi - emerge --autounmask-write "${@}"; return $? + emerge "${@}"; return $? 
} __gentoo_config_protection() { @@ -6165,6 +6104,9 @@ __gentoo_config_protection() { # cfg-update and then restart the bootstrapping script, so instead we allow # at this point to modify certain config files directly export CONFIG_PROTECT_MASK="${CONFIG_PROTECT_MASK:-} /etc/portage/package.accept_keywords /etc/portage/package.keywords /etc/portage/package.license /etc/portage/package.unmask /etc/portage/package.use" + + # emerge currently won't write to files that aren't there, so we need to ensure their presence + touch /etc/portage/package.accept_keywords /etc/portage/package.keywords /etc/portage/package.license /etc/portage/package.unmask /etc/portage/package.use } __gentoo_pre_dep() { @@ -6193,15 +6135,21 @@ __gentoo_post_dep() { __gentoo_config_protection if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __autounmask 'dev-python/libcloud' __emerge -v 'dev-python/libcloud' fi + __autounmask 'dev-python/requests' + __autounmask 'app-admin/salt' + __emerge -vo 'dev-python/requests' __emerge -vo 'app-admin/salt' if [ "${_EXTRA_PACKAGES}" != "" ]; then echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" # shellcheck disable=SC2086 + __autounmask ${_EXTRA_PACKAGES} || return 1 + # shellcheck disable=SC2086 __emerge -v ${_EXTRA_PACKAGES} || return 1 fi } @@ -6437,12 +6385,12 @@ config_salt() { # Copy the minion's keys if found if [ -f "$_TEMP_CONFIG_DIR/minion.pem" ]; then - __movefile "$_TEMP_CONFIG_DIR/minion.pem" "$_PKI_DIR/minion/" "$_CONFIG_ONLY" || return 1 + __movefile "$_TEMP_CONFIG_DIR/minion.pem" "$_PKI_DIR/minion/" "$_FORCE_OVERWRITE" || return 1 chmod 400 "$_PKI_DIR/minion/minion.pem" || return 1 CONFIGURED_ANYTHING=$BS_TRUE fi if [ -f "$_TEMP_CONFIG_DIR/minion.pub" ]; then - __movefile "$_TEMP_CONFIG_DIR/minion.pub" "$_PKI_DIR/minion/" "$_CONFIG_ONLY" || return 1 + __movefile "$_TEMP_CONFIG_DIR/minion.pub" "$_PKI_DIR/minion/" "$_FORCE_OVERWRITE" || return 1 chmod 664 "$_PKI_DIR/minion/minion.pub" || return 1 
CONFIGURED_ANYTHING=$BS_TRUE fi diff --git a/salt/cloud/libcloudfuncs.py b/salt/cloud/libcloudfuncs.py index 3191251c99..f6465b8f66 100644 --- a/salt/cloud/libcloudfuncs.py +++ b/salt/cloud/libcloudfuncs.py @@ -150,7 +150,7 @@ def avail_locations(conn=None, call=None): ret[img_name] = {} for attr in dir(img): - if attr.startswith('_'): + if attr.startswith('_') or attr == 'driver': continue attr_value = getattr(img, attr) @@ -187,7 +187,7 @@ def avail_images(conn=None, call=None): ret[img_name] = {} for attr in dir(img): - if attr.startswith('_'): + if attr.startswith('_') or attr in ('driver', 'get_uuid'): continue attr_value = getattr(img, attr) if isinstance(attr_value, string_types) and not six.PY3: @@ -222,7 +222,7 @@ def avail_sizes(conn=None, call=None): ret[size_name] = {} for attr in dir(size): - if attr.startswith('_'): + if attr.startswith('_') or attr in ('driver', 'get_uuid'): continue try: diff --git a/salt/config/__init__.py b/salt/config/__init__.py index b5b7f2a1f7..bd632b4116 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -55,7 +55,7 @@ _DFLT_LOG_DATEFMT = '%H:%M:%S' _DFLT_LOG_DATEFMT_LOGFILE = '%Y-%m-%d %H:%M:%S' _DFLT_LOG_FMT_CONSOLE = '[%(levelname)-8s] %(message)s' _DFLT_LOG_FMT_LOGFILE = ( - '%(asctime)s,%(msecs)03d [%(name)-17s][%(levelname)-8s][%(process)d] %(message)s' + '%(asctime)s,%(msecs)03d [%(name)-17s:%(lineno)-4d][%(levelname)-8s][%(process)d] %(message)s' ) _DFLT_REFSPECS = ['+refs/heads/*:refs/remotes/origin/*', '+refs/tags/*:refs/tags/*'] @@ -352,7 +352,7 @@ VALID_OPTS = { # The TCP port on which minion events should be pulled if ipc_mode is TCP 'tcp_pull_port': int, - # The TCP port on which events for the master should be pulled if ipc_mode is TCP + # The TCP port on which events for the master should be published if ipc_mode is TCP 'tcp_master_pub_port': int, # The TCP port on which events for the master should be pulled if ipc_mode is TCP @@ -1633,7 +1633,8 @@ DEFAULT_PROXY_MINION_OPTS = { 'log_file': 
os.path.join(salt.syspaths.LOGS_DIR, 'proxy'), 'add_proxymodule_to_opts': False, 'proxy_merge_grains_in_module': True, - 'append_minionid_config_dirs': ['cachedir', 'pidfile', 'default_include'], + 'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'proxy', 'extmods'), + 'append_minionid_config_dirs': ['cachedir', 'pidfile', 'default_include', 'extension_modules'], 'default_include': 'proxy.d/*.conf', # By default, proxies will preserve the connection. @@ -2282,7 +2283,7 @@ def syndic_config(master_config_path, 'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules', 'autosign_file', 'autoreject_file', 'token_dir' ] - for config_key in ('syndic_log_file', 'log_file', 'key_logfile'): + for config_key in ('log_file', 'key_logfile', 'syndic_log_file'): # If this is not a URI and instead a local path if urlparse(opts.get(config_key, '')).scheme == '': prepend_root_dirs.append(config_key) diff --git a/salt/crypt.py b/salt/crypt.py index 9629c08d01..f4f65940d5 100644 --- a/salt/crypt.py +++ b/salt/crypt.py @@ -373,17 +373,18 @@ class AsyncAuth(object): loop_instance_map = AsyncAuth.instance_map[io_loop] key = cls.__key(opts) - if key not in loop_instance_map: + auth = loop_instance_map.get(key) + if auth is None: log.debug('Initializing new AsyncAuth for {0}'.format(key)) # we need to make a local variable for this, as we are going to store # it in a WeakValueDictionary-- which will remove the item if no one # references it-- this forces a reference while we return to the caller - new_auth = object.__new__(cls) - new_auth.__singleton_init__(opts, io_loop=io_loop) - loop_instance_map[key] = new_auth + auth = object.__new__(cls) + auth.__singleton_init__(opts, io_loop=io_loop) + loop_instance_map[key] = auth else: log.debug('Re-using AsyncAuth for {0}'.format(key)) - return loop_instance_map[key] + return auth @classmethod def __key(cls, opts, io_loop=None): @@ -1009,14 +1010,15 @@ class SAuth(AsyncAuth): Only create one instance of SAuth per __key() ''' 
key = cls.__key(opts) - if key not in SAuth.instances: + auth = SAuth.instances.get(key) + if auth is None: log.debug('Initializing new SAuth for {0}'.format(key)) - new_auth = object.__new__(cls) - new_auth.__singleton_init__(opts) - SAuth.instances[key] = new_auth + auth = object.__new__(cls) + auth.__singleton_init__(opts) + SAuth.instances[key] = auth else: log.debug('Re-using SAuth for {0}'.format(key)) - return SAuth.instances[key] + return auth @classmethod def __key(cls, opts, io_loop=None): diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index 9ca6c582fb..d47a5c3aa6 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -1055,12 +1055,7 @@ class LocalFuncs(object): return dict(error=dict(name=err_name, message='Authentication failure of type "token" occurred.')) username = token['name'] - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - load['eauth'] = token['eauth'] - load['username'] = username - auth_list = self.loadauth.get_auth_list(load) + auth_list = self.loadauth.get_auth_list(load, token) else: auth_type = 'eauth' err_name = 'EauthAuthenticationError' @@ -1102,12 +1097,7 @@ class LocalFuncs(object): return dict(error=dict(name=err_name, message='Authentication failure of type "token" occurred.')) username = token['name'] - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - load['eauth'] = token['eauth'] - load['username'] = username - auth_list = self.loadauth.get_auth_list(load) + auth_list = self.loadauth.get_auth_list(load, token) elif 'eauth' in load: auth_type = 'eauth' err_name = 'EauthAuthenticationError' @@ -1217,12 +1207,7 @@ class LocalFuncs(object): return '' # Get acl from eauth module. 
- if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - extra['eauth'] = token['eauth'] - extra['username'] = token['name'] - auth_list = self.loadauth.get_auth_list(extra) + auth_list = self.loadauth.get_auth_list(extra, token) # Authorize the request if not self.ckminions.auth_check( diff --git a/salt/engines/docker_events.py b/salt/engines/docker_events.py index b5b65b5a56..3985213e6f 100644 --- a/salt/engines/docker_events.py +++ b/salt/engines/docker_events.py @@ -50,8 +50,8 @@ def start(docker_url='unix://var/run/docker.sock', .. code-block:: yaml engines: - docker_events: - docker_url: unix://var/run/docker.sock + - docker_events: + docker_url: unix://var/run/docker.sock The config above sets up engines to listen for events from the Docker daemon and publish diff --git a/salt/engines/hipchat.py b/salt/engines/hipchat.py index a069a4f748..66cc6d8bdc 100644 --- a/salt/engines/hipchat.py +++ b/salt/engines/hipchat.py @@ -14,25 +14,25 @@ keys make the engine interactive. .. 
code-block:: yaml engines: - - hipchat: - api_url: http://api.hipchat.myteam.com - token: 'XXXXXX' - room: 'salt' - control: True - valid_users: - - SomeUser - valid_commands: - - test.ping - - cmd.run - - list_jobs - - list_commands - aliases: - list_jobs: - cmd: jobs.list_jobs - list_commands: - cmd: pillar.get salt:engines:hipchat:valid_commands target=saltmaster tgt_type=list - max_rooms: 0 - wait_time: 1 + - hipchat: + api_url: http://api.hipchat.myteam.com + token: 'XXXXXX' + room: 'salt' + control: True + valid_users: + - SomeUser + valid_commands: + - test.ping + - cmd.run + - list_jobs + - list_commands + aliases: + list_jobs: + cmd: jobs.list_jobs + list_commands: + cmd: pillar.get salt:engines:hipchat:valid_commands target=saltmaster + max_rooms: 0 + wait_time: 1 ''' from __future__ import absolute_import diff --git a/salt/engines/http_logstash.py b/salt/engines/http_logstash.py index 3e4b89ad78..78edc0d1b9 100644 --- a/salt/engines/http_logstash.py +++ b/salt/engines/http_logstash.py @@ -12,13 +12,13 @@ them onto a logstash endpoint via HTTP requests. engines: - http_logstash: - url: http://blabla.com/salt-stuff - tags: - - salt/job/*/new - - salt/job/*/ret/* - funs: - - probes.results - - bgp.config + url: http://blabla.com/salt-stuff + tags: + - salt/job/*/new + - salt/job/*/ret/* + funs: + - probes.results + - bgp.config ''' from __future__ import absolute_import diff --git a/salt/engines/logentries.py b/salt/engines/logentries.py index 0fe422edfe..9dd0fc3735 100644 --- a/salt/engines/logentries.py +++ b/salt/engines/logentries.py @@ -24,6 +24,9 @@ master config. :configuration: Example configuration + + .. code-block:: yaml + engines: - logentries: endpoint: data.logentries.com diff --git a/salt/engines/logstash.py b/salt/engines/logstash.py index d4ad095c7f..d56db4d951 100644 --- a/salt/engines/logstash.py +++ b/salt/engines/logstash.py @@ -8,6 +8,9 @@ them onto a logstash endpoint. :configuration: Example configuration + + .. 
code-block:: yaml + engines: - logstash: host: log.my_network.com diff --git a/salt/engines/reactor.py b/salt/engines/reactor.py index b95a8213e3..43829f0a27 100644 --- a/salt/engines/reactor.py +++ b/salt/engines/reactor.py @@ -7,10 +7,10 @@ Example Config in Master or Minion config .. code-block:: yaml engines: - reactor: - refresh_interval: 60 - worker_threads: 10 - worker_hwm: 10000 + - reactor: + refresh_interval: 60 + worker_threads: 10 + worker_hwm: 10000 reactor: - 'salt/cloud/*/destroyed': diff --git a/salt/engines/redis_sentinel.py b/salt/engines/redis_sentinel.py index 8f4e807313..596f610d38 100644 --- a/salt/engines/redis_sentinel.py +++ b/salt/engines/redis_sentinel.py @@ -8,6 +8,9 @@ events based on the channels they are subscribed to. :configuration: Example configuration + + .. code-block:: yaml + engines: - redis_sentinel: hosts: diff --git a/salt/engines/slack.py b/salt/engines/slack.py index 1709364832..73c234c4ed 100644 --- a/salt/engines/slack.py +++ b/salt/engines/slack.py @@ -12,44 +12,43 @@ prefaced with a ``!``. .. code-block:: yaml engines: - slack: - token: 'xoxb-xxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxx' - control: True - valid_users: - - garethgreenaway - valid_commands: - - test.ping - - cmd.run - - list_jobs - - list_commands - aliases: - list_jobs: - cmd: jobs.list_jobs - list_commands: - cmd: pillar.get salt:engines:slack:valid_commands target=saltmaster tgt_type=list + - slack: + token: 'xoxb-xxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxx' + control: True + valid_users: + - garethgreenaway + valid_commands: + - test.ping + - cmd.run + - list_jobs + - list_commands + aliases: + list_jobs: + cmd: jobs.list_jobs + list_commands: + cmd: pillar.get salt:engines:slack:valid_commands target=saltmaster tgt_type=list :configuration: Example configuration using groups .. 
versionadded: 2017.7.0 engines: - slack: - token: 'xoxb-xxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxx' - control: True - groups: - gods: - users: - - garethgreenaway - commands: - - test.ping - - cmd.run - - list_jobs - - list_commands - aliases: + - slack: + token: 'xoxb-xxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxx' + control: True + groups: + gods: + users: + - garethgreenaway + commands: + - test.ping + - cmd.run + - list_jobs + - list_commands + aliases: list_jobs: - cmd: jobs.list_jobs + cmd: jobs.list_jobs list_commands: - cmd: pillar.get salt:engines:slack:valid_commands target=saltmaster tgt_type=list - + cmd: pillar.get salt:engines:slack:valid_commands target=saltmaster tgt_type=list :depends: slackclient ''' @@ -62,6 +61,7 @@ import logging import time import re import yaml +import ast try: import slackclient @@ -182,11 +182,20 @@ def start(token, if 'aliases' in groups[group]: aliases.update(groups[group]['aliases']) + if 'user' not in _m: + if 'message' in _m and 'user' in _m['message']: + log.debug('Message was edited, ' + 'so we look for user in ' + 'the original message.') + _user = _m['message']['user'] + else: + _user = _m['user'] + # Ensure the user is allowed to run commands if valid_users: - log.debug('{0} {1}'.format(all_users, _m['user'])) - if _m['user'] not in valid_users and all_users.get(_m['user'], None) not in valid_users: - channel.send_message('{0} not authorized to run Salt commands'.format(all_users[_m['user']])) + log.debug('{0} {1}'.format(all_users, _user)) + if _user not in valid_users and all_users.get(_user, None) not in valid_users: + channel.send_message('{0} not authorized to run Salt commands'.format(all_users[_user])) return # Trim the ! 
from the front @@ -220,7 +229,7 @@ def start(token, # Ensure the command is allowed if valid_commands: if cmd not in valid_commands: - channel.send_message('{0} is not allowed to use command {1}.'.format(all_users[_m['user']], cmd)) + channel.send_message('{0} is not allowed to use command {1}.'.format(all_users[_user], cmd)) return # Parse args and kwargs @@ -246,6 +255,10 @@ def start(token, tgt_type = kwargs['tgt_type'] del kwargs['tgt_type'] + # Check for pillar string representation of dict and convert it to dict + if 'pillar' in kwargs: + kwargs.update(pillar=ast.literal_eval(kwargs['pillar'])) + ret = {} if cmd in runner_functions: @@ -255,7 +268,7 @@ def start(token, # Default to trying to run as a client module. else: local = salt.client.LocalClient() - ret = local.cmd('{0}'.format(target), cmd, args, kwargs, tgt_type='{0}'.format(tgt_type)) + ret = local.cmd('{0}'.format(target), cmd, arg=args, kwarg=kwargs, tgt_type='{0}'.format(tgt_type)) if ret: return_text = json.dumps(ret, sort_keys=True, indent=1) diff --git a/salt/executors/sudo.py b/salt/executors/sudo.py index d3321847a5..825fee979a 100644 --- a/salt/executors/sudo.py +++ b/salt/executors/sudo.py @@ -73,7 +73,7 @@ class SudoExecutor(ModuleExecutorBase): '-c', salt.syspaths.CONFIG_DIR, '--', data.get('fun')] - if data['fun'] == 'state.sls': + if data['fun'] in ('state.sls', 'state.highstate', 'state.apply'): kwargs['concurrent'] = True for arg in args: self.cmd.append(_cmd_quote(str(arg))) diff --git a/salt/ext/ipaddress.py b/salt/ext/ipaddress.py index d2dfc5ee50..75011f7606 100644 --- a/salt/ext/ipaddress.py +++ b/salt/ext/ipaddress.py @@ -28,9 +28,6 @@ bytes = bytearray # Python 2 does not support exception chaining. # s/ from None$// -# Python 2 ranges need to fit in a C long -# 'fix' hosts() for IPv6Network - # When checking for instances of int, also allow Python 2's long. 
_builtin_isinstance = isinstance @@ -2259,7 +2256,7 @@ class IPv6Network(_BaseV6, _BaseNetwork): """ network = int(self.network_address) broadcast = int(self.broadcast_address) - for x in range(1, broadcast - network + 1): + for x in long_range(1, broadcast - network + 1): yield self._address_class(network + x) @property diff --git a/salt/ext/win_inet_pton.py b/salt/ext/win_inet_pton.py index ffb47d440a..8c4d7dde3e 100644 --- a/salt/ext/win_inet_pton.py +++ b/salt/ext/win_inet_pton.py @@ -9,6 +9,7 @@ from __future__ import absolute_import import socket import ctypes import os +import ipaddress class sockaddr(ctypes.Structure): @@ -31,6 +32,24 @@ else: def inet_pton(address_family, ip_string): + # Verify IP Address + # This will catch IP Addresses such as 10.1.2 + if address_family == socket.AF_INET: + try: + ipaddress.ip_address(ip_string.decode()) + except ValueError: + raise socket.error('illegal IP address string passed to inet_pton') + return socket.inet_aton(ip_string) + + # Verify IP Address + # The `WSAStringToAddressA` function handles notations used by Berkeley + # software which includes 3 part IP Addresses such as `10.1.2`. That's why + # the above check is needed to enforce more strict IP Address validation as + # used by the `inet_pton` function in Unix. + # See the following: + # https://stackoverflow.com/a/29286098 + # Docs for the `inet_addr` function on MSDN + # https://msdn.microsoft.com/en-us/library/windows/desktop/ms738563.aspx addr = sockaddr() addr.sa_family = address_family addr_size = ctypes.c_int(ctypes.sizeof(addr)) diff --git a/salt/fileclient.py b/salt/fileclient.py index a3faa10cc1..2b4484211c 100644 --- a/salt/fileclient.py +++ b/salt/fileclient.py @@ -621,6 +621,13 @@ class Client(object): def on_header(hdr): if write_body[1] is not False and write_body[2] is None: + if not hdr.strip() and 'Content-Type' not in write_body[1]: + # We've reached the end of the headers and not yet + # found the Content-Type. 
Reset the values we're + # tracking so that we properly follow the redirect. + write_body[0] = None + write_body[1] = False + return # Try to find out what content type encoding is used if # this is a text file write_body[1].parse_line(hdr) # pylint: disable=no-member @@ -1257,7 +1264,7 @@ class RemoteClient(Client): if not os.path.isfile(path): msg = 'specified file {0} is not present to generate hash: {1}' log.warning(msg.format(path, err)) - return {} + return {}, None else: ret = {} hash_type = self.opts.get('hash_type', 'md5') diff --git a/salt/fileserver/svnfs.py b/salt/fileserver/svnfs.py index 6cdbab98fe..ff6c91edd8 100644 --- a/salt/fileserver/svnfs.py +++ b/salt/fileserver/svnfs.py @@ -2,7 +2,7 @@ ''' Subversion Fileserver Backend -After enabling this backend, branches, and tags in a remote subversion +After enabling this backend, branches and tags in a remote subversion repository are exposed to salt as different environments. To enable this backend, add ``svn`` to the :conf_master:`fileserver_backend` option in the Master config file. 
@@ -697,7 +697,7 @@ def file_hash(load, fnd): def _file_lists(load, form): ''' - Return a dict containing the file lists for files, dirs, emtydirs and symlinks + Return a dict containing the file lists for files, dirs, emptydirs and symlinks ''' if 'env' in load: salt.utils.warn_until( diff --git a/salt/grains/core.py b/salt/grains/core.py index 9b4235da10..a7e1a22d2a 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -792,6 +792,8 @@ def _virtual(osdata): grains['virtual_subtype'] = 'ovirt' elif 'Google' in output: grains['virtual'] = 'gce' + elif 'BHYVE' in output: + grains['virtual'] = 'bhyve' except IOError: pass elif osdata['kernel'] == 'FreeBSD': @@ -984,28 +986,20 @@ def _windows_platform_data(): os_release = platform.release() info = salt.utils.win_osinfo.get_os_version_info() + server = {'Vista': '2008Server', + '7': '2008ServerR2', + '8': '2012Server', + '8.1': '2012ServerR2', + '10': '2016Server'} # Starting with Python 2.7.12 and 3.5.2 the `platform.uname()` function # started reporting the Desktop version instead of the Server version on # Server versions of Windows, so we need to look those up - # Check for Python >=2.7.12 or >=3.5.2 - ver = pythonversion()['pythonversion'] - if ((six.PY2 and - salt.utils.compare_versions(ver, '>=', [2, 7, 12, 'final', 0])) - or - (six.PY3 and - salt.utils.compare_versions(ver, '>=', [3, 5, 2, 'final', 0]))): - # (Product Type 1 is Desktop, Everything else is Server) - if info['ProductType'] > 1: - server = {'Vista': '2008Server', - '7': '2008ServerR2', - '8': '2012Server', - '8.1': '2012ServerR2', - '10': '2016Server'} - os_release = server.get(os_release, - 'Grain not found. Update lookup table ' - 'in the `_windows_platform_data` ' - 'function in `grains\\core.py`') + # So, if you find a Server Platform that's a key in the server + # dictionary, then lookup the actual Server Release. 
+ # (Product Type 1 is Desktop, Everything else is Server) + if info['ProductType'] > 1 and os_release in server: + os_release = server[os_release] service_pack = None if info['ServicePackMajor'] > 0: @@ -2359,6 +2353,10 @@ def _zpool_data(grains): if salt.utils.is_windows() or 'proxyminion' in __opts__: return {} + # quickly return if NetBSD (ZFS still under development) + if salt.utils.is_netbsd(): + return {} + # quickly return if no zpool and zfs command if not salt.utils.which('zpool'): return {} diff --git a/salt/grains/metadata.py b/salt/grains/metadata.py index 2372aac6c7..dabffa2052 100644 --- a/salt/grains/metadata.py +++ b/salt/grains/metadata.py @@ -17,6 +17,7 @@ metadata server set `metadata_server_grains: True`. from __future__ import absolute_import # Import python libs +import json import os import socket @@ -47,14 +48,28 @@ def _search(prefix="latest/"): Recursively look up all grains in the metadata server ''' ret = {} - for line in http.query(os.path.join(HOST, prefix))['body'].split('\n'): + linedata = http.query(os.path.join(HOST, prefix)) + if 'body' not in linedata: + return ret + for line in linedata['body'].split('\n'): if line.endswith('/'): ret[line[:-1]] = _search(prefix=os.path.join(prefix, line)) + elif prefix == 'latest/': + # (gtmanfred) The first level should have a forward slash since + # they have stuff underneath. This will not be doubled up though, + # because lines ending with a slash are checked first. 
+ ret[line] = _search(prefix=os.path.join(prefix, line + '/')) elif '=' in line: key, value = line.split('=') ret[value] = _search(prefix=os.path.join(prefix, key)) else: - ret[line] = http.query(os.path.join(HOST, prefix, line))['body'] + retdata = http.query(os.path.join(HOST, prefix, line)).get('body', None) + # (gtmanfred) This try except block is slightly faster than + # checking if the string starts with a curly brace + try: + ret[line] = json.loads(retdata) + except ValueError: + ret[line] = retdata return ret diff --git a/salt/grains/napalm.py b/salt/grains/napalm.py index 6ac52464c8..fcfbdcfe9f 100644 --- a/salt/grains/napalm.py +++ b/salt/grains/napalm.py @@ -447,8 +447,8 @@ def optional_args(proxy=None): device2: True ''' - opt_args = _get_device_grain('optional_args', proxy=proxy) - if _FORBIDDEN_OPT_ARGS: + opt_args = _get_device_grain('optional_args', proxy=proxy) or {} + if opt_args and _FORBIDDEN_OPT_ARGS: for arg in _FORBIDDEN_OPT_ARGS: opt_args.pop(arg, None) return {'optional_args': opt_args} diff --git a/salt/key.py b/salt/key.py index 88e2acf0e4..73a1d639fb 100644 --- a/salt/key.py +++ b/salt/key.py @@ -496,7 +496,7 @@ class Key(object): if minion not in minions and minion not in preserve_minions: shutil.rmtree(os.path.join(m_cache, minion)) cache = salt.cache.factory(self.opts) - clist = cache.ls(self.ACC) + clist = cache.list(self.ACC) if clist: for minion in clist: if minion not in minions and minion not in preserve_minions: @@ -974,7 +974,7 @@ class RaetKey(Key): if minion not in minions: shutil.rmtree(os.path.join(m_cache, minion)) cache = salt.cache.factory(self.opts) - clist = cache.ls(self.ACC) + clist = cache.list(self.ACC) if clist: for minion in clist: if minion not in minions and minion not in preserve_minions: diff --git a/salt/loader.py b/salt/loader.py index e11c3238ff..1c03e475be 100644 --- a/salt/loader.py +++ b/salt/loader.py @@ -194,7 +194,7 @@ def minion_mods( generated modules in __context__ :param dict utils: Utility 
functions which should be made available to - Salt modules in __utils__. See `utils_dir` in + Salt modules in __utils__. See `utils_dirs` in salt.config for additional information about configuration. @@ -1094,7 +1094,8 @@ class LazyLoader(salt.utils.lazy.LazyDict): virtual_funcs = [] self.virtual_funcs = virtual_funcs - self.disabled = set(self.opts.get('disable_{0}s'.format(self.tag), [])) + self.disabled = set(self.opts.get('disable_{0}{1}'.format( + self.tag, '' if self.tag[-1] == 's' else 's'), [])) self.refresh_file_mapping() diff --git a/salt/master.py b/salt/master.py index 649a89a072..b913aeb1e5 100644 --- a/salt/master.py +++ b/salt/master.py @@ -1705,12 +1705,7 @@ class ClearFuncs(object): message='Authentication failure of type "token" occurred.')) # Authorize - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - clear_load['eauth'] = token['eauth'] - clear_load['username'] = token['name'] - auth_list = self.loadauth.get_auth_list(clear_load) + auth_list = self.loadauth.get_auth_list(clear_load, token) if not self.ckminions.runner_check(auth_list, clear_load['fun']): return dict(error=dict(name='TokenAuthenticationError', @@ -1774,12 +1769,7 @@ class ClearFuncs(object): message='Authentication failure of type "token" occurred.')) # Authorize - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - clear_load['eauth'] = token['eauth'] - clear_load['username'] = token['name'] - auth_list = self.loadauth.get_auth_list(clear_load) + auth_list = self.loadauth.get_auth_list(clear_load, token) if not self.ckminions.wheel_check(auth_list, clear_load['fun']): return dict(error=dict(name='TokenAuthenticationError', message=('Authentication failure of type "token" occurred for ' @@ -1900,12 +1890,7 @@ class ClearFuncs(object): return '' # Get acl - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - extra['eauth'] = 
token['eauth'] - extra['username'] = token['name'] - auth_list = self.loadauth.get_auth_list(extra) + auth_list = self.loadauth.get_auth_list(extra, token) # Authorize the request if not self.ckminions.auth_check( diff --git a/salt/minion.py b/salt/minion.py index 3c5046ee93..c9cfc6cb1f 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -1251,7 +1251,7 @@ class Minion(MinionBase): ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) - def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True): + def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' @@ -1270,10 +1270,6 @@ class Minion(MinionBase): else: return - def timeout_handler(*_): - log.info('fire_master failed: master could not be contacted. Request timed out.') - return True - if sync: try: self._send_req_sync(load, timeout) @@ -1284,6 +1280,12 @@ class Minion(MinionBase): log.info('fire_master failed: {0}'.format(traceback.format_exc())) return False else: + if timeout_handler is None: + def handle_timeout(*_): + log.info('fire_master failed: master could not be contacted. 
Request timed out.') + return True + timeout_handler = handle_timeout + with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @@ -1985,8 +1987,9 @@ class Minion(MinionBase): elif tag.startswith('_minion_mine'): self._mine_send(tag, data) elif tag.startswith('fire_master'): - log.debug('Forwarding master event tag={tag}'.format(tag=data['tag'])) - self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) + if self.connected: + log.debug('Forwarding master event tag={tag}'.format(tag=data['tag'])) + self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) elif tag.startswith(master_event(type='disconnected')) or tag.startswith(master_event(type='failback')): # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: @@ -2205,13 +2208,15 @@ class Minion(MinionBase): if ping_interval > 0 and self.connected: def ping_master(): try: - if not self._fire_master('ping', 'minion_ping'): + def ping_timeout_handler(*_): if not self.opts.get('auth_safemode', True): log.error('** Master Ping failed. 
Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay {0}s'.format(delay)) # regular sys.exit raises an exception -- which isn't sufficient in a thread os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE) + + self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop) @@ -2226,7 +2231,7 @@ class Minion(MinionBase): except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: - self._fire_master(events=beacons) + self._fire_master(events=beacons, sync=False) self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop) diff --git a/salt/modules/apache.py b/salt/modules/apache.py index 5d2261175a..2d7a79339c 100644 --- a/salt/modules/apache.py +++ b/salt/modules/apache.py @@ -446,11 +446,15 @@ def config(name, config, edit=True): salt '*' apache.config /etc/httpd/conf.d/ports.conf config="[{'Listen': '22'}]" ''' + configs = [] for entry in config: key = next(six.iterkeys(entry)) - configs = _parse_config(entry[key], key) - if edit: - with salt.utils.fopen(name, 'w') as configfile: - configfile.write('# This file is managed by Salt.\n') - configfile.write(configs) - return configs + configs.append(_parse_config(entry[key], key)) + + # Python auto-correct line endings + configstext = "\n".join(configs) + if edit: + with salt.utils.fopen(name, 'w') as configfile: + configfile.write('# This file is managed by Salt.\n') + configfile.write(configstext) + return configstext diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py index 8e16306ce0..04ddbaf9a2 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py @@ -139,19 +139,6 @@ def 
_reconstruct_ppa_name(owner_name, ppa_name): return 'ppa:{0}/{1}'.format(owner_name, ppa_name) -def _get_repo(**kwargs): - ''' - Check the kwargs for either 'fromrepo' or 'repo' and return the value. - 'fromrepo' takes precedence over 'repo'. - ''' - for key in ('fromrepo', 'repo'): - try: - return kwargs[key] - except KeyError: - pass - return '' - - def _check_apt(): ''' Abort if python-apt is not installed @@ -246,18 +233,11 @@ def latest_version(*names, **kwargs): ''' refresh = salt.utils.is_true(kwargs.pop('refresh', True)) show_installed = salt.utils.is_true(kwargs.pop('show_installed', False)) - if 'repo' in kwargs: - # Remember to kill _get_repo() too when removing this warning. - salt.utils.warn_until( - 'Hydrogen', - 'The \'repo\' argument to apt.latest_version is deprecated, and ' - 'will be removed in Salt {version}. Please use \'fromrepo\' ' - 'instead.' + raise SaltInvocationError( + 'The \'repo\' argument is invalid, use \'fromrepo\' instead' ) - fromrepo = _get_repo(**kwargs) - kwargs.pop('fromrepo', None) - kwargs.pop('repo', None) + fromrepo = kwargs.pop('fromrepo', None) cache_valid_time = kwargs.pop('cache_valid_time', 0) if len(names) == 0: @@ -1402,9 +1382,10 @@ def _get_upgradable(dist_upgrade=True, **kwargs): cmd.append('dist-upgrade') else: cmd.append('upgrade') - fromrepo = _get_repo(**kwargs) - if fromrepo: - cmd.extend(['-o', 'APT::Default-Release={0}'.format(fromrepo)]) + try: + cmd.extend(['-o', 'APT::Default-Release={0}'.format(kwargs['fromrepo'])]) + except KeyError: + pass call = __salt__['cmd.run_all'](cmd, python_shell=False, diff --git a/salt/modules/artifactory.py b/salt/modules/artifactory.py index d521e786f3..26065a8d37 100644 --- a/salt/modules/artifactory.py +++ b/salt/modules/artifactory.py @@ -202,45 +202,48 @@ def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, versio has_classifier = classifier is not None and classifier != "" if snapshot_version is None: - snapshot_version_metadata = 
_get_snapshot_version_metadata(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, headers=headers) + try: + snapshot_version_metadata = _get_snapshot_version_metadata(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, headers=headers) + if packaging not in snapshot_version_metadata['snapshot_versions']: + error_message = '''Cannot find requested packaging '{packaging}' in the snapshot version metadata. + artifactory_url: {artifactory_url} + repository: {repository} + group_id: {group_id} + artifact_id: {artifact_id} + packaging: {packaging} + classifier: {classifier} + version: {version}'''.format( + artifactory_url=artifactory_url, + repository=repository, + group_id=group_id, + artifact_id=artifact_id, + packaging=packaging, + classifier=classifier, + version=version) + raise ArtifactoryError(error_message) - if packaging not in snapshot_version_metadata['snapshot_versions']: - error_message = '''Cannot find requested packaging '{packaging}' in the snapshot version metadata. - artifactory_url: {artifactory_url} - repository: {repository} - group_id: {group_id} - artifact_id: {artifact_id} - packaging: {packaging} - classifier: {classifier} - version: {version}'''.format( - artifactory_url=artifactory_url, - repository=repository, - group_id=group_id, - artifact_id=artifact_id, - packaging=packaging, - classifier=classifier, - version=version) - raise ArtifactoryError(error_message) + if has_classifier and classifier not in snapshot_version_metadata['snapshot_versions']: + error_message = '''Cannot find requested classifier '{classifier}' in the snapshot version metadata. 
+ artifactory_url: {artifactory_url} + repository: {repository} + group_id: {group_id} + artifact_id: {artifact_id} + packaging: {packaging} + classifier: {classifier} + version: {version}'''.format( + artifactory_url=artifactory_url, + repository=repository, + group_id=group_id, + artifact_id=artifact_id, + packaging=packaging, + classifier=classifier, + version=version) + raise ArtifactoryError(error_message) - if has_classifier and classifier not in snapshot_version_metadata['snapshot_versions']: - error_message = '''Cannot find requested classifier '{classifier}' in the snapshot version metadata. - artifactory_url: {artifactory_url} - repository: {repository} - group_id: {group_id} - artifact_id: {artifact_id} - packaging: {packaging} - classifier: {classifier} - version: {version}'''.format( - artifactory_url=artifactory_url, - repository=repository, - group_id=group_id, - artifact_id=artifact_id, - packaging=packaging, - classifier=classifier, - version=version) - raise ArtifactoryError(error_message) - - snapshot_version = snapshot_version_metadata['snapshot_versions'][packaging] + snapshot_version = snapshot_version_metadata['snapshot_versions'][packaging] + except CommandExecutionError as err: + log.error('Could not fetch maven-metadata.xml. 
Assuming snapshot_version=%s.', version) + snapshot_version = version group_url = __get_group_id_subpath(group_id) diff --git a/salt/modules/boto_efs.py b/salt/modules/boto_efs.py index 0bb20c31d2..f68f9d9c96 100644 --- a/salt/modules/boto_efs.py +++ b/salt/modules/boto_efs.py @@ -158,7 +158,7 @@ def create_file_system(name, import os import base64 creation_token = base64.b64encode(os.urandom(46), ['-', '_']) - tags = {"Key": "Name", "Value": name} + tags = [{"Key": "Name", "Value": name}] client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) @@ -223,10 +223,23 @@ def create_mount_target(filesystemid, client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) - return client.create_mount_point(FileSystemId=filesystemid, - SubnetId=subnetid, - IpAddress=ipaddress, - SecurityGroups=securitygroups) + if ipaddress is None and securitygroups is None: + return client.create_mount_target(FileSystemId=filesystemid, + SubnetId=subnetid) + + if ipaddress is None: + return client.create_mount_target(FileSystemId=filesystemid, + SubnetId=subnetid, + SecurityGroups=securitygroups) + if securitygroups is None: + return client.create_mount_target(FileSystemId=filesystemid, + SubnetId=subnetid, + IpAddress=ipaddress) + + return client.create_mount_target(FileSystemId=filesystemid, + SubnetId=subnetid, + IpAddress=ipaddress, + SecurityGroups=securitygroups) def create_tags(filesystemid, diff --git a/salt/modules/boto_elbv2.py b/salt/modules/boto_elbv2.py index 954be55b8d..f9410e45c6 100644 --- a/salt/modules/boto_elbv2.py +++ b/salt/modules/boto_elbv2.py @@ -2,7 +2,7 @@ ''' Connection module for Amazon ALB -.. versionadded:: TBD +.. versionadded:: 2017.7.0 :configuration: This module accepts explicit elb credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. 
Dynamic diff --git a/salt/modules/boto_iam.py b/salt/modules/boto_iam.py index e2b5256019..926547849c 100644 --- a/salt/modules/boto_iam.py +++ b/salt/modules/boto_iam.py @@ -1927,7 +1927,7 @@ def list_policy_versions(policy_name, return ret.get('list_policy_versions_response', {}).get('list_policy_versions_result', {}).get('versions') except boto.exception.BotoServerError as e: log.debug(e) - msg = 'Failed to list {0} policy vesions.' + msg = 'Failed to list {0} policy versions.' log.error(msg.format(policy_name)) return [] diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index b576174a6d..9f3364c594 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -294,6 +294,9 @@ def _run(cmd, if runas is None and '__context__' in globals(): runas = __context__.get('runas') + if password is None and '__context__' in globals(): + password = __context__.get('runas_password') + # Set the default working directory to the home directory of the user # salt-minion is running as. Defaults to home directory of user under which # the minion is running. diff --git a/salt/modules/consul.py b/salt/modules/consul.py index 049831b082..33d746c6ad 100644 --- a/salt/modules/consul.py +++ b/salt/modules/consul.py @@ -1978,6 +1978,8 @@ def acl_create(consul_url=None, **kwargs): Create a new ACL token. :param consul_url: The Consul server URL. + :param id: Unique identifier for the ACL to create + leave it blank to let consul server generate one :param name: Meaningful indicator of the ACL's purpose. :param type: Type is either client or management. 
A management token is comparable to a root user and has the @@ -2008,6 +2010,9 @@ def acl_create(consul_url=None, **kwargs): else: raise SaltInvocationError('Required argument "name" is missing.') + if 'id' in kwargs: + data['ID'] = kwargs['id'] + if 'type' in kwargs: data['Type'] = kwargs['type'] @@ -2126,7 +2131,7 @@ def acl_delete(consul_url=None, **kwargs): ret['res'] = False return ret - function = 'acl/delete/{0}'.format(kwargs['id']) + function = 'acl/destroy/{0}'.format(kwargs['id']) res = _query(consul_url=consul_url, data=data, method='PUT', diff --git a/salt/modules/cp.py b/salt/modules/cp.py index 83cce72d65..58e344d644 100644 --- a/salt/modules/cp.py +++ b/salt/modules/cp.py @@ -58,7 +58,36 @@ def _gather_pillar(pillarenv, pillar_override): return ret -def recv(dest, chunk, append=False, compressed=True, mode=None): +def recv(files, dest): + ''' + Used with salt-cp, pass the files dict, and the destination. + + This function receives small fast copy files from the master via salt-cp. + It does not work via the CLI. + ''' + ret = {} + for path, data in six.iteritems(files): + if os.path.basename(path) == os.path.basename(dest) \ + and not os.path.isdir(dest): + final = dest + elif os.path.isdir(dest): + final = os.path.join(dest, os.path.basename(path)) + elif os.path.isdir(os.path.dirname(dest)): + final = dest + else: + return 'Destination unavailable' + + try: + with salt.utils.fopen(final, 'w+') as fp_: + fp_.write(data) + ret[final] = True + except IOError: + ret[final] = False + + return ret + + +def recv_chunked(dest, chunk, append=False, compressed=True, mode=None): ''' This function receives files copied to the minion using ``salt-cp`` and is not intended to be used directly on the CLI. 
diff --git a/salt/modules/disk.py b/salt/modules/disk.py index 968a9e9b7a..d1f6c63731 100644 --- a/salt/modules/disk.py +++ b/salt/modules/disk.py @@ -22,6 +22,10 @@ import salt.utils.decorators as decorators from salt.utils.decorators import depends from salt.exceptions import CommandExecutionError +__func_alias__ = { + 'format_': 'format' +} + log = logging.getLogger(__name__) HAS_HDPARM = salt.utils.which('hdparm') is not None diff --git a/salt/modules/dnsutil.py b/salt/modules/dnsutil.py index 36b1469a4b..390fbef9b9 100644 --- a/salt/modules/dnsutil.py +++ b/salt/modules/dnsutil.py @@ -1,6 +1,10 @@ # -*- coding: utf-8 -*- ''' -Compendium of generic DNS utilities +Compendium of generic DNS utilities. + +.. note:: + + Some functions in the ``dnsutil`` execution module depend on ``dig``. ''' from __future__ import absolute_import @@ -232,7 +236,7 @@ def check_ip(ip_addr): .. code-block:: bash - salt ns1 dig.check_ip 127.0.0.1 + salt ns1 dnsutil.check_ip 127.0.0.1 ''' if _has_dig(): return __salt__['dig.check_ip'](ip_addr) @@ -242,7 +246,7 @@ def check_ip(ip_addr): def A(host, nameserver=None): ''' - Return the A record(s) for `host`. + Return the A record(s) for ``host``. Always returns a list. @@ -267,7 +271,7 @@ def A(host, nameserver=None): def AAAA(host, nameserver=None): ''' - Return the AAAA record(s) for `host`. + Return the AAAA record(s) for ``host``. Always returns a list. @@ -302,7 +306,7 @@ def NS(domain, resolve=True, nameserver=None): .. code-block:: bash - salt ns1 dig.NS google.com + salt ns1 dnsutil.NS google.com ''' if _has_dig(): @@ -323,7 +327,7 @@ def SPF(domain, record='SPF', nameserver=None): .. code-block:: bash - salt ns1 dig.SPF google.com + salt ns1 dnsutil.SPF google.com ''' if _has_dig(): return __salt__['dig.SPF'](domain, record, nameserver) @@ -346,7 +350,7 @@ def MX(domain, resolve=False, nameserver=None): .. 
code-block:: bash - salt ns1 dig.MX google.com + salt ns1 dnsutil.MX google.com ''' if _has_dig(): return __salt__['dig.MX'](domain, resolve, nameserver) diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py index c61130a6be..9987ec467c 100644 --- a/salt/modules/dockermod.py +++ b/salt/modules/dockermod.py @@ -232,6 +232,7 @@ except ImportError: # pylint: enable=import-error HAS_NSENTER = bool(salt.utils.which('nsenter')) +HUB_PREFIX = 'docker.io/' # Set up logging log = logging.getLogger(__name__) @@ -559,6 +560,21 @@ def _prep_pull(): __context__['docker._pull_status'] = [x[:12] for x in images(all=True)] +def _scrub_links(links, name): + ''' + Remove container name from HostConfig:Links values to enable comparing + container configurations correctly. + ''' + if isinstance(links, list): + ret = [] + for l in links: + ret.append(l.replace('/{0}/'.format(name), '/', 1)) + else: + ret = links + + return ret + + def _size_fmt(num): ''' Format bytes as human-readable file sizes @@ -783,7 +799,7 @@ def get_client_args(): salt myminion docker.get_client_args ''' - return salt.utils.docker.get_client_args() + return __utils__['docker.get_client_args']() def _get_create_kwargs(image, @@ -884,8 +900,20 @@ def compare_container(first, second, ignore=None): continue val1 = result1[conf_dict][item] val2 = result2[conf_dict].get(item) - if val1 != val2: - ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} + if item in ('OomKillDisable',) or (val1 is None or val2 is None): + if bool(val1) != bool(val2): + ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} + elif item == 'Image': + image1 = inspect_image(val1)['Id'] + image2 = inspect_image(val2)['Id'] + if image1 != image2: + ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2} + else: + if item == 'Links': + val1 = _scrub_links(val1, first) + val2 = _scrub_links(val2, second) + if val1 != val2: + ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} # Check 
for optionally-present items that were in the second container # and not the first. for item in result2[conf_dict]: @@ -895,8 +923,20 @@ def compare_container(first, second, ignore=None): continue val1 = result1[conf_dict].get(item) val2 = result2[conf_dict][item] - if val1 != val2: - ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} + if item in ('OomKillDisable',) or (val1 is None or val2 is None): + if bool(val1) != bool(val2): + ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} + elif item == 'Image': + image1 = inspect_image(val1)['Id'] + image2 = inspect_image(val2)['Id'] + if image1 != image2: + ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2} + else: + if item == 'Links': + val1 = _scrub_links(val1, first) + val2 = _scrub_links(val2, second) + if val1 != val2: + ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} return ret @@ -978,6 +1018,10 @@ def login(*registries): cmd = ['docker', 'login', '-u', username, '-p', password] if registry.lower() != 'hub': cmd.append(registry) + log.debug( + 'Attempting to login to docker registry \'%s\' as user \'%s\'', + registry, username + ) login_cmd = __salt__['cmd.run_all']( cmd, python_shell=False, @@ -1443,6 +1487,43 @@ def list_tags(): return sorted(ret) +def resolve_tag(name, tags=None): + ''' + .. versionadded:: 2017.7.2,Oxygen + + Given an image tag, check the locally-pulled tags (using + :py:func:`docker.list_tags `) and return + the matching tag. This helps disambiguate differences on some platforms + where images from the Docker Hub are prefixed with ``docker.io/``. If an + image name with no tag is passed, a tag of ``latest`` is assumed. + + If the specified image is not pulled locally, this function will return + ``False``. + + tags + An optional Python list of tags to check against. If passed, then + :py:func:`docker.list_tags ` will not + be run to get a list of tags. This is useful when resolving a number of + tags at the same time. 
+ + CLI Examples: + + .. code-block:: bash + + salt myminion docker.resolve_tag busybox + salt myminion docker.resolve_tag busybox:latest + ''' + tag_name = ':'.join(salt.utils.docker.get_repo_tag(name)) + if tags is None: + tags = list_tags() + if tag_name in tags: + return tag_name + full_name = HUB_PREFIX + tag_name + if not name.startswith(HUB_PREFIX) and full_name in tags: + return full_name + return False + + def logs(name): ''' Returns the logs for the container. Equivalent to running the ``docker @@ -1803,7 +1884,7 @@ def create(image, generate one for you (it will be included in the return data). skip_translate - This function translates Salt CLI input into the format which + This function translates Salt CLI or SLS input into the format which docker-py_ expects. However, in the event that Salt's translation logic fails (due to potential changes in the Docker Remote API, or to bugs in the translation code), this argument can be used to exert granular @@ -2071,9 +2152,9 @@ def create(image, - ``dns_search="[foo1.domain.tld, foo2.domain.tld]"`` domainname - Set custom DNS search domains + The domain name to use for the container - Example: ``domainname=domain.tld,domain2.tld`` + Example: ``domainname=domain.tld`` entrypoint Entrypoint for the container. Either a string (e.g. 
``"mycmd --arg1 @@ -3810,7 +3891,6 @@ def save(name, if os.path.exists(path) and not overwrite: raise CommandExecutionError('{0} already exists'.format(path)) - compression = kwargs.get('compression') if compression is None: if path.endswith('.tar.gz') or path.endswith('.tgz'): compression = 'gzip' @@ -3848,8 +3928,9 @@ def save(name, saved_path = salt.utils.files.mkstemp() else: saved_path = path - - cmd = ['docker', 'save', '-o', saved_path, inspect_image(name)['Id']] + # use the image name if its valid if not use the image id + image_to_save = name if name in inspect_image(name)['RepoTags'] else inspect_image(name)['Id'] + cmd = ['docker', 'save', '-o', saved_path, image_to_save] time_started = time.time() result = __salt__['cmd.run_all'](cmd, python_shell=False) if result['retcode'] != 0: @@ -3916,7 +3997,7 @@ def save(name, ret['Size_Human'] = _size_fmt(ret['Size']) # Process push - if kwargs.get(push, False): + if kwargs.get('push', False): ret['Push'] = __salt__['cp.push'](path) return ret diff --git a/salt/modules/file.py b/salt/modules/file.py index 4a735594cd..b8f1cdb00c 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -710,18 +710,21 @@ def check_hash(path, file_hash): hash The hash to check against the file specified in the ``path`` argument. - For versions 2016.11.4 and newer, the hash can be specified without an + + .. versionchanged:: 2016.11.4 + + For this and newer versions the hash can be specified without an accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``), but for earlier releases it is necessary to also specify the hash type - in the format ``:`` (e.g. - ``md5:e138491e9d5b97023cea823fe17bac22``). + in the format ``=`` (e.g. + ``md5=e138491e9d5b97023cea823fe17bac22``). CLI Example: .. 
code-block:: bash salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22 - salt '*' file.check_hash /etc/fstab md5:e138491e9d5b97023cea823fe17bac22 + salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22 ''' path = os.path.expanduser(path) @@ -1898,6 +1901,7 @@ def replace(path, show_changes=True, ignore_if_missing=False, preserve_inode=True, + backslash_literal=False, ): ''' .. versionadded:: 0.17.0 @@ -1998,6 +2002,14 @@ def replace(path, filename. Hard links will then share an inode with the backup, instead (if using ``backup`` to create a backup copy). + backslash_literal : False + .. versionadded:: 2016.11.7 + + Interpret backslashes as literal backslashes for the repl and not + escape characters. This will help when using append/prepend so that + the backslashes are not interpreted for the repl on the second run of + the state. + If an equal sign (``=``) appears in an argument to a Salt command it is interpreted as a keyword argument in the format ``key=val``. That processing can be bypassed in order to pass an equal sign through to the @@ -2094,7 +2106,10 @@ def replace(path, if re.search(cpattern, r_data): return True # `with` block handles file closure else: - result, nrepl = re.subn(cpattern, repl, r_data, count) + result, nrepl = re.subn(cpattern, + repl.replace('\\', '\\\\') if backslash_literal else repl, + r_data, + count) # found anything? 
(even if no change) if nrepl > 0: @@ -2148,8 +2163,10 @@ def replace(path, r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ) - result, nrepl = re.subn(cpattern, repl, - r_data, count) + result, nrepl = re.subn(cpattern, + repl.replace('\\', '\\\\') if backslash_literal else repl, + r_data, + count) try: w_file.write(salt.utils.to_str(result)) except (OSError, IOError) as exc: diff --git a/salt/modules/genesis.py b/salt/modules/genesis.py index 5680f00829..eceaba5bb5 100644 --- a/salt/modules/genesis.py +++ b/salt/modules/genesis.py @@ -24,6 +24,8 @@ import salt.utils.kickstart import salt.syspaths from salt.exceptions import SaltInvocationError +# Import 3rd-party libs +from salt.ext import six log = logging.getLogger(__name__) @@ -325,6 +327,8 @@ def _bootstrap_yum( ''' if pkgs is None: pkgs = [] + elif isinstance(pkgs, six.string_types): + pkgs = pkgs.split(',') default_pkgs = ('yum', 'centos-release', 'iputils') for pkg in default_pkgs: @@ -333,6 +337,8 @@ def _bootstrap_yum( if exclude_pkgs is None: exclude_pkgs = [] + elif isinstance(exclude_pkgs, six.string_types): + exclude_pkgs = exclude_pkgs.split(',') for pkg in exclude_pkgs: pkgs.remove(pkg) @@ -393,15 +399,27 @@ def _bootstrap_deb( if repo_url is None: repo_url = 'http://ftp.debian.org/debian/' + if not salt.utils.which('debootstrap'): + log.error('Required tool debootstrap is not installed.') + return False + + if isinstance(pkgs, (list, tuple)): + pkgs = ','.join(pkgs) + if isinstance(exclude_pkgs, (list, tuple)): + exclude_pkgs = ','.join(exclude_pkgs) + deb_args = [ 'debootstrap', '--foreign', '--arch', - _cmd_quote(arch), - '--include', - ] + pkgs + [ - '--exclude', - ] + exclude_pkgs + [ + _cmd_quote(arch)] + + if pkgs: + deb_args += ['--include', _cmd_quote(pkgs)] + if exclude_pkgs: + deb_args += ['--exclude', _cmd_quote(exclude_pkgs)] + + deb_args += [ _cmd_quote(flavor), _cmd_quote(root), _cmd_quote(repo_url), @@ -469,6 +487,8 @@ def _bootstrap_pacman( if pkgs is None: pkgs = [] + 
elif isinstance(pkgs, six.string_types): + pkgs = pkgs.split(',') default_pkgs = ('pacman', 'linux', 'systemd-sysvcompat', 'grub') for pkg in default_pkgs: @@ -477,6 +497,8 @@ def _bootstrap_pacman( if exclude_pkgs is None: exclude_pkgs = [] + elif isinstance(exclude_pkgs, six.string_types): + exclude_pkgs = exclude_pkgs.split(',') for pkg in exclude_pkgs: pkgs.remove(pkg) diff --git a/salt/modules/git.py b/salt/modules/git.py index 38d86d4f1c..304276187b 100644 --- a/salt/modules/git.py +++ b/salt/modules/git.py @@ -185,15 +185,24 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None, identity = [identity] # try each of the identities, independently + tmp_identity_file = None for id_file in identity: if 'salt://' in id_file: - _id_file = id_file - id_file = __salt__['cp.cache_file'](id_file, saltenv) + with salt.utils.files.set_umask(0o077): + tmp_identity_file = salt.utils.mkstemp() + _id_file = id_file + id_file = __salt__['cp.get_file'](id_file, + tmp_identity_file, + saltenv) if not id_file: log.error('identity {0} does not exist.'.format(_id_file)) + __salt__['file.remove'](tmp_identity_file) continue else: - __salt__['file.set_mode'](id_file, '0600') + if user: + os.chown(id_file, + __salt__['file.user_to_uid'](user), + -1) else: if not __salt__['file.file_exists'](id_file): missing_keys.append(id_file) @@ -264,6 +273,11 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None, if not salt.utils.is_windows() and 'GIT_SSH' in env: os.remove(env['GIT_SSH']) + # Cleanup the temporary identity file + if tmp_identity_file and os.path.exists(tmp_identity_file): + log.debug('Removing identity file {0}'.format(tmp_identity_file)) + __salt__['file.remove'](tmp_identity_file) + # If the command was successful, no need to try additional IDs if result['retcode'] == 0: return result diff --git a/salt/modules/htpasswd.py b/salt/modules/htpasswd.py index 02f48457c2..cbb09e99ea 100644 --- a/salt/modules/htpasswd.py +++ 
b/salt/modules/htpasswd.py @@ -35,8 +35,6 @@ def useradd(pwfile, user, password, opts='', runas=None): Add a user to htpasswd file using the htpasswd command. If the htpasswd file does not exist, it will be created. - .. deprecated:: 2016.3.0 - pwfile Path to htpasswd file diff --git a/salt/modules/inspectlib/collector.py b/salt/modules/inspectlib/collector.py index b87a46b82f..b8ebe7e804 100644 --- a/salt/modules/inspectlib/collector.py +++ b/salt/modules/inspectlib/collector.py @@ -29,6 +29,7 @@ from salt.modules.inspectlib.entities import (AllowedDir, IgnoredDir, Package, PayloadFile, PackageCfgFile) import salt.utils +import salt.utils.path from salt.utils import fsutils from salt.utils import reinit_crypto from salt.exceptions import CommandExecutionError @@ -311,7 +312,7 @@ class Inspector(EnvLoader): continue if not valid or not os.path.exists(obj) or not os.access(obj, os.R_OK): continue - if os.path.islink(obj): + if salt.utils.path.islink(obj): links.append(obj) elif os.path.isdir(obj): dirs.append(obj) diff --git a/salt/modules/inspectlib/kiwiproc.py b/salt/modules/inspectlib/kiwiproc.py index 136cacf00c..40b4f9c0bf 100644 --- a/salt/modules/inspectlib/kiwiproc.py +++ b/salt/modules/inspectlib/kiwiproc.py @@ -17,11 +17,14 @@ # Import python libs from __future__ import absolute_import import os -import grp -import pwd from xml.dom import minidom import platform import socket +try: + import grp + import pwd +except ImportError: + pass # Import salt libs import salt.utils diff --git a/salt/modules/iptables.py b/salt/modules/iptables.py index 37d5842eea..563713ac92 100644 --- a/salt/modules/iptables.py +++ b/salt/modules/iptables.py @@ -493,8 +493,11 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None, after_jump.append('--{0} {1}'.format(after_jump_argument, value)) del kwargs[after_jump_argument] - for key, value in kwargs.items(): + for key in kwargs: negation = maybe_add_negation(key) + # don't use .items() since 
maybe_add_negation removes the prefix from + # the value in the kwargs, thus we need to fetch it after that has run + value = kwargs[key] flag = '-' if len(key) == 1 else '--' value = '' if value in (None, '') else ' {0}'.format(value) rule.append('{0}{1}{2}{3}'.format(negation, flag, key, value)) @@ -1452,6 +1455,8 @@ def _parser(): add_arg('--or-mark', dest='or-mark', action='append') add_arg('--xor-mark', dest='xor-mark', action='append') add_arg('--set-mark', dest='set-mark', action='append') + add_arg('--nfmask', dest='nfmask', action='append') + add_arg('--ctmask', dest='ctmask', action='append') ## CONNSECMARK add_arg('--save', dest='save', action='append') add_arg('--restore', dest='restore', action='append') diff --git a/salt/modules/jenkins.py b/salt/modules/jenkins.py index d327d09292..0165fdf1bf 100644 --- a/salt/modules/jenkins.py +++ b/salt/modules/jenkins.py @@ -37,7 +37,7 @@ import salt.utils # Import 3rd-party libs # pylint: disable=import-error,no-name-in-module,redefined-builtin -from salt.exceptions import SaltInvocationError +from salt.exceptions import CommandExecutionError, SaltInvocationError # pylint: enable=import-error,no-name-in-module log = logging.getLogger(__name__) @@ -89,9 +89,22 @@ def _connect(): password=jenkins_password) +def _retrieve_config_xml(config_xml, saltenv): + ''' + Helper to cache the config XML and raise a CommandExecutionError if we fail + to do so. If we successfully cache the file, return the cached path. + ''' + ret = __salt__['cp.cache_file'](config_xml, saltenv) + + if not ret: + raise CommandExecutionError('Failed to retrieve {0}'.format(config_xml)) + + return ret + + def run(script): ''' - .. versionadded:: Carbon + .. 
versionadded:: 2017.7.0 Execute a groovy script on the jenkins master @@ -166,7 +179,7 @@ def job_exists(name=None): ''' if not name: - raise SaltInvocationError('Required parameter `name` is missing.') + raise SaltInvocationError('Required parameter \'name\' is missing') server = _connect() if server.job_exists(name): @@ -190,12 +203,12 @@ def get_job_info(name=None): ''' if not name: - raise SaltInvocationError('Required parameter `name` is missing.') + raise SaltInvocationError('Required parameter \'name\' is missing') server = _connect() if not job_exists(name): - raise SaltInvocationError('Job `{0}` does not exist.'.format(name)) + raise CommandExecutionError('Job \'{0}\' does not exist'.format(name)) job_info = server.get_job_info(name) if job_info: @@ -219,17 +232,19 @@ def build_job(name=None, parameters=None): ''' if not name: - raise SaltInvocationError('Required parameter `name` is missing.') + raise SaltInvocationError('Required parameter \'name\' is missing') server = _connect() if not job_exists(name): - raise SaltInvocationError('Job `{0}` does not exist.'.format(name)) + raise CommandExecutionError('Job \'{0}\' does not exist.'.format(name)) try: server.build_job(name, parameters) except jenkins.JenkinsException as err: - raise SaltInvocationError('Something went wrong {0}.'.format(err)) + raise CommandExecutionError( + 'Encountered error building job \'{0}\': {1}'.format(name, err) + ) return True @@ -254,15 +269,15 @@ def create_job(name=None, ''' if not name: - raise SaltInvocationError('Required parameter `name` is missing.') + raise SaltInvocationError('Required parameter \'name\' is missing') if job_exists(name): - raise SaltInvocationError('Job `{0}` already exists.'.format(name)) + raise CommandExecutionError('Job \'{0}\' already exists'.format(name)) if not config_xml: config_xml = jenkins.EMPTY_CONFIG_XML else: - config_xml_file = __salt__['cp.cache_file'](config_xml, saltenv) + config_xml_file = _retrieve_config_xml(config_xml, saltenv) 
with salt.utils.fopen(config_xml_file) as _fp: config_xml = _fp.read() @@ -271,7 +286,9 @@ def create_job(name=None, try: server.create_job(name, config_xml) except jenkins.JenkinsException as err: - raise SaltInvocationError('Something went wrong {0}.'.format(err)) + raise CommandExecutionError( + 'Encountered error creating job \'{0}\': {1}'.format(name, err) + ) return config_xml @@ -296,12 +313,12 @@ def update_job(name=None, ''' if not name: - raise SaltInvocationError('Required parameter `name` is missing.') + raise SaltInvocationError('Required parameter \'name\' is missing') if not config_xml: config_xml = jenkins.EMPTY_CONFIG_XML else: - config_xml_file = __salt__['cp.cache_file'](config_xml, saltenv) + config_xml_file = _retrieve_config_xml(config_xml, saltenv) with salt.utils.fopen(config_xml_file) as _fp: config_xml = _fp.read() @@ -310,7 +327,9 @@ def update_job(name=None, try: server.reconfig_job(name, config_xml) except jenkins.JenkinsException as err: - raise SaltInvocationError('Something went wrong {0}.'.format(err)) + raise CommandExecutionError( + 'Encountered error updating job \'{0}\': {1}'.format(name, err) + ) return config_xml @@ -329,17 +348,19 @@ def delete_job(name=None): ''' if not name: - raise SaltInvocationError('Required parameter `name` is missing.') + raise SaltInvocationError('Required parameter \'name\' is missing') server = _connect() if not job_exists(name): - raise SaltInvocationError('Job `{0}` does not exists.'.format(name)) + raise CommandExecutionError('Job \'{0}\' does not exist'.format(name)) try: server.delete_job(name) except jenkins.JenkinsException as err: - raise SaltInvocationError('Something went wrong {0}.'.format(err)) + raise CommandExecutionError( + 'Encountered error deleting job \'{0}\': {1}'.format(name, err) + ) return True @@ -358,17 +379,19 @@ def enable_job(name=None): ''' if not name: - raise SaltInvocationError('Required parameter `name` is missing.') + raise SaltInvocationError('Required parameter 
\'name\' is missing') server = _connect() if not job_exists(name): - raise SaltInvocationError('Job `{0}` does not exists.'.format(name)) + raise CommandExecutionError('Job \'{0}\' does not exist'.format(name)) try: server.enable_job(name) except jenkins.JenkinsException as err: - raise SaltInvocationError('Something went wrong {0}.'.format(err)) + raise CommandExecutionError( + 'Encountered error enabling job \'{0}\': {1}'.format(name, err) + ) return True @@ -388,17 +411,19 @@ def disable_job(name=None): ''' if not name: - raise SaltInvocationError('Required parameter `name` is missing.') + raise SaltInvocationError('Required parameter \'name\' is missing') server = _connect() if not job_exists(name): - raise SaltInvocationError('Job `{0}` does not exists.'.format(name)) + raise CommandExecutionError('Job \'{0}\' does not exist'.format(name)) try: server.disable_job(name) except jenkins.JenkinsException as err: - raise SaltInvocationError('Something went wrong {0}.'.format(err)) + raise CommandExecutionError( + 'Encountered error disabling job \'{0}\': {1}'.format(name, err) + ) return True @@ -418,12 +443,12 @@ def job_status(name=None): ''' if not name: - raise SaltInvocationError('Required parameter `name` is missing.') + raise SaltInvocationError('Required parameter \'name\' is missing') server = _connect() if not job_exists(name): - raise SaltInvocationError('Job `{0}` does not exists.'.format(name)) + raise CommandExecutionError('Job \'{0}\' does not exist'.format(name)) return server.get_job_info('empty')['buildable'] @@ -444,12 +469,12 @@ def get_job_config(name=None): ''' if not name: - raise SaltInvocationError('Required parameter `name` is missing.') + raise SaltInvocationError('Required parameter \'name\' is missing') server = _connect() if not job_exists(name): - raise SaltInvocationError('Job `{0}` does not exists.'.format(name)) + raise CommandExecutionError('Job \'{0}\' does not exist'.format(name)) job_info = server.get_job_config(name) return 
job_info diff --git a/salt/modules/kerberos.py b/salt/modules/kerberos.py index a5a77a5891..284033daaf 100644 --- a/salt/modules/kerberos.py +++ b/salt/modules/kerberos.py @@ -262,7 +262,7 @@ def create_keytab(name, keytab, enctypes=None): .. code-block:: bash - salt 'kdc.example.com' host/host1.example.com host1.example.com.keytab + salt 'kdc.example.com' kerberos.create_keytab host/host1.example.com host1.example.com.keytab ''' ret = {} diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index b68b83057b..2e17b11444 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -9,20 +9,37 @@ Module for handling kubernetes calls. kubernetes.user: admin kubernetes.password: verybadpass kubernetes.api_url: 'http://127.0.0.1:8080' + kubernetes.certificate-authority-data: '...' + kubernetes.client-certificate-data: '....n + kubernetes.client-key-data: '...' + kubernetes.certificate-authority-file: '/path/to/ca.crt' + kubernetes.client-certificate-file: '/path/to/client.crt' + kubernetes.client-key-file: '/path/to/client.key' + These settings can be also overrided by adding `api_url`, `api_user`, -or `api_password` parameters when calling a function: +`api_password`, `api_certificate_authority_file`, `api_client_certificate_file` +or `api_client_key_file` parameters when calling a function: + +The data format for `kubernetes.*-data` values is the same as provided in `kubeconfig`. +It's base64 encoded certificates/keys in one line. + +For an item only one field should be provided. Either a `data` or a `file` entry. +In case both are provided the `file` entry is prefered. .. code-block:: bash salt '*' kubernetes.nodes api_url=http://k8s-api-server:port api_user=myuser api_password=pass +.. 
versionadded: 2017.7.0 ''' # Import Python Futures from __future__ import absolute_import +import os.path import base64 import logging import yaml +import tempfile from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems @@ -34,12 +51,18 @@ try: import kubernetes.client from kubernetes.client.rest import ApiException from urllib3.exceptions import HTTPError + try: + # There is an API change in Kubernetes >= 2.0.0. + from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment + from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec + except ImportError: + from kubernetes.client import AppsV1beta1Deployment + from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False - log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' @@ -64,16 +87,31 @@ def _setup_conn(**kwargs): 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') + ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') + client_cert = __salt__['config.option']('kubernetes.client-certificate-data') + client_key = __salt__['config.option']('kubernetes.client-key-data') + ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') + client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') + client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided - if kwargs.get('api_url'): - host = kwargs['api_url'] + if 'api_url' in kwargs: + host = kwargs.get('api_url') - if kwargs.get('api_user'): - username = kwargs['api_user'] + if 'api_user' in kwargs: + username = kwargs.get('api_user') - if kwargs.get('api_password'): - password = kwargs['api_password'] + if 'api_password' in kwargs: + password = kwargs.get('api_password') + + if 'api_certificate_authority_file' in 
kwargs: + ca_cert_file = kwargs.get('api_certificate_authority_file') + + if 'api_client_certificate_file' in kwargs: + client_cert_file = kwargs.get('api_client_certificate_file') + + if 'api_client_key_file' in kwargs: + client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or @@ -86,6 +124,45 @@ def _setup_conn(**kwargs): kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password + if ca_cert_file: + kubernetes.client.configuration.ssl_ca_cert = ca_cert_file + elif ca_cert: + with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: + ca.write(base64.b64decode(ca_cert)) + kubernetes.client.configuration.ssl_ca_cert = ca.name + else: + kubernetes.client.configuration.ssl_ca_cert = None + + if client_cert_file: + kubernetes.client.configuration.cert_file = client_cert_file + elif client_cert: + with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: + c.write(base64.b64decode(client_cert)) + kubernetes.client.configuration.cert_file = c.name + else: + kubernetes.client.configuration.cert_file = None + + if client_key_file: + kubernetes.client.configuration.key_file = client_key_file + if client_key: + with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: + k.write(base64.b64decode(client_key)) + kubernetes.client.configuration.key_file = k.name + else: + kubernetes.client.configuration.key_file = None + + +def _cleanup(**kwargs): + ca = kubernetes.client.configuration.ssl_ca_cert + cert = kubernetes.client.configuration.cert_file + key = kubernetes.client.configuration.key_file + if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): + salt.utils.safe_rm(cert) + if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): + salt.utils.safe_rm(key) + if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): + salt.utils.safe_rm(ca) + def 
ping(**kwargs): ''' @@ -127,6 +204,8 @@ def nodes(**kwargs): 'Exception when calling CoreV1Api->list_node: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def node(name, **kwargs): @@ -149,6 +228,8 @@ def node(name, **kwargs): 'Exception when calling CoreV1Api->list_node: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() for k8s_node in api_response.items: if k8s_node.metadata.name == name: @@ -203,6 +284,8 @@ def node_add_label(node_name, label_name, label_value, **kwargs): 'Exception when calling CoreV1Api->patch_node: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() return None @@ -236,6 +319,8 @@ def node_remove_label(node_name, label_name, **kwargs): 'Exception when calling CoreV1Api->patch_node: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() return None @@ -264,6 +349,8 @@ def namespaces(**kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def deployments(namespace='default', **kwargs): @@ -291,6 +378,8 @@ def deployments(namespace='default', **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def services(namespace='default', **kwargs): @@ -317,6 +406,8 @@ def services(namespace='default', **kwargs): 'CoreV1Api->list_namespaced_service: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def pods(namespace='default', **kwargs): @@ -343,6 +434,8 @@ def pods(namespace='default', **kwargs): 'CoreV1Api->list_namespaced_pod: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def secrets(namespace='default', **kwargs): @@ -369,6 +462,8 @@ def secrets(namespace='default', **kwargs): 'CoreV1Api->list_namespaced_secret: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def configmaps(namespace='default', **kwargs): @@ -395,6 +490,8 @@ def configmaps(namespace='default', **kwargs): 
'CoreV1Api->list_namespaced_config_map: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def show_deployment(name, namespace='default', **kwargs): @@ -422,6 +519,8 @@ def show_deployment(name, namespace='default', **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def show_service(name, namespace='default', **kwargs): @@ -448,6 +547,8 @@ def show_service(name, namespace='default', **kwargs): 'CoreV1Api->read_namespaced_service: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def show_pod(name, namespace='default', **kwargs): @@ -474,6 +575,8 @@ def show_pod(name, namespace='default', **kwargs): 'CoreV1Api->read_namespaced_pod: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def show_namespace(name, **kwargs): @@ -499,6 +602,8 @@ def show_namespace(name, **kwargs): 'CoreV1Api->read_namespace: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def show_secret(name, namespace='default', decode=False, **kwargs): @@ -534,6 +639,8 @@ def show_secret(name, namespace='default', decode=False, **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def show_configmap(name, namespace='default', **kwargs): @@ -563,6 +670,8 @@ def show_configmap(name, namespace='default', **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def delete_deployment(name, namespace='default', **kwargs): @@ -594,6 +703,8 @@ def delete_deployment(name, namespace='default', **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def delete_service(name, namespace='default', **kwargs): @@ -623,6 +734,8 @@ def delete_service(name, namespace='default', **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def delete_pod(name, namespace='default', **kwargs): @@ -654,6 +767,8 @@ def delete_pod(name, namespace='default', **kwargs): 
'CoreV1Api->delete_namespaced_pod: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def delete_namespace(name, **kwargs): @@ -682,6 +797,8 @@ def delete_namespace(name, **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def delete_secret(name, namespace='default', **kwargs): @@ -713,6 +830,8 @@ def delete_secret(name, namespace='default', **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def delete_configmap(name, namespace='default', **kwargs): @@ -745,6 +864,8 @@ def delete_configmap(name, namespace='default', **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def create_deployment( @@ -761,7 +882,7 @@ def create_deployment( ''' body = __create_object_body( kind='Deployment', - obj_class=kubernetes.client.V1beta1Deployment, + obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, @@ -789,6 +910,8 @@ def create_deployment( '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def create_pod( @@ -833,6 +956,8 @@ def create_pod( '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def create_service( @@ -876,6 +1001,8 @@ def create_service( 'CoreV1Api->create_namespaced_service: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def create_secret( @@ -921,6 +1048,8 @@ def create_secret( 'CoreV1Api->create_namespaced_secret: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def create_configmap( @@ -962,6 +1091,8 @@ def create_configmap( 'CoreV1Api->create_namespaced_config_map: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def create_namespace( @@ -996,6 +1127,8 @@ def create_namespace( '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def replace_deployment(name, @@ -1012,7 +1145,7 @@ def replace_deployment(name, ''' body = 
__create_object_body( kind='Deployment', - obj_class=kubernetes.client.V1beta1Deployment, + obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, @@ -1040,6 +1173,8 @@ def replace_deployment(name, '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def replace_service(name, @@ -1089,6 +1224,8 @@ def replace_service(name, 'CoreV1Api->replace_namespaced_service: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def replace_secret(name, @@ -1134,6 +1271,8 @@ def replace_secret(name, 'CoreV1Api->replace_namespaced_secret: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def replace_configmap(name, @@ -1173,6 +1312,8 @@ def replace_configmap(name, 'CoreV1Api->replace_namespaced_configmap: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def __create_object_body(kind, @@ -1275,9 +1416,9 @@ def __dict_to_object_meta(name, namespace, metadata): def __dict_to_deployment_spec(spec): ''' - Converts a dictionary into kubernetes V1beta1DeploymentSpec instance. + Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' - spec_obj = kubernetes.client.V1beta1DeploymentSpec() + spec_obj = AppsV1beta1DeploymentSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) diff --git a/salt/modules/osquery.py b/salt/modules/osquery.py index 137125f070..842f63944d 100644 --- a/salt/modules/osquery.py +++ b/salt/modules/osquery.py @@ -36,7 +36,7 @@ def _table_attrs(table): ''' Helper function to find valid table attributes ''' - cmd = 'osqueryi --json "pragma table_info({0})"'.format(table) + cmd = ['osqueryi'] + ['--json'] + ['pragma table_info{0}'.format(table)] res = __salt__['cmd.run_all'](cmd) if res['retcode'] == 0: attrs = [] @@ -55,7 +55,7 @@ def _osquery(sql, format='json'): 'result': True, } - cmd = 'osqueryi --json "{0}"'.format(sql) + cmd = ['osqueryi'] + ['--json'] + [sql] res = __salt__['cmd.run_all'](cmd) if res['retcode'] == 0: ret['data'] = json.loads(res['stdout']) diff --git a/salt/modules/parted.py b/salt/modules/parted.py index 04a7d37d47..f568c3d001 100644 --- a/salt/modules/parted.py +++ b/salt/modules/parted.py @@ -216,7 +216,7 @@ def align_check(device, part_type, partition): 'Invalid partition passed to partition.align_check' ) - cmd = 'parted -m -s {0} align-check {1} {2}'.format( + cmd = 'parted -m {0} align-check {1} {2}'.format( device, part_type, partition ) out = __salt__['cmd.run'](cmd).splitlines() diff --git a/salt/modules/pillar.py b/salt/modules/pillar.py index d0beb92a06..7293f39244 100644 --- a/salt/modules/pillar.py +++ b/salt/modules/pillar.py @@ -257,8 +257,8 @@ def items(*args, **kwargs): __opts__, __grains__, __opts__['id'], - pillar_override=kwargs.get('pillar'), - pillarenv=kwargs.get('pillarenv') or __opts__['pillarenv']) + pillar_override=pillar_override, + pillarenv=pillarenv) return pillar.compile_pillar() diff --git a/salt/modules/puppet.py b/salt/modules/puppet.py index 0b49f9e365..ec8597d62e 100644 --- a/salt/modules/puppet.py +++ b/salt/modules/puppet.py @@ -345,9 +345,10 @@ def summary(): def 
plugin_sync(): ''' - Runs a plugin synch between the puppet master and agent + Runs a plugin sync between the puppet master and agent CLI Example: + .. code-block:: bash salt '*' puppet.plugin_sync diff --git a/salt/modules/rabbitmq.py b/salt/modules/rabbitmq.py index b55f2dd173..d63c9bd8c4 100644 --- a/salt/modules/rabbitmq.py +++ b/salt/modules/rabbitmq.py @@ -135,6 +135,7 @@ def _safe_output(line): ''' return not any([ line.startswith('Listing') and line.endswith('...'), + line.startswith('Listing') and '\t' not in line, '...done' in line, line.startswith('WARNING:') ]) @@ -608,11 +609,11 @@ def set_user_tags(name, tags, runas=None): if runas is None and not salt.utils.is_windows(): runas = salt.utils.get_user() - if tags and isinstance(tags, (list, tuple)): - tags = ' '.join(tags) + if not isinstance(tags, (list, tuple)): + tags = [tags] res = __salt__['cmd.run_all']( - [RABBITMQCTL, 'set_user_tags', name, tags], + [RABBITMQCTL, 'set_user_tags', name] + list(tags), runas=runas, python_shell=False) msg = "Tag(s) set" diff --git a/salt/modules/reg.py b/salt/modules/reg.py index 63249dc5fe..644859f141 100644 --- a/salt/modules/reg.py +++ b/salt/modules/reg.py @@ -150,7 +150,9 @@ class Registry(object): # pylint: disable=R0903 _winreg.REG_DWORD: 'REG_DWORD', _winreg.REG_EXPAND_SZ: 'REG_EXPAND_SZ', _winreg.REG_MULTI_SZ: 'REG_MULTI_SZ', - _winreg.REG_SZ: 'REG_SZ' + _winreg.REG_SZ: 'REG_SZ', + # REG_QWORD isn't in the winreg library + 11: 'REG_QWORD' } self.opttype_reverse = { _winreg.REG_OPTION_NON_VOLATILE: 'REG_OPTION_NON_VOLATILE', diff --git a/salt/modules/runit.py b/salt/modules/runit.py index 083ac86917..e743ed4636 100644 --- a/salt/modules/runit.py +++ b/salt/modules/runit.py @@ -80,7 +80,8 @@ for service_dir in VALID_SERVICE_DIRS: AVAIL_SVR_DIRS = [] # Define the module's virtual name -__virtualname__ = 'service' +__virtualname__ = 'runit' +__virtual_aliases__ = ('runit',) def __virtual__(): @@ -91,8 +92,12 @@ def __virtual__(): if __grains__.get('init') == 
'runit': if __grains__['os'] == 'Void': add_svc_avail_path('/etc/sv') + global __virtualname__ + __virtualname__ = 'service' return __virtualname__ - return False + if salt.utils.which('sv'): + return __virtualname__ + return (False, 'Runit not available. Please install sv') def _service_path(name): diff --git a/salt/modules/selinux.py b/salt/modules/selinux.py index 8a060e418e..d227b12eb4 100644 --- a/salt/modules/selinux.py +++ b/salt/modules/selinux.py @@ -403,7 +403,7 @@ def _context_string_to_dict(context): return ret -def _filetype_id_to_string(filetype='a'): +def filetype_id_to_string(filetype='a'): ''' Translates SELinux filetype single-letter representation to a more human-readable version (which is also used in `semanage fcontext -l`). @@ -444,7 +444,7 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l 'sel_role': '[^:]+', # se_role for file context is always object_r 'sel_type': sel_type or '[^:]+', 'sel_level': sel_level or '[^:]+'} - cmd_kwargs['filetype'] = '[[:alpha:] ]+' if filetype is None else _filetype_id_to_string(filetype) + cmd_kwargs['filetype'] = '[[:alpha:] ]+' if filetype is None else filetype_id_to_string(filetype) cmd = 'semanage fcontext -l | egrep ' + \ "'^{filespec}{spacer}{filetype}{spacer}{sel_user}:{sel_role}:{sel_type}:{sel_level}$'".format(**cmd_kwargs) current_entry_text = __salt__['cmd.shell'](cmd) diff --git a/salt/modules/status.py b/salt/modules/status.py index f2eeaa42fa..b5ece1d7d2 100644 --- a/salt/modules/status.py +++ b/salt/modules/status.py @@ -218,7 +218,7 @@ def uptime(): with salt.utils.fopen(ut_path) as rfh: seconds = int(float(rfh.read().split()[0])) elif salt.utils.is_sunos(): - # note: some flavors/vesions report the host uptime inside a zone + # note: some flavors/versions report the host uptime inside a zone # https://support.oracle.com/epmos/faces/BugDisplay?id=15611584 res = __salt__['cmd.run_all']('kstat -p unix:0:system_misc:boot_time') if res['retcode'] > 0: diff --git 
a/salt/modules/testinframod.py b/salt/modules/testinframod.py index c4ad1df0ef..a384de9e7b 100644 --- a/salt/modules/testinframod.py +++ b/salt/modules/testinframod.py @@ -242,6 +242,8 @@ def _copy_function(module_name, name=None): elif hasattr(mod, '__call__'): mod_sig = inspect.getargspec(mod.__call__) parameters = mod_sig.args + log.debug('Parameters accepted by module {0}: {1}'.format(module_name, + parameters)) additional_args = {} for arg in set(parameters).intersection(set(methods)): additional_args[arg] = methods.pop(arg) @@ -251,12 +253,15 @@ def _copy_function(module_name, name=None): else: modinstance = mod() except TypeError: - modinstance = None - methods = {} + log.exception('Module failed to instantiate') + raise + valid_methods = {} + log.debug('Called methods are: {0}'.format(methods)) for meth_name in methods: if not meth_name.startswith('_'): - methods[meth_name] = methods[meth_name] - for meth, arg in methods.items(): + valid_methods[meth_name] = methods[meth_name] + log.debug('Valid methods are: {0}'.format(valid_methods)) + for meth, arg in valid_methods.items(): result = _get_method_result(mod, modinstance, meth, arg) assertion_result = _apply_assertion(arg, result) if not assertion_result: @@ -285,13 +290,17 @@ def _register_functions(): functions, and then register them in the module namespace so that they can be called via salt. 
""" - for module_ in modules.__all__: + try: + modules_ = [_to_snake_case(module_) for module_ in modules.__all__] + except AttributeError: + modules_ = [module_ for module_ in modules.modules] + + for mod_name in modules_: mod_name = _to_snake_case(module_) mod_func = _copy_function(mod_name, str(mod_name)) - mod_func.__doc__ = _build_doc(module_) + mod_func.__doc__ = _build_doc(mod_name) __all__.append(mod_name) globals()[mod_name] = mod_func - if TESTINFRA_PRESENT: _register_functions() diff --git a/salt/modules/win_file.py b/salt/modules/win_file.py index 12d9319031..461fb5310d 100644 --- a/salt/modules/win_file.py +++ b/salt/modules/win_file.py @@ -1687,9 +1687,6 @@ def check_perms(path, perms = changes[user]['perms'] try: - log.debug('*' * 68) - log.debug(perms) - log.debug('*' * 68) salt.utils.win_dacl.set_permissions( path, user, perms, 'deny', applies_to) ret['changes']['deny_perms'][user] = changes[user] diff --git a/salt/modules/win_groupadd.py b/salt/modules/win_groupadd.py index d466380d70..f584a0ab94 100644 --- a/salt/modules/win_groupadd.py +++ b/salt/modules/win_groupadd.py @@ -12,6 +12,7 @@ from __future__ import absolute_import # Import salt libs import salt.utils +import salt.utils.win_functions try: @@ -35,10 +36,18 @@ def __virtual__(): return (False, "Module win_groupadd: module only works on Windows systems") -def add(name, gid=None, system=False): +def add(name, **kwargs): ''' Add the specified group + Args: + + name (str): + The name of the group to add + + Returns: + dict: A dictionary of results + CLI Example: .. 
code-block:: bash @@ -57,29 +66,32 @@ def add(name, gid=None, system=False): compObj = nt.GetObject('', 'WinNT://.,computer') newGroup = compObj.Create('group', name) newGroup.SetInfo() - ret['changes'].append(( - 'Successfully created group {0}' - ).format(name)) + ret['changes'].append('Successfully created group {0}'.format(name)) except pywintypes.com_error as com_err: ret['result'] = False if len(com_err.excepinfo) >= 2: friendly_error = com_err.excepinfo[2].rstrip('\r\n') - ret['comment'] = ( - 'Failed to create group {0}. {1}' - ).format(name, friendly_error) + ret['comment'] = 'Failed to create group {0}. {1}' \ + ''.format(name, friendly_error) else: ret['result'] = None - ret['comment'] = ( - 'The group {0} already exists.' - ).format(name) + ret['comment'] = 'The group {0} already exists.'.format(name) return ret -def delete(name): +def delete(name, **kwargs): ''' Remove the named group + Args: + + name (str): + The name of the group to remove + + Returns: + dict: A dictionary of results + CLI Example: .. code-block:: bash @@ -118,6 +130,14 @@ def info(name): ''' Return information about a group + Args: + + name (str): + The name of the group for which to get information + + Returns: + dict: A dictionary of information about the group + CLI Example: .. code-block:: bash @@ -151,6 +171,17 @@ def getent(refresh=False): ''' Return info on all groups + Args: + + refresh (bool): + Refresh the info for all groups in ``__context__``. If False only + the groups in ``__context__`` wil be returned. If True the + ``__context__`` will be refreshed with current data and returned. + Default is False + + Returns: + A list of groups and their information + CLI Example: .. 
code-block:: bash @@ -182,16 +213,26 @@ def getent(refresh=False): return ret -def adduser(name, username): +def adduser(name, username, **kwargs): ''' - add a user to a group + Add a user to a group + + Args: + + name (str): + The name of the group to modify + + username (str): + The name of the user to add to the group + + Returns: + dict: A dictionary of results CLI Example: .. code-block:: bash salt '*' group.adduser foo username - ''' ret = {'name': name, @@ -209,7 +250,7 @@ def adduser(name, username): '/', '\\').encode('ascii', 'backslashreplace').lower()) try: - if __fixlocaluser(username.lower()) not in existingMembers: + if salt.utils.win_functions.get_sam_name(username) not in existingMembers: if not __opts__['test']: groupObj.Add('WinNT://' + username.replace('\\', '/')) @@ -231,16 +272,26 @@ def adduser(name, username): return ret -def deluser(name, username): +def deluser(name, username, **kwargs): ''' - remove a user from a group + Remove a user from a group + + Args: + + name (str): + The name of the group to modify + + username (str): + The name of the user to remove from the group + + Returns: + dict: A dictionary of results CLI Example: .. code-block:: bash salt '*' group.deluser foo username - ''' ret = {'name': name, @@ -258,7 +309,7 @@ def deluser(name, username): '/', '\\').encode('ascii', 'backslashreplace').lower()) try: - if __fixlocaluser(username.lower()) in existingMembers: + if salt.utils.win_functions.get_sam_name(username) in existingMembers: if not __opts__['test']: groupObj.Remove('WinNT://' + username.replace('\\', '/')) @@ -280,16 +331,27 @@ def deluser(name, username): return ret -def members(name, members_list): +def members(name, members_list, **kwargs): ''' - remove a user from a group + Ensure a group contains only the members in the list + + Args: + + name (str): + The name of the group to modify + + members_list (str): + A single user or a comma separated list of users. 
The group will + contain only the users specified in this list. + + Returns: + dict: A dictionary of results CLI Example: .. code-block:: bash salt '*' group.members foo 'user1,user2,user3' - ''' ret = {'name': name, @@ -297,7 +359,7 @@ def members(name, members_list): 'changes': {'Users Added': [], 'Users Removed': []}, 'comment': []} - members_list = [__fixlocaluser(thisMember) for thisMember in members_list.lower().split(",")] + members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list.split(",")] if not isinstance(members_list, list): ret['result'] = False ret['comment'].append('Members is not a list object') @@ -364,27 +426,26 @@ def members(name, members_list): return ret -def __fixlocaluser(username): - ''' - prefixes a username w/o a backslash with the computername - - i.e. __fixlocaluser('Administrator') would return 'computername\administrator' - ''' - if '\\' not in username: - username = ('{0}\\{1}').format(__salt__['grains.get']('host'), username) - - return username.lower() - - def list_groups(refresh=False): ''' Return a list of groups + Args: + + refresh (bool): + Refresh the info for all groups in ``__context__``. If False only + the groups in ``__context__`` wil be returned. If True, the + ``__context__`` will be refreshed with current data and returned. + Default is False + + Returns: + list: A list of groups on the machine + CLI Example: .. 
code-block:: bash - salt '*' group.getent + salt '*' group.list_groups ''' if 'group.list_groups' in __context__ and not refresh: return __context__['group.getent'] diff --git a/salt/modules/win_iis.py b/salt/modules/win_iis.py index d0fe647b03..bf52b4f0d5 100644 --- a/salt/modules/win_iis.py +++ b/salt/modules/win_iis.py @@ -837,6 +837,9 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443, # IIS 7.5 and earlier have different syntax for associating a certificate with a site # Modify IP spec to IIS 7.5 format iis7path = binding_path.replace(r"\*!", "\\0.0.0.0!") + # win 2008 uses the following format: ip!port and not ip!port! + if iis7path.endswith("!"): + iis7path = iis7path[:-1] ps_cmd = ['New-Item', '-Path', "'{0}'".format(iis7path), @@ -856,7 +859,7 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443, new_cert_bindings = list_cert_bindings(site) - if binding_info not in new_cert_bindings(site): + if binding_info not in new_cert_bindings: log.error('Binding not present: {0}'.format(binding_info)) return False @@ -1255,6 +1258,9 @@ def set_container_setting(name, container, settings): salt '*' win_iis.set_container_setting name='MyTestPool' container='AppPools' settings="{'managedPipeLineMode': 'Integrated'}" ''' + + identityType_map2string = {'0': 'LocalSystem', '1': 'LocalService', '2': 'NetworkService', '3': 'SpecificUser', '4': 'ApplicationPoolIdentity'} + identityType_map2numeric = {'LocalSystem': '0', 'LocalService': '1', 'NetworkService': '2', 'SpecificUser': '3', 'ApplicationPoolIdentity': '4'} ps_cmd = list() container_path = r"IIS:\{0}\{1}".format(container, name) @@ -1281,6 +1287,10 @@ def set_container_setting(name, container, settings): except ValueError: value = "'{0}'".format(settings[setting]) + # Map to numeric to support server 2008 + if setting == 'processModel.identityType' and settings[setting] in identityType_map2numeric.keys(): + value = identityType_map2numeric[settings[setting]] + 
ps_cmd.extend(['Set-ItemProperty', '-Path', "'{0}'".format(container_path), '-Name', "'{0}'".format(setting), @@ -1300,6 +1310,10 @@ def set_container_setting(name, container, settings): failed_settings = dict() for setting in settings: + # map identity type from numeric to string for comparing + if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys(): + settings[setting] = identityType_map2string[settings[setting]] + if str(settings[setting]) != str(new_settings[setting]): failed_settings[setting] = settings[setting] diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index 6bf5d437d8..2b834227e9 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -2835,7 +2835,8 @@ def _findOptionValueInSeceditFile(option): _reader = codecs.open(_tfile, 'r', encoding='utf-16') _secdata = _reader.readlines() _reader.close() - _ret = __salt__['file.remove'](_tfile) + if __salt__['file.file_exists'](_tfile): + _ret = __salt__['file.remove'](_tfile) for _line in _secdata: if _line.startswith(option): return True, _line.split('=')[1].strip() @@ -2856,16 +2857,20 @@ def _importSeceditConfig(infdata): _tInfFile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'), 'salt-secedit-config-{0}.inf'.format(_d)) # make sure our temp files don't already exist - _ret = __salt__['file.remove'](_tSdbfile) - _ret = __salt__['file.remove'](_tInfFile) + if __salt__['file.file_exists'](_tSdbfile): + _ret = __salt__['file.remove'](_tSdbfile) + if __salt__['file.file_exists'](_tInfFile): + _ret = __salt__['file.remove'](_tInfFile) # add the inf data to the file, win_file sure could use the write() function _ret = __salt__['file.touch'](_tInfFile) _ret = __salt__['file.append'](_tInfFile, infdata) # run secedit to make the change _ret = __salt__['cmd.run']('secedit /configure /db {0} /cfg {1}'.format(_tSdbfile, _tInfFile)) # cleanup our temp files - _ret = __salt__['file.remove'](_tSdbfile) - _ret = 
__salt__['file.remove'](_tInfFile) + if __salt__['file.file_exists'](_tSdbfile): + _ret = __salt__['file.remove'](_tSdbfile) + if __salt__['file.file_exists'](_tInfFile): + _ret = __salt__['file.remove'](_tInfFile) return True except Exception as e: log.debug('error occurred while trying to import secedit data') @@ -3503,22 +3508,31 @@ def _checkAllAdmxPolicies(policy_class, not_configured_policies.remove(policy_item) for not_configured_policy in not_configured_policies: - policy_vals[not_configured_policy.attrib['name']] = 'Not Configured' + not_configured_policy_namespace = not_configured_policy.nsmap[not_configured_policy.prefix] + if not_configured_policy_namespace not in policy_vals: + policy_vals[not_configured_policy_namespace] = {} + policy_vals[not_configured_policy_namespace][not_configured_policy.attrib['name']] = 'Not Configured' if return_full_policy_names: - full_names[not_configured_policy.attrib['name']] = _getFullPolicyName( + if not_configured_policy_namespace not in full_names: + full_names[not_configured_policy_namespace] = {} + full_names[not_configured_policy_namespace][not_configured_policy.attrib['name']] = _getFullPolicyName( not_configured_policy, not_configured_policy.attrib['name'], return_full_policy_names, adml_policy_resources) log.debug('building hierarchy for non-configured item {0}'.format(not_configured_policy.attrib['name'])) - hierarchy[not_configured_policy.attrib['name']] = _build_parent_list(not_configured_policy, - admx_policy_definitions, - return_full_policy_names, - adml_policy_resources) + if not_configured_policy_namespace not in hierarchy: + hierarchy[not_configured_policy_namespace] = {} + hierarchy[not_configured_policy_namespace][not_configured_policy.attrib['name']] = _build_parent_list( + not_configured_policy, + admx_policy_definitions, + return_full_policy_names, + adml_policy_resources) for admx_policy in admx_policies: this_key = None this_valuename = None this_policyname = None + this_policynamespace = None 
this_policy_setting = 'Not Configured' element_only_enabled_disabled = True explicit_enable_disable_value_setting = False @@ -3537,6 +3551,7 @@ def _checkAllAdmxPolicies(policy_class, log.error('policy item {0} does not have the required "name" ' 'attribute'.format(admx_policy.attrib)) break + this_policynamespace = admx_policy.nsmap[admx_policy.prefix] if ENABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured': element_only_enabled_disabled = False explicit_enable_disable_value_setting = True @@ -3548,7 +3563,9 @@ def _checkAllAdmxPolicies(policy_class, policy_filedata): this_policy_setting = 'Enabled' log.debug('{0} is enabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting if DISABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured': element_only_enabled_disabled = False explicit_enable_disable_value_setting = True @@ -3560,21 +3577,27 @@ def _checkAllAdmxPolicies(policy_class, policy_filedata): this_policy_setting = 'Disabled' log.debug('{0} is disabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting if ENABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured': element_only_enabled_disabled = False explicit_enable_disable_value_setting = True if _checkListItem(admx_policy, this_policyname, this_key, ENABLED_LIST_XPATH, policy_filedata): this_policy_setting = 'Enabled' log.debug('{0} is enabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = 
this_policy_setting if DISABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured': element_only_enabled_disabled = False explicit_enable_disable_value_setting = True if _checkListItem(admx_policy, this_policyname, this_key, DISABLED_LIST_XPATH, policy_filedata): this_policy_setting = 'Disabled' log.debug('{0} is disabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting if not explicit_enable_disable_value_setting and this_valuename: # the policy has a key/valuename but no explicit enabled/Disabled @@ -3587,7 +3610,9 @@ def _checkAllAdmxPolicies(policy_class, policy_filedata): this_policy_setting = 'Enabled' log.debug('{0} is enabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting elif _regexSearchRegPolData(re.escape(_buildKnownDataSearchString(this_key, this_valuename, 'REG_DWORD', @@ -3596,7 +3621,9 @@ def _checkAllAdmxPolicies(policy_class, policy_filedata): this_policy_setting = 'Disabled' log.debug('{0} is disabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting if ELEMENTS_XPATH(admx_policy): if element_only_enabled_disabled or this_policy_setting == 'Enabled': @@ -3794,65 +3821,84 @@ def _checkAllAdmxPolicies(policy_class, and len(configured_elements.keys()) == len(required_elements.keys()): if policy_disabled_elements == len(required_elements.keys()): log.debug('{0} is disabled by all enum elements'.format(this_policyname)) - policy_vals[this_policyname] = 'Disabled' + if 
this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = 'Disabled' else: - policy_vals[this_policyname] = configured_elements + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = configured_elements log.debug('{0} is enabled by enum elements'.format(this_policyname)) else: if this_policy_setting == 'Enabled': - policy_vals[this_policyname] = configured_elements - if return_full_policy_names and this_policyname in policy_vals: - full_names[this_policyname] = _getFullPolicyName( + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = configured_elements + if return_full_policy_names and this_policynamespace in policy_vals and this_policyname in policy_vals[this_policynamespace]: + if this_policynamespace not in full_names: + full_names[this_policynamespace] = {} + full_names[this_policynamespace][this_policyname] = _getFullPolicyName( admx_policy, admx_policy.attrib['name'], return_full_policy_names, adml_policy_resources) - if this_policyname in policy_vals: - hierarchy[this_policyname] = _build_parent_list(admx_policy, + if this_policynamespace in policy_vals and this_policyname in policy_vals[this_policynamespace]: + if this_policynamespace not in hierarchy: + hierarchy[this_policynamespace] = {} + hierarchy[this_policynamespace][this_policyname] = _build_parent_list(admx_policy, admx_policy_definitions, return_full_policy_names, adml_policy_resources) if policy_vals and return_full_policy_names and not hierarchical_return: unpathed_dict = {} pathed_dict = {} - for policy_item in list(policy_vals): - if full_names[policy_item] in policy_vals: - # add this item with the path'd full name - full_path_list = hierarchy[policy_item] + for policy_namespace in list(policy_vals): + for policy_item in 
list(policy_vals[policy_namespace]): + if full_names[policy_namespace][policy_item] in policy_vals[policy_namespace]: + # add this item with the path'd full name + full_path_list = hierarchy[policy_namespace][policy_item] + full_path_list.reverse() + full_path_list.append(full_names[policy_namespace][policy_item]) + policy_vals['\\'.join(full_path_list)] = policy_vals[policy_namespace].pop(policy_item) + pathed_dict[full_names[policy_namespace][policy_item]] = True + else: + policy_vals[policy_namespace][full_names[policy_namespace][policy_item]] = policy_vals[policy_namespace].pop(policy_item) + if policy_namespace not in unpathed_dict: + unpathed_dict[policy_namespace] = {} + unpathed_dict[policy_namespace][full_names[policy_namespace][policy_item]] = policy_item + # go back and remove any "unpathed" policies that need a full path + for path_needed in unpathed_dict[policy_namespace]: + # remove the item with the same full name and re-add it w/a path'd version + full_path_list = hierarchy[policy_namespace][unpathed_dict[policy_namespace][path_needed]] full_path_list.reverse() - full_path_list.append(full_names[policy_item]) - policy_vals['\\'.join(full_path_list)] = policy_vals.pop(policy_item) - pathed_dict[full_names[policy_item]] = True - else: - policy_vals[full_names[policy_item]] = policy_vals.pop(policy_item) - unpathed_dict[full_names[policy_item]] = policy_item - # go back and remove any "unpathed" policies that need a full path - for path_needed in unpathed_dict: - # remove the item with the same full name and re-add it w/a path'd version - full_path_list = hierarchy[unpathed_dict[path_needed]] - full_path_list.reverse() - full_path_list.append(path_needed) - log.debug('full_path_list == {0}'.format(full_path_list)) - policy_vals['\\'.join(full_path_list)] = policy_vals.pop(path_needed) + full_path_list.append(path_needed) + log.debug('full_path_list == {0}'.format(full_path_list)) + policy_vals['\\'.join(full_path_list)] = 
policy_vals[policy_namespace].pop(path_needed) + for policy_namespace in list(policy_vals): + if policy_vals[policy_namespace] == {}: + policy_vals.pop(policy_namespace) if policy_vals and hierarchical_return: if hierarchy: - for hierarchy_item in hierarchy: - if hierarchy_item in policy_vals: - tdict = {} - first_item = True - for item in hierarchy[hierarchy_item]: - newdict = {} - if first_item: - h_policy_name = hierarchy_item - if return_full_policy_names: - h_policy_name = full_names[hierarchy_item] - newdict[item] = {h_policy_name: policy_vals.pop(hierarchy_item)} - first_item = False - else: - newdict[item] = tdict - tdict = newdict - if tdict: - policy_vals = dictupdate.update(policy_vals, tdict) + for policy_namespace in hierarchy: + for hierarchy_item in hierarchy[policy_namespace]: + if hierarchy_item in policy_vals[policy_namespace]: + tdict = {} + first_item = True + for item in hierarchy[policy_namespace][hierarchy_item]: + newdict = {} + if first_item: + h_policy_name = hierarchy_item + if return_full_policy_names: + h_policy_name = full_names[policy_namespace][hierarchy_item] + newdict[item] = {h_policy_name: policy_vals[policy_namespace].pop(hierarchy_item)} + first_item = False + else: + newdict[item] = tdict + tdict = newdict + if tdict: + policy_vals = dictupdate.update(policy_vals, tdict) + if policy_namespace in policy_vals and policy_vals[policy_namespace] == {}: + policy_vals.pop(policy_namespace) policy_vals = { module_policy_data.admx_registry_classes[policy_class]['lgpo_section']: { 'Administrative Templates': policy_vals @@ -3982,78 +4028,77 @@ def _write_regpol_data(data_to_write, gpt_extension_guid: admx registry extension guid for the class ''' try: - if data_to_write: - reg_pol_header = u'\u5250\u6765\x01\x00' - if not os.path.exists(policy_file_path): - ret = __salt__['file.makedirs'](policy_file_path) - with salt.utils.fopen(policy_file_path, 'wb') as pol_file: - if not data_to_write.startswith(reg_pol_header): - 
pol_file.write(reg_pol_header.encode('utf-16-le')) - pol_file.write(data_to_write.encode('utf-16-le')) - try: - gpt_ini_data = '' - if os.path.exists(gpt_ini_path): - with salt.utils.fopen(gpt_ini_path, 'rb') as gpt_file: - gpt_ini_data = gpt_file.read() - if not _regexSearchRegPolData(r'\[General\]\r\n', gpt_ini_data): - gpt_ini_data = '[General]\r\n' + gpt_ini_data - if _regexSearchRegPolData(r'{0}='.format(re.escape(gpt_extension)), gpt_ini_data): - # ensure the line contains the ADM guid - gpt_ext_loc = re.search(r'^{0}=.*\r\n'.format(re.escape(gpt_extension)), - gpt_ini_data, - re.IGNORECASE | re.MULTILINE) - gpt_ext_str = gpt_ini_data[gpt_ext_loc.start():gpt_ext_loc.end()] - if not _regexSearchRegPolData(r'{0}'.format(re.escape(gpt_extension_guid)), - gpt_ext_str): - gpt_ext_str = gpt_ext_str.split('=') - gpt_ext_str[1] = gpt_extension_guid + gpt_ext_str[1] - gpt_ext_str = '='.join(gpt_ext_str) - gpt_ini_data = gpt_ini_data[0:gpt_ext_loc.start()] + gpt_ext_str + gpt_ini_data[gpt_ext_loc.end():] - else: - general_location = re.search(r'^\[General\]\r\n', - gpt_ini_data, - re.IGNORECASE | re.MULTILINE) - gpt_ini_data = "{0}{1}={2}\r\n{3}".format( - gpt_ini_data[general_location.start():general_location.end()], - gpt_extension, gpt_extension_guid, - gpt_ini_data[general_location.end():]) - # https://technet.microsoft.com/en-us/library/cc978247.aspx - if _regexSearchRegPolData(r'Version=', gpt_ini_data): - version_loc = re.search(r'^Version=.*\r\n', - gpt_ini_data, - re.IGNORECASE | re.MULTILINE) - version_str = gpt_ini_data[version_loc.start():version_loc.end()] - version_str = version_str.split('=') - version_nums = struct.unpack('>2H', struct.pack('>I', int(version_str[1]))) - if gpt_extension.lower() == 'gPCMachineExtensionNames'.lower(): - version_nums = (version_nums[0], version_nums[1] + 1) - elif gpt_extension.lower() == 'gPCUserExtensionNames'.lower(): - version_nums = (version_nums[0] + 1, version_nums[1]) - version_num = struct.unpack('>I', 
struct.pack('>2H', *version_nums))[0] - gpt_ini_data = "{0}{1}={2}\r\n{3}".format( - gpt_ini_data[0:version_loc.start()], - 'Version', version_num, - gpt_ini_data[version_loc.end():]) - else: - general_location = re.search(r'^\[General\]\r\n', - gpt_ini_data, - re.IGNORECASE | re.MULTILINE) - if gpt_extension.lower() == 'gPCMachineExtensionNames'.lower(): - version_nums = (0, 1) - elif gpt_extension.lower() == 'gPCUserExtensionNames'.lower(): - version_nums = (1, 0) - gpt_ini_data = "{0}{1}={2}\r\n{3}".format( - gpt_ini_data[general_location.start():general_location.end()], - 'Version', - int("{0}{1}".format(str(version_nums[0]).zfill(4), str(version_nums[1]).zfill(4)), 16), - gpt_ini_data[general_location.end():]) - if gpt_ini_data: - with salt.utils.fopen(gpt_ini_path, 'wb') as gpt_file: - gpt_file.write(gpt_ini_data) - except Exception as e: - msg = 'An error occurred attempting to write to {0}, the exception was {1}'.format( - gpt_ini_path, e) - raise CommandExecutionError(msg) + reg_pol_header = u'\u5250\u6765\x01\x00' + if not os.path.exists(policy_file_path): + ret = __salt__['file.makedirs'](policy_file_path) + with salt.utils.fopen(policy_file_path, 'wb') as pol_file: + if not data_to_write.startswith(reg_pol_header): + pol_file.write(reg_pol_header.encode('utf-16-le')) + pol_file.write(data_to_write.encode('utf-16-le')) + try: + gpt_ini_data = '' + if os.path.exists(gpt_ini_path): + with salt.utils.fopen(gpt_ini_path, 'rb') as gpt_file: + gpt_ini_data = gpt_file.read() + if not _regexSearchRegPolData(r'\[General\]\r\n', gpt_ini_data): + gpt_ini_data = '[General]\r\n' + gpt_ini_data + if _regexSearchRegPolData(r'{0}='.format(re.escape(gpt_extension)), gpt_ini_data): + # ensure the line contains the ADM guid + gpt_ext_loc = re.search(r'^{0}=.*\r\n'.format(re.escape(gpt_extension)), + gpt_ini_data, + re.IGNORECASE | re.MULTILINE) + gpt_ext_str = gpt_ini_data[gpt_ext_loc.start():gpt_ext_loc.end()] + if not 
_regexSearchRegPolData(r'{0}'.format(re.escape(gpt_extension_guid)), + gpt_ext_str): + gpt_ext_str = gpt_ext_str.split('=') + gpt_ext_str[1] = gpt_extension_guid + gpt_ext_str[1] + gpt_ext_str = '='.join(gpt_ext_str) + gpt_ini_data = gpt_ini_data[0:gpt_ext_loc.start()] + gpt_ext_str + gpt_ini_data[gpt_ext_loc.end():] + else: + general_location = re.search(r'^\[General\]\r\n', + gpt_ini_data, + re.IGNORECASE | re.MULTILINE) + gpt_ini_data = "{0}{1}={2}\r\n{3}".format( + gpt_ini_data[general_location.start():general_location.end()], + gpt_extension, gpt_extension_guid, + gpt_ini_data[general_location.end():]) + # https://technet.microsoft.com/en-us/library/cc978247.aspx + if _regexSearchRegPolData(r'Version=', gpt_ini_data): + version_loc = re.search(r'^Version=.*\r\n', + gpt_ini_data, + re.IGNORECASE | re.MULTILINE) + version_str = gpt_ini_data[version_loc.start():version_loc.end()] + version_str = version_str.split('=') + version_nums = struct.unpack('>2H', struct.pack('>I', int(version_str[1]))) + if gpt_extension.lower() == 'gPCMachineExtensionNames'.lower(): + version_nums = (version_nums[0], version_nums[1] + 1) + elif gpt_extension.lower() == 'gPCUserExtensionNames'.lower(): + version_nums = (version_nums[0] + 1, version_nums[1]) + version_num = struct.unpack('>I', struct.pack('>2H', *version_nums))[0] + gpt_ini_data = "{0}{1}={2}\r\n{3}".format( + gpt_ini_data[0:version_loc.start()], + 'Version', version_num, + gpt_ini_data[version_loc.end():]) + else: + general_location = re.search(r'^\[General\]\r\n', + gpt_ini_data, + re.IGNORECASE | re.MULTILINE) + if gpt_extension.lower() == 'gPCMachineExtensionNames'.lower(): + version_nums = (0, 1) + elif gpt_extension.lower() == 'gPCUserExtensionNames'.lower(): + version_nums = (1, 0) + gpt_ini_data = "{0}{1}={2}\r\n{3}".format( + gpt_ini_data[general_location.start():general_location.end()], + 'Version', + int("{0}{1}".format(str(version_nums[0]).zfill(4), str(version_nums[1]).zfill(4)), 16), + 
gpt_ini_data[general_location.end():]) + if gpt_ini_data: + with salt.utils.fopen(gpt_ini_path, 'wb') as gpt_file: + gpt_file.write(gpt_ini_data) + except Exception as e: + msg = 'An error occurred attempting to write to {0}, the exception was {1}'.format( + gpt_ini_path, e) + raise CommandExecutionError(msg) except Exception as e: msg = 'An error occurred attempting to write to {0}, the exception was {1}'.format(policy_file_path, e) raise CommandExecutionError(msg) @@ -4117,6 +4162,7 @@ def _policyFileReplaceOrAppend(this_string, policy_data, append_only=False): def _writeAdminTemplateRegPolFile(admtemplate_data, + admtemplate_namespace_data, admx_policy_definitions=None, adml_policy_resources=None, display_language='en-US', @@ -4133,7 +4179,7 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, existing_data = '' base_policy_settings = {} policy_data = _policy_info() - policySearchXpath = etree.XPath('//*[@*[local-name() = "id"] = $id or @*[local-name() = "name"] = $id]') + policySearchXpath = '//ns1:*[@id = "{0}" or @name = "{0}"]' try: if admx_policy_definitions is None or adml_policy_resources is None: admx_policy_definitions, adml_policy_resources = _processPolicyDefinitions( @@ -4145,298 +4191,303 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, hierarchical_return=False, return_not_configured=False) log.debug('preparing to loop through policies requested to be configured') - for adm_policy in admtemplate_data: - if str(admtemplate_data[adm_policy]).lower() == 'not configured': - if adm_policy in base_policy_settings: - base_policy_settings.pop(adm_policy) - else: - log.debug('adding {0} to base_policy_settings'.format(adm_policy)) - base_policy_settings[adm_policy] = admtemplate_data[adm_policy] - for admPolicy in base_policy_settings: - log.debug('working on admPolicy {0}'.format(admPolicy)) - explicit_enable_disable_value_setting = False - this_key = None - this_valuename = None - if str(base_policy_settings[admPolicy]).lower() == 'disabled': - 
log.debug('time to disable {0}'.format(admPolicy)) - this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy) - if this_policy: - this_policy = this_policy[0] - if 'class' in this_policy.attrib: - if this_policy.attrib['class'] == registry_class or this_policy.attrib['class'] == 'Both': - if 'key' in this_policy.attrib: - this_key = this_policy.attrib['key'] - else: - msg = 'policy item {0} does not have the required "key" attribute' - log.error(msg.format(this_policy.attrib)) - break - if 'valueName' in this_policy.attrib: - this_valuename = this_policy.attrib['valueName'] - if DISABLED_VALUE_XPATH(this_policy): - # set the disabled value in the registry.pol file - explicit_enable_disable_value_setting = True - disabled_value_string = _checkValueItemParent(this_policy, - admPolicy, - this_key, - this_valuename, - DISABLED_VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - existing_data = _policyFileReplaceOrAppend(disabled_value_string, - existing_data) - if DISABLED_LIST_XPATH(this_policy): - explicit_enable_disable_value_setting = True - disabled_list_strings = _checkListItem(this_policy, - admPolicy, - this_key, - DISABLED_LIST_XPATH, - None, - test_items=False) - log.debug('working with disabledList portion of {0}'.format(admPolicy)) - existing_data = _policyFileReplaceOrAppendList(disabled_list_strings, + for adm_namespace in admtemplate_data: + for adm_policy in admtemplate_data[adm_namespace]: + if str(admtemplate_data[adm_namespace][adm_policy]).lower() == 'not configured': + if adm_policy in base_policy_settings[adm_namespace]: + base_policy_settings[adm_namespace].pop(adm_policy) + else: + log.debug('adding {0} to base_policy_settings'.format(adm_policy)) + if adm_namespace not in base_policy_settings: + base_policy_settings[adm_namespace] = {} + base_policy_settings[adm_namespace][adm_policy] = admtemplate_data[adm_namespace][adm_policy] + for adm_namespace in base_policy_settings: + for admPolicy in 
base_policy_settings[adm_namespace]: + log.debug('working on admPolicy {0}'.format(admPolicy)) + explicit_enable_disable_value_setting = False + this_key = None + this_valuename = None + if str(base_policy_settings[adm_namespace][admPolicy]).lower() == 'disabled': + log.debug('time to disable {0}'.format(admPolicy)) + this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace}) + if this_policy: + this_policy = this_policy[0] + if 'class' in this_policy.attrib: + if this_policy.attrib['class'] == registry_class or this_policy.attrib['class'] == 'Both': + if 'key' in this_policy.attrib: + this_key = this_policy.attrib['key'] + else: + msg = 'policy item {0} does not have the required "key" attribute' + log.error(msg.format(this_policy.attrib)) + break + if 'valueName' in this_policy.attrib: + this_valuename = this_policy.attrib['valueName'] + if DISABLED_VALUE_XPATH(this_policy): + # set the disabled value in the registry.pol file + explicit_enable_disable_value_setting = True + disabled_value_string = _checkValueItemParent(this_policy, + admPolicy, + this_key, + this_valuename, + DISABLED_VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + existing_data = _policyFileReplaceOrAppend(disabled_value_string, existing_data) - if not explicit_enable_disable_value_setting and this_valuename: - disabled_value_string = _buildKnownDataSearchString(this_key, - this_valuename, - 'REG_DWORD', - None, - check_deleted=True) - existing_data = _policyFileReplaceOrAppend(disabled_value_string, - existing_data) - if ELEMENTS_XPATH(this_policy): - log.debug('checking elements of {0}'.format(admPolicy)) - for elements_item in ELEMENTS_XPATH(this_policy): - for child_item in elements_item.getchildren(): - child_key = this_key - child_valuename = this_valuename - if 'key' in child_item.attrib: - child_key = child_item.attrib['key'] - if 'valueName' in child_item.attrib: - child_valuename = child_item.attrib['valueName'] 
- if etree.QName(child_item).localname == 'boolean' \ - and (TRUE_LIST_XPATH(child_item) or FALSE_LIST_XPATH(child_item)): - # WARNING: no OOB adm files use true/falseList items - # this has not been fully vetted - temp_dict = {'trueList': TRUE_LIST_XPATH, 'falseList': FALSE_LIST_XPATH} - for this_list in temp_dict: - disabled_list_strings = _checkListItem( - child_item, - admPolicy, - child_key, - temp_dict[this_list], - None, - test_items=False) - log.debug('working with {1} portion of {0}'.format( - admPolicy, - this_list)) - existing_data = _policyFileReplaceOrAppendList( - disabled_list_strings, - existing_data) - elif etree.QName(child_item).localname == 'boolean' \ - or etree.QName(child_item).localname == 'decimal' \ - or etree.QName(child_item).localname == 'text' \ - or etree.QName(child_item).localname == 'longDecimal' \ - or etree.QName(child_item).localname == 'multiText' \ - or etree.QName(child_item).localname == 'enum': - disabled_value_string = _processValueItem(child_item, - child_key, - child_valuename, - this_policy, - elements_item, - check_deleted=True) - msg = 'I have disabled value string of {0}' - log.debug(msg.format(disabled_value_string)) - existing_data = _policyFileReplaceOrAppend( - disabled_value_string, - existing_data) - elif etree.QName(child_item).localname == 'list': - disabled_value_string = _processValueItem(child_item, - child_key, - child_valuename, - this_policy, - elements_item, - check_deleted=True) - msg = 'I have disabled value string of {0}' - log.debug(msg.format(disabled_value_string)) - existing_data = _policyFileReplaceOrAppend( - disabled_value_string, - existing_data) - else: - msg = 'policy {0} was found but it does not appear to be valid for the class {1}' - log.error(msg.format(admPolicy, registry_class)) - else: - msg = 'policy item {0} does not have the requried "class" attribute' - log.error(msg.format(this_policy.attrib)) - else: - log.debug('time to enable and set the policy "{0}"'.format(admPolicy)) - 
this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy) - if this_policy: - this_policy = this_policy[0] - if 'class' in this_policy.attrib: - if this_policy.attrib['class'] == registry_class or this_policy.attrib['class'] == 'Both': - if 'key' in this_policy.attrib: - this_key = this_policy.attrib['key'] - else: - msg = 'policy item {0} does not have the required "key" attribute' - log.error(msg.format(this_policy.attrib)) - break - if 'valueName' in this_policy.attrib: - this_valuename = this_policy.attrib['valueName'] - - if ENABLED_VALUE_XPATH(this_policy): - explicit_enable_disable_value_setting = True - enabled_value_string = _checkValueItemParent(this_policy, - admPolicy, - this_key, - this_valuename, - ENABLED_VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - existing_data = _policyFileReplaceOrAppend( - enabled_value_string, - existing_data) - if ENABLED_LIST_XPATH(this_policy): - explicit_enable_disable_value_setting = True - enabled_list_strings = _checkListItem(this_policy, - admPolicy, - this_key, - ENABLED_LIST_XPATH, - None, - test_items=False) - log.debug('working with enabledList portion of {0}'.format(admPolicy)) - existing_data = _policyFileReplaceOrAppendList( - enabled_list_strings, - existing_data) - if not explicit_enable_disable_value_setting and this_valuename: - enabled_value_string = _buildKnownDataSearchString(this_key, - this_valuename, - 'REG_DWORD', - '1', - check_deleted=False) - existing_data = _policyFileReplaceOrAppend( - enabled_value_string, - existing_data) - if ELEMENTS_XPATH(this_policy): - for elements_item in ELEMENTS_XPATH(this_policy): - for child_item in elements_item.getchildren(): - child_key = this_key - child_valuename = this_valuename - if 'key' in child_item.attrib: - child_key = child_item.attrib['key'] - if 'valueName' in child_item.attrib: - child_valuename = child_item.attrib['valueName'] - if child_item.attrib['id'] in base_policy_settings[admPolicy]: - if 
etree.QName(child_item).localname == 'boolean' and ( - TRUE_LIST_XPATH(child_item) or FALSE_LIST_XPATH(child_item)): - list_strings = [] - if base_policy_settings[admPolicy][child_item.attrib['id']]: - list_strings = _checkListItem(child_item, - admPolicy, - child_key, - TRUE_LIST_XPATH, - None, - test_items=False) - log.debug('working with trueList portion of {0}'.format(admPolicy)) - else: - list_strings = _checkListItem(child_item, - admPolicy, - child_key, - FALSE_LIST_XPATH, - None, - test_items=False) - existing_data = _policyFileReplaceOrAppendList( - list_strings, - existing_data) - if etree.QName(child_item).localname == 'boolean' and ( - TRUE_VALUE_XPATH(child_item) or FALSE_VALUE_XPATH(child_item)): - value_string = '' - if base_policy_settings[admPolicy][child_item.attrib['id']]: - value_string = _checkValueItemParent(child_item, - admPolicy, - child_key, - child_valuename, - TRUE_VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - else: - value_string = _checkValueItemParent(child_item, - admPolicy, - child_key, - child_valuename, - FALSE_VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - existing_data = _policyFileReplaceOrAppend( - value_string, - existing_data) + if DISABLED_LIST_XPATH(this_policy): + explicit_enable_disable_value_setting = True + disabled_list_strings = _checkListItem(this_policy, + admPolicy, + this_key, + DISABLED_LIST_XPATH, + None, + test_items=False) + log.debug('working with disabledList portion of {0}'.format(admPolicy)) + existing_data = _policyFileReplaceOrAppendList(disabled_list_strings, + existing_data) + if not explicit_enable_disable_value_setting and this_valuename: + disabled_value_string = _buildKnownDataSearchString(this_key, + this_valuename, + 'REG_DWORD', + None, + check_deleted=True) + existing_data = _policyFileReplaceOrAppend(disabled_value_string, + existing_data) + if ELEMENTS_XPATH(this_policy): + log.debug('checking elements of {0}'.format(admPolicy)) + for elements_item in 
ELEMENTS_XPATH(this_policy): + for child_item in elements_item.getchildren(): + child_key = this_key + child_valuename = this_valuename + if 'key' in child_item.attrib: + child_key = child_item.attrib['key'] + if 'valueName' in child_item.attrib: + child_valuename = child_item.attrib['valueName'] if etree.QName(child_item).localname == 'boolean' \ + and (TRUE_LIST_XPATH(child_item) or FALSE_LIST_XPATH(child_item)): + # WARNING: no OOB adm files use true/falseList items + # this has not been fully vetted + temp_dict = {'trueList': TRUE_LIST_XPATH, 'falseList': FALSE_LIST_XPATH} + for this_list in temp_dict: + disabled_list_strings = _checkListItem( + child_item, + admPolicy, + child_key, + temp_dict[this_list], + None, + test_items=False) + log.debug('working with {1} portion of {0}'.format( + admPolicy, + this_list)) + existing_data = _policyFileReplaceOrAppendList( + disabled_list_strings, + existing_data) + elif etree.QName(child_item).localname == 'boolean' \ or etree.QName(child_item).localname == 'decimal' \ or etree.QName(child_item).localname == 'text' \ or etree.QName(child_item).localname == 'longDecimal' \ - or etree.QName(child_item).localname == 'multiText': - enabled_value_string = _processValueItem( - child_item, - child_key, - child_valuename, - this_policy, - elements_item, - check_deleted=False, - this_element_value=base_policy_settings[admPolicy][child_item.attrib['id']]) - msg = 'I have enabled value string of {0}' - log.debug(msg.format([enabled_value_string])) + or etree.QName(child_item).localname == 'multiText' \ + or etree.QName(child_item).localname == 'enum': + disabled_value_string = _processValueItem(child_item, + child_key, + child_valuename, + this_policy, + elements_item, + check_deleted=True) + msg = 'I have disabled value string of {0}' + log.debug(msg.format(disabled_value_string)) existing_data = _policyFileReplaceOrAppend( - enabled_value_string, + disabled_value_string, existing_data) - elif etree.QName(child_item).localname == 
'enum': - for enum_item in child_item.getchildren(): - if base_policy_settings[admPolicy][child_item.attrib['id']] == \ - _getAdmlDisplayName(adml_policy_resources, - enum_item.attrib['displayName'] - ).strip(): - enabled_value_string = _checkValueItemParent( - enum_item, - child_item.attrib['id'], - child_key, - child_valuename, - VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - existing_data = _policyFileReplaceOrAppend( - enabled_value_string, - existing_data) - if VALUE_LIST_XPATH(enum_item): - enabled_list_strings = _checkListItem(enum_item, - admPolicy, - child_key, - VALUE_LIST_XPATH, - None, - test_items=False) - msg = 'working with valueList portion of {0}' - log.debug(msg.format(child_item.attrib['id'])) - existing_data = _policyFileReplaceOrAppendList( - enabled_list_strings, - existing_data) - break elif etree.QName(child_item).localname == 'list': - enabled_value_string = _processValueItem( - child_item, - child_key, - child_valuename, - this_policy, - elements_item, - check_deleted=False, - this_element_value=base_policy_settings[admPolicy][child_item.attrib['id']]) - msg = 'I have enabled value string of {0}' - log.debug(msg.format([enabled_value_string])) + disabled_value_string = _processValueItem(child_item, + child_key, + child_valuename, + this_policy, + elements_item, + check_deleted=True) + msg = 'I have disabled value string of {0}' + log.debug(msg.format(disabled_value_string)) existing_data = _policyFileReplaceOrAppend( - enabled_value_string, - existing_data, - append_only=True) + disabled_value_string, + existing_data) + else: + msg = 'policy {0} was found but it does not appear to be valid for the class {1}' + log.error(msg.format(admPolicy, registry_class)) + else: + msg = 'policy item {0} does not have the requried "class" attribute' + log.error(msg.format(this_policy.attrib)) + else: + log.debug('time to enable and set the policy "{0}"'.format(admPolicy)) + this_policy = 
admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace}) + log.debug('found this_policy == {0}'.format(this_policy)) + if this_policy: + this_policy = this_policy[0] + if 'class' in this_policy.attrib: + if this_policy.attrib['class'] == registry_class or this_policy.attrib['class'] == 'Both': + if 'key' in this_policy.attrib: + this_key = this_policy.attrib['key'] + else: + msg = 'policy item {0} does not have the required "key" attribute' + log.error(msg.format(this_policy.attrib)) + break + if 'valueName' in this_policy.attrib: + this_valuename = this_policy.attrib['valueName'] + + if ENABLED_VALUE_XPATH(this_policy): + explicit_enable_disable_value_setting = True + enabled_value_string = _checkValueItemParent(this_policy, + admPolicy, + this_key, + this_valuename, + ENABLED_VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + existing_data = _policyFileReplaceOrAppend( + enabled_value_string, + existing_data) + if ENABLED_LIST_XPATH(this_policy): + explicit_enable_disable_value_setting = True + enabled_list_strings = _checkListItem(this_policy, + admPolicy, + this_key, + ENABLED_LIST_XPATH, + None, + test_items=False) + log.debug('working with enabledList portion of {0}'.format(admPolicy)) + existing_data = _policyFileReplaceOrAppendList( + enabled_list_strings, + existing_data) + if not explicit_enable_disable_value_setting and this_valuename: + enabled_value_string = _buildKnownDataSearchString(this_key, + this_valuename, + 'REG_DWORD', + '1', + check_deleted=False) + existing_data = _policyFileReplaceOrAppend( + enabled_value_string, + existing_data) + if ELEMENTS_XPATH(this_policy): + for elements_item in ELEMENTS_XPATH(this_policy): + for child_item in elements_item.getchildren(): + child_key = this_key + child_valuename = this_valuename + if 'key' in child_item.attrib: + child_key = child_item.attrib['key'] + if 'valueName' in child_item.attrib: + child_valuename = child_item.attrib['valueName'] + 
if child_item.attrib['id'] in base_policy_settings[adm_namespace][admPolicy]: + if etree.QName(child_item).localname == 'boolean' and ( + TRUE_LIST_XPATH(child_item) or FALSE_LIST_XPATH(child_item)): + list_strings = [] + if base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']]: + list_strings = _checkListItem(child_item, + admPolicy, + child_key, + TRUE_LIST_XPATH, + None, + test_items=False) + log.debug('working with trueList portion of {0}'.format(admPolicy)) + else: + list_strings = _checkListItem(child_item, + admPolicy, + child_key, + FALSE_LIST_XPATH, + None, + test_items=False) + existing_data = _policyFileReplaceOrAppendList( + list_strings, + existing_data) + elif etree.QName(child_item).localname == 'boolean' and ( + TRUE_VALUE_XPATH(child_item) or FALSE_VALUE_XPATH(child_item)): + value_string = '' + if base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']]: + value_string = _checkValueItemParent(child_item, + admPolicy, + child_key, + child_valuename, + TRUE_VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + else: + value_string = _checkValueItemParent(child_item, + admPolicy, + child_key, + child_valuename, + FALSE_VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + existing_data = _policyFileReplaceOrAppend( + value_string, + existing_data) + elif etree.QName(child_item).localname == 'boolean' \ + or etree.QName(child_item).localname == 'decimal' \ + or etree.QName(child_item).localname == 'text' \ + or etree.QName(child_item).localname == 'longDecimal' \ + or etree.QName(child_item).localname == 'multiText': + enabled_value_string = _processValueItem( + child_item, + child_key, + child_valuename, + this_policy, + elements_item, + check_deleted=False, + this_element_value=base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']]) + msg = 'I have enabled value string of {0}' + log.debug(msg.format([enabled_value_string])) + existing_data = _policyFileReplaceOrAppend( + 
enabled_value_string, + existing_data) + elif etree.QName(child_item).localname == 'enum': + for enum_item in child_item.getchildren(): + if base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']] == \ + _getAdmlDisplayName(adml_policy_resources, + enum_item.attrib['displayName'] + ).strip(): + enabled_value_string = _checkValueItemParent( + enum_item, + child_item.attrib['id'], + child_key, + child_valuename, + VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + existing_data = _policyFileReplaceOrAppend( + enabled_value_string, + existing_data) + if VALUE_LIST_XPATH(enum_item): + enabled_list_strings = _checkListItem(enum_item, + admPolicy, + child_key, + VALUE_LIST_XPATH, + None, + test_items=False) + msg = 'working with valueList portion of {0}' + log.debug(msg.format(child_item.attrib['id'])) + existing_data = _policyFileReplaceOrAppendList( + enabled_list_strings, + existing_data) + break + elif etree.QName(child_item).localname == 'list': + enabled_value_string = _processValueItem( + child_item, + child_key, + child_valuename, + this_policy, + elements_item, + check_deleted=False, + this_element_value=base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']]) + msg = 'I have enabled value string of {0}' + log.debug(msg.format([enabled_value_string])) + existing_data = _policyFileReplaceOrAppend( + enabled_value_string, + existing_data, + append_only=True) _write_regpol_data(existing_data, policy_data.admx_registry_classes[registry_class]['policy_path'], policy_data.gpt_ini_path, @@ -4552,6 +4603,7 @@ def _lookup_admin_template(policy_name, if admx_policy_definitions is None or adml_policy_resources is None: admx_policy_definitions, adml_policy_resources = _processPolicyDefinitions( display_language=adml_language) + admx_search_results = [] admx_search_results = ADMX_SEARCH_XPATH(admx_policy_definitions, policy_name=policy_name, registry_class=policy_class) @@ -4591,11 +4643,42 @@ def 
_lookup_admin_template(policy_name, if adml_search_results: multiple_adml_entries = False suggested_policies = '' + adml_to_remove = [] if len(adml_search_results) > 1: multiple_adml_entries = True for adml_search_result in adml_search_results: if not getattr(adml_search_result, 'text', '').strip() == policy_name: - adml_search_results.remove(adml_search_result) + adml_to_remove.append(adml_search_result) + else: + if hierarchy: + display_name_searchval = '$({0}.{1})'.format( + adml_search_result.tag.split('}')[1], + adml_search_result.attrib['id']) + #policy_search_string = '//{0}:policy[@*[local-name() = "displayName"] = "{1}" and (@*[local-name() = "class"] = "Both" or @*[local-name() = "class"] = "{2}") ]'.format( + policy_search_string = '//{0}:policy[@displayName = "{1}" and (@class = "Both" or @class = "{2}") ]'.format( + adml_search_result.prefix, + display_name_searchval, + policy_class) + admx_results = [] + admx_search_results = admx_policy_definitions.xpath(policy_search_string, namespaces=adml_search_result.nsmap) + for search_result in admx_search_results: + log.debug('policy_name == {0}'.format(policy_name)) + this_hierarchy = _build_parent_list(search_result, + admx_policy_definitions, + True, + adml_policy_resources) + this_hierarchy.reverse() + if hierarchy != this_hierarchy: + adml_to_remove.append(adml_search_result) + else: + admx_results.append(search_result) + if len(admx_results) == 1: + admx_search_results = admx_results + for adml in adml_to_remove: + if adml in adml_search_results: + adml_search_results.remove(adml) + if len(adml_search_results) == 1 and multiple_adml_entries: + multiple_adml_entries = False for adml_search_result in adml_search_results: dmsg = 'found an ADML entry matching the string! 
{0} -- {1}' log.debug(dmsg.format(adml_search_result.tag, @@ -4604,10 +4687,11 @@ def _lookup_admin_template(policy_name, adml_search_result.tag.split('}')[1], adml_search_result.attrib['id']) log.debug('searching for displayName == {0}'.format(display_name_searchval)) - admx_search_results = ADMX_DISPLAYNAME_SEARCH_XPATH( - admx_policy_definitions, - display_name=display_name_searchval, - registry_class=policy_class) + if not admx_search_results: + admx_search_results = ADMX_DISPLAYNAME_SEARCH_XPATH( + admx_policy_definitions, + display_name=display_name_searchval, + registry_class=policy_class) if admx_search_results: if len(admx_search_results) == 1 or hierarchy and not multiple_adml_entries: found = False @@ -4619,6 +4703,7 @@ def _lookup_admin_template(policy_name, True, adml_policy_resources) this_hierarchy.reverse() + log.debug('testing {0} == {1}'.format(hierarchy, this_hierarchy)) if hierarchy == this_hierarchy: found = True else: @@ -5077,6 +5162,7 @@ def set_(computer_policy=None, user_policy=None, if policies[p_class]: for policy_name in policies[p_class]: _pol = None + policy_namespace = None policy_key_name = policy_name if policy_name in _policydata.policies[p_class]['policies']: _pol = _policydata.policies[p_class]['policies'][policy_name] @@ -5126,16 +5212,19 @@ def set_(computer_policy=None, user_policy=None, adml_policy_resources=admlPolicyResources) if success: policy_name = the_policy.attrib['name'] - _admTemplateData[policy_name] = _value + policy_namespace = the_policy.nsmap[the_policy.prefix] + if policy_namespace not in _admTemplateData: + _admTemplateData[policy_namespace] = {} + _admTemplateData[policy_namespace][policy_name] = _value else: raise SaltInvocationError(msg) - if policy_name in _admTemplateData and the_policy is not None: - log.debug('setting == {0}'.format(_admTemplateData[policy_name]).lower()) - log.debug('{0}'.format(str(_admTemplateData[policy_name]).lower())) - if str(_admTemplateData[policy_name]).lower() != 'disabled' 
\ - and str(_admTemplateData[policy_name]).lower() != 'not configured': + if policy_namespace and policy_name in _admTemplateData[policy_namespace] and the_policy is not None: + log.debug('setting == {0}'.format(_admTemplateData[policy_namespace][policy_name]).lower()) + log.debug('{0}'.format(str(_admTemplateData[policy_namespace][policy_name]).lower())) + if str(_admTemplateData[policy_namespace][policy_name]).lower() != 'disabled' \ + and str(_admTemplateData[policy_namespace][policy_name]).lower() != 'not configured': if ELEMENTS_XPATH(the_policy): - if isinstance(_admTemplateData[policy_name], dict): + if isinstance(_admTemplateData[policy_namespace][policy_name], dict): for elements_item in ELEMENTS_XPATH(the_policy): for child_item in elements_item.getchildren(): # check each element @@ -5146,9 +5235,9 @@ def set_(computer_policy=None, user_policy=None, True, admlPolicyResources) log.debug('id attribute == "{0}" this_element_name == "{1}"'.format(child_item.attrib['id'], this_element_name)) - if this_element_name in _admTemplateData[policy_name]: + if this_element_name in _admTemplateData[policy_namespace][policy_name]: temp_element_name = this_element_name - elif child_item.attrib['id'] in _admTemplateData[policy_name]: + elif child_item.attrib['id'] in _admTemplateData[policy_namespace][policy_name]: temp_element_name = child_item.attrib['id'] else: msg = ('Element "{0}" must be included' @@ -5156,12 +5245,12 @@ def set_(computer_policy=None, user_policy=None, raise SaltInvocationError(msg.format(this_element_name, policy_name)) if 'required' in child_item.attrib \ and child_item.attrib['required'].lower() == 'true': - if not _admTemplateData[policy_name][temp_element_name]: + if not _admTemplateData[policy_namespace][policy_name][temp_element_name]: msg = 'Element "{0}" requires a value to be specified' raise SaltInvocationError(msg.format(temp_element_name)) if etree.QName(child_item).localname == 'boolean': if not isinstance( - 
_admTemplateData[policy_name][temp_element_name], + _admTemplateData[policy_namespace][policy_name][temp_element_name], bool): msg = 'Element {0} requires a boolean True or False' raise SaltInvocationError(msg.format(temp_element_name)) @@ -5173,9 +5262,9 @@ def set_(computer_policy=None, user_policy=None, min_val = int(child_item.attrib['minValue']) if 'maxValue' in child_item.attrib: max_val = int(child_item.attrib['maxValue']) - if int(_admTemplateData[policy_name][temp_element_name]) \ + if int(_admTemplateData[policy_namespace][policy_name][temp_element_name]) \ < min_val or \ - int(_admTemplateData[policy_name][temp_element_name]) \ + int(_admTemplateData[policy_namespace][policy_name][temp_element_name]) \ > max_val: msg = 'Element "{0}" value must be between {1} and {2}' raise SaltInvocationError(msg.format(temp_element_name, @@ -5185,7 +5274,7 @@ def set_(computer_policy=None, user_policy=None, # make sure the value is in the enumeration found = False for enum_item in child_item.getchildren(): - if _admTemplateData[policy_name][temp_element_name] == \ + if _admTemplateData[policy_namespace][policy_name][temp_element_name] == \ _getAdmlDisplayName( admlPolicyResources, enum_item.attrib['displayName']).strip(): @@ -5199,33 +5288,33 @@ def set_(computer_policy=None, user_policy=None, and child_item.attrib['explicitValue'].lower() == \ 'true': if not isinstance( - _admTemplateData[policy_name][temp_element_name], + _admTemplateData[policy_namespace][policy_name][temp_element_name], dict): msg = ('Each list item of element "{0}" ' 'requires a dict value') msg = msg.format(temp_element_name) raise SaltInvocationError(msg) elif not isinstance( - _admTemplateData[policy_name][temp_element_name], + _admTemplateData[policy_namespace][policy_name][temp_element_name], list): msg = 'Element "{0}" requires a list value' msg = msg.format(temp_element_name) raise SaltInvocationError(msg) elif etree.QName(child_item).localname == 'multiText': if not isinstance( - 
_admTemplateData[policy_name][temp_element_name], + _admTemplateData[policy_namespace][policy_name][temp_element_name], list): msg = 'Element "{0}" requires a list value' msg = msg.format(temp_element_name) raise SaltInvocationError(msg) - _admTemplateData[policy_name][child_item.attrib['id']] = \ - _admTemplateData[policy_name].pop(temp_element_name) + _admTemplateData[policy_namespace][policy_name][child_item.attrib['id']] = \ + _admTemplateData[policy_namespace][policy_name].pop(temp_element_name) else: msg = 'The policy "{0}" has elements which must be configured' msg = msg.format(policy_name) raise SaltInvocationError(msg) else: - if str(_admTemplateData[policy_name]).lower() != 'enabled': + if str(_admTemplateData[policy_namespace][policy_name]).lower() != 'enabled': msg = ('The policy {0} must either be "Enabled", ' '"Disabled", or "Not Configured"') msg = msg.format(policy_name) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 510776e318..d3434cc2b7 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -945,27 +945,63 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): packages listed under ``pkgs`` will be installed via a single command. + You can specify a version by passing the item as a dict: + + CLI Example: + + .. code-block:: bash + + # will install the latest version of foo and bar + salt '*' pkg.install pkgs='["foo", "bar"]' + + # will install the latest version of foo and version 1.2.3 of bar + salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3"}]' + Kwargs: version (str): - The specific version to install. If omitted, the latest version - will be installed. If passed with multiple install, the version - will apply to all packages. Recommended for single installation - only. + The specific version to install. If omitted, the latest version will + be installed. Recommend for use when installing a single package. 
+ + If passed with a list of packages in the ``pkgs`` parameter, the + version will be ignored. + + CLI Example: + + .. code-block:: bash + + # Version is ignored + salt '*' pkg.install pkgs="['foo', 'bar']" version=1.2.3 + + If passed with a comma separated list in the ``name`` parameter, the + version will apply to all packages in the list. + + CLI Example: + + .. code-block:: bash + + # Version 1.2.3 will apply to packages foo and bar + salt '*' pkg.install foo,bar version=1.2.3 cache_file (str): - A single file to copy down for use with the installer. Copied to - the same location as the installer. Use this over ``cache_dir`` if - there are many files in the directory and you only need a specific - file and don't want to cache additional files that may reside in - the installer directory. Only applies to files on ``salt://`` + A single file to copy down for use with the installer. Copied to the + same location as the installer. Use this over ``cache_dir`` if there + are many files in the directory and you only need a specific file + and don't want to cache additional files that may reside in the + installer directory. Only applies to files on ``salt://`` cache_dir (bool): True will copy the contents of the installer directory. This is - useful for installations that are not a single file. Only applies - to directories on ``salt://`` + useful for installations that are not a single file. Only applies to + directories on ``salt://`` - saltenv (str): Salt environment. Default 'base' + extra_install_flags (str): + Additional install flags that will be appended to the + ``install_flags`` defined in the software definition file. Only + applies when single package is passed. + + saltenv (str): + Salt environment. 
Default 'base' report_reboot_exit_codes (bool): If the installer exits with a recognized exit code indicating that @@ -1061,13 +1097,22 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # "sources" argument pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0] + if len(pkg_params) > 1: + if kwargs.get('extra_install_flags') is not None: + log.warning('\'extra_install_flags\' argument will be ignored for ' + 'multiple package targets') + + # Windows expects an Options dictionary containing 'version' + for pkg in pkg_params: + pkg_params[pkg] = {'version': pkg_params[pkg]} + if pkg_params is None or len(pkg_params) == 0: log.error('No package definition found') return {} if not pkgs and len(pkg_params) == 1: - # Only use the 'version' param if 'name' was not specified as a - # comma-separated list + # Only use the 'version' param if a single item was passed to the 'name' + # parameter pkg_params = { name: { 'version': kwargs.get('version'), @@ -1092,11 +1137,15 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): ret[pkg_name] = 'Unable to locate package {0}'.format(pkg_name) continue - # Get the version number passed or the latest available + # Get the version number passed or the latest available (must be a string) version_num = '' if options: - if options.get('version') is not None: - version_num = str(options.get('version')) + version_num = options.get('version', '') + # Using the salt cmdline with version=5.3 might be interpreted + # as a float it must be converted to a string in order for + # string matching to work. 
+ if not isinstance(version_num, six.string_types) and version_num is not None: + version_num = str(version_num) if not version_num: version_num = _get_latest_pkg_version(pkginfo) @@ -1232,21 +1281,22 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): #Compute msiexec string use_msiexec, msiexec = _get_msiexec(pkginfo[version_num].get('msiexec', False)) + # Build cmd and arguments + # cmd and arguments must be separated for use with the task scheduler + if use_msiexec: + cmd = msiexec + arguments = ['/i', cached_pkg] + if pkginfo['version_num'].get('allusers', True): + arguments.append('ALLUSERS="1"') + arguments.extend(salt.utils.shlex_split(install_flags)) + else: + cmd = cached_pkg + arguments = salt.utils.shlex_split(install_flags) + # Install the software # Check Use Scheduler Option if pkginfo[version_num].get('use_scheduler', False): - # Build Scheduled Task Parameters - if use_msiexec: - cmd = msiexec - arguments = ['/i', cached_pkg] - if pkginfo['version_num'].get('allusers', True): - arguments.append('ALLUSERS="1"') - arguments.extend(salt.utils.shlex_split(install_flags)) - else: - cmd = cached_pkg - arguments = salt.utils.shlex_split(install_flags) - # Create Scheduled Task __salt__['task.create_task'](name='update-salt-software', user_name='System', @@ -1260,21 +1310,46 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): start_time='01:00', ac_only=False, stop_if_on_batteries=False) + # Run Scheduled Task - if not __salt__['task.run_wait'](name='update-salt-software'): - log.error('Failed to install {0}'.format(pkg_name)) - log.error('Scheduled Task failed to run') - ret[pkg_name] = {'install status': 'failed'} - else: - # Build the install command - cmd = [] - if use_msiexec: - cmd.extend([msiexec, '/i', cached_pkg]) - if pkginfo[version_num].get('allusers', True): - cmd.append('ALLUSERS="1"') + # Special handling for installing salt + if re.search(r'salt[\s_.-]*minion', + pkg_name, + flags=re.IGNORECASE + re.UNICODE) is not 
None: + ret[pkg_name] = {'install status': 'task started'} + if not __salt__['task.run'](name='update-salt-software'): + log.error('Failed to install {0}'.format(pkg_name)) + log.error('Scheduled Task failed to run') + ret[pkg_name] = {'install status': 'failed'} + else: + + # Make sure the task is running, try for 5 secs + from time import time + t_end = time() + 5 + while time() < t_end: + task_running = __salt__['task.status']( + 'update-salt-software') == 'Running' + if task_running: + break + + if not task_running: + log.error( + 'Failed to install {0}'.format(pkg_name)) + log.error('Scheduled Task failed to run') + ret[pkg_name] = {'install status': 'failed'} + + # All other packages run with task scheduler else: - cmd.append(cached_pkg) - cmd.extend(salt.utils.shlex_split(install_flags)) + if not __salt__['task.run_wait'](name='update-salt-software'): + log.error('Failed to install {0}'.format(pkg_name)) + log.error('Scheduled Task failed to run') + ret[pkg_name] = {'install status': 'failed'} + else: + + # Combine cmd and arguments + cmd = [cmd] + cmd.extend(arguments) + # Launch the command result = __salt__['cmd.run_all'](cmd, cache_path, @@ -1423,6 +1498,11 @@ def remove(name=None, pkgs=None, version=None, **kwargs): continue if version_num is not None: + # Using the salt cmdline with version=5.3 might be interpreted + # as a float it must be converted to a string in order for + # string matching to work. + if not isinstance(version_num, six.string_types) and version_num is not None: + version_num = str(version_num) if version_num not in pkginfo and 'latest' in pkginfo: version_num = 'latest' elif 'latest' in pkginfo: diff --git a/salt/modules/win_pki.py b/salt/modules/win_pki.py index 5b046b364a..ca17b6d626 100644 --- a/salt/modules/win_pki.py +++ b/salt/modules/win_pki.py @@ -1,9 +1,18 @@ # -*- coding: utf-8 -*- ''' -Microsoft certificate management via the Pki PowerShell module. +Microsoft certificate management via the PKI Client PowerShell module. 
+https://technet.microsoft.com/en-us/itpro/powershell/windows/pkiclient/pkiclient + +The PKI Client PowerShell module is only available on Windows 8+ and Windows +Server 2012+. +https://technet.microsoft.com/en-us/library/hh848636(v=wps.620).aspx :platform: Windows +:depends: + - PowerShell 4 + - PKI Client Module (Windows 8+ / Windows Server 2012+) + .. versionadded:: 2016.11.0 ''' # Import python libs @@ -29,11 +38,17 @@ __virtualname__ = 'win_pki' def __virtual__(): ''' - Only works on Windows systems with the PKI PowerShell module installed. + Requires Windows + Requires Windows 8+ / Windows Server 2012+ + Requires PowerShell + Requires PKI Client PowerShell module installed. ''' if not salt.utils.is_windows(): return False, 'Only available on Windows Systems' + if salt.utils.version_cmp(__grains__['osversion'], '6.2.9200') == -1: + return False, 'Only available on Windows 8+ / Windows Server 2012 +' + if not __salt__['cmd.shell_info']('powershell')['installed']: return False, 'Powershell not available' diff --git a/salt/modules/win_snmp.py b/salt/modules/win_snmp.py index 15653a7510..6fd3bd515f 100644 --- a/salt/modules/win_snmp.py +++ b/salt/modules/win_snmp.py @@ -303,6 +303,11 @@ def get_community_names(): # Windows SNMP service GUI. if isinstance(current_values, list): for current_value in current_values: + + # Ignore error values + if not isinstance(current_value, dict): + continue + permissions = str() for permission_name in _PERMISSION_TYPES: if current_value['vdata'] == _PERMISSION_TYPES[permission_name]: diff --git a/salt/modules/win_system.py b/salt/modules/win_system.py index a92f0923e5..993ab39830 100644 --- a/salt/modules/win_system.py +++ b/salt/modules/win_system.py @@ -37,6 +37,7 @@ except ImportError: import salt.utils import salt.utils.locales import salt.ext.six as six +from salt.exceptions import CommandExecutionError # Set up logging log = logging.getLogger(__name__) @@ -622,7 +623,11 @@ def join_domain(domain, .. 
versionadded:: 2015.8.2/2015.5.7 Returns: - dict: Dictionary if successful, otherwise False + dict: Dictionary if successful + + Raises: + CommandExecutionError: Raises an error if _join_domain returns anything + other than 0 CLI Example: @@ -655,6 +660,56 @@ def join_domain(domain, account_ou = account_ou.split('\\') account_ou = ''.join(account_ou) + err = _join_domain(domain=domain, username=username, password=password, + account_ou=account_ou, account_exists=account_exists) + + if not err: + ret = {'Domain': domain, + 'Restart': False} + if restart: + ret['Restart'] = reboot() + return ret + + raise CommandExecutionError(win32api.FormatMessage(err).rstrip()) + + +def _join_domain(domain, + username=None, + password=None, + account_ou=None, + account_exists=False): + ''' + Helper function to join the domain. + + Args: + domain (str): The domain to which the computer should be joined, e.g. + ``example.com`` + + username (str): Username of an account which is authorized to join + computers to the specified domain. Need to be either fully qualified + like ``user@domain.tld`` or simply ``user`` + + password (str): Password of the specified user + + account_ou (str): The DN of the OU below which the account for this + computer should be created when joining the domain, e.g. + ``ou=computers,ou=departm_432,dc=my-company,dc=com`` + + account_exists (bool): If set to ``True`` the computer will only join + the domain if the account already exists. If set to ``False`` the + computer account will be created if it does not exist, otherwise it + will use the existing account. Default is False. 
+ + Returns: + int: + + :param domain: + :param username: + :param password: + :param account_ou: + :param account_exists: + :return: + ''' NETSETUP_JOIN_DOMAIN = 0x1 # pylint: disable=invalid-name NETSETUP_ACCOUNT_CREATE = 0x2 # pylint: disable=invalid-name NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20 # pylint: disable=invalid-name @@ -670,23 +725,13 @@ def join_domain(domain, pythoncom.CoInitialize() conn = wmi.WMI() comp = conn.Win32_ComputerSystem()[0] - err = comp.JoinDomainOrWorkgroup(Name=domain, - Password=password, - UserName=username, - AccountOU=account_ou, - FJoinOptions=join_options) - # you have to do this because JoinDomainOrWorkgroup returns a strangely - # formatted value that looks like (0,) - if not err[0]: - ret = {'Domain': domain, - 'Restart': False} - if restart: - ret['Restart'] = reboot() - return ret - - log.error(win32api.FormatMessage(err[0]).rstrip()) - return False + # Return the results of the command as an error + # JoinDomainOrWorkgroup returns a strangely formatted value that looks like + # (0,) so return the first item + return comp.JoinDomainOrWorkgroup( + Name=domain, Password=password, UserName=username, AccountOU=account_ou, + FJoinOptions=join_options)[0] def unjoin_domain(username=None, @@ -919,7 +964,11 @@ def set_system_date_time(years=None, seconds (int): Seconds digit: 0 - 59 Returns: - bool: True if successful, otherwise False. 
+ bool: True if successful + + Raises: + CommandExecutionError: Raises an error if ``SetLocalTime`` function + fails CLI Example: @@ -972,12 +1021,15 @@ def set_system_date_time(years=None, system_time.wSecond = int(seconds) system_time_ptr = ctypes.pointer(system_time) succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr) - return succeeded is not 0 - except OSError: + if succeeded is not 0: + return True + else: + log.error('Failed to set local time') + raise CommandExecutionError( + win32api.FormatMessage(succeeded).rstrip()) + except OSError as err: log.error('Failed to set local time') - return False - - return True + raise CommandExecutionError(err) def get_system_date(): diff --git a/salt/modules/win_task.py b/salt/modules/win_task.py index 22285223e4..e3179db4b4 100644 --- a/salt/modules/win_task.py +++ b/salt/modules/win_task.py @@ -1259,7 +1259,7 @@ def status(name, location='\\'): task_service = win32com.client.Dispatch("Schedule.Service") task_service.Connect() - # get the folder to delete the folder from + # get the folder where the task is defined task_folder = task_service.GetFolder(location) task = task_folder.GetTask(name) diff --git a/salt/modules/win_wua.py b/salt/modules/win_wua.py index 638452f2ae..237fb74924 100644 --- a/salt/modules/win_wua.py +++ b/salt/modules/win_wua.py @@ -2,6 +2,49 @@ ''' Module for managing Windows Updates using the Windows Update Agent. +List updates on the system using the following functions: + +- :ref:`available` +- :ref:`list` + +This is an easy way to find additional information about updates available to +to the system, such as the GUID, KB number, or description. 
+ +Once you have the GUID or a KB number for the update you can get information +about the update, download, install, or uninstall it using these functions: + +- :ref:`get` +- :ref:`download` +- :ref:`install` +- :ref:`uninstall` + +The get function expects a name in the form of a GUID, KB, or Title and should +return information about a single update. The other functions accept either a +single item or a list of items for downloading/installing/uninstalling a +specific list of items. + +The :ref:`list` and :ref:`get` functions are utility functions. In addition to +returning information about updates they can also download and install updates +by setting ``download=True`` or ``install=True``. So, with :ref:`list` for +example, you could run the function with the filters you want to see what is +available. Then just add ``install=True`` to install everything on that list. + +If you want to download, install, or uninstall specific updates, use +:ref:`download`, :ref:`install`, or :ref:`uninstall`. To update your system +with the latest updates use :ref:`list` and set ``install=True`` + +You can also adjust the Windows Update settings using the :ref:`set_wu_settings` +function. This function is only supported on the following operating systems: + +- Windows Vista / Server 2008 +- Windows 7 / Server 2008R2 +- Windows 8 / Server 2012 +- Windows 8.1 / Server 2012R2 + +As of Windows 10 and Windows Server 2016, the ability to modify the Windows +Update settings has been restricted. The settings can be modified in the Local +Group Policy using the ``lgpo`` module. + .. versionadded:: 2015.8.0 :depends: @@ -54,36 +97,40 @@ def available(software=True, skip_mandatory=False, skip_reboot=False, categories=None, - severities=None, - ): + severities=None,): ''' .. versionadded:: 2017.7.0 - List updates that match the passed criteria. + List updates that match the passed criteria. This allows for more filter + options than :func:`list`. Good for finding a specific GUID or KB. 
Args: - software (bool): Include software updates in the results (default is - True) + software (bool): + Include software updates in the results (default is True) - drivers (bool): Include driver updates in the results (default is False) + drivers (bool): + Include driver updates in the results (default is False) summary (bool): - - True: Return a summary of updates available for each category. - - False (default): Return a detailed list of available updates. + - True: Return a summary of updates available for each category. + - False (default): Return a detailed list of available updates. - skip_installed (bool): Skip updates that are already installed. Default - is False. + skip_installed (bool): + Skip updates that are already installed. Default is False. - skip_hidden (bool): Skip updates that have been hidden. Default is True. + skip_hidden (bool): + Skip updates that have been hidden. Default is True. - skip_mandatory (bool): Skip mandatory updates. Default is False. + skip_mandatory (bool): + Skip mandatory updates. Default is False. - skip_reboot (bool): Skip updates that require a reboot. Default is - False. + skip_reboot (bool): + Skip updates that require a reboot. Default is False. - categories (list): Specify the categories to list. Must be passed as a - list. All categories returned by default. + categories (list): + Specify the categories to list. Must be passed as a list. All + categories returned by default. Categories include the following: @@ -101,8 +148,9 @@ def available(software=True, * Windows 8.1 and later drivers * Windows Defender - severities (list): Specify the severities to include. Must be passed as - a list. All severities returned by default. + severities (list): + Specify the severities to include. Must be passed as a list. All + severities returned by default. 
Severities include the following: @@ -152,28 +200,30 @@ def available(software=True, salt '*' win_wua.available # List all updates with categories of Critical Updates and Drivers - salt '*' win_wua.available categories=['Critical Updates','Drivers'] + salt '*' win_wua.available categories=["Critical Updates","Drivers"] # List all Critical Security Updates - salt '*' win_wua.available categories=['Security Updates'] severities=['Critical'] + salt '*' win_wua.available categories=["Security Updates"] severities=["Critical"] # List all updates with a severity of Critical - salt '*' win_wua.available severities=['Critical'] + salt '*' win_wua.available severities=["Critical"] # A summary of all available updates salt '*' win_wua.available summary=True # A summary of all Feature Packs and Windows 8.1 Updates - salt '*' win_wua.available categories=['Feature Packs','Windows 8.1'] summary=True + salt '*' win_wua.available categories=["Feature Packs","Windows 8.1"] summary=True ''' # Create a Windows Update Agent instance wua = salt.utils.win_update.WindowsUpdateAgent() # Look for available - updates = wua.available(skip_hidden, skip_installed, skip_mandatory, - skip_reboot, software, drivers, categories, - severities) + updates = wua.available( + skip_hidden=skip_hidden, skip_installed=skip_installed, + skip_mandatory=skip_mandatory, skip_reboot=skip_reboot, + software=software, drivers=drivers, categories=categories, + severities=severities) # Return results as Summary or Details return updates.summary() if summary else updates.list() @@ -183,23 +233,29 @@ def list_update(name, download=False, install=False): ''' .. deprecated:: 2017.7.0 Use :func:`get` instead + Returns details for all updates that match the search criteria Args: - name (str): The name of the update you're searching for. This can be the - GUID, a KB number, or any part of the name of the update. GUIDs and - KBs are preferred. Run ``list_updates`` to get the GUID for the update - you're looking for. 
- download (bool): Download the update returned by this function. Run this - function first to see if the update exists, then set ``download=True`` - to download the update. + name (str): + The name of the update you're searching for. This can be the GUID, a + KB number, or any part of the name of the update. GUIDs and KBs are + preferred. Run ``list_updates`` to get the GUID for the update + you're looking for. - install (bool): Install the update returned by this function. Run this - function first to see if the update exists, then set ``install=True`` to - install the update. + download (bool): + Download the update returned by this function. Run this function + first to see if the update exists, then set ``download=True`` to + download the update. + + install (bool): + Install the update returned by this function. Run this function + first to see if the update exists, then set ``install=True`` to + install the update. Returns: + dict: Returns a dict containing a list of updates that match the name if download and install are both set to False. Should usually be a single update, but can return multiple if a partial name is given. @@ -258,23 +314,28 @@ def get(name, download=False, install=False): ''' .. versionadded:: 2017.7.0 - Returns details for all updates that match the search criteria + Returns details for the named update Args: - name (str): The name of the update you're searching for. This can be the - GUID, a KB number, or any part of the name of the update. GUIDs and - KBs are preferred. Run ``list`` to get the GUID for the update - you're looking for. - download (bool): Download the update returned by this function. Run this - function first to see if the update exists, then set ``download=True`` - to download the update. + name (str): + The name of the update you're searching for. This can be the GUID, a + KB number, or any part of the name of the update. GUIDs and KBs are + preferred. Run ``list`` to get the GUID for the update you're + looking for. 
- install (bool): Install the update returned by this function. Run this - function first to see if the update exists, then set ``install=True`` to - install the update. + download (bool): + Download the update returned by this function. Run this function + first to see if the update exists, then set ``download=True`` to + download the update. + + install (bool): + Install the update returned by this function. Run this function + first to see if the update exists, then set ``install=True`` to + install the update. Returns: + dict: Returns a dict containing a list of updates that match the name if download and install are both set to False. Should usually be a single update, but can return multiple if a partial name is given. @@ -357,30 +418,35 @@ def list_updates(software=True, install is True the same list will be downloaded and/or installed. Args: - software (bool): Include software updates in the results (default is - True) - drivers (bool): Include driver updates in the results (default is False) + software (bool): + Include software updates in the results (default is True) + + drivers (bool): + Include driver updates in the results (default is False) summary (bool): - - True: Return a summary of updates available for each category. - - False (default): Return a detailed list of available updates. + - True: Return a summary of updates available for each category. + - False (default): Return a detailed list of available updates. - skip_installed (bool): Skip installed updates in the results (default is - False) + skip_installed (bool): + Skip installed updates in the results (default is False) - download (bool): (Overrides reporting functionality) Download the list - of updates returned by this function. Run this function first with - ``download=False`` to see what will be downloaded, then set - ``download=True`` to download the updates. + download (bool): + (Overrides reporting functionality) Download the list of updates + returned by this function. 
Run this function first with + ``download=False`` to see what will be downloaded, then set + ``download=True`` to download the updates. - install (bool): (Overrides reporting functionality) Install the list of - updates returned by this function. Run this function first with - ``install=False`` to see what will be installed, then set - ``install=True`` to install the updates. + install (bool): + (Overrides reporting functionality) Install the list of updates + returned by this function. Run this function first with + ``install=False`` to see what will be installed, then set + ``install=True`` to install the updates. - categories (list): Specify the categories to list. Must be passed as a - list. All categories returned by default. + categories (list): + Specify the categories to list. Must be passed as a list. All + categories returned by default. Categories include the following: @@ -398,8 +464,9 @@ def list_updates(software=True, * Windows 8.1 and later drivers * Windows Defender - severities (list): Specify the severities to include. Must be passed as - a list. All severities returned by default. + severities (list): + Specify the severities to include. Must be passed as a list. All + severities returned by default. Severities include the following: @@ -486,30 +553,35 @@ def list(software=True, install is True the same list will be downloaded and/or installed. Args: - software (bool): Include software updates in the results (default is - True) - drivers (bool): Include driver updates in the results (default is False) + software (bool): + Include software updates in the results (default is True) + + drivers (bool): + Include driver updates in the results (default is False) summary (bool): - - True: Return a summary of updates available for each category. - - False (default): Return a detailed list of available updates. + - True: Return a summary of updates available for each category. + - False (default): Return a detailed list of available updates. 
- skip_installed (bool): Skip installed updates in the results (default is - False) + skip_installed (bool): + Skip installed updates in the results (default is False) - download (bool): (Overrides reporting functionality) Download the list - of updates returned by this function. Run this function first with - ``download=False`` to see what will be downloaded, then set - ``download=True`` to download the updates. + download (bool): + (Overrides reporting functionality) Download the list of updates + returned by this function. Run this function first with + ``download=False`` to see what will be downloaded, then set + ``download=True`` to download the updates. - install (bool): (Overrides reporting functionality) Install the list of - updates returned by this function. Run this function first with - ``install=False`` to see what will be installed, then set - ``install=True`` to install the updates. + install (bool): + (Overrides reporting functionality) Install the list of updates + returned by this function. Run this function first with + ``install=False`` to see what will be installed, then set + ``install=True`` to install the updates. - categories (list): Specify the categories to list. Must be passed as a - list. All categories returned by default. + categories (list): + Specify the categories to list. Must be passed as a list. All + categories returned by default. Categories include the following: @@ -527,8 +599,9 @@ def list(software=True, * Windows 8.1 and later drivers * Windows Defender - severities (list): Specify the severities to include. Must be passed as - a list. All severities returned by default. + severities (list): + Specify the severities to include. Must be passed as a list. All + severities returned by default. Severities include the following: @@ -575,22 +648,22 @@ def list(software=True, .. 
code-block:: bash # Normal Usage (list all software updates) - salt '*' win_wua.list_updates + salt '*' win_wua.list # List all updates with categories of Critical Updates and Drivers - salt '*' win_wua.list_updates categories=['Critical Updates','Drivers'] + salt '*' win_wua.list categories=['Critical Updates','Drivers'] # List all Critical Security Updates - salt '*' win_wua.list_updates categories=['Security Updates'] severities=['Critical'] + salt '*' win_wua.list categories=['Security Updates'] severities=['Critical'] # List all updates with a severity of Critical - salt '*' win_wua.list_updates severities=['Critical'] + salt '*' win_wua.list severities=['Critical'] # A summary of all available updates - salt '*' win_wua.list_updates summary=True + salt '*' win_wua.list summary=True # A summary of all Feature Packs and Windows 8.1 Updates - salt '*' win_wua.list_updates categories=['Feature Packs','Windows 8.1'] summary=True + salt '*' win_wua.list categories=['Feature Packs','Windows 8.1'] summary=True ''' # Create a Windows Update Agent instance wua = salt.utils.win_update.WindowsUpdateAgent() @@ -604,11 +677,11 @@ def list(software=True, # Download if download or install: - ret['Download'] = wua.download(updates.updates) + ret['Download'] = wua.download(updates) # Install if install: - ret['Install'] = wua.install(updates.updates) + ret['Install'] = wua.install(updates) if not ret: return updates.summary() if summary else updates.list() @@ -625,13 +698,16 @@ def download_update(name): Args: - name (str): The name of the update to download. This can be a GUID, a KB - number, or any part of the name. To ensure a single item is matched the - GUID is preferred. + name (str): + The name of the update to download. This can be a GUID, a KB number, + or any part of the name. To ensure a single item is matched the GUID + is preferred. - .. note:: If more than one result is returned an error will be raised. + .. 
note:: + If more than one result is returned an error will be raised. Returns: + dict: A dictionary containing the results of the download CLI Examples: @@ -641,7 +717,6 @@ def download_update(name): salt '*' win_wua.download_update 12345678-abcd-1234-abcd-1234567890ab salt '*' win_wua.download_update KB12312321 - ''' salt.utils.warn_until( 'Fluorine', @@ -660,8 +735,9 @@ def download_updates(names): Args: - names (list): A list of updates to download. This can be any combination - of GUIDs, KB numbers, or names. GUIDs or KBs are preferred. + names (list): + A list of updates to download. This can be any combination of GUIDs, + KB numbers, or names. GUIDs or KBs are preferred. Returns: @@ -672,7 +748,7 @@ def download_updates(names): .. code-block:: bash # Normal Usage - salt '*' win_wua.download guid=['12345678-abcd-1234-abcd-1234567890ab', 'KB2131233'] + salt '*' win_wua.download_updates guid=['12345678-abcd-1234-abcd-1234567890ab', 'KB2131233'] ''' salt.utils.warn_until( 'Fluorine', @@ -690,9 +766,14 @@ def download(names): Args: - names (str, list): A single update or a list of updates to download. - This can be any combination of GUIDs, KB numbers, or names. GUIDs or KBs - are preferred. + names (str, list): + A single update or a list of updates to download. This can be any + combination of GUIDs, KB numbers, or names. GUIDs or KBs are + preferred. + + .. note:: + An error will be raised if there are more results than there are items + in the names parameter Returns: @@ -703,7 +784,7 @@ def download(names): .. 
code-block:: bash # Normal Usage - salt '*' win_wua.download guid=['12345678-abcd-1234-abcd-1234567890ab', 'KB2131233'] + salt '*' win_wua.download names=['12345678-abcd-1234-abcd-1234567890ab', 'KB2131233'] ''' # Create a Windows Update Agent instance wua = salt.utils.win_update.WindowsUpdateAgent() @@ -714,6 +795,13 @@ def download(names): if updates.count() == 0: raise CommandExecutionError('No updates found') + # Make sure it's a list so count comparison is correct + if isinstance(names, six.string_types): + names = [names] + + if isinstance(names, six.integer_types): + names = [str(names)] + if updates.count() > len(names): raise CommandExecutionError('Multiple updates found, names need to be ' 'more specific') @@ -734,10 +822,12 @@ def install_update(name): number, or any part of the name. To ensure a single item is matched the GUID is preferred. - .. note:: If no results or more than one result is returned an error - will be raised. + .. note:: + If no results or more than one result is returned an error will be + raised. Returns: + dict: A dictionary containing the results of the install CLI Examples: @@ -795,9 +885,14 @@ def install(names): Args: - names (str, list): A single update or a list of updates to install. - This can be any combination of GUIDs, KB numbers, or names. GUIDs or KBs - are preferred. + names (str, list): + A single update or a list of updates to install. This can be any + combination of GUIDs, KB numbers, or names. GUIDs or KBs are + preferred. + + .. note:: + An error will be raised if there are more results than there are items + in the names parameter Returns: @@ -808,7 +903,7 @@ def install(names): .. 
code-block:: bash # Normal Usage - salt '*' win_wua.install_updates guid=['12345678-abcd-1234-abcd-1234567890ab', 'KB12323211'] + salt '*' win_wua.install KB12323211 ''' # Create a Windows Update Agent instance wua = salt.utils.win_update.WindowsUpdateAgent() @@ -819,6 +914,13 @@ def install(names): if updates.count() == 0: raise CommandExecutionError('No updates found') + # Make sure it's a list so count comparison is correct + if isinstance(names, six.string_types): + names = [names] + + if isinstance(names, six.integer_types): + names = [str(names)] + if updates.count() > len(names): raise CommandExecutionError('Multiple updates found, names need to be ' 'more specific') @@ -834,9 +936,10 @@ def uninstall(names): Args: - names (str, list): A single update or a list of updates to uninstall. - This can be any combination of GUIDs, KB numbers, or names. GUIDs or KBs - are preferred. + names (str, list): + A single update or a list of updates to uninstall. This can be any + combination of GUIDs, KB numbers, or names. GUIDs or KBs are + preferred. Returns: @@ -875,33 +978,50 @@ def set_wu_settings(level=None, Change Windows Update settings. If no parameters are passed, the current value will be returned. - :param int level: - Number from 1 to 4 indicating the update level: + Supported: + - Windows Vista / Server 2008 + - Windows 7 / Server 2008R2 + - Windows 8 / Server 2012 + - Windows 8.1 / Server 2012R2 + + .. note: + Microsoft began using the Unified Update Platform (UUP) starting with + Windows 10 / Server 2016. The Windows Update settings have changed and + the ability to 'Save' Windows Update settings has been removed. Windows + Update settings are read-only. See MSDN documentation: + https://msdn.microsoft.com/en-us/library/aa385829(v=vs.85).aspx + + Args: + + level (int): + Number from 1 to 4 indicating the update level: + 1. Never check for updates 2. Check for updates but let me choose whether to download and install them 3. 
Download updates but let me choose whether to install them 4. Install updates automatically - :param bool recommended: - Boolean value that indicates whether to include optional or recommended - updates when a search for updates and installation of updates is - performed. - :param bool featured: - Boolean value that indicates whether to display notifications for - featured updates. + recommended (bool): + Boolean value that indicates whether to include optional or + recommended updates when a search for updates and installation of + updates is performed. - :param bool elevated: - Boolean value that indicates whether non-administrators can perform some - update-related actions without administrator approval. + featured (bool): + Boolean value that indicates whether to display notifications for + featured updates. - :param bool msupdate: - Boolean value that indicates whether to turn on Microsoft Update for - other Microsoft products + elevated (bool): + Boolean value that indicates whether non-administrators can perform + some update-related actions without administrator approval. + + msupdate (bool): + Boolean value that indicates whether to turn on Microsoft Update for + other Microsoft products + + day (str): + Days of the week on which Automatic Updates installs or uninstalls + updates. Accepted values: - :param str day: - Days of the week on which Automatic Updates installs or uninstalls - updates. - Accepted values: - Everyday - Monday - Tuesday @@ -910,21 +1030,43 @@ def set_wu_settings(level=None, - Friday - Saturday - :param str time: - Time at which Automatic Updates installs or uninstalls updates. Must be - in the ##:## 24hr format, eg. 3:00 PM would be 15:00 + time (str): + Time at which Automatic Updates installs or uninstalls updates. Must + be in the ##:## 24hr format, eg. 3:00 PM would be 15:00. Must be in + 1 hour increments. - :return: Returns a dictionary containing the results. + Returns: + + dict: Returns a dictionary containing the results. 
CLI Examples: .. code-block:: bash salt '*' win_wua.set_wu_settings level=4 recommended=True featured=False - ''' - ret = {} - ret['Success'] = True + # The AutomaticUpdateSettings.Save() method used in this function does not + # work on Windows 10 / Server 2016. It is called in throughout this function + # like this: + # + # obj_au = win32com.client.Dispatch('Microsoft.Update.AutoUpdate') + # obj_au_settings = obj_au.Settings + # obj_au_settings.Save() + # + # The `Save()` method reports success but doesn't actually change anything. + # Windows Update settings are read-only in Windows 10 / Server 2016. There's + # a little blurb on MSDN that mentions this, but gives no alternative for + # changing these settings in Windows 10 / Server 2016. + # + # https://msdn.microsoft.com/en-us/library/aa385829(v=vs.85).aspx + # + # Apparently the Windows Update framework in Windows Vista - Windows 8.1 has + # been changed quite a bit in Windows 10 / Server 2016. It is now called the + # Unified Update Platform (UUP). I haven't found an API or a Powershell + # commandlet for working with the the UUP. Perhaps there will be something + # forthcoming. The `win_lgpo` module might be an option for changing the + # Windows Update settings using local group policy. + ret = {'Success': True} # Initialize the PyCom system pythoncom.CoInitialize() @@ -1076,30 +1218,31 @@ def get_wu_settings(): Boolean value that indicates whether to display notifications for featured updates. Group Policy Required (Read-only): - Boolean value that indicates whether Group Policy requires the Automatic - Updates service. + Boolean value that indicates whether Group Policy requires the + Automatic Updates service. Microsoft Update: Boolean value that indicates whether to turn on Microsoft Update for other Microsoft Products Needs Reboot: - Boolean value that indicates whether the machine is in a reboot pending - state. + Boolean value that indicates whether the machine is in a reboot + pending state. 
Non Admins Elevated: - Boolean value that indicates whether non-administrators can perform some - update-related actions without administrator approval. + Boolean value that indicates whether non-administrators can perform + some update-related actions without administrator approval. Notification Level: Number 1 to 4 indicating the update level: 1. Never check for updates - 2. Check for updates but let me choose whether to download and install them + 2. Check for updates but let me choose whether to download and + install them 3. Download updates but let me choose whether to install them 4. Install updates automatically Read Only (Read-only): Boolean value that indicates whether the Automatic Update settings are read-only. Recommended Updates: - Boolean value that indicates whether to include optional or recommended - updates when a search for updates and installation of updates is - performed. + Boolean value that indicates whether to include optional or + recommended updates when a search for updates and installation of + updates is performed. Scheduled Day: Days of the week on which Automatic Updates installs or uninstalls updates. @@ -1182,13 +1325,12 @@ def get_needs_reboot(): Returns: - bool: True if the system requires a reboot, False if not + bool: True if the system requires a reboot, otherwise False CLI Examples: .. 
code-block:: bash salt '*' win_wua.get_needs_reboot - ''' return salt.utils.win_update.needs_reboot() diff --git a/salt/modules/x509.py b/salt/modules/x509.py index 986f137a82..3626bd42d8 100644 --- a/salt/modules/x509.py +++ b/salt/modules/x509.py @@ -330,10 +330,14 @@ def _parse_subject(subject): for nid_name, nid_num in six.iteritems(subject.nid): if nid_num in nids: continue - val = getattr(subject, nid_name) - if val: - ret[nid_name] = val - nids.append(nid_num) + try: + val = getattr(subject, nid_name) + if val: + ret[nid_name] = val + nids.append(nid_num) + except TypeError as e: + if e.args and e.args[0] == 'No string argument provided': + pass return ret @@ -1373,10 +1377,19 @@ def create_certificate( ['listen_in', 'preqrequired', '__prerequired__']: kwargs.pop(ignore, None) - cert_txt = __salt__['publish.publish']( + certs = __salt__['publish.publish']( tgt=ca_server, fun='x509.sign_remote_certificate', - arg=str(kwargs))[ca_server] + arg=str(kwargs)) + + if not any(certs): + raise salt.exceptions.SaltInvocationError( + 'ca_server did not respond' + ' salt master must permit peers to' + ' call the sign_remote_certificate function.') + + cert_txt = certs[ca_server] + if path: return write_pem( text=cert_txt, diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index e659d1b79c..0af6a811f4 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -35,8 +35,10 @@ try: import yum HAS_YUM = True except ImportError: - from salt.ext.six.moves import configparser HAS_YUM = False + +from salt.ext.six.moves import configparser + # pylint: enable=import-error,redefined-builtin # Import salt libs @@ -181,7 +183,16 @@ def _check_versionlock(): Ensure that the appropriate versionlock plugin is present ''' if _yum() == 'dnf': - vl_plugin = 'python-dnf-plugins-extras-versionlock' + if int(__grains__.get('osmajorrelease')) >= 26: + if six.PY3: + vl_plugin = 'python3-dnf-plugin-versionlock' + else: + vl_plugin = 'python2-dnf-plugin-versionlock' + else: + 
if six.PY3: + vl_plugin = 'python3-dnf-plugins-extras-versionlock' + else: + vl_plugin = 'python-dnf-plugins-extras-versionlock' else: vl_plugin = 'yum-versionlock' \ if __grains__.get('osmajorrelease') == '5' \ @@ -1034,6 +1045,11 @@ def refresh_db(**kwargs): clean_cmd = [_yum(), '--quiet', 'clean', 'expire-cache'] update_cmd = [_yum(), '--quiet', 'check-update'] + + if __grains__.get('os_family') == 'RedHat' and __grains__.get('osmajorrelease') == '7': + # This feature is disable because it is not used by Salt and lasts a lot with using large repo like EPEL + update_cmd.append('--setopt=autocheck_running_kernel=false') + for args in (repo_arg, exclude_arg, branch_arg): if args: clean_cmd.extend(args) @@ -2694,41 +2710,32 @@ def _parse_repo_file(filename): ''' Turn a single repo file into a dict ''' - repos = {} - header = '' - repo = '' - with salt.utils.fopen(filename, 'r') as rfile: - for line in rfile: - if line.startswith('['): - repo = line.strip().replace('[', '').replace(']', '') - repos[repo] = {} + parsed = configparser.ConfigParser() + config = {} - # Even though these are essentially uselss, I want to allow the - # user to maintain their own comments, etc - if not line: - if not repo: - header += line - if line.startswith('#'): - if not repo: - header += line - else: - if 'comments' not in repos[repo]: - repos[repo]['comments'] = [] - repos[repo]['comments'].append(line.strip()) - continue + try: + parsed.read(filename) + except configparser.MissingSectionHeaderError as err: + log.error( + 'Failed to parse file {0}, error: {1}'.format(filename, err.message) + ) + return ('', {}) - # These are the actual configuration lines that matter - if '=' in line: - try: - comps = line.strip().split('=') - repos[repo][comps[0].strip()] = '='.join(comps[1:]) - except KeyError: - log.error( - 'Failed to parse line in %s, offending line was ' - '\'%s\'', filename, line.rstrip() - ) + for section in parsed._sections: + section_dict = dict(parsed._sections[section]) + 
section_dict.pop('__name__') + config[section] = section_dict - return (header, repos) + # Try to extract leading comments + headers = '' + with salt.utils.fopen(filename, 'r') as rawfile: + for line in rawfile: + if line.strip().startswith('#'): + headers += '{0}\n'.format(line.strip()) + else: + break + + return (headers, config) def file_list(*packages): diff --git a/salt/netapi/rest_cherrypy/__init__.py b/salt/netapi/rest_cherrypy/__init__.py index 6285de289c..d974cda6c1 100644 --- a/salt/netapi/rest_cherrypy/__init__.py +++ b/salt/netapi/rest_cherrypy/__init__.py @@ -20,7 +20,7 @@ try: except ImportError as exc: cpy_error = exc -__virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] or 'rest_cherrypy' +__virtualname__ = os.path.abspath(__file__).rsplit(os.sep)[-2] or 'rest_cherrypy' logger = logging.getLogger(__virtualname__) cpy_min = '3.2.2' diff --git a/salt/netapi/rest_cherrypy/app.py b/salt/netapi/rest_cherrypy/app.py index b9e41fac78..4099416a28 100644 --- a/salt/netapi/rest_cherrypy/app.py +++ b/salt/netapi/rest_cherrypy/app.py @@ -483,13 +483,22 @@ import signal import tarfile from multiprocessing import Process, Pipe +logger = logging.getLogger(__name__) + # Import third-party libs -# pylint: disable=import-error -import cherrypy # pylint: disable=3rd-party-module-not-gated +# pylint: disable=import-error, 3rd-party-module-not-gated +import cherrypy +try: + from cherrypy.lib import cpstats +except ImportError: + cpstats = None + logger.warn('Import of cherrypy.cpstats failed. 
' + 'Possible upstream bug: ' + 'https://github.com/cherrypy/cherrypy/issues/1444') + import yaml import salt.ext.six as six -# pylint: enable=import-error - +# pylint: enable=import-error, 3rd-party-module-not-gated # Import Salt libs import salt @@ -500,8 +509,6 @@ import salt.utils.event # Import salt-api libs import salt.netapi -logger = logging.getLogger(__name__) - # Imports related to websocket try: from .tools import websockets @@ -2616,13 +2623,6 @@ class Stats(object): :status 406: |406| ''' if hasattr(logging, 'statistics'): - # Late import - try: - from cherrypy.lib import cpstats - except ImportError: - logger.error('Import of cherrypy.cpstats failed. Possible ' - 'upstream bug here: https://github.com/cherrypy/cherrypy/issues/1444') - return {} return cpstats.extrapolate_statistics(logging.statistics) return {} @@ -2742,13 +2742,14 @@ class API(object): 'tools.trailing_slash.on': True, 'tools.gzip.on': True, - 'tools.cpstats.on': self.apiopts.get('collect_stats', False), - 'tools.html_override.on': True, 'tools.cors_tool.on': True, }, } + if cpstats and self.apiopts.get('collect_stats', False): + conf['/']['tools.cpstats.on'] = True + if 'favicon' in self.apiopts: conf['/favicon.ico'] = { 'tools.staticfile.on': True, diff --git a/salt/netapi/rest_tornado/__init__.py b/salt/netapi/rest_tornado/__init__.py index fc547a02a3..2437714073 100644 --- a/salt/netapi/rest_tornado/__init__.py +++ b/salt/netapi/rest_tornado/__init__.py @@ -10,7 +10,7 @@ import os import salt.auth from salt.utils.versions import StrictVersion as _StrictVersion -__virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] or 'rest_tornado' +__virtualname__ = os.path.abspath(__file__).rsplit(os.sep)[-2] or 'rest_tornado' logger = logging.getLogger(__virtualname__) diff --git a/salt/netapi/rest_tornado/saltnado.py b/salt/netapi/rest_tornado/saltnado.py index cf7bd652a9..38e665b7df 100644 --- a/salt/netapi/rest_tornado/saltnado.py +++ b/salt/netapi/rest_tornado/saltnado.py @@ -523,8 
+523,7 @@ class BaseSaltAPIHandler(tornado.web.RequestHandler, SaltClientsMixIn): # pylin try: # Use cgi.parse_header to correctly separate parameters from value - header = cgi.parse_header(self.request.headers['Content-Type']) - value, parameters = header + value, parameters = cgi.parse_header(self.request.headers['Content-Type']) return ct_in_map[value](tornado.escape.native_str(data)) except KeyError: self.send_error(406) @@ -538,7 +537,7 @@ class BaseSaltAPIHandler(tornado.web.RequestHandler, SaltClientsMixIn): # pylin if not self.request.body: return data = self.deserialize(self.request.body) - self.raw_data = copy(data) + self.request_payload = copy(data) if data and 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] @@ -696,15 +695,13 @@ class SaltAuthHandler(BaseSaltAPIHandler): # pylint: disable=W0223 }} ''' try: - request_payload = self.deserialize(self.request.body) - - if not isinstance(request_payload, dict): + if not isinstance(self.request_payload, dict): self.send_error(400) return - creds = {'username': request_payload['username'], - 'password': request_payload['password'], - 'eauth': request_payload['eauth'], + creds = {'username': self.request_payload['username'], + 'password': self.request_payload['password'], + 'eauth': self.request_payload['eauth'], } # if any of the args are missing, its a bad request except KeyError: @@ -1641,7 +1638,7 @@ class WebhookSaltAPIHandler(SaltAPIHandler): # pylint: disable=W0223 value = value[0] arguments[argname] = value ret = self.event.fire_event({ - 'post': self.raw_data, + 'post': self.request_payload, 'get': arguments, # In Tornado >= v4.0.3, the headers come # back as an HTTPHeaders instance, which diff --git a/salt/output/__init__.py b/salt/output/__init__.py index d9ce631b4e..8a1732c512 100644 --- a/salt/output/__init__.py +++ b/salt/output/__init__.py @@ -168,6 +168,13 @@ def get_printout(out, opts=None, **kwargs): opts['color'] = False else: opts['color'] = True + else: + 
if opts.get('force_color', False): + opts['color'] = True + elif opts.get('no_color', False) or salt.utils.is_windows(): + opts['color'] = False + else: + pass outputters = salt.loader.outputters(opts) if out not in outputters: diff --git a/salt/pillar/git_pillar.py b/salt/pillar/git_pillar.py index 8b85d14ab3..ae1d337387 100644 --- a/salt/pillar/git_pillar.py +++ b/salt/pillar/git_pillar.py @@ -561,7 +561,7 @@ def ext_pillar(minion_id, repo, pillar_dirs): ) for pillar_dir, env in six.iteritems(pillar.pillar_dirs): # If pillarenv is set, only grab pillars with that match pillarenv - if opts['pillarenv'] and env != opts['pillarenv']: + if opts['pillarenv'] and env != opts['pillarenv'] and env != '__env__': log.debug( 'env \'%s\' for pillar dir \'%s\' does not match ' 'pillarenv \'%s\', skipping', diff --git a/salt/proxy/napalm.py b/salt/proxy/napalm.py index b0cd8d6546..1acee33148 100644 --- a/salt/proxy/napalm.py +++ b/salt/proxy/napalm.py @@ -23,37 +23,88 @@ Please check Installation_ for complete details. Pillar ------ -The napalm proxy configuration requires four mandatory parameters in order to connect to the network device: +The napalm proxy configuration requires the following parameters in order to connect to the network device: -* driver: specifies the network device operating system. For a complete list of the supported operating systems \ -please refer to the `NAPALM Read the Docs page`_. -* host: hostname -* username: username to be used when connecting to the device -* passwd: the password needed to establish the connection -* optional_args: dictionary with the optional arguments. Check the complete list of supported `optional arguments`_ +driver + Specifies the network device operating system. + For a complete list of the supported operating systems please refer to the + `NAPALM Read the Docs page`_. -.. _`NAPALM Read the Docs page`: https://napalm.readthedocs.io/en/latest/#supported-network-operating-systems -.. 
_`optional arguments`: http://napalm.readthedocs.io/en/latest/support/index.html#list-of-supported-optional-arguments +host + The IP Address or FQDN to use when connecting to the device. Alternatively, + the following field names can be used instead: ``hostname``, ``fqdn``, ``ip``. -.. versionadded:: 2017.7.0 +username + The username to be used when connecting to the device. -* always_alive: in certain less dynamic environments, maintaining the remote connection permanently +passwd + The password needed to establish the connection. + + .. note:: + + This field may not be mandatory when working with SSH-based drivers, and + the username has a SSH key properly configured on the device targeted to + be managed. + +optional_args + Dictionary with the optional arguments. + Check the complete list of supported `optional arguments`_. + +always_alive: ``True`` + In certain less dynamic environments, maintaining the remote connection permanently open with the network device is not always beneficial. In that case, the user can select to initialize the connection only when needed, by specifying this field to ``false``. Default: ``true`` (maintains the connection with the remote network device). -Example: + .. versionadded:: 2017.7.0 + +provider: ``napalm_base`` + The module that provides the ``get_network_device`` function. + This option is useful when the user has more specific needs and requires + to extend the NAPALM capabilities using a private library implementation. + The only constraint is that the alternative library needs to have the + ``get_network_device`` function available. + + .. versionadded:: 2017.7.1 + +multiprocessing: ``False`` + Overrides the :conf_minion:`multiprocessing` option, per proxy minion. + The ``multiprocessing`` option must be turned off for SSH-based proxies. + However, some NAPALM drivers (e.g. Arista, NX-OS) are not SSH-based. 
+ As multiple proxy minions may share the same configuration file, + this option permits the configuration of the ``multiprocessing`` option + more specifically, for some proxy minions. + + .. versionadded:: 2017.7.2 + + +.. _`NAPALM Read the Docs page`: https://napalm.readthedocs.io/en/latest/#supported-network-operating-systems +.. _`optional arguments`: http://napalm.readthedocs.io/en/latest/support/index.html#list-of-supported-optional-arguments + +Proxy pillar file example: .. code-block:: yaml proxy: - proxytype: napalm - driver: junos - host: core05.nrt02 - username: my_username - passwd: my_password - optional_args: - port: 12201 + proxytype: napalm + driver: junos + host: core05.nrt02 + username: my_username + passwd: my_password + optional_args: + port: 12201 + +Example using a user-specific library, extending NAPALM's capabilities, e.g. ``custom_napalm_base``: + +.. code-block:: yaml + + proxy: + proxytype: napalm + driver: ios + fqdn: cr1.th2.par.as1234.net + username: salt + password: '' + provider: custom_napalm_base .. seealso:: diff --git a/salt/roster/cache.py b/salt/roster/cache.py index bb03b5f0f3..cf4244ff3a 100644 --- a/salt/roster/cache.py +++ b/salt/roster/cache.py @@ -129,7 +129,7 @@ def targets(tgt, tgt_type='glob', **kwargs): # pylint: disable=W0613 'host': ('ipv6-private', 'ipv6-global', 'ipv4-private', 'ipv4-public') }) if isinstance(roster_order, (tuple, list)): - salt.utils.warn_until('Oxygen', + salt.utils.warn_until('Fluorine', 'Using legacy syntax for roster_order') roster_order = { 'host': roster_order @@ -137,7 +137,7 @@ def targets(tgt, tgt_type='glob', **kwargs): # pylint: disable=W0613 for config_key, order in roster_order.items(): for idx, key in enumerate(order): if key in ('public', 'private', 'local'): - salt.utils.warn_until('Oxygen', + salt.utils.warn_until('Fluorine', 'roster_order {0} will include IPv6 soon. 
' 'Set order to ipv4-{0} if needed.'.format(key)) order[idx] = 'ipv4-' + key diff --git a/salt/runners/auth.py b/salt/runners/auth.py index 8aec8c3389..7e8e64855a 100644 --- a/salt/runners/auth.py +++ b/salt/runners/auth.py @@ -17,16 +17,28 @@ import salt.netapi def mk_token(**load): - ''' + r''' Create an eauth token using provided credentials + Non-root users may specify an expiration date -- if allowed via the + :conf_master:`token_expire_user_override` setting -- by passing an + additional ``token_expire`` param. This overrides the + :conf_master:`token_expire` setting of the same name in the Master config + and is how long a token should live in seconds. + CLI Example: .. code-block:: shell salt-run auth.mk_token username=saltdev password=saltdev eauth=auto - salt-run auth.mk_token username=saltdev password=saltdev eauth=auto \\ + + # Create a token valid for three years. + salt-run auth.mk_token username=saltdev password=saltdev eauth=auto \ token_expire=94670856 + + # Calculate the number of seconds using expr. + salt-run auth.mk_token username=saltdev password=saltdev eauth=auto \ + token_expire=$(expr \( 365 \* 24 \* 60 \* 60 \) \* 3) ''' # This will hang if the master daemon is not running. 
netapi = salt.netapi.NetapiClient(__opts__) diff --git a/salt/runners/git_pillar.py b/salt/runners/git_pillar.py index 8dfc4412eb..0e8e97beb3 100644 --- a/salt/runners/git_pillar.py +++ b/salt/runners/git_pillar.py @@ -86,7 +86,8 @@ def update(branch=None, repo=None): else: pillar = salt.utils.gitfs.GitPillar(__opts__) pillar.init_remotes(pillar_conf, - salt.pillar.git_pillar.PER_REMOTE_OVERRIDES) + salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, + salt.pillar.git_pillar.PER_REMOTE_ONLY) for remote in pillar.remotes: # Skip this remote if it doesn't match the search criteria if branch is not None: diff --git a/salt/runners/manage.py b/salt/runners/manage.py index ebf6471d9a..fbf8ea3f23 100644 --- a/salt/runners/manage.py +++ b/salt/runners/manage.py @@ -22,6 +22,7 @@ from salt.ext.six.moves.urllib.request import urlopen as _urlopen # pylint: dis # Import salt libs import salt.key import salt.utils +import salt.utils.compat import salt.utils.minions import salt.client import salt.client.ssh @@ -669,7 +670,7 @@ def versions(): ver_diff = -2 else: minion_version = salt.version.SaltStackVersion.parse(minions[minion]) - ver_diff = cmp(minion_version, master_version) + ver_diff = salt.utils.compat.cmp(minion_version, master_version) if ver_diff not in version_status: version_status[ver_diff] = {} diff --git a/salt/runners/saltutil.py b/salt/runners/saltutil.py index a6099e18b3..339a8b44ef 100644 --- a/salt/runners/saltutil.py +++ b/salt/runners/saltutil.py @@ -38,6 +38,7 @@ def sync_all(saltenv='base', extmod_whitelist=None, extmod_blacklist=None): ''' log.debug('Syncing all') ret = {} + ret['clouds'] = sync_clouds(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist) ret['modules'] = sync_modules(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist) ret['states'] = sync_states(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist) ret['grains'] = sync_grains(saltenv=saltenv, 
extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist) diff --git a/salt/spm/__init__.py b/salt/spm/__init__.py index b2f367e5ac..2bd2646b61 100644 --- a/salt/spm/__init__.py +++ b/salt/spm/__init__.py @@ -359,6 +359,7 @@ class SPMClient(object): # First we download everything, then we install for package in dl_list: + out_file = dl_list[package]['dest_file'] # Kick off the install self._install_indv_pkg(package, out_file) return diff --git a/salt/state.py b/salt/state.py index 44fb73b97d..0d35b98276 100644 --- a/salt/state.py +++ b/salt/state.py @@ -37,6 +37,7 @@ import salt.utils.dictupdate import salt.utils.event import salt.utils.url import salt.utils.process +import salt.utils.files import salt.syspaths as syspaths from salt.utils import immutabletypes from salt.template import compile_template, compile_template_str @@ -97,6 +98,7 @@ STATE_RUNTIME_KEYWORDS = frozenset([ 'reload_grains', 'reload_pillar', 'runas', + 'runas_password', 'fire_event', 'saltenv', 'use', @@ -145,6 +147,13 @@ def _gen_tag(low): return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low) +def _clean_tag(tag): + ''' + Make tag name safe for filenames + ''' + return salt.utils.files.safe_filename_leaf(tag) + + def _l_tag(name, id_): low = {'name': 'listen_{0}'.format(name), '__id__': 'listen_{0}'.format(id_), @@ -1694,7 +1703,7 @@ class State(object): trb) } troot = os.path.join(self.opts['cachedir'], self.jid) - tfile = os.path.join(troot, tag) + tfile = os.path.join(troot, _clean_tag(tag)) if not os.path.isdir(troot): try: os.makedirs(troot) @@ -1752,6 +1761,11 @@ class State(object): self.state_con['runas'] = low.get('runas', None) + if low['state'] == 'cmd' and 'password' in low: + self.state_con['runas_password'] = low['password'] + else: + self.state_con['runas_password'] = low.get('runas_password', None) + if not low.get('__prereq__'): log.info( 'Executing state {0}.{1} for [{2}]'.format( @@ -1864,6 +1878,9 @@ class State(object): 
sys.modules[self.states[cdata['full']].__module__].__opts__[ 'test'] = test + self.state_con.pop('runas') + self.state_con.pop('runas_password') + # If format_call got any warnings, let's show them to the user if 'warnings' in cdata: ret.setdefault('warnings', []).extend(cdata['warnings']) @@ -2038,7 +2055,7 @@ class State(object): proc = running[tag].get('proc') if proc: if not proc.is_alive(): - ret_cache = os.path.join(self.opts['cachedir'], self.jid, tag) + ret_cache = os.path.join(self.opts['cachedir'], self.jid, _clean_tag(tag)) if not os.path.isfile(ret_cache): ret = {'result': False, 'comment': 'Parallel process failed to return', @@ -2113,11 +2130,14 @@ class State(object): reqs[r_state].append(chunk) continue try: - if (fnmatch.fnmatch(chunk['name'], req_val) or - fnmatch.fnmatch(chunk['__id__'], req_val)): - if req_key == 'id' or chunk['state'] == req_key: - found = True - reqs[r_state].append(chunk) + if isinstance(req_val, six.string_types): + if (fnmatch.fnmatch(chunk['name'], req_val) or + fnmatch.fnmatch(chunk['__id__'], req_val)): + if req_key == 'id' or chunk['state'] == req_key: + found = True + reqs[r_state].append(chunk) + else: + raise KeyError except KeyError as exc: raise SaltRenderError( 'Could not locate requisite of [{0}] present in state with name [{1}]'.format( @@ -2293,13 +2313,17 @@ class State(object): req_val = lreq[req_key] comment += \ '{0}{1}: {2}\n'.format(' ' * 23, req_key, req_val) - running[tag] = {'changes': {}, - 'result': False, - 'comment': comment, - '__run_num__': self.__run_num, - '__sls__': low['__sls__']} + if low.get('__prereq__'): + run_dict = self.pre + else: + run_dict = running + run_dict[tag] = {'changes': {}, + 'result': False, + 'comment': comment, + '__run_num__': self.__run_num, + '__sls__': low['__sls__']} self.__run_num += 1 - self.event(running[tag], len(chunks), fire_event=low.get('fire_event')) + self.event(run_dict[tag], len(chunks), fire_event=low.get('fire_event')) return running for chunk in reqs: 
# Check to see if the chunk has been run, only run it if diff --git a/salt/states/archive.py b/salt/states/archive.py index e36f178f2e..f053d3c207 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -1221,7 +1221,7 @@ def extracted(name, return ret if not os.path.isdir(name): - __salt__['file.makedirs'](name, user=user) + __states__['file.directory'](name, user=user, makedirs=True) created_destdir = True log.debug('Extracting {0} to {1}'.format(cached_source, name)) diff --git a/salt/states/bigip.py b/salt/states/bigip.py index 8a575b0d1d..44462534dc 100644 --- a/salt/states/bigip.py +++ b/salt/states/bigip.py @@ -85,7 +85,7 @@ def _check_for_changes(entity_type, ret, existing, modified): if 'generation' in existing['content'].keys(): del existing['content']['generation'] - if cmp(modified['content'], existing['content']) == 0: + if modified['content'] == existing['content']: ret['comment'] = '{entity_type} is currently enforced to the desired state. No changes made.'.format(entity_type=entity_type) else: ret['comment'] = '{entity_type} was enforced to the desired state. Note: Only parameters specified ' \ @@ -94,7 +94,7 @@ def _check_for_changes(entity_type, ret, existing, modified): ret['changes']['new'] = modified['content'] else: - if cmp(modified, existing) == 0: + if modified == existing: ret['comment'] = '{entity_type} is currently enforced to the desired state. No changes made.'.format(entity_type=entity_type) else: ret['comment'] = '{entity_type} was enforced to the desired state. 
Note: Only parameters specified ' \ diff --git a/salt/states/boto_cfn.py b/salt/states/boto_cfn.py index fcc7375464..97f2cf6260 100644 --- a/salt/states/boto_cfn.py +++ b/salt/states/boto_cfn.py @@ -43,6 +43,9 @@ from __future__ import absolute_import import logging import json +# Import Salt libs +import salt.utils.compat + # Import 3rd party libs try: from salt._compat import ElementTree as ET @@ -141,10 +144,14 @@ def present(name, template_body=None, template_url=None, parameters=None, notifi stack_policy_body = _get_template(stack_policy_body, name) stack_policy_during_update_body = _get_template(stack_policy_during_update_body, name) + for i in [template_body, stack_policy_body, stack_policy_during_update_body]: + if isinstance(i, dict): + return i + _valid = _validate(template_body, template_url, region, key, keyid, profile) log.debug('Validate is : {0}.'.format(_valid)) if _valid is not True: - code, message = _get_error(_valid) + code, message = _valid ret['result'] = False ret['comment'] = 'Template could not be validated.\n{0} \n{1}'.format(code, message) return ret @@ -154,7 +161,7 @@ def present(name, template_body=None, template_url=None, parameters=None, notifi template = template['GetTemplateResponse']['GetTemplateResult']['TemplateBody'].encode('ascii', 'ignore') template = json.loads(template) _template_body = json.loads(template_body) - compare = cmp(template, _template_body) + compare = salt.utils.compat.cmp(template, _template_body) if compare != 0: log.debug('Templates are not the same. Compare value is {0}'.format(compare)) # At this point we should be able to run update safely since we already validated the template @@ -250,7 +257,7 @@ def _get_template(template, name): def _validate(template_body=None, template_url=None, region=None, key=None, keyid=None, profile=None): # Validates template. returns true if template syntax is correct. 
validate = __salt__['boto_cfn.validate_template'](template_body, template_url, region, key, keyid, profile) - log.debug('Validate is result is {0}.'.format(str(validate))) + log.debug('Validate result is {0}.'.format(str(validate))) if isinstance(validate, str): code, message = _get_error(validate) log.debug('Validate error is {0} and message is {1}.'.format(code, message)) diff --git a/salt/states/boto_elbv2.py b/salt/states/boto_elbv2.py index 41566f5916..5146c9d695 100644 --- a/salt/states/boto_elbv2.py +++ b/salt/states/boto_elbv2.py @@ -2,7 +2,7 @@ ''' Manage AWS Application Load Balancer -.. versionadded:: TBD +.. versionadded:: 2017.7.0 Add and remove targets from an ALB target group. @@ -54,6 +54,8 @@ def __virtual__(): def targets_registered(name, targets, region=None, key=None, keyid=None, profile=None): ''' + .. versionadded:: 2017.7.0 + Add targets to an Application Load Balancer target group. This state will not remove targets. name @@ -63,8 +65,6 @@ def targets_registered(name, targets, region=None, key=None, keyid=None, A list of target IDs or a string of a single target that this target group should distribute traffic to. - .. versionadded:: TBD - .. 
code-block:: yaml add-targets: diff --git a/salt/states/chocolatey.py b/salt/states/chocolatey.py index 79c69048ef..d83f9bddd3 100644 --- a/salt/states/chocolatey.py +++ b/salt/states/chocolatey.py @@ -92,7 +92,7 @@ def installed(name, version=None, source=None, force=False, pre_versions=False, # Determine action # Package not installed - if name not in [package.split('|')[0].lower() for package in pre_install.splitlines()]: + if name.lower() not in [package.lower() for package in pre_install.keys()]: if version: ret['changes'] = {name: 'Version {0} will be installed' ''.format(version)} @@ -193,9 +193,13 @@ def uninstalled(name, version=None, uninstall_args=None, override_args=False): pre_uninstall = __salt__['chocolatey.list'](local_only=True) # Determine if package is installed - if name in [package.split('|')[0].lower() for package in pre_uninstall.splitlines()]: - ret['changes'] = {name: '{0} version {1} will be removed' - ''.format(name, pre_uninstall[name][0])} + if name.lower() in [package.lower() for package in pre_uninstall.keys()]: + try: + ret['changes'] = {name: '{0} version {1} will be removed' + ''.format(name, pre_uninstall[name][0])} + except KeyError: + ret['changes'] = {name: '{0} will be removed' + ''.format(name)} else: ret['comment'] = 'The package {0} is not installed'.format(name) return ret diff --git a/salt/states/cisconso.py b/salt/states/cisconso.py index eb26cfaf33..7622a3102d 100644 --- a/salt/states/cisconso.py +++ b/salt/states/cisconso.py @@ -8,6 +8,12 @@ For documentation on setting up the cisconso proxy minion look in the documentat for :mod:`salt.proxy.cisconso `. 
''' +# Import Python libs +from __future__ import absolute_import + +# Import Salt libs +import salt.utils.compat + def __virtual__(): return 'cisconso.set_data_value' in __salt__ @@ -53,7 +59,7 @@ def value_present(name, datastore, path, config): existing = __salt__['cisconso.get_data'](datastore, path) - if cmp(existing, config): + if salt.utils.compat.cmp(existing, config): ret['result'] = True ret['comment'] = 'Config is already set' diff --git a/salt/states/cron.py b/salt/states/cron.py index b5d69e81ca..efa8ab25d3 100644 --- a/salt/states/cron.py +++ b/salt/states/cron.py @@ -116,7 +116,7 @@ entry on the minion already contains a numeric value, then using the ``random`` keyword will not modify it. Added the opportunity to set a job with a special keyword like '@reboot' or -'@hourly'. +'@hourly'. Quotes must be used, otherwise PyYAML will strip the '@' sign. .. code-block:: yaml @@ -151,6 +151,13 @@ from salt.modules.cron import ( ) +def __virtual__(): + if 'cron.list_tab' in __salt__: + return True + else: + return (False, 'cron module could not be loaded') + + def _check_cron(user, cmd, minute=None, @@ -296,7 +303,8 @@ def present(name, edits. This defaults to the state id special - A special keyword to specify periodicity (eg. @reboot, @hourly...) + A special keyword to specify periodicity (eg. @reboot, @hourly...). + Quotes must be used, otherwise PyYAML will strip the '@' sign. .. versionadded:: 2016.3.0 ''' @@ -382,7 +390,8 @@ def absent(name, edits. This defaults to the state id special - The special keyword used in the job (eg. @reboot, @hourly...) + The special keyword used in the job (eg. @reboot, @hourly...). + Quotes must be used, otherwise PyYAML will strip the '@' sign. 
''' ### NOTE: The keyword arguments in **kwargs are ignored in this state, but ### cannot be removed from the function definition, otherwise the use diff --git a/salt/states/docker_container.py b/salt/states/docker_container.py index 1bfc8cf489..1f5996c3d5 100644 --- a/salt/states/docker_container.py +++ b/salt/states/docker_container.py @@ -145,7 +145,7 @@ def running(name, .. _docker-container-running-skip-translate: skip_translate - This function translates Salt CLI input into the format which + This function translates Salt CLI or SLS input into the format which docker-py_ expects. However, in the event that Salt's translation logic fails (due to potential changes in the Docker Remote API, or to bugs in the translation code), this argument can be used to exert granular @@ -677,24 +677,14 @@ def running(name, - foo2.domain.tld domainname - Set custom DNS search domains. Can be expressed as a comma-separated - list or a YAML list. The below two examples are equivalent: + The domain name to use for the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - - dommainname: domain.tld,domain2.tld - - .. code-block:: yaml - - foo: - docker_container.running: - - image: bar/baz:latest - - dommainname: - - domain.tld - - domain2.tld + - dommainname: domain.tld entrypoint Entrypoint for the container diff --git a/salt/states/docker_image.py b/salt/states/docker_image.py index e3c1a37779..087ce8f693 100644 --- a/salt/states/docker_image.py +++ b/salt/states/docker_image.py @@ -135,13 +135,14 @@ def present(name, .. versionadded:: 2016.11.0 sls - Allow for building images with ``dockerng.sls_build`` by specify the - SLS files to build with. This can be a list or comma-seperated string. + Allow for building of image with :py:func:`docker.sls_build + ` by specifying the SLS files with + which to build. This can be a list or comma-seperated string. .. 
code-block:: yaml myuser/myimage:mytag: - dockerng.image_present: + docker_image.present: - sls: - webapp1 - webapp2 @@ -151,12 +152,14 @@ def present(name, .. versionadded: 2017.7.0 base - Base image with which to start ``dockerng.sls_build`` + Base image with which to start :py:func:`docker.sls_build + ` .. versionadded: 2017.7.0 saltenv - environment from which to pull sls files for ``dockerng.sls_build``. + Environment from which to pull SLS files for :py:func:`docker.sls_build + ` .. versionadded: 2017.7.0 ''' @@ -169,11 +172,14 @@ def present(name, ret['comment'] = 'Only one of \'build\' or \'load\' is permitted.' return ret - # Ensure that we have repo:tag notation image = ':'.join(salt.utils.docker.get_repo_tag(name)) - all_tags = __salt__['docker.list_tags']() + resolved_tag = __salt__['docker.resolve_tag'](image) - if image in all_tags: + if resolved_tag is False: + # Specified image is not present + image_info = None + else: + # Specified image is present if not force: ret['result'] = True ret['comment'] = 'Image \'{0}\' already present'.format(name) @@ -185,8 +191,6 @@ def present(name, ret['comment'] = \ 'Unable to get info for image \'{0}\': {1}'.format(name, exc) return ret - else: - image_info = None if build or sls: action = 'built' @@ -197,15 +201,15 @@ def present(name, if __opts__['test']: ret['result'] = None - if (image in all_tags and force) or image not in all_tags: + if (resolved_tag is not False and force) or resolved_tag is False: ret['comment'] = 'Image \'{0}\' will be {1}'.format(name, action) return ret if build: try: image_update = __salt__['docker.build'](path=build, - image=image, - dockerfile=dockerfile) + image=image, + dockerfile=dockerfile) except Exception as exc: ret['comment'] = ( 'Encountered error building {0} as {1}: {2}' @@ -219,10 +223,10 @@ def present(name, if isinstance(sls, list): sls = ','.join(sls) try: - image_update = __salt__['dockerng.sls_build'](name=image, - base=base, - mods=sls, - saltenv=saltenv) + 
image_update = __salt__['docker.sls_build'](name=image, + base=base, + mods=sls, + saltenv=saltenv) except Exception as exc: ret['comment'] = ( 'Encountered error using sls {0} for building {1}: {2}' @@ -252,10 +256,8 @@ def present(name, client_timeout=client_timeout ) except Exception as exc: - ret['comment'] = ( - 'Encountered error pulling {0}: {1}' - .format(image, exc) - ) + ret['comment'] = \ + 'Encountered error pulling {0}: {1}'.format(image, exc) return ret if (image_info is not None and image_info['Id'][:12] == image_update .get('Layers', {}) @@ -267,7 +269,7 @@ def present(name, # Only add to the changes dict if layers were pulled ret['changes'] = image_update - ret['result'] = image in __salt__['docker.list_tags']() + ret['result'] = bool(__salt__['docker.resolve_tag'](image)) if not ret['result']: # This shouldn't happen, failure to pull should be caught above @@ -345,23 +347,16 @@ def absent(name=None, images=None, force=False): ret['comment'] = 'One of \'name\' and \'images\' must be provided' return ret elif images is not None: - targets = [] - for target in images: - try: - targets.append(':'.join(salt.utils.docker.get_repo_tag(target))) - except TypeError: - # Don't stomp on images with unicode characters in Python 2, - # only force image to be a str if it wasn't already (which is - # very unlikely). 
- targets.append(':'.join(salt.utils.docker.get_repo_tag(str(target)))) + targets = images elif name: - try: - targets = [':'.join(salt.utils.docker.get_repo_tag(name))] - except TypeError: - targets = [':'.join(salt.utils.docker.get_repo_tag(str(name)))] + targets = [name] pre_tags = __salt__['docker.list_tags']() - to_delete = [x for x in targets if x in pre_tags] + to_delete = [] + for target in targets: + resolved_tag = __salt__['docker.resolve_tag'](target, tags=pre_tags) + if resolved_tag is not False: + to_delete.append(resolved_tag) log.debug('targets = {0}'.format(targets)) log.debug('to_delete = {0}'.format(to_delete)) diff --git a/salt/states/docker_network.py b/salt/states/docker_network.py index 5dac2d2e22..a681847642 100644 --- a/salt/states/docker_network.py +++ b/salt/states/docker_network.py @@ -90,8 +90,20 @@ def present(name, driver=None, containers=None): # map containers to container's Ids. containers = [__salt__['docker.inspect_container'](c)['Id'] for c in containers] networks = __salt__['docker.networks'](names=[name]) + log.trace( + 'docker_network.present: current networks: {0}'.format(networks) + ) + + # networks will contain all Docker networks which partially match 'name'. + # We need to loop through to find the matching network, if there is one. + network = None if networks: - network = networks[0] # we expect network's name to be unique + for network_iter in networks: + if network_iter['Name'] == name: + network = network_iter + break + + if network is not None: if all(c in network['Containers'] for c in containers): ret['result'] = True ret['comment'] = 'Network \'{0}\' already exists.'.format(name) @@ -154,7 +166,20 @@ def absent(name, driver=None): 'comment': ''} networks = __salt__['docker.networks'](names=[name]) - if not networks: + log.trace( + 'docker_network.absent: current networks: {0}'.format(networks) + ) + + # networks will contain all Docker networks which partially match 'name'. 
+ # We need to loop through to find the matching network, if there is one. + network = None + if networks: + for network_iter in networks: + if network_iter['Name'] == name: + network = network_iter + break + + if network is None: ret['result'] = True ret['comment'] = 'Network \'{0}\' already absent'.format(name) return ret diff --git a/salt/states/docker_volume.py b/salt/states/docker_volume.py index 84a4f2154d..209b3839ac 100644 --- a/salt/states/docker_volume.py +++ b/salt/states/docker_volume.py @@ -187,7 +187,7 @@ def present(name, driver=None, driver_opts=None, force=False): ret['result'] = result return ret - ret['result'] = None if __opts__['test'] else True + ret['result'] = True ret['comment'] = 'Volume \'{0}\' already exists.'.format(name) return ret diff --git a/salt/states/file.py b/salt/states/file.py index 7148376c85..8d81998016 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -622,7 +622,11 @@ def _clean_dir(root, keep, exclude_pat): while True: fn_ = os.path.dirname(fn_) real_keep.add(fn_) - if fn_ in ['/', ''.join([os.path.splitdrive(fn_)[0], '\\\\'])]: + if fn_ in [ + os.sep, + ''.join([os.path.splitdrive(fn_)[0], os.sep]), + ''.join([os.path.splitdrive(fn_)[0], os.sep, os.sep]) + ]: break def _delete_not_kept(nfn): @@ -1433,7 +1437,10 @@ def absent(name): ret['comment'] = 'File {0} is set for removal'.format(name) return ret try: - __salt__['file.remove'](name) + if salt.utils.is_windows(): + __salt__['file.remove'](name, force=True) + else: + __salt__['file.remove'](name) ret['comment'] = 'Removed file {0}'.format(name) ret['changes']['removed'] = name return ret @@ -1546,7 +1553,7 @@ def managed(name, the salt master and potentially run through a templating system. name - The location of the file to manage + The location of the file to manage, as an absolute path. 
source The source file to download to the minion, this source file can be @@ -1716,13 +1723,15 @@ def managed(name, group The group ownership set for the file, this defaults to the group salt - is running as on the minion On Windows, this is ignored + is running as on the minion. On Windows, this is ignored mode - The permissions to set on this file, e.g. ``644``, ``0775``, or ``4664``. + The permissions to set on this file, e.g. ``644``, ``0775``, or + ``4664``. - The default mode for new files and directories corresponds umask of salt - process. For existing files and directories it's not enforced. + The default mode for new files and directories corresponds to the + umask of the salt process. The mode of existing files and directories + will only be changed if ``mode`` is specified. .. note:: This option is **not** supported on Windows. @@ -2551,7 +2560,7 @@ def directory(name, Ensure that a named directory is present and has the right perms name - The location to create or manage a directory + The location to create or manage a directory, as an absolute path user The user to own the directory; this defaults to the user salt is @@ -3736,7 +3745,13 @@ def line(name, content=None, match=None, mode=None, location=None, if not name: return _error(ret, 'Must provide name to file.line') - managed(name, create=create, user=user, group=group, mode=file_mode) + managed( + name, + create=create, + user=user, + group=group, + mode=file_mode, + replace=False) check_res, check_msg = _check_file(name) if not check_res: @@ -3784,7 +3799,8 @@ def replace(name, not_found_content=None, backup='.bak', show_changes=True, - ignore_if_missing=False): + ignore_if_missing=False, + backslash_literal=False): r''' Maintain an edit in a file. @@ -3798,12 +3814,13 @@ def replace(name, A regular expression, to be matched using Python's :py:func:`~re.search`. - ..note:: + .. 
note:: + If you need to match a literal string that contains regex special characters, you may want to use salt's custom Jinja filter, ``escape_regex``. - ..code-block:: jinja + .. code-block:: jinja {{ 'http://example.com?foo=bar%20baz' | escape_regex }} @@ -3883,6 +3900,14 @@ def replace(name, state will display an error raised by the execution module. If set to ``True``, the state will simply report no changes. + backslash_literal : False + .. versionadded:: 2016.11.7 + + Interpret backslashes as literal backslashes for the repl and not + escape characters. This will help when using append/prepend so that + the backslashes are not interpreted for the repl on the second run of + the state. + For complex regex patterns, it can be useful to avoid the need for complex quoting and escape sequences by making use of YAML's multiline string syntax. @@ -3931,7 +3956,8 @@ def replace(name, backup=backup, dry_run=__opts__['test'], show_changes=show_changes, - ignore_if_missing=ignore_if_missing) + ignore_if_missing=ignore_if_missing, + backslash_literal=backslash_literal) if changes: ret['pchanges']['diff'] = changes diff --git a/salt/states/git.py b/salt/states/git.py index a269c9b2d9..d29641b3d1 100644 --- a/salt/states/git.py +++ b/salt/states/git.py @@ -1138,13 +1138,22 @@ def latest(name, password=password, https_user=https_user, https_pass=https_pass) - comments.append( - 'Remote \'{0}\' changed from {1} to {2}'.format( - remote, - salt.utils.url.redact_http_basic_auth(fetch_url), - redacted_fetch_url + if fetch_url is None: + comments.append( + 'Remote \'{0}\' set to {1}'.format( + remote, + redacted_fetch_url + ) + ) + ret['changes']['new'] = name + ' => ' + remote + else: + comments.append( + 'Remote \'{0}\' changed from {1} to {2}'.format( + remote, + salt.utils.url.redact_http_basic_auth(fetch_url), + redacted_fetch_url + ) ) - ) if remote_rev is not None: if __opts__['test']: @@ -1292,6 +1301,23 @@ def latest(name, 'if it does not already exist).', comments ) + 
remote_tags = set([ + x.replace('refs/tags/', '') for x in __salt__['git.ls_remote']( + cwd=target, + remote=remote, + opts="--tags", + user=user, + password=password, + identity=identity, + saltenv=__env__, + ignore_retcode=True, + ).keys() if '^{}' not in x + ]) + if set(all_local_tags) != remote_tags: + has_remote_rev = False + ret['changes']['new_tags'] = list(remote_tags.symmetric_difference( + all_local_tags + )) if not has_remote_rev: try: @@ -1447,8 +1473,6 @@ def latest(name, user=user, password=password, ignore_retcode=True): - merge_rev = remote_rev if rev == 'HEAD' \ - else desired_upstream if git_ver >= _LooseVersion('1.8.1.6'): # --ff-only added in version 1.8.1.6. It's not @@ -1465,7 +1489,7 @@ def latest(name, __salt__['git.merge']( target, - rev=merge_rev, + rev=remote_rev, opts=merge_opts, user=user, password=password) @@ -2229,13 +2253,18 @@ def detached(name, local_commit_id = _get_local_rev_and_branch(target, user, password)[0] - if remote_rev_type is 'hash' \ - and __salt__['git.describe'](target, - rev, - user=user, - password=password): - # The rev is a hash and it exists locally so skip to checkout - hash_exists_locally = True + if remote_rev_type is 'hash': + try: + __salt__['git.describe'](target, + rev, + user=user, + password=password, + ignore_retcode=True) + except CommandExecutionError: + hash_exists_locally = False + else: + # The rev is a hash and it exists locally so skip to checkout + hash_exists_locally = True else: # Check that remote is present and set to correct url remotes = __salt__['git.remotes'](target, diff --git a/salt/states/glusterfs.py b/salt/states/glusterfs.py index d2fbe176c6..cdc3684d94 100644 --- a/salt/states/glusterfs.py +++ b/salt/states/glusterfs.py @@ -120,13 +120,13 @@ def volume_present(name, bricks, stripe=False, replica=False, device_vg=False, .. 
code-block:: yaml myvolume: - glusterfs.created: + glusterfs.volume_present: - bricks: - host1:/srv/gluster/drive1 - host2:/srv/gluster/drive2 Replicated Volume: - glusterfs.created: + glusterfs.volume_present: - name: volume2 - bricks: - host1:/srv/gluster/drive2 diff --git a/salt/states/group.py b/salt/states/group.py index 8218e415d6..d280243e08 100644 --- a/salt/states/group.py +++ b/salt/states/group.py @@ -1,10 +1,15 @@ # -*- coding: utf-8 -*- -''' +r''' Management of user groups ========================= -The group module is used to create and manage unix group settings, groups -can be either present or absent: +The group module is used to create and manage group settings, groups can be +either present or absent. User/Group names can be passed to the ``adduser``, +``deluser``, and ``members`` parameters. ``adduser`` and ``deluser`` can be used +together but not with ``members``. + +In Windows, if no domain is specified in the user or group name (ie: +`DOMAIN\username``) the module will assume a local user or group. .. code-block:: yaml @@ -36,6 +41,10 @@ import sys # Import 3rd-party libs import salt.ext.six as six +# Import Salt libs +import salt.utils +import salt.utils.win_functions + def _changes(name, gid=None, @@ -50,6 +59,18 @@ def _changes(name, if not lgrp: return False + # User and Domain names are not case sensitive in Windows. 
Let's make them + # all lower case so we can compare properly + if salt.utils.is_windows(): + if lgrp['members']: + lgrp['members'] = [user.lower() for user in lgrp['members']] + if members: + members = [salt.utils.win_functions.get_sam_name(user) for user in members] + if addusers: + addusers = [salt.utils.win_functions.get_sam_name(user) for user in addusers] + if delusers: + delusers = [salt.utils.win_functions.get_sam_name(user) for user in delusers] + change = {} if gid: if lgrp['gid'] != gid: @@ -57,7 +78,7 @@ def _changes(name, if members: # -- if new member list if different than the current - if set(lgrp['members']) ^ set(members): + if set(lgrp['members']).symmetric_difference(members): change['members'] = members if addusers: @@ -79,31 +100,58 @@ def present(name, addusers=None, delusers=None, members=None): - ''' + r''' Ensure that a group is present - name - The name of the group to manage + Args: - gid - The group id to assign to the named group; if left empty, then the next - available group id will be assigned + name (str): + The name of the group to manage - system - Whether or not the named group is a system group. This is essentially - the '-r' option of 'groupadd'. + gid (str): + The group id to assign to the named group; if left empty, then the + next available group id will be assigned. Ignored on Windows - addusers - List of additional users to be added as a group members. + system (bool): + Whether or not the named group is a system group. This is essentially + the '-r' option of 'groupadd'. Ignored on Windows - delusers - Ensure these user are removed from the group membership. + addusers (list): + List of additional users to be added as a group members. Cannot + conflict with names in delusers. Cannot be used in conjunction with + members. - members - Replace existing group members with a list of new members. + delusers (list): + Ensure these user are removed from the group membership. Cannot + conflict with names in addusers. 
Cannot be used in conjunction with + members. - Note: Options 'members' and 'addusers/delusers' are mutually exclusive and - can not be used together. + members (list): + Replace existing group members with a list of new members. Cannot be + used in conjunction with addusers or delusers. + + Example: + + .. code-block:: yaml + + # Adds DOMAIN\db_admins and Administrators to the local db_admin group + # Removes Users + db_admin: + group.present: + - addusers: + - DOMAIN\db_admins + - Administrators + - delusers: + - Users + + # Ensures only DOMAIN\domain_admins and the local Administrator are + # members of the local Administrators group. All other users are + # removed + Administrators: + group.present: + - members: + - DOMAIN\domain_admins + - Administrator ''' ret = {'name': name, 'changes': {}, @@ -233,8 +281,17 @@ def absent(name): ''' Ensure that the named group is absent - name - The name of the group to remove + Args: + name (str): + The name of the group to remove + + Example: + + .. 
code-block:: yaml + + # Removes the local group `db_admin` + db_admin: + group.absent ''' ret = {'name': name, 'changes': {}, diff --git a/salt/states/ini_manage.py b/salt/states/ini_manage.py index 10cff758ca..7ea26e05b7 100644 --- a/salt/states/ini_manage.py +++ b/salt/states/ini_manage.py @@ -86,8 +86,11 @@ def options_present(name, sections=None, separator='=', strict=False): changes[section_name].update({key_to_remove: ''}) changes[section_name].update({key_to_remove: {'before': orig_value, 'after': None}}) - changes[section_name].update( - __salt__['ini.set_option'](name, {section_name: section_body}, separator)[section_name]) + options_updated = __salt__['ini.set_option'](name, {section_name: section_body}, separator) + if options_updated: + changes[section_name].update(options_updated[section_name]) + if not changes[section_name]: + del changes[section_name] else: changes = __salt__['ini.set_option'](name, sections, separator) except IOError as err: @@ -99,8 +102,12 @@ def options_present(name, sections=None, separator='=', strict=False): ret['comment'] = 'Errors encountered. 
{0}'.format(changes['error']) ret['changes'] = {} else: - ret['comment'] = 'Changes take effect' - ret['changes'] = changes + if changes: + ret['changes'] = changes + ret['comment'] = 'Changes take effect' + else: + ret['changes'] = {} + ret['comment'] = 'No changes take effect' return ret @@ -199,7 +206,7 @@ def sections_present(name, sections=None, separator='='): ret['result'] = False ret['comment'] = "{0}".format(err) return ret - if cmp(dict(sections[section]), cur_section) == 0: + if dict(sections[section]) == cur_section: ret['comment'] += 'Section unchanged {0}.\n'.format(section) continue elif cur_section: diff --git a/salt/states/jenkins.py b/salt/states/jenkins.py index 01548e5381..047ee8d6fe 100644 --- a/salt/states/jenkins.py +++ b/salt/states/jenkins.py @@ -16,6 +16,7 @@ import logging import salt.ext.six as six from salt.ext.six.moves import zip import salt.utils +from salt.exceptions import CommandExecutionError # Import XML parser import xml.etree.ElementTree as ET @@ -36,17 +37,23 @@ def _elements_equal(e1, e2): return all(_elements_equal(c1, c2) for c1, c2 in zip(e1, e2)) +def _fail(ret, msg): + ret['comment'] = msg + ret['result'] = False + return ret + + def present(name, config=None, **kwargs): ''' - Ensure the job is present in the Jenkins - configured jobs + Ensure the job is present in the Jenkins configured jobs + name The unique name for the Jenkins job + config - The Salt URL for the file to use for - configuring the job. 
+ The Salt URL for the file to use for configuring the job ''' ret = {'name': name, @@ -54,9 +61,7 @@ def present(name, 'changes': {}, 'comment': ['Job {0} is up to date.'.format(name)]} - _job_exists = __salt__['jenkins.job_exists'](name) - - if _job_exists: + if __salt__['jenkins.job_exists'](name): _current_job_config = __salt__['jenkins.get_job_config'](name) buf = six.moves.StringIO(_current_job_config) oldXML = ET.fromstring(buf.read()) @@ -68,21 +73,28 @@ def present(name, diff = difflib.unified_diff( ET.tostringlist(oldXML, encoding='utf8', method='xml'), ET.tostringlist(newXML, encoding='utf8', method='xml'), lineterm='') - __salt__['jenkins.update_job'](name, config, __env__) - ret['changes'][name] = ''.join(diff) - ret['comment'].append('Job {0} updated.'.format(name)) + try: + __salt__['jenkins.update_job'](name, config, __env__) + except CommandExecutionError as exc: + return _fail(ret, exc.strerror) + else: + ret['changes'] = ''.join(diff) + ret['comment'].append('Job \'{0}\' updated.'.format(name)) else: cached_source_path = __salt__['cp.cache_file'](config, __env__) with salt.utils.fopen(cached_source_path) as _fp: new_config_xml = _fp.read() - __salt__['jenkins.create_job'](name, config, __env__) + try: + __salt__['jenkins.create_job'](name, config, __env__) + except CommandExecutionError as exc: + return _fail(ret, exc.strerror) buf = six.moves.StringIO(new_config_xml) diff = difflib.unified_diff('', buf.readlines(), lineterm='') ret['changes'][name] = ''.join(diff) - ret['comment'].append('Job {0} added.'.format(name)) + ret['comment'].append('Job \'{0}\' added.'.format(name)) ret['comment'] = '\n'.join(ret['comment']) return ret @@ -91,24 +103,23 @@ def present(name, def absent(name, **kwargs): ''' - Ensure the job is present in the Jenkins - configured jobs + Ensure the job is absent from the Jenkins configured jobs name - The name of the Jenkins job to remove. 
- + The name of the Jenkins job to remove ''' - ret = {'name': name, 'result': True, 'changes': {}, 'comment': []} - _job_exists = __salt__['jenkins.job_exists'](name) - - if _job_exists: - __salt__['jenkins.delete_job'](name) - ret['comment'] = 'Job {0} deleted.'.format(name) + if __salt__['jenkins.job_exists'](name): + try: + __salt__['jenkins.delete_job'](name) + except CommandExecutionError as exc: + return _fail(ret, exc.strerror) + else: + ret['comment'] = 'Job \'{0}\' deleted.'.format(name) else: - ret['comment'] = 'Job {0} already absent.'.format(name) + ret['comment'] = 'Job \'{0}\' already absent.'.format(name) return ret diff --git a/salt/states/kubernetes.py b/salt/states/kubernetes.py index 64cd451532..bc62da1ab7 100644 --- a/salt/states/kubernetes.py +++ b/salt/states/kubernetes.py @@ -73,6 +73,8 @@ The kubernetes module is used to manage different kubernetes resources. key1: value1 key2: value2 key3: value3 + +.. versionadded: 2017.7.0 ''' from __future__ import absolute_import diff --git a/salt/states/module.py b/salt/states/module.py index 31a4355464..202999e7d8 100644 --- a/salt/states/module.py +++ b/salt/states/module.py @@ -262,6 +262,7 @@ def run(**kwargs): missing = [] tests = [] for func in functions: + func = func.split(':')[0] if func not in __salt__: missing.append(func) elif __opts__['test']: @@ -284,8 +285,9 @@ def run(**kwargs): failures = [] success = [] for func in functions: + _func = func.split(':')[0] try: - func_ret = _call_function(func, returner=kwargs.get('returner'), + func_ret = _call_function(_func, returner=kwargs.get('returner'), func_args=kwargs.get(func)) if not _get_result(func_ret, ret['changes'].get('ret', {})): if isinstance(func_ret, dict): @@ -313,22 +315,35 @@ def _call_function(name, returner=None, **kwargs): ''' argspec = salt.utils.args.get_function_argspec(__salt__[name]) func_kw = dict(zip(argspec.args[-len(argspec.defaults or []):], # pylint: disable=incompatible-py3-code - argspec.defaults or [])) - 
func_args = [] - for funcset in kwargs.get('func_args') or {}: - if isinstance(funcset, dict): - func_kw.update(funcset) + argspec.defaults or [])) + arg_type, na_type, kw_type = [], {}, False + for funcset in reversed(kwargs.get('func_args') or []): + if not isinstance(funcset, dict): + kw_type = True + if kw_type: + if isinstance(funcset, dict): + arg_type += funcset.values() + na_type.update(funcset) + else: + arg_type.append(funcset) else: - func_args.append(funcset) + func_kw.update(funcset) + arg_type.reverse() + _exp_prm = len(argspec.args or []) - len(argspec.defaults or []) + _passed_prm = len(arg_type) missing = [] - for arg in argspec.args: - if arg not in func_kw: - missing.append(arg) + if na_type and _exp_prm > _passed_prm: + for arg in argspec.args: + if arg not in func_kw: + missing.append(arg) if missing: raise SaltInvocationError('Missing arguments: {0}'.format(', '.join(missing))) + elif _exp_prm > _passed_prm: + raise SaltInvocationError('Function expects {0} parameters, got only {1}'.format( + _exp_prm, _passed_prm)) - mret = __salt__[name](*func_args, **func_kw) + mret = __salt__[name](*arg_type, **func_kw) if returner is not None: returners = salt.loader.returners(__opts__, __salt__) if returner in returners: diff --git a/salt/states/netconfig.py b/salt/states/netconfig.py index 9318e227b4..1b2ed44d56 100644 --- a/salt/states/netconfig.py +++ b/salt/states/netconfig.py @@ -109,7 +109,7 @@ def managed(name, template_mode='755', saltenv=None, template_engine='jinja', - skip_verify=True, + skip_verify=False, defaults=None, test=False, commit=True, @@ -194,10 +194,12 @@ def managed(name, - :mod:`py` - :mod:`wempy` - skip_verify: True + skip_verify: False If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped, and the ``source_hash`` argument will be ignored. + .. versionchanged:: 2017.7.1 + test: False Dry run? If set to ``True``, will apply the config, discard and return the changes. 
Default: ``False`` (will commit the changes on the device). diff --git a/salt/states/netyang.py b/salt/states/netyang.py index e39b09b3b9..2db6208f57 100644 --- a/salt/states/netyang.py +++ b/salt/states/netyang.py @@ -92,6 +92,13 @@ def managed(name, Use certain profiles to generate the config. If not specified, will use the platform default profile(s). + compliance_report: ``False`` + Return the compliance report in the comment. + The compliance report structured object can be found however + in the ``pchanges`` field of the output (not displayed on the CLI). + + .. versionadded:: 2017.7.3 + test: ``False`` Dry run? If set as ``True``, will apply the config, discard and return the changes. Default: ``False`` and will commit @@ -140,6 +147,7 @@ def managed(name, debug = kwargs.get('debug', False) or __opts__.get('debug', False) commit = kwargs.get('commit', True) or __opts__.get('commit', True) replace = kwargs.get('replace', False) or __opts__.get('replace', False) + return_compliance_report = kwargs.get('compliance_report', False) or __opts__.get('compliance_report', False) profiles = kwargs.get('profiles', []) temp_file = __salt__['temp.file']() log.debug('Creating temp file: {0}'.format(temp_file)) @@ -180,7 +188,13 @@ def managed(name, log.debug('Loaded config result:') log.debug(loaded_changes) __salt__['file.remove'](temp_file) - return salt.utils.napalm.loaded_ret(ret, loaded_changes, test, debug) + loaded_changes['compliance_report'] = compliance_report + return salt.utils.napalm.loaded_ret(ret, + loaded_changes, + test, + debug, + opts=__opts__, + compliance_report=return_compliance_report) def configured(name, diff --git a/salt/states/pkg.py b/salt/states/pkg.py index c24f330c63..3c47a74920 100644 --- a/salt/states/pkg.py +++ b/salt/states/pkg.py @@ -105,6 +105,7 @@ if salt.utils.is_windows(): import datetime import errno import time + from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from 
salt.modules.win_pkg import _get_package_info @@ -185,15 +186,12 @@ def _fulfills_version_spec(versions, oper, desired_version, if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: - if oper == '==': - if fnmatch.fnmatch(ver, desired_version): - return True - - elif salt.utils.compare_versions(ver1=ver, - oper=oper, - ver2=desired_version, - cmp_func=cmp_func, - ignore_epoch=ignore_epoch): + if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ + or salt.utils.compare_versions(ver1=ver, + oper=oper, + ver2=desired_version, + cmp_func=cmp_func, + ignore_epoch=ignore_epoch): return True return False @@ -1266,8 +1264,11 @@ def installed( ``NOTE:`` For :mod:`apt `, :mod:`ebuild `, - :mod:`pacman `, :mod:`yumpkg `, - and :mod:`zypper `, version numbers can be specified + :mod:`pacman `, + :mod:`winrepo `, + :mod:`yumpkg `, and + :mod:`zypper `, + version numbers can be specified in the ``pkgs`` argument. For example: .. code-block:: yaml @@ -1441,6 +1442,15 @@ def installed( 'result': True, 'comment': 'No packages to install provided'} + # If just a name (and optionally a version) is passed, just pack them into + # the pkgs argument. + if name and not any((pkgs, sources)): + if version: + pkgs = [{name: version}] + version = None + else: + pkgs = [name] + kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if not isinstance(pkg_verify, list): @@ -1607,7 +1617,7 @@ def installed( if salt.utils.is_freebsd(): force = True # Downgrades need to be forced. 
try: - pkg_ret = __salt__['pkg.install'](name, + pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, force=force, diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index d9c41f8cb3..ec430d7a60 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -245,14 +245,12 @@ def state(name, if pillar: cmd_kw['kwarg']['pillar'] = pillar - # If pillarenv is directly defined, use it - if pillarenv: + if pillarenv is not None: cmd_kw['kwarg']['pillarenv'] = pillarenv - # Use pillarenv if it's passed from __opts__ (via state.orchestrate for example) - elif __opts__.get('pillarenv'): - cmd_kw['kwarg']['pillarenv'] = __opts__['pillarenv'] - cmd_kw['kwarg']['saltenv'] = saltenv if saltenv is not None else __env__ + if saltenv is not None: + cmd_kw['kwarg']['saltenv'] = saltenv + cmd_kw['kwarg']['queue'] = queue if isinstance(concurrent, bool): diff --git a/salt/states/selinux.py b/salt/states/selinux.py index 93e45bfa2a..8187ea8338 100644 --- a/salt/states/selinux.py +++ b/salt/states/selinux.py @@ -171,8 +171,14 @@ def boolean(name, value, persist=False): name, rvalue) return ret - if __salt__['selinux.setsebool'](name, rvalue, persist): + ret['result'] = __salt__['selinux.setsebool'](name, rvalue, persist) + if ret['result']: ret['comment'] = 'Boolean {0} has been set to {1}'.format(name, rvalue) + ret['changes'].update({'State': {'old': bools[name]['State'], + 'new': rvalue}}) + if persist and not default: + ret['changes'].update({'Default': {'old': bools[name]['Default'], + 'new': rvalue}}) return ret ret['comment'] = 'Failed to set the boolean {0} to {1}'.format(name, rvalue) return ret diff --git a/salt/states/service.py b/salt/states/service.py index 7b68ee0040..ff5300df6a 100644 --- a/salt/states/service.py +++ b/salt/states/service.py @@ -893,7 +893,7 @@ def mod_watch(name, try: result = func(name, **func_kwargs) except CommandExecutionError as exc: - ret['result'] = True + ret['result'] = False ret['comment'] = 
exc.strerror return ret diff --git a/salt/states/sqlite3.py b/salt/states/sqlite3.py index 8251e04e71..ce8d9f982f 100644 --- a/salt/states/sqlite3.py +++ b/salt/states/sqlite3.py @@ -160,9 +160,13 @@ def row_absent(name, db, table, where_sql, where_args=None): changes['changes']['old'] = rows[0] else: - cursor = conn.execute("DELETE FROM `" + - table + "` WHERE " + where_sql, - where_args) + if where_args is None: + cursor = conn.execute("DELETE FROM `" + + table + "` WHERE " + where_sql) + else: + cursor = conn.execute("DELETE FROM `" + + table + "` WHERE " + where_sql, + where_args) conn.commit() if cursor.rowcount == 1: changes['result'] = True diff --git a/salt/states/testinframod.py b/salt/states/testinframod.py index 0350f4d385..a31a72c3e0 100644 --- a/salt/states/testinframod.py +++ b/salt/states/testinframod.py @@ -52,8 +52,12 @@ def _to_snake_case(pascal_case): def _generate_functions(): - for module in modules.__all__: - module_name = _to_snake_case(module) + try: + modules_ = [_to_snake_case(module_) for module_ in modules.__all__] + except AttributeError: + modules_ = [module_ for module_ in modules.modules] + + for module_name in modules_: func_name = 'testinfra.{0}'.format(module_name) __all__.append(module_name) log.debug('Generating state for module %s as function %s', diff --git a/salt/states/user.py b/salt/states/user.py index fb5397910e..8a731cc2a1 100644 --- a/salt/states/user.py +++ b/salt/states/user.py @@ -126,12 +126,14 @@ def _changes(name, if shell and lusr['shell'] != shell: change['shell'] = shell if 'shadow.info' in __salt__ and 'shadow.default_hash' in __salt__: - if password: + if password and not empty_password: default_hash = __salt__['shadow.default_hash']() if lshad['passwd'] == default_hash \ or lshad['passwd'] != default_hash and enforce_password: if lshad['passwd'] != password: change['passwd'] = password + if empty_password and lshad['passwd'] != '': + change['empty_password'] = True if date and date is not 0 and 
lshad['lstchg'] != date: change['date'] = date if mindays and mindays is not 0 and lshad['min'] != mindays: @@ -454,9 +456,6 @@ def present(name, if gid_from_name: gid = __salt__['file.group_to_gid'](name) - if empty_password: - __salt__['shadow.del_password'](name) - changes = _changes(name, uid, gid, @@ -510,6 +509,9 @@ def present(name, if key == 'passwd' and empty_password: log.warning("No password will be set when empty_password=True") continue + if key == 'empty_password' and val: + __salt__['shadow.del_password'](name) + continue if key == 'date': __salt__['shadow.set_date'](name, date) continue @@ -676,6 +678,14 @@ def present(name, ' {1}'.format(name, 'XXX-REDACTED-XXX') ret['result'] = False ret['changes']['password'] = 'XXX-REDACTED-XXX' + if empty_password and not password: + __salt__['shadow.del_password'](name) + spost = __salt__['shadow.info'](name) + if spost['passwd'] != '': + ret['comment'] = 'User {0} created but failed to ' \ + 'empty password'.format(name) + ret['result'] = False + ret['changes']['password'] = '' if date: __salt__['shadow.set_date'](name, date) spost = __salt__['shadow.info'](name) diff --git a/salt/states/win_iis.py b/salt/states/win_iis.py index 614236afc1..6407315c21 100644 --- a/salt/states/win_iis.py +++ b/salt/states/win_iis.py @@ -481,7 +481,6 @@ def container_setting(name, container, settings=None): :param str container: The type of IIS container. The container types are: AppPools, Sites, SslBindings :param str settings: A dictionary of the setting names and their values. - Example of usage for the ``AppPools`` container: .. 
code-block:: yaml @@ -495,6 +494,7 @@ def container_setting(name, container, settings=None): processModel.maxProcesses: 1 processModel.userName: TestUser processModel.password: TestPassword + processModel.identityType: SpecificUser Example of usage for the ``Sites`` container: @@ -509,6 +509,8 @@ def container_setting(name, container, settings=None): logFile.period: Daily limits.maxUrlSegments: 32 ''' + + identityType_map2string = {0: 'LocalSystem', 1: 'LocalService', 2: 'NetworkService', 3: 'SpecificUser', 4: 'ApplicationPoolIdentity'} ret = {'name': name, 'changes': {}, 'comment': str(), @@ -528,6 +530,10 @@ def container_setting(name, container, settings=None): container=container, settings=settings.keys()) for setting in settings: + # map identity type from numeric to string for comparing + if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys(): + settings[setting] = identityType_map2string[settings[setting]] + if str(settings[setting]) != str(current_settings[setting]): ret_settings['changes'][setting] = {'old': current_settings[setting], 'new': settings[setting]} @@ -540,8 +546,8 @@ def container_setting(name, container, settings=None): ret['changes'] = ret_settings return ret - __salt__['win_iis.set_container_setting'](name=name, container=container, - settings=settings) + __salt__['win_iis.set_container_setting'](name=name, container=container, settings=settings) + new_settings = __salt__['win_iis.get_container_setting'](name=name, container=container, settings=settings.keys()) diff --git a/salt/states/zabbix_usergroup.py b/salt/states/zabbix_usergroup.py index a5d68b1546..292a2aafc9 100644 --- a/salt/states/zabbix_usergroup.py +++ b/salt/states/zabbix_usergroup.py @@ -84,7 +84,7 @@ def present(name, **kwargs): for right in kwargs['rights']: for key in right: right[key] = str(right[key]) - if cmp(sorted(kwargs['rights']), sorted(usergroup['rights'])) != 0: + if sorted(kwargs['rights']) != 
sorted(usergroup['rights']): update_rights = True else: update_rights = True diff --git a/salt/thorium/__init__.py b/salt/thorium/__init__.py index 72e854a36b..5363733d1a 100644 --- a/salt/thorium/__init__.py +++ b/salt/thorium/__init__.py @@ -69,7 +69,7 @@ class ThorState(salt.state.HighState): cache = {'grains': {}, 'pillar': {}} if self.grains or self.pillar: if self.opts.get('minion_data_cache'): - minions = self.cache.ls('minions') + minions = self.cache.list('minions') if not minions: return cache for minion in minions: diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 46789c3dd5..d8d8235920 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -250,15 +250,16 @@ class IPCClient(object): # FIXME key = str(socket_path) - if key not in loop_instance_map: + client = loop_instance_map.get(key) + if client is None: log.debug('Initializing new IPCClient for path: {0}'.format(key)) - new_client = object.__new__(cls) + client = object.__new__(cls) # FIXME - new_client.__singleton_init__(io_loop=io_loop, socket_path=socket_path) - loop_instance_map[key] = new_client + client.__singleton_init__(io_loop=io_loop, socket_path=socket_path) + loop_instance_map[key] = client else: log.debug('Re-using IPCClient for {0}'.format(key)) - return loop_instance_map[key] + return client def __singleton_init__(self, socket_path, io_loop=None): ''' diff --git a/salt/transport/tcp.py b/salt/transport/tcp.py index 8aadfef45e..a9001f03a5 100644 --- a/salt/transport/tcp.py +++ b/salt/transport/tcp.py @@ -221,17 +221,18 @@ class AsyncTCPReqChannel(salt.transport.client.ReqChannel): loop_instance_map = cls.instance_map[io_loop] key = cls.__key(opts, **kwargs) - if key not in loop_instance_map: + obj = loop_instance_map.get(key) + if obj is None: log.debug('Initializing new AsyncTCPReqChannel for {0}'.format(key)) # we need to make a local variable for this, as we are going to store # it in a WeakValueDictionary-- which will remove the item if no one # references 
it-- this forces a reference while we return to the caller - new_obj = object.__new__(cls) - new_obj.__singleton_init__(opts, **kwargs) - loop_instance_map[key] = new_obj + obj = object.__new__(cls) + obj.__singleton_init__(opts, **kwargs) + loop_instance_map[key] = obj else: log.debug('Re-using AsyncTCPReqChannel for {0}'.format(key)) - return loop_instance_map[key] + return obj @classmethod def __key(cls, opts, **kwargs): diff --git a/salt/transport/zeromq.py b/salt/transport/zeromq.py index 67eb54e304..1bb3abebe1 100644 --- a/salt/transport/zeromq.py +++ b/salt/transport/zeromq.py @@ -80,28 +80,19 @@ class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel): loop_instance_map = cls.instance_map[io_loop] key = cls.__key(opts, **kwargs) - if key not in loop_instance_map: + obj = loop_instance_map.get(key) + if obj is None: log.debug('Initializing new AsyncZeroMQReqChannel for {0}'.format(key)) # we need to make a local variable for this, as we are going to store # it in a WeakValueDictionary-- which will remove the item if no one # references it-- this forces a reference while we return to the caller - new_obj = object.__new__(cls) - new_obj.__singleton_init__(opts, **kwargs) - loop_instance_map[key] = new_obj + obj = object.__new__(cls) + obj.__singleton_init__(opts, **kwargs) + loop_instance_map[key] = obj log.trace('Inserted key into loop_instance_map id {0} for key {1} and process {2}'.format(id(loop_instance_map), key, os.getpid())) else: log.debug('Re-using AsyncZeroMQReqChannel for {0}'.format(key)) - try: - return loop_instance_map[key] - except KeyError: - # In iterating over the loop_instance_map, we may have triggered - # garbage collection. Therefore, the key is no longer present in - # the map. Re-gen and add to map. 
- log.debug('Initializing new AsyncZeroMQReqChannel due to GC for {0}'.format(key)) - new_obj = object.__new__(cls) - new_obj.__singleton_init__(opts, **kwargs) - loop_instance_map[key] = new_obj - return loop_instance_map[key] + return obj def __deepcopy__(self, memo): cls = self.__class__ diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index b4e3c58abb..9345e84746 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -1,6 +1,11 @@ # -*- coding: utf-8 -*- ''' Some of the utils used by salt + +NOTE: The dev team is working on splitting up this file for the Oxygen release. +Please do not add any new functions to this file. New functions should be +organized in other files under salt/utils/. Please consult the dev team if you +are unsure where a new function should go. ''' # Import python libs @@ -1444,6 +1449,24 @@ def expr_match(line, expr): def check_whitelist_blacklist(value, whitelist=None, blacklist=None): ''' Check a whitelist and/or blacklist to see if the value matches it. + + value + The item to check the whitelist and/or blacklist against. + + whitelist + The list of items that are white-listed. If ``value`` is found + in the whitelist, then the function returns ``True``. Otherwise, + it returns ``False``. + + blacklist + The list of items that are black-listed. If ``value`` is found + in the blacklist, then the function returns ``False``. Otherwise, + it returns ``True``. + + If both a whitelist and a blacklist are provided, value membership + in the blacklist will be examined first. If the value is not found + in the blacklist, then the whitelist is checked. If the value isn't + found in the whitelist, the function returns ``False``. 
''' if blacklist is not None: if not hasattr(blacklist, '__iter__'): @@ -2075,7 +2098,7 @@ def is_true(value=None): pass # Now check for truthiness - if isinstance(value, (int, float)): + if isinstance(value, (six.integer_types, float)): return value > 0 elif isinstance(value, six.string_types): return str(value).lower() == 'true' @@ -2851,7 +2874,7 @@ def repack_dictlist(data, if val_cb is None: val_cb = lambda x, y: y - valid_non_dict = (six.string_types, int, float) + valid_non_dict = (six.string_types, six.integer_types, float) if isinstance(data, list): for element in data: if isinstance(element, valid_non_dict): diff --git a/salt/utils/aws.py b/salt/utils/aws.py index 9e1212e92e..011dd346d7 100644 --- a/salt/utils/aws.py +++ b/salt/utils/aws.py @@ -392,7 +392,7 @@ def query(params=None, setname=None, requesturl=None, location=None, service_url = prov_dict.get('service_url', 'amazonaws.com') if not location: - location = get_location(opts, provider) + location = get_location(opts, prov_dict) if endpoint is None: if not requesturl: diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py index cd6d59e9b5..4b84bd59c4 100644 --- a/salt/utils/cloud.py +++ b/salt/utils/cloud.py @@ -58,6 +58,7 @@ import salt.config import salt.loader import salt.template import salt.utils +import salt.utils.compat import salt.utils.event from salt.utils import vt from salt.utils.nb_popen import NonBlockingPopen @@ -292,12 +293,14 @@ def salt_config_to_yaml(configuration, line_break='\n'): Dumper=SafeOrderedDumper) -def bootstrap(vm_, opts): +def bootstrap(vm_, opts=None): ''' This is the primary entry point for logging into any system (POSIX or Windows) to install Salt. It will make the decision on its own as to which deploy function to call. 
''' + if opts is None: + opts = __opts__ deploy_config = salt.config.get_cloud_config_value( 'deploy', vm_, opts, default=False) @@ -311,6 +314,11 @@ def bootstrap(vm_, opts): } } + if vm_.get('driver') == 'saltify': + saltify_driver = True + else: + saltify_driver = False + key_filename = salt.config.get_cloud_config_value( 'key_filename', vm_, opts, search_global=False, default=salt.config.get_cloud_config_value( @@ -475,6 +483,9 @@ def bootstrap(vm_, opts): 'make_minion', vm_, opts, default=True ) + if saltify_driver: + deploy_kwargs['wait_for_passwd_maxtries'] = 0 # No need to wait/retry with Saltify + win_installer = salt.config.get_cloud_config_value( 'win_installer', vm_, opts ) @@ -502,6 +513,8 @@ def bootstrap(vm_, opts): deploy_kwargs['winrm_use_ssl'] = salt.config.get_cloud_config_value( 'winrm_use_ssl', vm_, opts, default=True ) + if saltify_driver: + deploy_kwargs['port_timeout'] = 1 # No need to wait/retry with Saltify # Store what was used to the deploy the VM event_kwargs = copy.deepcopy(deploy_kwargs) @@ -809,21 +822,21 @@ def wait_for_winexesvc(host, port, username, password, timeout=900): log.debug('winexe connected...') return True log.debug('Return code was {0}'.format(ret_code)) - time.sleep(1) except socket.error as exc: log.debug('Caught exception in wait_for_winexesvc: {0}'.format(exc)) - time.sleep(1) - if time.time() - start > timeout: - log.error('winexe connection timed out: {0}'.format(timeout)) - return False - log.debug( - 'Retrying winexe connection to host {0} on port {1} ' - '(try {2})'.format( - host, - port, - try_count - ) + + if time.time() - start > timeout: + log.error('winexe connection timed out: {0}'.format(timeout)) + return False + log.debug( + 'Retrying winexe connection to host {0} on port {1} ' + '(try {2})'.format( + host, + port, + try_count ) + ) + time.sleep(1) def wait_for_winrm(host, port, username, password, timeout=900, use_ssl=True): @@ -852,19 +865,19 @@ def wait_for_winrm(host, port, username, password, 
timeout=900, use_ssl=True): log.debug('WinRM session connected...') return s log.debug('Return code was {0}'.format(r.status_code)) - time.sleep(1) except WinRMTransportError as exc: log.debug('Caught exception in wait_for_winrm: {0}'.format(exc)) - if time.time() - start > timeout: - log.error('WinRM connection timed out: {0}'.format(timeout)) - return None - log.debug( - 'Retrying WinRM connection to host {0} on port {1} ' - '(try {2})'.format( - host, port, trycount - ) + + if time.time() - start > timeout: + log.error('WinRM connection timed out: {0}'.format(timeout)) + return None + log.debug( + 'Retrying WinRM connection to host {0} on port {1} ' + '(try {2})'.format( + host, port, trycount ) - time.sleep(1) + ) + time.sleep(1) def validate_windows_cred(host, @@ -3041,7 +3054,7 @@ def diff_node_cache(prov_dir, node, new_data, opts): # Perform a simple diff between the old and the new data, and if it differs, # return both dicts. # TODO: Return an actual diff - diff = cmp(new_data, cache_data) + diff = salt.utils.compat.cmp(new_data, cache_data) if diff != 0: fire_event( 'event', diff --git a/salt/utils/compat.py b/salt/utils/compat.py index b97820db92..09c5cc2828 100644 --- a/salt/utils/compat.py +++ b/salt/utils/compat.py @@ -46,3 +46,15 @@ def deepcopy_bound(name): finally: copy._deepcopy_dispatch = pre_dispatch return ret + + +def cmp(x, y): + ''' + Compatibility helper function to replace the ``cmp`` function from Python 2. The + ``cmp`` function is no longer available in Python 3. + + cmp(x, y) -> integer + + Return negative if xy. 
+ ''' + return (x > y) - (x < y) diff --git a/salt/utils/decorators/__init__.py b/salt/utils/decorators/__init__.py index 8c3daeacd3..f7066b48bf 100644 --- a/salt/utils/decorators/__init__.py +++ b/salt/utils/decorators/__init__.py @@ -251,9 +251,8 @@ def memoize(func): str_args.append(str(arg)) else: str_args.append(arg) - args = str_args - args_ = ','.join(list(args) + ['{0}={1}'.format(k, kwargs[k]) for k in sorted(kwargs)]) + args_ = ','.join(list(str_args) + ['{0}={1}'.format(k, kwargs[k]) for k in sorted(kwargs)]) if args_ not in cache: cache[args_] = func(*args, **kwargs) return cache[args_] @@ -542,8 +541,14 @@ class _WithDeprecated(_DeprecationDecorator): f_name=function.__name__)) opts = self._globals.get('__opts__', '{}') - use_deprecated = full_name in opts.get(self.CFG_USE_DEPRECATED, list()) - use_superseded = full_name in opts.get(self.CFG_USE_SUPERSEDED, list()) + pillar = self._globals.get('__pillar__', '{}') + + use_deprecated = (full_name in opts.get(self.CFG_USE_DEPRECATED, list()) or + full_name in pillar.get(self.CFG_USE_DEPRECATED, list())) + + use_superseded = (full_name in opts.get(self.CFG_USE_SUPERSEDED, list()) or + full_name in pillar.get(self.CFG_USE_SUPERSEDED, list())) + if use_deprecated and use_superseded: raise SaltConfigurationError("Function '{0}' is mentioned both in deprecated " "and superseded sections. 
Please remove any of that.".format(full_name)) @@ -565,8 +570,11 @@ class _WithDeprecated(_DeprecationDecorator): f_name=self._orig_f_name) return func_path in self._globals.get('__opts__').get( + self.CFG_USE_DEPRECATED, list()) or func_path in self._globals.get('__pillar__').get( self.CFG_USE_DEPRECATED, list()) or (self._policy == self.OPT_IN and not (func_path in self._globals.get('__opts__', {}).get( + self.CFG_USE_SUPERSEDED, list())) + and not (func_path in self._globals.get('__pillar__', {}).get( self.CFG_USE_SUPERSEDED, list()))), func_path def __call__(self, function): diff --git a/salt/utils/docker/translate.py b/salt/utils/docker/translate.py index b6d8717316..372596a759 100644 --- a/salt/utils/docker/translate.py +++ b/salt/utils/docker/translate.py @@ -387,7 +387,7 @@ def dns(val, **kwargs): def domainname(val, **kwargs): # pylint: disable=unused-argument - return _translate_stringlist(val) + return _translate_str(val) def entrypoint(val, **kwargs): # pylint: disable=unused-argument diff --git a/salt/utils/files.py b/salt/utils/files.py index 12dd6987c6..8d463756d9 100644 --- a/salt/utils/files.py +++ b/salt/utils/files.py @@ -7,9 +7,11 @@ import contextlib import errno import logging import os +import re import shutil import subprocess import time +import urllib # Import salt libs import salt.utils @@ -57,7 +59,7 @@ def recursive_copy(source, dest): (identical to cp -r on a unix machine) ''' for root, _, files in os.walk(source): - path_from_source = root.replace(source, '').lstrip('/') + path_from_source = root.replace(source, '').lstrip(os.sep) target_directory = os.path.join(dest, path_from_source) if not os.path.exists(target_directory): os.makedirs(target_directory) @@ -258,3 +260,39 @@ def set_umask(mask): yield finally: os.umask(orig_mask) + + +def safe_filename_leaf(file_basename): + ''' + Input the basename of a file, without the directory tree, and returns a safe name to use + i.e. 
only the required characters are converted by urllib.quote + If the input is a PY2 String, output a PY2 String. If input is Unicode output Unicode. + For consistency all platforms are treated the same. Hard coded to utf8 as its ascii compatible + windows is \\ / : * ? " < > | posix is / + + .. versionadded:: 2017.7.2 + ''' + def _replace(re_obj): + return urllib.quote(re_obj.group(0), safe=u'') + if not isinstance(file_basename, six.text_type): + # the following string is not prefixed with u + return re.sub('[\\\\:/*?"<>|]', + _replace, + six.text_type(file_basename, 'utf8').encode('ascii', 'backslashreplace')) + # the following string is prefixed with u + return re.sub(u'[\\\\:/*?"<>|]', _replace, file_basename, flags=re.UNICODE) + + +def safe_filepath(file_path_name): + ''' + Input the full path and filename, splits on directory separator and calls safe_filename_leaf for + each part of the path. + + .. versionadded:: 2017.7.2 + ''' + (drive, path) = os.path.splitdrive(file_path_name) + path = os.sep.join([safe_filename_leaf(file_section) for file_section in file_path_name.rsplit(os.sep)]) + if drive: + return os.sep.join([drive, path]) + else: + return path diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py index f9bc9b9b01..ef8c2adf74 100644 --- a/salt/utils/gitfs.py +++ b/salt/utils/gitfs.py @@ -38,7 +38,6 @@ from salt.utils.versions import LooseVersion as _LooseVersion # Import third party libs import salt.ext.six as six -VALID_PROVIDERS = ('pygit2', 'gitpython') # Optional per-remote params that can only be used on a per-remote basis, and # thus do not have defaults in salt/config.py. PER_REMOTE_ONLY = ('name',) @@ -164,7 +163,7 @@ class GitProvider(object): directly. self.provider should be set in the sub-class' __init__ function before - invoking GitProvider.__init__(). + invoking the parent class' __init__. 
''' def __init__(self, opts, remote, per_remote_defaults, per_remote_only, override_params, cache_root, role='gitfs'): @@ -857,8 +856,10 @@ class GitPython(GitProvider): def __init__(self, opts, remote, per_remote_defaults, per_remote_only, override_params, cache_root, role='gitfs'): self.provider = 'gitpython' - GitProvider.__init__(self, opts, remote, per_remote_defaults, - per_remote_only, override_params, cache_root, role) + super(GitPython, self).__init__( + opts, remote, per_remote_defaults, per_remote_only, + override_params, cache_root, role + ) def add_refspecs(self, *refspecs): ''' @@ -976,17 +977,17 @@ class GitPython(GitProvider): else: new = True - try: - ssl_verify = self.repo.git.config('--get', 'http.sslVerify') - except git.exc.GitCommandError: - ssl_verify = '' - desired_ssl_verify = str(self.ssl_verify).lower() - if ssl_verify != desired_ssl_verify: - self.repo.git.config('http.sslVerify', desired_ssl_verify) + try: + ssl_verify = self.repo.git.config('--get', 'http.sslVerify') + except git.exc.GitCommandError: + ssl_verify = '' + desired_ssl_verify = str(self.ssl_verify).lower() + if ssl_verify != desired_ssl_verify: + self.repo.git.config('http.sslVerify', desired_ssl_verify) - # Ensure that refspecs for the "origin" remote are set up as configured - if hasattr(self, 'refspecs'): - self.configure_refspecs() + # Ensure that refspecs for the "origin" remote are set up as configured + if hasattr(self, 'refspecs'): + self.configure_refspecs() return new @@ -1192,8 +1193,10 @@ class Pygit2(GitProvider): def __init__(self, opts, remote, per_remote_defaults, per_remote_only, override_params, cache_root, role='gitfs'): self.provider = 'pygit2' - GitProvider.__init__(self, opts, remote, per_remote_defaults, - per_remote_only, override_params, cache_root, role) + super(Pygit2, self).__init__( + opts, remote, per_remote_defaults, per_remote_only, + override_params, cache_root, role + ) def add_refspecs(self, *refspecs): ''' @@ -1454,18 +1457,18 @@ class 
Pygit2(GitProvider): else: new = True - try: - ssl_verify = self.repo.config.get_bool('http.sslVerify') - except KeyError: - ssl_verify = None - if ssl_verify != self.ssl_verify: - self.repo.config.set_multivar('http.sslVerify', - '', - str(self.ssl_verify).lower()) + try: + ssl_verify = self.repo.config.get_bool('http.sslVerify') + except KeyError: + ssl_verify = None + if ssl_verify != self.ssl_verify: + self.repo.config.set_multivar('http.sslVerify', + '', + str(self.ssl_verify).lower()) - # Ensure that refspecs for the "origin" remote are set up as configured - if hasattr(self, 'refspecs'): - self.configure_refspecs() + # Ensure that refspecs for the "origin" remote are set up as configured + if hasattr(self, 'refspecs'): + self.configure_refspecs() return new @@ -1877,11 +1880,17 @@ class Pygit2(GitProvider): fp_.write(blob.data) +GIT_PROVIDERS = { + 'pygit2': Pygit2, + 'gitpython': GitPython, +} + + class GitBase(object): ''' Base class for gitfs/git_pillar ''' - def __init__(self, opts, valid_providers=VALID_PROVIDERS, cache_root=None): + def __init__(self, opts, git_providers=None, cache_root=None): ''' IMPORTANT: If specifying a cache_root, understand that this is also where the remotes will be cloned. A non-default cache_root is only @@ -1889,8 +1898,9 @@ class GitBase(object): out into the winrepo locations and not within the cachedir. 
''' self.opts = opts - self.valid_providers = valid_providers - self.get_provider() + self.git_providers = git_providers if git_providers is not None \ + else GIT_PROVIDERS + self.verify_provider() if cache_root is not None: self.cache_root = self.remote_root = cache_root else: @@ -1948,7 +1958,7 @@ class GitBase(object): self.remotes = [] for remote in remotes: - repo_obj = self.provider_class( + repo_obj = self.git_providers[self.provider]( self.opts, remote, per_remote_defaults, @@ -2202,7 +2212,7 @@ class GitBase(object): # Hash file won't exist if no files have yet been served up pass - def get_provider(self): + def verify_provider(self): ''' Determine which provider to use ''' @@ -2223,12 +2233,12 @@ class GitBase(object): # Should only happen if someone does something silly like # set the provider to a numeric value. desired_provider = str(desired_provider).lower() - if desired_provider not in self.valid_providers: + if desired_provider not in self.git_providers: log.critical( 'Invalid {0}_provider \'{1}\'. 
Valid choices are: {2}' .format(self.role, desired_provider, - ', '.join(self.valid_providers)) + ', '.join(self.git_providers)) ) failhard(self.role) elif desired_provider == 'pygit2' and self.verify_pygit2(): @@ -2241,17 +2251,13 @@ class GitBase(object): .format(self.role) ) failhard(self.role) - if self.provider == 'pygit2': - self.provider_class = Pygit2 - elif self.provider == 'gitpython': - self.provider_class = GitPython def verify_gitpython(self, quiet=False): ''' Check if GitPython is available and at a compatible version (>= 0.3.0) ''' def _recommend(): - if HAS_PYGIT2 and 'pygit2' in self.valid_providers: + if HAS_PYGIT2 and 'pygit2' in self.git_providers: log.error(_RECOMMEND_PYGIT2.format(self.role)) if not HAS_GITPYTHON: @@ -2262,7 +2268,7 @@ class GitBase(object): ) _recommend() return False - elif 'gitpython' not in self.valid_providers: + elif 'gitpython' not in self.git_providers: return False # pylint: disable=no-member @@ -2302,7 +2308,7 @@ class GitBase(object): Pygit2 must be at least 0.20.3 and libgit2 must be at least 0.20.0. 
''' def _recommend(): - if HAS_GITPYTHON and 'gitpython' in self.valid_providers: + if HAS_GITPYTHON and 'gitpython' in self.git_providers: log.error(_RECOMMEND_GITPYTHON.format(self.role)) if not HAS_PYGIT2: @@ -2313,7 +2319,7 @@ class GitBase(object): ) _recommend() return False - elif 'pygit2' not in self.valid_providers: + elif 'pygit2' not in self.git_providers: return False # pylint: disable=no-member @@ -2432,7 +2438,7 @@ class GitFS(GitBase): ''' def __init__(self, opts): self.role = 'gitfs' - GitBase.__init__(self, opts) + super(GitFS, self).__init__(opts) def dir_list(self, load): ''' @@ -2735,7 +2741,7 @@ class GitPillar(GitBase): ''' def __init__(self, opts): self.role = 'git_pillar' - GitBase.__init__(self, opts) + super(GitPillar, self).__init__(opts) def checkout(self): ''' @@ -2837,7 +2843,7 @@ class WinRepo(GitBase): ''' def __init__(self, opts, winrepo_dir): self.role = 'winrepo' - GitBase.__init__(self, opts, cache_root=winrepo_dir) + super(WinRepo, self).__init__(opts, cache_root=winrepo_dir) def checkout(self): ''' diff --git a/salt/utils/http.py b/salt/utils/http.py index bfac6b07f1..eaec8df33a 100644 --- a/salt/utils/http.py +++ b/salt/utils/http.py @@ -466,8 +466,8 @@ def query(url, # We want to use curl_http if we have a proxy defined if proxy_host and proxy_port: if HAS_CURL_HTTPCLIENT is False: - ret['error'] = ('proxy_host and proxy_port has been set. This requires pycurl, but the ' - 'pycurl library does not seem to be installed') + ret['error'] = ('proxy_host and proxy_port has been set. 
This requires pycurl and tornado, ' + 'but the libraries does not seem to be installed') log.error(ret['error']) return ret @@ -779,7 +779,8 @@ def _render(template, render, renderer, template_dict, opts): blacklist = opts.get('renderer_blacklist') whitelist = opts.get('renderer_whitelist') ret = compile_template(template, rend, renderer, blacklist, whitelist, **template_dict) - ret = ret.read() + if salt.utils.stringio.is_readable(ret): + ret = ret.read() if str(ret).startswith('#!') and not str(ret).startswith('#!/'): ret = str(ret).split('\n', 1)[1] return ret diff --git a/salt/utils/master.py b/salt/utils/master.py index c48137ce4c..16e95a40aa 100644 --- a/salt/utils/master.py +++ b/salt/utils/master.py @@ -122,7 +122,7 @@ class MasterPillarUtil(object): 'and enfore_mine_cache are both disabled.') return mine_data if not minion_ids: - minion_ids = self.cache.ls('minions') + minion_ids = self.cache.list('minions') for minion_id in minion_ids: if not salt.utils.verify.valid_id(self.opts, minion_id): continue @@ -141,7 +141,7 @@ class MasterPillarUtil(object): 'enabled.') return grains, pillars if not minion_ids: - minion_ids = self.cache.ls('minions') + minion_ids = self.cache.list('minions') for minion_id in minion_ids: if not salt.utils.verify.valid_id(self.opts, minion_id): continue @@ -364,7 +364,7 @@ class MasterPillarUtil(object): # in the same file, 'data.p' grains, pillars = self._get_cached_minion_data(*minion_ids) try: - c_minions = self.cache.ls('minions') + c_minions = self.cache.list('minions') for minion_id in minion_ids: if not salt.utils.verify.valid_id(self.opts, minion_id): continue diff --git a/salt/utils/minions.py b/salt/utils/minions.py index f4de4e92d9..8afa41698c 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -75,7 +75,7 @@ def get_minion_data(minion, opts): if opts.get('minion_data_cache', False): cache = salt.cache.factory(opts) if minion is None: - for id_ in cache.ls('minions'): + for id_ in cache.list('minions'): 
data = cache.fetch('minions/{0}'.format(id_), 'data') if data is None: continue @@ -342,13 +342,13 @@ class CkMinions(object): if greedy: minions = self._pki_minions() elif cache_enabled: - minions = self.cache.ls('minions') + minions = self.cache.list('minions') else: return [] if cache_enabled: if greedy: - cminions = self.cache.ls('minions') + cminions = self.cache.list('minions') else: cminions = minions if cminions is None: @@ -412,7 +412,7 @@ class CkMinions(object): mlist.append(fn_) return mlist elif cache_enabled: - return self.cache.ls('minions') + return self.cache.list('minions') else: return list() @@ -574,7 +574,7 @@ class CkMinions(object): ''' minions = set() if self.opts.get('minion_data_cache', False): - search = self.cache.ls('minions') + search = self.cache.list('minions') if search is None: return minions addrs = salt.utils.network.local_port_tcp(int(self.opts['publish_port'])) diff --git a/salt/utils/napalm.py b/salt/utils/napalm.py index c69a92432e..0523e0a568 100644 --- a/salt/utils/napalm.py +++ b/salt/utils/napalm.py @@ -19,10 +19,12 @@ from __future__ import absolute_import import traceback import logging +import importlib from functools import wraps log = logging.getLogger(__file__) import salt.utils +import salt.output # Import third party lib try: @@ -242,6 +244,11 @@ def get_device_opts(opts, salt_obj=None): network_device = {} # by default, look in the proxy config details device_dict = opts.get('proxy', {}) or opts.get('napalm', {}) + if opts.get('proxy') or opts.get('napalm'): + opts['multiprocessing'] = device_dict.get('multiprocessing', False) + # Most NAPALM drivers are SSH-based, so multiprocessing should default to False. + # But the user can be allows to have a different value for the multiprocessing, which will + # override the opts. 
if salt_obj and not device_dict: # get the connection details from the opts device_dict = salt_obj['config.merge']('napalm') @@ -264,6 +271,7 @@ def get_device_opts(opts, salt_obj=None): network_device['TIMEOUT'] = device_dict.get('timeout', 60) network_device['OPTIONAL_ARGS'] = device_dict.get('optional_args', {}) network_device['ALWAYS_ALIVE'] = device_dict.get('always_alive', True) + network_device['PROVIDER'] = device_dict.get('provider') network_device['UP'] = False # get driver object form NAPALM if 'config_lock' not in network_device['OPTIONAL_ARGS']: @@ -281,7 +289,24 @@ def get_device(opts, salt_obj=None): ''' log.debug('Setting up NAPALM connection') network_device = get_device_opts(opts, salt_obj=salt_obj) - _driver_ = napalm_base.get_network_driver(network_device.get('DRIVER_NAME')) + provider_lib = napalm_base + if network_device.get('PROVIDER'): + # In case the user requires a different provider library, + # other than napalm-base. + # For example, if napalm-base does not satisfy the requirements + # and needs to be enahanced with more specific features, + # we may need to define a custom library on top of napalm-base + # with the constraint that it still needs to provide the + # `get_network_driver` function. However, even this can be + # extended later, if really needed. + # Configuration example: + # provider: napalm_base_example + try: + provider_lib = importlib.import_module(network_device.get('PROVIDER')) + except ImportError as ierr: + log.error('Unable to import {0}'.format(network_device.get('PROVIDER')), exc_info=True) + log.error('Falling back to napalm-base') + _driver_ = provider_lib.get_network_driver(network_device.get('DRIVER_NAME')) try: network_device['DRIVER'] = _driver_( network_device.get('HOSTNAME', ''), @@ -408,58 +433,58 @@ def default_ret(name): return ret -def loaded_ret(ret, loaded, test, debug): +def loaded_ret(ret, loaded, test, debug, compliance_report=False, opts=None): ''' Return the final state output. 
- ret The initial state output structure. - loaded The loaded dictionary. ''' # Always get the comment - ret.update({ - 'comment': loaded.get('comment', '') - }) + changes = {} pchanges = {} + ret['comment'] = loaded['comment'] + if 'diff' in loaded: + changes['diff'] = loaded['diff'] + pchanges['diff'] = loaded['diff'] + if 'compliance_report' in loaded: + if compliance_report: + changes['compliance_report'] = loaded['compliance_report'] + pchanges['compliance_report'] = loaded['compliance_report'] + if debug and 'loaded_config' in loaded: + changes['loaded_config'] = loaded['loaded_config'] + pchanges['loaded_config'] = loaded['loaded_config'] + ret['pchanges'] = pchanges + if changes.get('diff'): + ret['comment'] = '{comment_base}\n\nConfiguration diff:\n\n{diff}'.format(comment_base=ret['comment'], + diff=changes['diff']) + if changes.get('loaded_config'): + ret['comment'] = '{comment_base}\n\nLoaded config:\n\n{loaded_cfg}'.format( + comment_base=ret['comment'], + loaded_cfg=changes['loaded_config']) + if changes.get('compliance_report'): + ret['comment'] = '{comment_base}\n\nCompliance report:\n\n{compliance}'.format( + comment_base=ret['comment'], + compliance=salt.output.string_format(changes['compliance_report'], 'nested', opts=opts)) if not loaded.get('result', False): # Failure of some sort return ret - if debug: - # Always check for debug - pchanges.update({ - 'loaded_config': loaded.get('loaded_config', '') - }) - ret.update({ - "pchanges": pchanges - }) if not loaded.get('already_configured', True): # We're making changes - pchanges.update({ - "diff": loaded.get('diff', '') - }) - ret.update({ - 'pchanges': pchanges - }) if test: - for k, v in pchanges.items(): - ret.update({ - "comment": "{}:\n{}\n\n{}".format(k, v, ret.get("comment", '')) - }) - ret.update({ - 'result': None, - }) + ret['result'] = None return ret # Not test, changes were applied ret.update({ 'result': True, - 'changes': pchanges, - 'comment': "Configuration 
changed!\n{}".format(ret.get('comment', '')) + 'changes': changes, + 'comment': "Configuration changed!\n{}".format(loaded['comment']) }) return ret # No changes ret.update({ - 'result': True + 'result': True, + 'changes': {} }) return ret diff --git a/salt/utils/network.py b/salt/utils/network.py index aee5be16ab..33c7a14c9f 100644 --- a/salt/utils/network.py +++ b/salt/utils/network.py @@ -453,6 +453,11 @@ def _network_hosts(ip_addr_entry): def network_hosts(value, options=None, version=None): ''' Return the list of hosts within a network. + + .. note:: + + When running this command with a large IPv6 network, the command will + take a long time to gather all of the hosts. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index a4410c82d6..925ec9f4e4 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -2156,10 +2156,18 @@ class SaltCPOptionParser(six.with_metaclass(OptionParserMeta, def _mixin_setup(self): file_opts_group = optparse.OptionGroup(self, 'File Options') + file_opts_group.add_option( + '-C', '--chunked', + default=False, + dest='chunked', + action='store_true', + help='Use chunked files transfer. Supports big files, recursive ' + 'lookup and directories creation.' + ) file_opts_group.add_option( '-n', '--no-compression', default=True, - dest='compression', + dest='gzip', action='store_false', help='Disable gzip compression.' 
) @@ -2180,7 +2188,6 @@ class SaltCPOptionParser(six.with_metaclass(OptionParserMeta, self.config['tgt'] = self.args[0] self.config['src'] = [os.path.realpath(x) for x in self.args[1:-1]] self.config['dest'] = self.args[-1] - self.config['gzip'] = True def setup_config(self): return config.master_config(self.get_config_file_path()) diff --git a/salt/utils/process.py b/salt/utils/process.py index aefe8731ee..027af65edc 100644 --- a/salt/utils/process.py +++ b/salt/utils/process.py @@ -15,6 +15,7 @@ import contextlib import subprocess import multiprocessing import multiprocessing.util +import socket # Import salt libs @@ -55,7 +56,20 @@ def notify_systemd(): import systemd.daemon except ImportError: if salt.utils.which('systemd-notify') and systemd_notify_call('--booted'): - return systemd_notify_call('--ready') + # Notify systemd synchronously + notify_socket = os.getenv('NOTIFY_SOCKET') + if notify_socket: + # Handle abstract namespace socket + if notify_socket.startswith('@'): + notify_socket = '\0{0}'.format(notify_socket[1:]) + try: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + sock.connect(notify_socket) + sock.sendall('READY=1'.encode()) + sock.close() + except socket.error: + return systemd_notify_call('--ready') + return True return False if systemd.daemon.booted(): @@ -130,8 +144,10 @@ def get_pidfile(pidfile): ''' with salt.utils.fopen(pidfile) as pdf: pid = pdf.read() - - return int(pid) + if pid: + return int(pid) + else: + return def clean_proc(proc, wait_for_kill=10): diff --git a/salt/utils/reactor.py b/salt/utils/reactor.py index 6e382b477f..57c4fd0863 100644 --- a/salt/utils/reactor.py +++ b/salt/utils/reactor.py @@ -283,6 +283,9 @@ class ReactWrap(object): # Update called function's low data with event user to # segregate events fired by reactor and avoid reaction loops kwargs['__user__'] = self.event_user + # Replace ``state`` kwarg which comes from high data compiler. + # It breaks some runner functions and seems unnecessary. 
+ kwargs['__state__'] = kwargs.pop('state') l_fun(*f_call.get('args', ()), **kwargs) except Exception: diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index 231f2331c0..8613d6a36f 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -333,6 +333,7 @@ import logging import errno import random import yaml +import copy # Import Salt libs import salt.config @@ -841,10 +842,16 @@ class Schedule(object): if argspec.keywords: # this function accepts **kwargs, pack in the publish data for key, val in six.iteritems(ret): - kwargs['__pub_{0}'.format(key)] = val + kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val) ret['return'] = self.functions[func](*args, **kwargs) + # runners do not provide retcode + if 'retcode' in self.functions.pack['__context__']: + ret['retcode'] = self.functions.pack['__context__']['retcode'] + + ret['success'] = True + data_returner = data.get('returner', None) if data_returner or self.schedule_returner: if 'return_config' in data: @@ -861,7 +868,6 @@ class Schedule(object): for returner in OrderedDict.fromkeys(rets): ret_str = '{0}.returner'.format(returner) if ret_str in self.returners: - ret['success'] = True self.returners[ret_str](ret) else: log.info( @@ -870,11 +876,6 @@ class Schedule(object): ) ) - # runners do not provide retcode - if 'retcode' in self.functions.pack['__context__']: - ret['retcode'] = self.functions.pack['__context__']['retcode'] - - ret['success'] = True except Exception: log.exception("Unhandled exception running {0}".format(ret['fun'])) # Although catch-all exception handlers are bad, the exception here @@ -1129,21 +1130,28 @@ class Schedule(object): log.error('Invalid date string {0}. 
' 'Ignoring job {1}.'.format(i, job)) continue - when = int(time.mktime(when__.timetuple())) - if when >= now: - _when.append(when) + _when.append(int(time.mktime(when__.timetuple()))) if data['_splay']: _when.append(data['_splay']) + # Sort the list of "whens" from earlier to later schedules _when.sort() + + for i in _when: + if i < now and len(_when) > 1: + # Remove all missed schedules except the latest one. + # We need it to detect if it was triggered previously. + _when.remove(i) + if _when: - # Grab the first element - # which is the next run time + # Grab the first element, which is the next run time or + # last scheduled time in the past. when = _when[0] if '_run' not in data: - data['_run'] = True + # Prevent run of jobs from the past + data['_run'] = bool(when >= now) if not data['_next_fire_time']: data['_next_fire_time'] = when diff --git a/salt/utils/templates.py b/salt/utils/templates.py index 87820fb550..b4bf049dc1 100644 --- a/salt/utils/templates.py +++ b/salt/utils/templates.py @@ -72,6 +72,9 @@ class AliasedLoader(object): def __getattr__(self, name): return getattr(self.wrapped, name) + def __contains__(self, name): + return name in self.wrapped + class AliasedModule(object): ''' diff --git a/salt/utils/url.py b/salt/utils/url.py index 0998ee5a0c..df6e64443b 100644 --- a/salt/utils/url.py +++ b/salt/utils/url.py @@ -60,15 +60,15 @@ def is_escaped(url): ''' test whether `url` is escaped with `|` ''' - if salt.utils.is_windows(): - return False - scheme = urlparse(url).scheme if not scheme: return url.startswith('|') elif scheme == 'salt': path, saltenv = parse(url) - return path.startswith('|') + if salt.utils.is_windows() and '|' in url: + return path.startswith('_') + else: + return path.startswith('|') else: return False @@ -100,15 +100,15 @@ def unescape(url): ''' remove escape character `|` from `url` ''' - if salt.utils.is_windows(): - return url - scheme = urlparse(url).scheme if not scheme: return url.lstrip('|') elif scheme == 'salt': 
path, saltenv = parse(url) - return create(path.lstrip('|'), saltenv) + if salt.utils.is_windows() and '|' in url: + return create(path.lstrip('_'), saltenv) + else: + return create(path.lstrip('|'), saltenv) else: return url diff --git a/salt/utils/versions.py b/salt/utils/versions.py index e4eb352415..d6d73ac866 100644 --- a/salt/utils/versions.py +++ b/salt/utils/versions.py @@ -11,7 +11,7 @@ because on python 3 you can no longer compare strings against integers. ''' -# Import pytohn libs +# Import python libs from __future__ import absolute_import # pylint: disable=blacklisted-module from distutils.version import StrictVersion as _StrictVersion @@ -19,7 +19,7 @@ from distutils.version import LooseVersion as _LooseVersion # pylint: enable=blacklisted-module # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six class StrictVersion(_StrictVersion): diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 19292f2128..4b8f5a091a 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -238,7 +238,7 @@ def _get_service_instance(host, username, password, protocol, pwd=password, protocol=protocol, port=port, - sslContext=ssl._create_unverified_context(), + sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: diff --git a/salt/utils/win_functions.py b/salt/utils/win_functions.py index 23ee3edf04..4e3ec9663c 100644 --- a/salt/utils/win_functions.py +++ b/salt/utils/win_functions.py @@ -4,6 +4,9 @@ Various functions to be used by windows during start up and to monkey patch missing functions in other modules ''' from __future__ import absolute_import +import platform + +# Import Salt Libs from salt.exceptions import CommandExecutionError # Import 3rd Party Libs @@ -138,3 +141,19 @@ def get_current_user(): return False return user_name + + +def get_sam_name(username): + ''' + Gets the SAM name for a user. 
It basically prefixes a username without a + backslash with the computer name. If the username contains a backslash, it + is returned as is. + + Everything is returned lower case + + i.e. salt.utils.fix_local_user('Administrator') would return 'computername\administrator' + ''' + if '\\' not in username: + username = '{0}\\{1}'.format(platform.node(), username) + + return username.lower() diff --git a/salt/utils/yamlloader.py b/salt/utils/yamlloader.py index 3d07c005c7..da0fd8d466 100644 --- a/salt/utils/yamlloader.py +++ b/salt/utils/yamlloader.py @@ -46,9 +46,9 @@ class SaltYamlSafeLoader(yaml.SafeLoader, object): self.add_constructor( u'tag:yaml.org,2002:omap', type(self).construct_yaml_map) - self.add_constructor( - u'tag:yaml.org,2002:python/unicode', - type(self).construct_unicode) + self.add_constructor( + u'tag:yaml.org,2002:python/unicode', + type(self).construct_unicode) self.dictclass = dictclass def construct_yaml_map(self, node): diff --git a/salt/version.py b/salt/version.py index 6fc4ccbf61..6e5d4e74af 100644 --- a/salt/version.py +++ b/salt/version.py @@ -654,18 +654,22 @@ def system_information(): release = platform.release() if platform.win32_ver()[0]: import win32api # pylint: disable=3rd-party-module-not-gated - if ((sys.version_info.major == 2 and sys.version_info >= (2, 7, 12)) or - (sys.version_info.major == 3 and sys.version_info >= (3, 5, 2))): - if win32api.GetVersionEx(1)[8] > 1: - server = {'Vista': '2008Server', - '7': '2008ServerR2', - '8': '2012Server', - '8.1': '2012ServerR2', - '10': '2016Server'} - release = server.get(platform.release(), - 'UNKServer') - _, ver, sp, extra = platform.win32_ver() - version = ' '.join([release, ver, sp, extra]) + server = {'Vista': '2008Server', + '7': '2008ServerR2', + '8': '2012Server', + '8.1': '2012ServerR2', + '10': '2016Server'} + # Starting with Python 2.7.12 and 3.5.2 the `platform.uname()` function + # started reporting the Desktop version instead of the Server version on + # Server versions 
of Windows, so we need to look those up + # So, if you find a Server Platform that's a key in the server + # dictionary, then lookup the actual Server Release. + # If this is a Server Platform then `GetVersionEx` will return a number + # greater than 1. + if win32api.GetVersionEx(1)[8] > 1 and release in server: + release = server[release] + _, ver, sp, extra = platform.win32_ver() + version = ' '.join([release, ver, sp, extra]) system = [ ('system', platform.system()), diff --git a/tests/integration/files/file/base/_modules/runtests_helpers.py b/tests/integration/files/file/base/_modules/runtests_helpers.py index 0aa5df742a..876c7b29bd 100644 --- a/tests/integration/files/file/base/_modules/runtests_helpers.py +++ b/tests/integration/files/file/base/_modules/runtests_helpers.py @@ -51,6 +51,7 @@ def get_invalid_docs(): allow_failure = ( 'cmd.win_runas', 'cp.recv', + 'cp.recv_chunked', 'glance.warn_until', 'ipset.long_range', 'libcloud_dns.get_driver', diff --git a/tests/integration/files/file/base/jinja_salt_contains_function.sls b/tests/integration/files/file/base/jinja_salt_contains_function.sls new file mode 100644 index 0000000000..24978d1799 --- /dev/null +++ b/tests/integration/files/file/base/jinja_salt_contains_function.sls @@ -0,0 +1,10 @@ +{% set salt_foo_bar_exist = 'foo.bar' in salt %} +{% set salt_test_ping_exist = 'test.ping' in salt %} + +test-ping-exist: + test.succeed_without_changes: + - name: salt_test_ping_exist_{{ salt_test_ping_exist }} + +foo-bar-not-exist: + test.succeed_without_changes: + - name: salt_foo_bar_exist_{{ salt_foo_bar_exist }} diff --git a/tests/integration/modules/test_gem.py b/tests/integration/modules/test_gem.py index da902c9f66..f0a6d048aa 100644 --- a/tests/integration/modules/test_gem.py +++ b/tests/integration/modules/test_gem.py @@ -19,8 +19,9 @@ from tornado.httpclient import HTTPClient GEM = 'tidy' GEM_VER = '1.1.2' -OLD_GEM = 'thor' -OLD_VERSION = '0.17.0' +OLD_GEM = 'brass' +OLD_VERSION = '1.0.0' +NEW_VERSION = 
'1.2.1' GEM_LIST = [GEM, OLD_GEM] @@ -129,18 +130,18 @@ class GemModuleTest(ModuleCase): self.run_function('gem.install', [OLD_GEM], version=OLD_VERSION) gem_list = self.run_function('gem.list', [OLD_GEM]) - self.assertEqual({'thor': ['0.17.0']}, gem_list) + self.assertEqual({OLD_GEM: [OLD_VERSION]}, gem_list) self.run_function('gem.update', [OLD_GEM]) gem_list = self.run_function('gem.list', [OLD_GEM]) - self.assertEqual({'thor': ['0.19.4', '0.17.0']}, gem_list) + self.assertEqual({OLD_GEM: [NEW_VERSION, OLD_VERSION]}, gem_list) self.run_function('gem.uninstall', [OLD_GEM]) self.assertFalse(self.run_function('gem.list', [OLD_GEM])) - def test_udpate_system(self): + def test_update_system(self): ''' - gem.udpate_system + gem.update_system ''' ret = self.run_function('gem.update_system') self.assertTrue(ret) diff --git a/tests/integration/modules/test_git.py b/tests/integration/modules/test_git.py index 2720f4dceb..8c229c6df7 100644 --- a/tests/integration/modules/test_git.py +++ b/tests/integration/modules/test_git.py @@ -40,7 +40,7 @@ def _git_version(): git_version = subprocess.Popen( ['git', '--version'], shell=False, - close_fds=False if salt.utils.is_windows else True, + close_fds=False if salt.utils.is_windows() else True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0] except OSError: diff --git a/tests/integration/shell/test_auth.py b/tests/integration/shell/test_auth.py index 3c4c7000a2..fc80c7ac05 100644 --- a/tests/integration/shell/test_auth.py +++ b/tests/integration/shell/test_auth.py @@ -6,6 +6,7 @@ # Import python libs from __future__ import absolute_import +import logging import pwd import grp import random @@ -21,6 +22,8 @@ from salt.utils.pycrypto import gen_hash # Import 3rd-party libs from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin +log = logging.getLogger(__name__) + def gen_password(): ''' @@ -54,7 +57,6 @@ class AuthTest(ShellCase): group = 'saltops' def setUp(self): - # This is a little 
wasteful but shouldn't be a problem for user in (self.userA, self.userB): try: pwd.getpwnam(user) @@ -68,6 +70,21 @@ class AuthTest(ShellCase): self.run_call('group.add {0}'.format(self.group)) self.run_call('user.chgroups {0} {1} True'.format(self.userB, self.group)) + def tearDown(self): + for user in (self.userA, self.userB): + try: + pwd.getpwnam(user) + except KeyError: + pass + else: + self.run_call('user.delete {0}'.format(user)) + try: + grp.getgrnam(self.group) + except KeyError: + pass + else: + self.run_call('group.delete {0}'.format(self.group)) + def test_pam_auth_valid_user(self): ''' test that pam auth mechanism works with a valid user @@ -85,6 +102,7 @@ class AuthTest(ShellCase): cmd = ('-a pam "*" test.ping ' '--username {0} --password {1}'.format(self.userA, password)) resp = self.run_salt(cmd) + log.debug('resp = %s', resp) self.assertTrue( 'minion:' in resp ) @@ -121,10 +139,3 @@ class AuthTest(ShellCase): self.assertTrue( 'minion:' in resp ) - - def test_zzzz_tearDown(self): - for user in (self.userA, self.userB): - if pwd.getpwnam(user): - self.run_call('user.delete {0}'.format(user)) - if grp.getgrnam(self.group): - self.run_call('group.delete {0}'.format(self.group)) diff --git a/tests/integration/states/test_npm.py b/tests/integration/states/test_npm.py index 6aad2a6094..661b701a4d 100644 --- a/tests/integration/states/test_npm.py +++ b/tests/integration/states/test_npm.py @@ -15,6 +15,10 @@ from tests.support.mixins import SaltReturnAssertsMixin # Import salt libs import salt.utils +import salt.modules.cmdmod as cmd +from salt.utils.versions import LooseVersion + +MAX_NPM_VERSION = '5.0.0' @skipIf(salt.utils.which('npm') is None, 'npm not installed') @@ -38,7 +42,7 @@ class NpmStateTest(ModuleCase, SaltReturnAssertsMixin): ''' Determine if URL-referenced NPM module can be successfully installed. 
''' - ret = self.run_state('npm.installed', name='git://github.com/request/request') + ret = self.run_state('npm.installed', name='request/request#v2.81.1') self.assertSaltTrueReturn(ret) ret = self.run_state('npm.removed', name='git://github.com/request/request') self.assertSaltTrueReturn(ret) @@ -53,6 +57,8 @@ class NpmStateTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('npm.installed', name=None, pkgs=['pm2', 'grunt']) self.assertSaltTrueReturn(ret) + @skipIf(salt.utils.which('npm') and LooseVersion(cmd.run('npm -v')) >= LooseVersion(MAX_NPM_VERSION), + 'Skip with npm >= 5.0.0 until #41770 is fixed') @destructiveTest def test_npm_cache_clean(self): ''' diff --git a/tests/integration/states/test_pkg.py b/tests/integration/states/test_pkg.py index c2f112e026..814e3578c8 100644 --- a/tests/integration/states/test_pkg.py +++ b/tests/integration/states/test_pkg.py @@ -673,6 +673,44 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('pkg.removed', name=target) self.assertSaltTrueReturn(ret) + @requires_system_grains + def test_pkg_014_installed_missing_release(self, grains=None): # pylint: disable=unused-argument + ''' + Tests that a version number missing the release portion still resolves + as correctly installed. For example, version 2.0.2 instead of 2.0.2-1.el7 + ''' + os_family = grains.get('os_family', '') + + if os_family.lower() != 'redhat': + self.skipTest('Test only runs on RedHat OS family') + + pkg_targets = _PKG_TARGETS.get(os_family, []) + + # Make sure that we have targets that match the os_family. 
If this + # fails then the _PKG_TARGETS dict above needs to have an entry added, + # with two packages that are not installed before these tests are run + self.assertTrue(pkg_targets) + + target = pkg_targets[0] + version = self.run_function('pkg.version', [target]) + + # If this assert fails, we need to find new targets, this test needs to + # be able to test successful installation of packages, so this package + # needs to not be installed before we run the states below + self.assertFalse(version) + + ret = self.run_state( + 'pkg.installed', + name=target, + version=salt.utils.str_version_to_evr(version)[1], + refresh=False, + ) + self.assertSaltTrueReturn(ret) + + # Clean up + ret = self.run_state('pkg.removed', name=target) + self.assertSaltTrueReturn(ret) + @requires_salt_modules('pkg.group_install') @requires_system_grains def test_group_installed_handle_missing_package_group(self, grains=None): # pylint: disable=unused-argument diff --git a/tests/integration/states/test_renderers.py b/tests/integration/states/test_renderers.py index c7e916e8e3..256aabd230 100644 --- a/tests/integration/states/test_renderers.py +++ b/tests/integration/states/test_renderers.py @@ -21,3 +21,12 @@ class TestJinjaRenderer(ModuleCase): ret = self.run_function('state.sls', ['jinja_dot_notation']) for state_ret in ret.values(): self.assertTrue(state_ret['result']) + + def test_salt_contains_function(self): + ''' + Test if we are able to check if a function exists inside the "salt" + wrapper (AliasLoader) which is available on Jinja templates. 
+ ''' + ret = self.run_function('state.sls', ['jinja_salt_contains_function']) + for state_ret in ret.values(): + self.assertTrue(state_ret['result']) diff --git a/tests/unit/cache/test_localfs.py b/tests/unit/cache/test_localfs.py index 3aa2608a79..e1aa8ecc32 100644 --- a/tests/unit/cache/test_localfs.py +++ b/tests/unit/cache/test_localfs.py @@ -209,26 +209,26 @@ class LocalFSTest(TestCase, LoaderModuleMockMixin): with patch('os.remove', MagicMock(side_effect=OSError)): self.assertRaises(SaltCacheError, localfs.flush, bank='', key='key', cachedir='/var/cache/salt') - # 'ls' function tests: 3 + # 'list' function tests: 3 - def test_ls_no_base_dir(self): + def test_list_no_base_dir(self): ''' Tests that the ls function returns an empty list if the bank directory doesn't exist. ''' with patch('os.path.isdir', MagicMock(return_value=False)): - self.assertEqual(localfs.ls(bank='', cachedir=''), []) + self.assertEqual(localfs.list_(bank='', cachedir=''), []) - def test_ls_error_raised_no_bank_directory_access(self): + def test_list_error_raised_no_bank_directory_access(self): ''' Tests that a SaltCacheError is raised when there is a problem accessing the cache bank directory. ''' with patch('os.path.isdir', MagicMock(return_value=True)): with patch('os.listdir', MagicMock(side_effect=OSError)): - self.assertRaises(SaltCacheError, localfs.ls, bank='', cachedir='') + self.assertRaises(SaltCacheError, localfs.list_, bank='', cachedir='') - def test_ls_success(self): + def test_list_success(self): ''' Tests the return of the ls function containing bank entries. 
''' @@ -240,7 +240,7 @@ class LocalFSTest(TestCase, LoaderModuleMockMixin): # Now test the return of the ls function with patch.dict(localfs.__opts__, {'cachedir': tmp_dir}): - self.assertEqual(localfs.ls(bank='bank', cachedir=tmp_dir), ['key']) + self.assertEqual(localfs.list_(bank='bank', cachedir=tmp_dir), ['key']) # 'contains' function tests: 1 diff --git a/tests/unit/fileserver/test_gitfs.py b/tests/unit/fileserver/test_gitfs.py index 1920448e66..48cce5cf5a 100644 --- a/tests/unit/fileserver/test_gitfs.py +++ b/tests/unit/fileserver/test_gitfs.py @@ -9,8 +9,12 @@ import os import shutil import tempfile import textwrap -import pwd import logging +import stat +try: + import pwd +except ImportError: + pass # Import 3rd-party libs import yaml @@ -189,7 +193,6 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): self.integration_base_files = os.path.join(FILES, 'file', 'base') # Create the dir if it doesn't already exist - try: shutil.copytree(self.integration_base_files, self.tmp_repo_dir + '/') except OSError: @@ -203,7 +206,12 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): if 'USERNAME' not in os.environ: try: - os.environ['USERNAME'] = pwd.getpwuid(os.geteuid()).pw_name + import salt.utils + if salt.utils.is_windows(): + import salt.utils.win_functions + os.environ['USERNAME'] = salt.utils.win_functions.get_current_user() + else: + os.environ['USERNAME'] = pwd.getpwuid(os.geteuid()).pw_name except AttributeError: log.error('Unable to get effective username, falling back to ' '\'root\'.') @@ -219,14 +227,18 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): Remove the temporary git repository and gitfs cache directory to ensure a clean environment for each test. 
''' - shutil.rmtree(self.tmp_repo_dir) - shutil.rmtree(self.tmp_cachedir) - shutil.rmtree(self.tmp_sock_dir) + shutil.rmtree(self.tmp_repo_dir, onerror=self._rmtree_error) + shutil.rmtree(self.tmp_cachedir, onerror=self._rmtree_error) + shutil.rmtree(self.tmp_sock_dir, onerror=self._rmtree_error) del self.tmp_repo_dir del self.tmp_cachedir del self.tmp_sock_dir del self.integration_base_files + def _rmtree_error(self, func, path, excinfo): + os.chmod(path, stat.S_IWRITE) + func(path) + def test_file_list(self): ret = gitfs.file_list(LOAD) self.assertIn('testfile', ret) diff --git a/tests/unit/modules/test_boto_elb.py b/tests/unit/modules/test_boto_elb.py index 5d1ee5a9d1..b577f586a7 100644 --- a/tests/unit/modules/test_boto_elb.py +++ b/tests/unit/modules/test_boto_elb.py @@ -4,6 +4,7 @@ from __future__ import absolute_import import logging from copy import deepcopy +import pkg_resources # import Python Third Party Libs # pylint: disable=import-error @@ -15,28 +16,28 @@ except ImportError: HAS_BOTO = False try: - from moto import mock_ec2, mock_elb + from moto import mock_ec2_deprecated, mock_elb_deprecated HAS_MOTO = True except ImportError: HAS_MOTO = False - def mock_ec2(self): + def mock_ec2_deprecated(self): ''' - if the mock_ec2 function is not available due to import failure + if the mock_ec2_deprecated function is not available due to import failure this replaces the decorated function with stub_function. - Allows boto_vpc unit tests to use the @mock_ec2 decorator - without a "NameError: name 'mock_ec2' is not defined" error. + Allows boto_elb unit tests to use the @mock_ec2_deprecated decorator + without a "NameError: name 'mock_ec2_deprecated' is not defined" error. 
''' def stub_function(self): pass return stub_function - def mock_elb(self): + def mock_elb_deprecated(self): ''' - if the mock_ec2 function is not available due to import failure + if the mock_elb_deprecated function is not available due to import failure this replaces the decorated function with stub_function. - Allows boto_vpc unit tests to use the @mock_ec2 decorator - without a "NameError: name 'mock_ec2' is not defined" error. + Allows boto_elb unit tests to use the @mock_elb_deprecated decorator + without a "NameError: name 'mock_elb_deprecated' is not defined" error. ''' def stub_function(self): pass @@ -45,8 +46,10 @@ except ImportError: # Import Salt Libs import salt.config +import salt.ext.six as six import salt.loader import salt.modules.boto_elb as boto_elb +import salt.utils.versions # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin @@ -63,11 +66,32 @@ conn_parameters = {'region': region, 'key': access_key, 'keyid': secret_key, boto_conn_parameters = {'aws_access_key_id': access_key, 'aws_secret_access_key': secret_key} instance_parameters = {'instance_type': 't1.micro'} +required_moto = '0.3.7' +required_moto_py3 = '1.0.1' + + +def _has_required_moto(): + ''' + Returns True or False depending on if ``moto`` is installed and at the correct version, + depending on what version of Python is running these tests. 
+ ''' + if not HAS_MOTO: + return False + else: + moto_version = salt.utils.versions.LooseVersion(pkg_resources.get_distribution('moto').version) + if moto_version < required_moto: + return False + elif six.PY3 and moto_version < required_moto_py3: + return False + + return True @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(HAS_BOTO is False, 'The boto module must be installed.') @skipIf(HAS_MOTO is False, 'The moto module must be installed.') +@skipIf(_has_required_moto() is False, 'The moto module must be >= to {0} for ' + 'PY2 or {1} for PY3.'.format(required_moto, required_moto_py3)) class BotoElbTestCase(TestCase, LoaderModuleMockMixin): ''' TestCase for salt.modules.boto_elb module @@ -90,8 +114,8 @@ class BotoElbTestCase(TestCase, LoaderModuleMockMixin): # __virtual__ must be caller in order for _get_conn to be injected boto_elb.__virtual__() - @mock_ec2 - @mock_elb + @mock_ec2_deprecated + @mock_elb_deprecated def test_register_instances_valid_id_result_true(self): ''' tests that given a valid instance id and valid ELB that @@ -109,8 +133,8 @@ class BotoElbTestCase(TestCase, LoaderModuleMockMixin): **conn_parameters) self.assertEqual(True, register_result) - @mock_ec2 - @mock_elb + @mock_ec2_deprecated + @mock_elb_deprecated def test_register_instances_valid_id_string(self): ''' tests that given a string containing a instance id and valid ELB that @@ -132,8 +156,8 @@ class BotoElbTestCase(TestCase, LoaderModuleMockMixin): log.debug(load_balancer_refreshed.instances) self.assertEqual([reservations.instances[0].id], registered_instance_ids) - @mock_ec2 - @mock_elb + @mock_ec2_deprecated + @mock_elb_deprecated def test_deregister_instances_valid_id_result_true(self): ''' tests that given an valid id the boto_elb deregister_instances method @@ -153,8 +177,8 @@ class BotoElbTestCase(TestCase, LoaderModuleMockMixin): **conn_parameters) self.assertEqual(True, deregister_result) - @mock_ec2 - @mock_elb + @mock_ec2_deprecated + @mock_elb_deprecated def 
test_deregister_instances_valid_id_string(self): ''' tests that given an valid id the boto_elb deregister_instances method @@ -179,8 +203,8 @@ class BotoElbTestCase(TestCase, LoaderModuleMockMixin): load_balancer_refreshed.instances] self.assertEqual(actual_instances, expected_instances) - @mock_ec2 - @mock_elb + @mock_ec2_deprecated + @mock_elb_deprecated def test_deregister_instances_valid_id_list(self): ''' tests that given an valid ids in the form of a list that the boto_elb diff --git a/tests/unit/modules/test_boto_secgroup.py b/tests/unit/modules/test_boto_secgroup.py index 7a1d9c0197..ef90958dec 100644 --- a/tests/unit/modules/test_boto_secgroup.py +++ b/tests/unit/modules/test_boto_secgroup.py @@ -29,17 +29,17 @@ except ImportError: HAS_BOTO = False try: - from moto import mock_ec2 + from moto import mock_ec2_deprecated HAS_MOTO = True except ImportError: HAS_MOTO = False - def mock_ec2(self): + def mock_ec2_deprecated(self): ''' - if the mock_ec2 function is not available due to import failure + if the mock_ec2_deprecated function is not available due to import failure this replaces the decorated function with stub_function. - Allows boto_secgroup unit tests to use the @mock_ec2 decorator - without a "NameError: name 'mock_ec2' is not defined" error. + Allows boto_secgroup unit tests to use the @mock_ec2_deprecated decorator + without a "NameError: name 'mock_ec2_deprecated' is not defined" error. ''' def stub_function(self): pass @@ -117,7 +117,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): {'to_port': 80, 'from_port': 80, 'ip_protocol': u'tcp', 'cidr_ip': u'0.0.0.0/0'}] self.assertEqual(boto_secgroup._split_rules(rules), split_rules) - @mock_ec2 + @mock_ec2_deprecated def test_create_ec2_classic(self): ''' Test of creation of an EC2-Classic security group. 
The test ensures @@ -137,7 +137,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): secgroup_created_group[0].vpc_id] self.assertEqual(expected_create_result, secgroup_create_result) - @mock_ec2 + @mock_ec2_deprecated def test_create_ec2_vpc(self): ''' test of creation of an EC2-VPC security group. The test ensures that a @@ -155,7 +155,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): secgroup_create_result = [secgroup_created_group[0].name, secgroup_created_group[0].description, secgroup_created_group[0].vpc_id] self.assertEqual(expected_create_result, secgroup_create_result) - @mock_ec2 + @mock_ec2_deprecated def test_get_group_id_ec2_classic(self): ''' tests that given a name of a group in EC2-Classic that the correct @@ -177,7 +177,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): @skipIf(True, 'test skipped because moto does not yet support group' ' filters https://github.com/spulec/moto/issues/154') - @mock_ec2 + @mock_ec2_deprecated def test_get_group_id_ec2_vpc(self): ''' tests that given a name of a group in EC2-VPC that the correct @@ -197,7 +197,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): **conn_parameters) self.assertEqual(group_vpc.id, retrieved_group_id) - @mock_ec2 + @mock_ec2_deprecated def test_get_config_single_rule_group_name(self): ''' tests return of 'config' when given group name. get_config returns an OrderedDict. 
@@ -213,7 +213,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): group = conn.create_security_group(name=group_name, description=group_name) group.authorize(ip_protocol=ip_protocol, from_port=from_port, to_port=to_port, cidr_ip=cidr_ip) # setup the expected get_config result - expected_get_config_result = OrderedDict([('name', group.name), ('group_id', group.id), ('owner_id', u'111122223333'), + expected_get_config_result = OrderedDict([('name', group.name), ('group_id', group.id), ('owner_id', u'123456789012'), ('description', group.description), ('tags', {}), ('rules', [{'to_port': to_port, 'from_port': from_port, 'ip_protocol': ip_protocol, 'cidr_ip': cidr_ip}]), @@ -221,7 +221,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): secgroup_get_config_result = boto_secgroup.get_config(group_id=group.id, **conn_parameters) self.assertEqual(expected_get_config_result, secgroup_get_config_result) - @mock_ec2 + @mock_ec2_deprecated def test_exists_true_name_classic(self): ''' tests 'true' existence of a group in EC2-Classic when given name @@ -234,11 +234,11 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): salt_exists_result = boto_secgroup.exists(name=group_name, **conn_parameters) self.assertTrue(salt_exists_result) - @mock_ec2 + @mock_ec2_deprecated def test_exists_false_name_classic(self): pass - @mock_ec2 + @mock_ec2_deprecated def test_exists_true_name_vpc(self): ''' tests 'true' existence of a group in EC2-VPC when given name and vpc_id @@ -250,7 +250,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): salt_exists_result = boto_secgroup.exists(name=group_name, vpc_id=vpc_id, **conn_parameters) self.assertTrue(salt_exists_result) - @mock_ec2 + @mock_ec2_deprecated def test_exists_false_name_vpc(self): ''' tests 'false' existence of a group in vpc when given name and vpc_id @@ -259,7 +259,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): salt_exists_result = boto_secgroup.exists(group_name, 
vpc_id=vpc_id, **conn_parameters) self.assertFalse(salt_exists_result) - @mock_ec2 + @mock_ec2_deprecated def test_exists_true_group_id(self): ''' tests 'true' existence of a group when given group_id @@ -271,7 +271,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): salt_exists_result = boto_secgroup.exists(group_id=group.id, **conn_parameters) self.assertTrue(salt_exists_result) - @mock_ec2 + @mock_ec2_deprecated def test_exists_false_group_id(self): ''' tests 'false' existence of a group when given group_id @@ -280,7 +280,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): salt_exists_result = boto_secgroup.exists(group_id=group_id, **conn_parameters) self.assertFalse(salt_exists_result) - @mock_ec2 + @mock_ec2_deprecated def test_delete_group_ec2_classic(self): ''' test deletion of a group in EC2-Classic. Test does the following: @@ -306,11 +306,11 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): actual_groups = [group.id for group in conn.get_all_security_groups()] self.assertEqual(expected_groups, actual_groups) - @mock_ec2 + @mock_ec2_deprecated def test_delete_group_name_ec2_vpc(self): pass - @mock_ec2 + @mock_ec2_deprecated def test__get_conn_true(self): ''' tests ensures that _get_conn returns an boto.ec2.connection.EC2Connection object. diff --git a/tests/unit/modules/test_boto_vpc.py b/tests/unit/modules/test_boto_vpc.py index 2f53edf5e5..e5b337c2d1 100644 --- a/tests/unit/modules/test_boto_vpc.py +++ b/tests/unit/modules/test_boto_vpc.py @@ -40,17 +40,17 @@ except ImportError: try: import moto - from moto import mock_ec2 + from moto import mock_ec2_deprecated HAS_MOTO = True except ImportError: HAS_MOTO = False - def mock_ec2(self): + def mock_ec2_deprecated(self): ''' - if the mock_ec2 function is not available due to import failure + if the mock_ec2_deprecated function is not available due to import failure this replaces the decorated function with stub_function. 
- Allows boto_vpc unit tests to use the @mock_ec2 decorator - without a "NameError: name 'mock_ec2' is not defined" error. + Allows boto_vpc unit tests to use the @mock_ec2_deprecated decorator + without a "NameError: name 'mock_ec2_deprecated' is not defined" error. ''' def stub_function(self): @@ -63,7 +63,7 @@ except ImportError: # which was added in boto 2.8.0 # https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12 required_boto_version = '2.8.0' -required_moto_version = '0.3.7' +required_moto_version = '1.0.0' region = 'us-east-1' access_key = 'GKTADJGHEIQSXMKKRBJ08H' @@ -272,7 +272,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): TestCase for salt.modules.boto_vpc module ''' - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_id_and_a_vpc_exists_the_vpc_exists_method_returns_true(self): ''' Tests checking vpc existence via id when the vpc already exists @@ -283,7 +283,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_id_and_a_vpc_does_not_exist_the_vpc_exists_method_returns_false( self): ''' @@ -295,7 +295,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_name_and_a_vpc_exists_the_vpc_exists_method_returns_true(self): ''' Tests checking vpc existence via name when vpc exists @@ -306,7 +306,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_name_and_a_vpc_does_not_exist_the_vpc_exists_method_returns_false( self): ''' @@ -318,7 +318,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(vpc_exists_result['exists']) - 
@mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_tags_and_a_vpc_exists_the_vpc_exists_method_returns_true(self): ''' Tests checking vpc existence via tag when vpc exists @@ -329,7 +329,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_tags_and_a_vpc_does_not_exist_the_vpc_exists_method_returns_false( self): ''' @@ -341,7 +341,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_cidr_and_a_vpc_exists_the_vpc_exists_method_returns_true(self): ''' Tests checking vpc existence via cidr when vpc exists @@ -352,7 +352,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_cidr_and_a_vpc_does_not_exist_the_vpc_exists_method_returns_false( self): ''' @@ -364,7 +364,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_checking_if_a_vpc_exists_but_providing_no_filters_the_vpc_exists_method_raises_a_salt_invocation_error(self): ''' @@ -375,7 +375,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): 'cidr or tags.'): boto_vpc.exists(**conn_parameters) - @mock_ec2 + @mock_ec2_deprecated def test_get_vpc_id_method_when_filtering_by_name(self): ''' Tests getting vpc id when filtering by name @@ -386,7 +386,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(vpc.id, get_id_result['id']) - @mock_ec2 + @mock_ec2_deprecated def 
test_get_vpc_id_method_when_filtering_by_invalid_name(self): ''' Tests getting vpc id when filtering by invalid name @@ -397,7 +397,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(get_id_result['id'], None) - @mock_ec2 + @mock_ec2_deprecated def test_get_vpc_id_method_when_filtering_by_cidr(self): ''' Tests getting vpc id when filtering by cidr @@ -408,7 +408,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(vpc.id, get_id_result['id']) - @mock_ec2 + @mock_ec2_deprecated def test_get_vpc_id_method_when_filtering_by_invalid_cidr(self): ''' Tests getting vpc id when filtering by invalid cidr @@ -419,7 +419,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(get_id_result['id'], None) - @mock_ec2 + @mock_ec2_deprecated def test_get_vpc_id_method_when_filtering_by_tags(self): ''' Tests getting vpc id when filtering by tags @@ -430,7 +430,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(vpc.id, get_id_result['id']) - @mock_ec2 + @mock_ec2_deprecated def test_get_vpc_id_method_when_filtering_by_invalid_tags(self): ''' Tests getting vpc id when filtering by invalid tags @@ -441,7 +441,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(get_id_result['id'], None) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_get_vpc_id_method_when_not_providing_filters_raises_a_salt_invocation_error(self): ''' @@ -450,7 +450,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): with self.assertRaisesRegex(SaltInvocationError, 'At least one of the following must be provided: vpc_id, vpc_name, cidr or tags.'): boto_vpc.get_id(**conn_parameters) - @mock_ec2 + @mock_ec2_deprecated def test_get_vpc_id_method_when_more_than_one_vpc_is_matched_raises_a_salt_command_execution_error(self): ''' Tests getting vpc id but 
providing no filters @@ -461,7 +461,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): with self.assertRaisesRegex(CommandExecutionError, 'Found more than one VPC matching the criteria.'): boto_vpc.get_id(cidr=u'10.0.0.0/24', **conn_parameters) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_a_vpc_succeeds_the_create_vpc_method_returns_true(self): ''' tests True VPC created. @@ -470,7 +470,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_a_vpc_and_specifying_a_vpc_name_succeeds_the_create_vpc_method_returns_true(self): ''' tests True VPC created. @@ -479,7 +479,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_a_vpc_and_specifying_tags_succeeds_the_create_vpc_method_returns_true(self): ''' tests True VPC created. @@ -488,7 +488,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_creating_a_vpc_fails_the_create_vpc_method_returns_false(self): ''' @@ -499,7 +499,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(vpc_creation_result['created']) self.assertTrue('error' in vpc_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_deleting_an_existing_vpc_the_delete_vpc_method_returns_true(self): ''' Tests deleting an existing vpc @@ -510,7 +510,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_deletion_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_deleting_a_non_existent_vpc_the_delete_vpc_method_returns_false(self): ''' Tests deleting a non-existent vpc @@ -519,7 +519,7 @@ class 
BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(delete_vpc_result['deleted']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_describing_vpc_by_id_it_returns_the_dict_of_properties_returns_true(self): ''' Tests describing parameters via vpc id if vpc exist @@ -546,7 +546,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(describe_vpc, {'vpc': vpc_properties}) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_describing_vpc_by_id_it_returns_the_dict_of_properties_returns_false(self): ''' Tests describing parameters via vpc id if vpc does not exist @@ -557,7 +557,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(describe_vpc['vpc']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_describing_vpc_by_id_on_connection_error_it_returns_error(self): ''' @@ -570,7 +570,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): describe_result = boto_vpc.describe(vpc_id=vpc.id, **conn_parameters) self.assertTrue('error' in describe_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_describing_vpc_but_providing_no_vpc_id_the_describe_method_raises_a_salt_invocation_error(self): ''' Tests describing vpc without vpc id @@ -588,7 +588,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): .format(required_boto_version, _get_boto_version())) @skipIf(_has_required_moto() is False, 'The moto version must be >= to version {0}'.format(required_moto_version)) class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated def test_get_subnet_association_single_subnet(self): ''' tests that given multiple subnet ids in the same VPC that the VPC ID is @@ -601,7 +601,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): **conn_parameters) self.assertEqual(vpc.id, 
subnet_association['vpc_id']) - @mock_ec2 + @mock_ec2_deprecated def test_get_subnet_association_multiple_subnets_same_vpc(self): ''' tests that given multiple subnet ids in the same VPC that the VPC ID is @@ -614,7 +614,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): **conn_parameters) self.assertEqual(vpc.id, subnet_association['vpc_id']) - @mock_ec2 + @mock_ec2_deprecated def test_get_subnet_association_multiple_subnets_different_vpc(self): ''' tests that given multiple subnet ids in different VPCs that False is @@ -628,7 +628,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): **conn_parameters) self.assertEqual(set(subnet_association['vpc_ids']), set([vpc_a.id, vpc_b.id])) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_a_subnet_succeeds_the_create_subnet_method_returns_true(self): ''' Tests creating a subnet successfully @@ -640,7 +640,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(subnet_creation_result['created']) self.assertTrue('id' in subnet_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_a_subnet_and_specifying_a_name_succeeds_the_create_subnet_method_returns_true(self): ''' Tests creating a subnet successfully when specifying a name @@ -651,7 +651,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(subnet_creation_result['created']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_a_subnet_and_specifying_tags_succeeds_the_create_subnet_method_returns_true(self): ''' Tests creating a subnet successfully when specifying a tag @@ -663,7 +663,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(subnet_creation_result['created']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def 
test_that_when_creating_a_subnet_fails_the_create_subnet_method_returns_error(self): ''' @@ -675,7 +675,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): subnet_creation_result = boto_vpc.create_subnet(vpc.id, '10.0.0.0/24', **conn_parameters) self.assertTrue('error' in subnet_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_deleting_an_existing_subnet_the_delete_subnet_method_returns_true(self): ''' Tests deleting an existing subnet @@ -687,7 +687,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(subnet_deletion_result['deleted']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_deleting_a_non_existent_subnet_the_delete_vpc_method_returns_false(self): ''' Tests deleting a subnet that doesn't exist @@ -695,7 +695,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): delete_subnet_result = boto_vpc.delete_subnet(subnet_id='1234', **conn_parameters) self.assertTrue('error' in delete_subnet_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_subnet_exists_by_id_the_subnet_exists_method_returns_true(self): ''' Tests checking if a subnet exists when it does exist @@ -707,7 +707,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(subnet_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_a_subnet_does_not_exist_the_subnet_exists_method_returns_false(self): ''' Tests checking if a subnet exists which doesn't exist @@ -716,7 +716,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(subnet_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_subnet_exists_by_name_the_subnet_exists_method_returns_true(self): ''' Tests checking subnet existence by name @@ -728,7 +728,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): 
self.assertTrue(subnet_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_subnet_exists_by_name_the_subnet_does_not_exist_the_subnet_method_returns_false(self): ''' Tests checking subnet existence by name when it doesn't exist @@ -740,7 +740,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(subnet_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_subnet_exists_by_tags_the_subnet_exists_method_returns_true(self): ''' Tests checking subnet existence by tag @@ -752,7 +752,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(subnet_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_subnet_exists_by_tags_the_subnet_does_not_exist_the_subnet_method_returns_false(self): ''' Tests checking subnet existence by tag when subnet doesn't exist @@ -764,7 +764,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(subnet_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_checking_if_a_subnet_exists_but_providing_no_filters_the_subnet_exists_method_raises_a_salt_invocation_error(self): ''' @@ -776,7 +776,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): boto_vpc.subnet_exists(**conn_parameters) @skipIf(True, 'Skip these tests while investigating failures') - @mock_ec2 + @mock_ec2_deprecated def test_that_describe_subnet_by_id_for_existing_subnet_returns_correct_data(self): ''' Tests describing a subnet by id. 
@@ -791,7 +791,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(set(describe_subnet_results['subnet'].keys()), set(['id', 'cidr_block', 'availability_zone', 'tags'])) - @mock_ec2 + @mock_ec2_deprecated def test_that_describe_subnet_by_id_for_non_existent_subnet_returns_none(self): ''' Tests describing a non-existent subnet by id. @@ -805,7 +805,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(describe_subnet_results['subnet'], None) @skipIf(True, 'Skip these tests while investigating failures') - @mock_ec2 + @mock_ec2_deprecated def test_that_describe_subnet_by_name_for_existing_subnet_returns_correct_data(self): ''' Tests describing a subnet by name. @@ -820,7 +820,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(set(describe_subnet_results['subnet'].keys()), set(['id', 'cidr_block', 'availability_zone', 'tags'])) - @mock_ec2 + @mock_ec2_deprecated def test_that_describe_subnet_by_name_for_non_existent_subnet_returns_none(self): ''' Tests describing a non-existent subnet by id. @@ -834,7 +834,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(describe_subnet_results['subnet'], None) @skipIf(True, 'Skip these tests while investigating failures') - @mock_ec2 + @mock_ec2_deprecated def test_that_describe_subnets_by_id_for_existing_subnet_returns_correct_data(self): ''' Tests describing multiple subnets by id. @@ -852,7 +852,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): set(['id', 'cidr_block', 'availability_zone', 'tags'])) @skipIf(True, 'Skip these tests while investigating failures') - @mock_ec2 + @mock_ec2_deprecated def test_that_describe_subnets_by_name_for_existing_subnets_returns_correct_data(self): ''' Tests describing multiple subnets by id. 
@@ -869,7 +869,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(set(describe_subnet_results['subnets'][0].keys()), set(['id', 'cidr_block', 'availability_zone', 'tags'])) - @mock_ec2 + @mock_ec2_deprecated def test_create_subnet_passes_availability_zone(self): ''' Tests that the availability_zone kwarg is passed on to _create_resource @@ -890,7 +890,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): ' or equal to version {}. Installed: {}' .format(required_boto_version, _get_boto_version())) class BotoVpcInternetGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_an_internet_gateway_the_create_internet_gateway_method_returns_true(self): ''' Tests creating an internet gateway successfully (with no vpc id or name) @@ -901,7 +901,7 @@ class BotoVpcInternetGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): keyid=access_key) self.assertTrue(igw_creation_result.get('created')) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_an_internet_gateway_with_non_existent_vpc_the_create_internet_gateway_method_returns_an_error(self): ''' Tests that creating an internet gateway for a non-existent VPC fails. @@ -913,7 +913,7 @@ class BotoVpcInternetGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): vpc_name='non-existent-vpc') self.assertTrue('error' in igw_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_an_internet_gateway_with_vpc_name_specified_the_create_internet_gateway_method_returns_true(self): ''' Tests creating an internet gateway with vpc name specified. 
@@ -928,7 +928,7 @@ class BotoVpcInternetGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(igw_creation_result.get('created')) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_an_internet_gateway_with_vpc_id_specified_the_create_internet_gateway_method_returns_true(self): ''' Tests creating an internet gateway with vpc name specified. @@ -951,7 +951,7 @@ class BotoVpcInternetGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): ' or equal to version {}. Installed: {}' .format(required_boto_version, _get_boto_version())) class BotoVpcNatGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_an_nat_gateway_the_create_nat_gateway_method_returns_true(self): ''' Tests creating an nat gateway successfully (with subnet_id specified) @@ -965,7 +965,7 @@ class BotoVpcNatGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): keyid=access_key) self.assertTrue(ngw_creation_result.get('created')) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_an_nat_gateway_with_non_existent_subnet_the_create_nat_gateway_method_returns_an_error(self): ''' Tests that creating an nat gateway for a non-existent subnet fails. @@ -977,7 +977,7 @@ class BotoVpcNatGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): subnet_name='non-existent-subnet') self.assertTrue('error' in ngw_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_an_nat_gateway_with_subnet_name_specified_the_create_nat_gateway_method_returns_true(self): ''' Tests creating an nat gateway with subnet name specified. @@ -1000,7 +1000,7 @@ class BotoVpcNatGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): ' or equal to version {}. 
Installed: {}' .format(required_boto_version, _get_boto_version())) class BotoVpcCustomerGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_a_customer_gateway_the_create_customer_gateway_method_returns_true(self): ''' @@ -1010,7 +1010,7 @@ class BotoVpcCustomerGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): gw_creation_result = boto_vpc.create_customer_gateway('ipsec.1', '10.1.1.1', None) self.assertTrue(gw_creation_result.get('created')) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_checking_if_a_subnet_exists_by_id_the_subnet_exists_method_returns_true(self): ''' @@ -1021,7 +1021,7 @@ class BotoVpcCustomerGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): gw_exists_result = boto_vpc.customer_gateway_exists(customer_gateway_id=gw_creation_result['id']) self.assertTrue(gw_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. 
Skipping for now.') def test_that_when_a_subnet_does_not_exist_the_subnet_exists_method_returns_false(self): ''' @@ -1039,7 +1039,7 @@ class BotoVpcCustomerGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): .format(required_boto_version, _get_boto_version())) @skipIf(_has_required_moto() is False, 'The moto version must be >= to version {0}'.format(required_moto_version)) class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_dhcp_options_succeeds_the_create_dhcp_options_method_returns_true(self): ''' Tests creating dhcp options successfully @@ -1048,7 +1048,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_options_creation_result['created']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_dhcp_options_and_specifying_a_name_succeeds_the_create_dhcp_options_method_returns_true( self): @@ -1060,7 +1060,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_options_creation_result['created']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_dhcp_options_and_specifying_tags_succeeds_the_create_dhcp_options_method_returns_true( self): ''' @@ -1071,7 +1071,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_options_creation_result['created']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_creating_dhcp_options_fails_the_create_dhcp_options_method_returns_error(self): ''' @@ -1082,7 +1082,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): r = dhcp_options_creation_result = boto_vpc.create_dhcp_options(**dhcp_options_parameters) self.assertTrue('error' in r) - @mock_ec2 + @mock_ec2_deprecated def 
test_that_when_associating_an_existing_dhcp_options_set_to_an_existing_vpc_the_associate_dhcp_options_method_returns_true( self): ''' @@ -1096,7 +1096,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_options_association_result['associated']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_associating_a_non_existent_dhcp_options_set_to_an_existing_vpc_the_associate_dhcp_options_method_returns_error( self): ''' @@ -1108,7 +1108,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue('error' in dhcp_options_association_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_associating_an_existing_dhcp_options_set_to_a_non_existent_vpc_the_associate_dhcp_options_method_returns_false( self): ''' @@ -1121,7 +1121,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue('error' in dhcp_options_association_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_dhcp_options_set_to_an_existing_vpc_succeeds_the_associate_new_dhcp_options_method_returns_true( self): ''' @@ -1133,7 +1133,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_creation_result['created']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_creating_and_associating_dhcp_options_set_to_an_existing_vpc_fails_creating_the_dhcp_options_the_associate_new_dhcp_options_method_raises_exception( self): @@ -1147,7 +1147,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): r = boto_vpc.associate_new_dhcp_options_to_vpc(vpc.id, **dhcp_options_parameters) self.assertTrue('error' in r) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def 
test_that_when_creating_and_associating_dhcp_options_set_to_an_existing_vpc_fails_associating_the_dhcp_options_the_associate_new_dhcp_options_method_raises_exception(self): ''' @@ -1160,7 +1160,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): r = boto_vpc.associate_new_dhcp_options_to_vpc(vpc.id, **dhcp_options_parameters) self.assertTrue('error' in r) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_dhcp_options_set_to_a_non_existent_vpc_the_dhcp_options_the_associate_new_dhcp_options_method_returns_false( self): ''' @@ -1170,7 +1170,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): r = boto_vpc.create_dhcp_options(vpc_name='fake', **dhcp_options_parameters) self.assertTrue('error' in r) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_dhcp_options_exists_the_dhcp_options_exists_method_returns_true(self): ''' Tests existence of dhcp options successfully @@ -1181,7 +1181,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_options_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_dhcp_options_do_not_exist_the_dhcp_options_exists_method_returns_false(self): ''' Tests existence of dhcp options failure @@ -1189,7 +1189,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): r = boto_vpc.dhcp_options_exists('fake', **conn_parameters) self.assertFalse(r['exists']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_checking_if_dhcp_options_exists_but_providing_no_filters_the_dhcp_options_exists_method_raises_a_salt_invocation_error(self): ''' @@ -1206,7 +1206,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): ' or equal to version {}. 
Installed: {}' .format(required_boto_version, _get_boto_version())) class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_network_acl_for_an_existing_vpc_the_create_network_acl_method_returns_true(self): ''' Tests creation of network acl with existing vpc @@ -1217,7 +1217,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_network_acl_for_an_existing_vpc_and_specifying_a_name_the_create_network_acl_method_returns_true( self): ''' @@ -1229,7 +1229,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_network_acl_for_an_existing_vpc_and_specifying_tags_the_create_network_acl_method_returns_true( self): ''' @@ -1241,7 +1241,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_network_acl_for_a_non_existent_vpc_the_create_network_acl_method_returns_an_error(self): ''' Tests creation of network acl with a non-existent vpc @@ -1250,7 +1250,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue('error' in network_acl_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. 
Skipping for now.') def test_that_when_creating_network_acl_fails_the_create_network_acl_method_returns_false(self): ''' @@ -1264,7 +1264,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(network_acl_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_deleting_an_existing_network_acl_the_delete_network_acl_method_returns_true(self): ''' Tests deletion of existing network acl successfully @@ -1276,7 +1276,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_deletion_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_deleting_a_non_existent_network_acl_the_delete_network_acl_method_returns_an_error(self): ''' Tests deleting a non-existent network acl @@ -1285,7 +1285,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue('error' in network_acl_deletion_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_a_network_acl_exists_the_network_acl_exists_method_returns_true(self): ''' Tests existence of network acl @@ -1297,7 +1297,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_deletion_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_a_network_acl_does_not_exist_the_network_acl_exists_method_returns_false(self): ''' Tests checking network acl does not exist @@ -1306,7 +1306,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(network_acl_deletion_result['exists']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_checking_if_network_acl_exists_but_providing_no_filters_the_network_acl_exists_method_raises_a_salt_invocation_error(self): ''' @@ -1318,7 +1318,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): ): boto_vpc.dhcp_options_exists(**conn_parameters) - 
@mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_a_network_acl_entry_successfully_the_create_network_acl_entry_method_returns_true(self): ''' @@ -1333,7 +1333,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_entry_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_a_network_acl_entry_for_a_non_existent_network_acl_the_create_network_acl_entry_method_returns_false( self): @@ -1345,7 +1345,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(network_acl_entry_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_replacing_a_network_acl_entry_successfully_the_replace_network_acl_entry_method_returns_true( self): @@ -1362,7 +1362,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_entry_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_replacing_a_network_acl_entry_for_a_non_existent_network_acl_the_replace_network_acl_entry_method_returns_false( self): @@ -1373,7 +1373,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): **conn_parameters) self.assertFalse(network_acl_entry_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. 
Skipping for now.') def test_that_when_deleting_an_existing_network_acl_entry_the_delete_network_acl_entry_method_returns_true(self): ''' @@ -1388,7 +1388,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_entry_deletion_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_deleting_a_non_existent_network_acl_entry_the_delete_network_acl_entry_method_returns_false( self): @@ -1400,7 +1400,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(network_acl_entry_deletion_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_associating_an_existing_network_acl_to_an_existing_subnet_the_associate_network_acl_method_returns_true( self): @@ -1416,7 +1416,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_association_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_associating_a_non_existent_network_acl_to_an_existing_subnet_the_associate_network_acl_method_returns_an_error( self): ''' @@ -1430,7 +1430,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue('error' in network_acl_association_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_associating_an_existing_network_acl_to_a_non_existent_subnet_the_associate_network_acl_method_returns_false( self): @@ -1445,7 +1445,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(network_acl_association_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. 
Skipping for now.') def test_that_when_creating_and_associating_a_network_acl_to_a_subnet_succeeds_the_associate_new_network_acl_to_subnet_method_returns_true( self): @@ -1460,7 +1460,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_creation_and_association_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_and_associating_a_network_acl_to_a_subnet_and_specifying_a_name_succeeds_the_associate_new_network_acl_to_subnet_method_returns_true( self): @@ -1476,7 +1476,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_creation_and_association_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_and_associating_a_network_acl_to_a_subnet_and_specifying_tags_succeeds_the_associate_new_network_acl_to_subnet_method_returns_true( self): @@ -1493,7 +1493,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_creation_and_association_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. 
Skipping for now.') def test_that_when_creating_and_associating_a_network_acl_to_a_non_existent_subnet_the_associate_new_network_acl_to_subnet_method_returns_false( self): @@ -1507,7 +1507,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(network_acl_creation_and_association_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_a_network_acl_to_a_non_existent_vpc_the_associate_new_network_acl_to_subnet_method_returns_an_error( self): ''' @@ -1520,7 +1520,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue('error' in network_acl_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_disassociating_network_acl_succeeds_the_disassociate_network_acl_method_should_return_true(self): ''' @@ -1533,7 +1533,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_disassociate_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_disassociating_network_acl_for_a_non_existent_vpc_the_disassociate_network_acl_method_should_return_false( self): @@ -1547,7 +1547,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(dhcp_disassociate_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_disassociating_network_acl_for_a_non_existent_subnet_the_disassociate_network_acl_method_should_return_false( self): @@ -1568,7 +1568,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): ' or equal to version {}. 
Installed: {}' .format(required_boto_version, _get_boto_version())) class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_a_route_table_succeeds_the_create_route_table_method_returns_true(self): ''' @@ -1580,7 +1580,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(route_table_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_a_route_table_on_a_non_existent_vpc_the_create_route_table_method_returns_false(self): ''' @@ -1590,7 +1590,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(route_table_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_deleting_a_route_table_succeeds_the_delete_route_table_method_returns_true(self): ''' @@ -1603,7 +1603,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(route_table_deletion_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_deleting_a_non_existent_route_table_the_delete_route_table_method_returns_false(self): ''' @@ -1613,7 +1613,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(route_table_deletion_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. 
Skipping for now.') def test_that_when_route_table_exists_the_route_table_exists_method_returns_true(self): ''' @@ -1626,7 +1626,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(route_table_existence_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_route_table_does_not_exist_the_route_table_exists_method_returns_false(self): ''' @@ -1636,7 +1636,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(route_table_existence_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_checking_if_a_route_table_exists_but_providing_no_filters_the_route_table_exists_method_raises_a_salt_invocation_error(self): ''' @@ -1648,7 +1648,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): ): boto_vpc.dhcp_options_exists(**conn_parameters) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_associating_a_route_table_succeeds_the_associate_route_table_method_should_return_the_association_id( self): @@ -1663,7 +1663,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(association_id) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_associating_a_route_table_with_a_non_existent_route_table_the_associate_route_table_method_should_return_false( self): @@ -1677,7 +1677,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(association_id) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. 
Skipping for now.') def test_that_when_associating_a_route_table_with_a_non_existent_subnet_the_associate_route_table_method_should_return_false( self): @@ -1691,7 +1691,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(association_id) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_disassociating_a_route_table_succeeds_the_disassociate_route_table_method_should_return_true( self): @@ -1708,7 +1708,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_disassociate_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_a_route_succeeds_the_create_route_method_should_return_true(self): ''' @@ -1721,7 +1721,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(route_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_a_route_with_a_non_existent_route_table_the_create_route_method_should_return_false( self): @@ -1732,7 +1732,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(route_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_deleting_a_route_succeeds_the_delete_route_method_should_return_true(self): ''' @@ -1745,7 +1745,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(route_deletion_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. 
Skipping for now.') def test_that_when_deleting_a_route_with_a_non_existent_route_table_the_delete_route_method_should_return_false( self): @@ -1756,7 +1756,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(route_deletion_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_replacing_a_route_succeeds_the_replace_route_method_should_return_true(self): ''' @@ -1769,7 +1769,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(route_replacing_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_replacing_a_route_with_a_non_existent_route_table_the_replace_route_method_should_return_false( self): @@ -1790,7 +1790,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): @skipIf(_has_required_moto() is False, 'The moto version must be >= to version {0}'.format(required_moto_version)) class BotoVpcPeeringConnectionsTest(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated def test_request_vpc_peering_connection(self): ''' Run with 2 vpc ids and returns a message @@ -1803,7 +1803,7 @@ class BotoVpcPeeringConnectionsTest(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): peer_vpc_id=other_vpc.id, **conn_parameters)) - @mock_ec2 + @mock_ec2_deprecated def test_raises_error_if_both_vpc_name_and_vpc_id_are_specified(self): ''' Must specify only one diff --git a/tests/unit/modules/test_chef.py b/tests/unit/modules/test_chef.py index 0899ce3a47..03a892a241 100644 --- a/tests/unit/modules/test_chef.py +++ b/tests/unit/modules/test_chef.py @@ -36,7 +36,8 @@ class ChefTestCase(TestCase, LoaderModuleMockMixin): ''' Test if it execute a chef client run and return a dict ''' - self.assertDictEqual(chef.client(), {}) + with patch.dict(chef.__opts__, {'cachedir': 
r'c:\salt\var\cache\salt\minion'}): + self.assertDictEqual(chef.client(), {}) # 'solo' function tests: 1 @@ -44,4 +45,5 @@ class ChefTestCase(TestCase, LoaderModuleMockMixin): ''' Test if it execute a chef solo run and return a dict ''' - self.assertDictEqual(chef.solo('/dev/sda1'), {}) + with patch.dict(chef.__opts__, {'cachedir': r'c:\salt\var\cache\salt\minion'}): + self.assertDictEqual(chef.solo('/dev/sda1'), {}) diff --git a/tests/unit/modules/test_cmdmod.py b/tests/unit/modules/test_cmdmod.py index ac57e059d7..2d5ae79472 100644 --- a/tests/unit/modules/test_cmdmod.py +++ b/tests/unit/modules/test_cmdmod.py @@ -263,6 +263,7 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.utils.fopen', mock_open(read_data=MOCK_SHELL_FILE)): self.assertFalse(cmdmod._is_valid_shell('foo')) + @skipIf(salt.utils.is_windows(), 'Do not run on Windows') def test_os_environment_remains_intact(self): ''' Make sure the OS environment is not tainted after running a command diff --git a/tests/unit/modules/test_dockermod.py b/tests/unit/modules/test_dockermod.py index 2800bc92f2..3d0328c180 100644 --- a/tests/unit/modules/test_dockermod.py +++ b/tests/unit/modules/test_dockermod.py @@ -136,10 +136,11 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(docker_mod.__salt__, {'mine.send': mine_send, 'container_resource.run': MagicMock(), - 'cp.cache_file': MagicMock(return_value=False), - 'docker.get_client_args': client_args_mock}): - with patch.object(docker_mod, '_get_client', client): - command('container', *args) + 'cp.cache_file': MagicMock(return_value=False)}): + with patch.dict(docker_mod.__utils__, + {'docker.get_client_args': client_args_mock}): + with patch.object(docker_mod, '_get_client', client): + command('container', *args) mine_send.assert_called_with('docker.ps', verbose=True, all=True, host=True) @@ -678,9 +679,9 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual({"retcode": 0, "comment": "container 
cmd"}, ret) def test_images_with_empty_tags(self): - """ + ''' docker 1.12 reports also images without tags with `null`. - """ + ''' client = Mock() client.api_version = '1.24' client.images = Mock( @@ -696,3 +697,51 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): result = docker_mod.images() self.assertEqual(result, {'sha256:abcdefg': {'RepoTags': ['image:latest']}}) + + def test_compare_container_image_id_resolution(self): + ''' + Test comparing two containers when one's inspect output is an ID and + not formatted in image:tag notation. + ''' + def _inspect_container_effect(id_): + return { + 'container1': {'Config': {'Image': 'realimage:latest'}, + 'HostConfig': {}}, + 'container2': {'Config': {'Image': 'image_id'}, + 'HostConfig': {}}, + }[id_] + + def _inspect_image_effect(id_): + return { + 'realimage:latest': {'Id': 'image_id'}, + 'image_id': {'Id': 'image_id'}, + }[id_] + + inspect_container_mock = MagicMock(side_effect=_inspect_container_effect) + inspect_image_mock = MagicMock(side_effect=_inspect_image_effect) + + with patch.object(docker_mod, 'inspect_container', inspect_container_mock): + with patch.object(docker_mod, 'inspect_image', inspect_image_mock): + ret = docker_mod.compare_container('container1', 'container2') + self.assertEqual(ret, {}) + + def test_resolve_tag(self): + ''' + Test the resolve_tag function + ''' + with_prefix = 'docker.io/foo:latest' + no_prefix = 'bar:latest' + with patch.object(docker_mod, + 'list_tags', + MagicMock(return_value=[with_prefix])): + self.assertEqual(docker_mod.resolve_tag('foo'), with_prefix) + self.assertEqual(docker_mod.resolve_tag('foo:latest'), with_prefix) + self.assertEqual(docker_mod.resolve_tag(with_prefix), with_prefix) + self.assertEqual(docker_mod.resolve_tag('foo:bar'), False) + + with patch.object(docker_mod, + 'list_tags', + MagicMock(return_value=[no_prefix])): + self.assertEqual(docker_mod.resolve_tag('bar'), no_prefix) + self.assertEqual(docker_mod.resolve_tag(no_prefix), no_prefix) 
+ self.assertEqual(docker_mod.resolve_tag('bar:baz'), False) diff --git a/tests/unit/modules/test_genesis.py b/tests/unit/modules/test_genesis.py index af135a0409..04649ae3b5 100644 --- a/tests/unit/modules/test_genesis.py +++ b/tests/unit/modules/test_genesis.py @@ -46,12 +46,57 @@ class GenesisTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(genesis.__salt__, {'disk.blkid': MagicMock(return_value={})}): self.assertEqual(genesis.bootstrap('rpm', 'root', 'dir'), None) - with patch.object(genesis, '_bootstrap_deb', return_value='A'): + common_parms = {'platform': 'deb', + 'root': 'root', + 'img_format': 'dir', + 'arch': 'amd64', + 'flavor': 'stable', + 'static_qemu': 'qemu'} + + param_sets = [ + + {'params': {}, + 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', + 'stable', 'root', 'http://ftp.debian.org/debian/'] + }, + + {'params': {'pkgs': 'vim'}, + 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', + '--include', 'vim', + 'stable', 'root', 'http://ftp.debian.org/debian/'] + }, + + {'params': {'pkgs': 'vim,emacs'}, + 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', + '--include', 'vim,emacs', + 'stable', 'root', 'http://ftp.debian.org/debian/'] + }, + + {'params': {'pkgs': ['vim', 'emacs']}, + 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', + '--include', 'vim,emacs', + 'stable', 'root', 'http://ftp.debian.org/debian/'] + }, + + {'params': {'pkgs': ['vim', 'emacs'], 'exclude_pkgs': ['vim', 'foo']}, + 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', + '--include', 'vim,emacs', '--exclude', 'vim,foo', + 'stable', 'root', 'http://ftp.debian.org/debian/'] + }, + + ] + + for param_set in param_sets: + with patch.dict(genesis.__salt__, {'mount.umount': MagicMock(), 'file.rmdir': MagicMock(), - 'file.directory_exists': MagicMock()}): - with patch.dict(genesis.__salt__, {'disk.blkid': MagicMock(return_value={})}): - self.assertEqual(genesis.bootstrap('deb', 'root', 'dir'), None) + 'file.directory_exists': MagicMock(), + 'cmd.run': 
MagicMock(), + 'disk.blkid': MagicMock(return_value={})}): + with patch('salt.modules.genesis.salt.utils.which', return_value=True): + param_set['params'].update(common_parms) + self.assertEqual(genesis.bootstrap(**param_set['params']), None) + genesis.__salt__['cmd.run'].assert_any_call(param_set['cmd'], python_shell=False) with patch.object(genesis, '_bootstrap_pacman', return_value='A') as pacman_patch: with patch.dict(genesis.__salt__, {'mount.umount': MagicMock(), diff --git a/tests/unit/modules/test_groupadd.py b/tests/unit/modules/test_groupadd.py index b836bd8805..29dfd15ed7 100644 --- a/tests/unit/modules/test_groupadd.py +++ b/tests/unit/modules/test_groupadd.py @@ -5,7 +5,10 @@ # Import Python libs from __future__ import absolute_import -import grp +try: + import grp +except ImportError: + pass # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin @@ -13,10 +16,12 @@ from tests.support.unit import TestCase, skipIf from tests.support.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON # Import Salt Libs +import salt.utils import salt.modules.groupadd as groupadd @skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(salt.utils.is_windows(), "Module not available on Windows") class GroupAddTestCase(TestCase, LoaderModuleMockMixin): ''' TestCase for salt.modules.groupadd diff --git a/tests/unit/modules/test_inspect_collector.py b/tests/unit/modules/test_inspect_collector.py index cdcb689eb7..0d37519a9e 100644 --- a/tests/unit/modules/test_inspect_collector.py +++ b/tests/unit/modules/test_inspect_collector.py @@ -49,9 +49,15 @@ class InspectorCollectorTestCase(TestCase): :return: ''' - inspector = Inspector(cachedir='/foo/cache', piddir='/foo/pid', pidfilename='bar.pid') - self.assertEqual(inspector.dbfile, '/foo/cache/_minion_collector.db') - self.assertEqual(inspector.pidfile, '/foo/pid/bar.pid') + cachedir = os.sep + os.sep.join(['foo', 'cache']) + piddir = os.sep + os.sep.join(['foo', 'pid']) + inspector = Inspector(cachedir=cachedir, 
piddir=piddir, pidfilename='bar.pid') + self.assertEqual( + inspector.dbfile, + os.sep + os.sep.join(['foo', 'cache', '_minion_collector.db'])) + self.assertEqual( + inspector.pidfile, + os.sep + os.sep.join(['foo', 'pid', 'bar.pid'])) def test_file_tree(self): ''' @@ -60,12 +66,29 @@ class InspectorCollectorTestCase(TestCase): :return: ''' - inspector = Inspector(cachedir='/test', piddir='/test', pidfilename='bar.pid') + inspector = Inspector(cachedir=os.sep + 'test', + piddir=os.sep + 'test', + pidfilename='bar.pid') tree_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'inspectlib', 'tree_test') - expected_tree = (['/a/a/dummy.a', '/a/b/dummy.b', '/b/b.1', '/b/b.2', '/b/b.3'], - ['/a', '/a/a', '/a/b', '/a/c', '/b', '/c'], - ['/a/a/dummy.ln.a', '/a/b/dummy.ln.b', '/a/c/b.1', '/b/b.4', - '/b/b.5', '/c/b.1', '/c/b.2', '/c/b.3']) + expected_tree = ([os.sep + os.sep.join(['a', 'a', 'dummy.a']), + os.sep + os.sep.join(['a', 'b', 'dummy.b']), + os.sep + os.sep.join(['b', 'b.1']), + os.sep + os.sep.join(['b', 'b.2']), + os.sep + os.sep.join(['b', 'b.3'])], + [os.sep + 'a', + os.sep + os.sep.join(['a', 'a']), + os.sep + os.sep.join(['a', 'b']), + os.sep + os.sep.join(['a', 'c']), + os.sep + 'b', + os.sep + 'c'], + [os.sep + os.sep.join(['a', 'a', 'dummy.ln.a']), + os.sep + os.sep.join(['a', 'b', 'dummy.ln.b']), + os.sep + os.sep.join(['a', 'c', 'b.1']), + os.sep + os.sep.join(['b', 'b.4']), + os.sep + os.sep.join(['b', 'b.5']), + os.sep + os.sep.join(['c', 'b.1']), + os.sep + os.sep.join(['c', 'b.2']), + os.sep + os.sep.join(['c', 'b.3'])]) tree_result = [] for chunk in inspector._get_all_files(tree_root): buff = [] diff --git a/tests/unit/modules/test_iptables.py b/tests/unit/modules/test_iptables.py index 1c4f34118f..6fe9e91285 100644 --- a/tests/unit/modules/test_iptables.py +++ b/tests/unit/modules/test_iptables.py @@ -60,6 +60,9 @@ class IptablesTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(iptables.build_rule(**{'if': 'not eth0'}), '! 
-i eth0') + self.assertEqual(iptables.build_rule(**{'proto': 'tcp', 'syn': '!'}), + '-p tcp ! --syn') + self.assertEqual(iptables.build_rule(dports=[80, 443], proto='tcp'), '-p tcp -m multiport --dports 80,443') diff --git a/tests/unit/modules/test_junos.py b/tests/unit/modules/test_junos.py index b40793e71e..34176d9d71 100644 --- a/tests/unit/modules/test_junos.py +++ b/tests/unit/modules/test_junos.py @@ -52,6 +52,7 @@ class Test_Junos_Module(TestCase, LoaderModuleMockMixin, XMLEqualityMixin): host='1.1.1.1', user='test', password='test123', + fact_style='old', gather_facts=False) self.dev.open() self.dev.timeout = 30 diff --git a/tests/unit/modules/test_kubernetes.py b/tests/unit/modules/test_kubernetes.py new file mode 100644 index 0000000000..1de939f6b0 --- /dev/null +++ b/tests/unit/modules/test_kubernetes.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Jochen Breuer ` +''' + +# Import Python Libs +from __future__ import absolute_import + +# Import Salt Testing Libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import TestCase, skipIf +from tests.support.mock import ( + Mock, + patch, + NO_MOCK, + NO_MOCK_REASON +) + +try: + from salt.modules import kubernetes +except ImportError: + kubernetes = False + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(kubernetes is False, "Probably Kubernetes client lib is not installed. \ + Skipping test_kubernetes.py") +class KubernetesTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test cases for salt.modules.kubernetes + ''' + + def setup_loader_modules(self): + return { + kubernetes: { + '__salt__': {}, + } + } + + def test_nodes(self): + ''' + Test node listing. 
+ :return: + ''' + with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib: + with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): + mock_kubernetes_lib.client.CoreV1Api.return_value = Mock( + **{"list_node.return_value.to_dict.return_value": + {'items': [{'metadata': {'name': 'mock_node_name'}}]}} + ) + self.assertEqual(kubernetes.nodes(), ['mock_node_name']) + self.assertTrue(kubernetes.kubernetes.client.CoreV1Api().list_node().to_dict.called) + + def test_deployments(self): + ''' + Tests deployment listing. + :return: + ''' + with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib: + with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): + mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock( + **{"list_namespaced_deployment.return_value.to_dict.return_value": + {'items': [{'metadata': {'name': 'mock_deployment_name'}}]}} + ) + self.assertEqual(kubernetes.deployments(), ['mock_deployment_name']) + self.assertTrue( + kubernetes.kubernetes.client.ExtensionsV1beta1Api().list_namespaced_deployment().to_dict.called) + + def test_services(self): + ''' + Tests services listing. + :return: + ''' + with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib: + with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): + mock_kubernetes_lib.client.CoreV1Api.return_value = Mock( + **{"list_namespaced_service.return_value.to_dict.return_value": + {'items': [{'metadata': {'name': 'mock_service_name'}}]}} + ) + self.assertEqual(kubernetes.services(), ['mock_service_name']) + self.assertTrue(kubernetes.kubernetes.client.CoreV1Api().list_namespaced_service().to_dict.called) + + def test_pods(self): + ''' + Tests pods listing. 
+ :return: + ''' + with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib: + with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): + mock_kubernetes_lib.client.CoreV1Api.return_value = Mock( + **{"list_namespaced_pod.return_value.to_dict.return_value": + {'items': [{'metadata': {'name': 'mock_pod_name'}}]}} + ) + self.assertEqual(kubernetes.pods(), ['mock_pod_name']) + self.assertTrue(kubernetes.kubernetes.client.CoreV1Api(). + list_namespaced_pod().to_dict.called) + + def test_delete_deployments(self): + ''' + Tests deployment deletion. + :return: + ''' + with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib: + with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): + mock_kubernetes_lib.client.V1DeleteOptions = Mock(return_value="") + mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock( + **{"delete_namespaced_deployment.return_value.to_dict.return_value": {}} + ) + self.assertEqual(kubernetes.delete_deployment("test"), {}) + self.assertTrue( + kubernetes.kubernetes.client.ExtensionsV1beta1Api(). + delete_namespaced_deployment().to_dict.called) + + def test_create_deployments(self): + ''' + Tests deployment creation. + :return: + ''' + with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib: + with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): + mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock( + **{"create_namespaced_deployment.return_value.to_dict.return_value": {}} + ) + self.assertEqual(kubernetes.create_deployment("test", "default", {}, {}, + None, None, None), {}) + self.assertTrue( + kubernetes.kubernetes.client.ExtensionsV1beta1Api(). 
+ create_namespaced_deployment().to_dict.called) diff --git a/tests/unit/modules/test_mac_group.py b/tests/unit/modules/test_mac_group.py index 2c03deb357..d69288ccb9 100644 --- a/tests/unit/modules/test_mac_group.py +++ b/tests/unit/modules/test_mac_group.py @@ -5,11 +5,15 @@ # Import python libs from __future__ import absolute_import -import grp +HAS_GRP = True +try: + import grp +except ImportError: + HAS_GRP = False # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin -from tests.support.unit import TestCase +from tests.support.unit import TestCase, skipIf from tests.support.mock import MagicMock, patch # Import Salt Libs @@ -17,6 +21,7 @@ import salt.modules.mac_group as mac_group from salt.exceptions import SaltInvocationError, CommandExecutionError +@skipIf(not HAS_GRP, "Missing required library 'grp'") class MacGroupTestCase(TestCase, LoaderModuleMockMixin): ''' TestCase for the salt.modules.mac_group module diff --git a/tests/unit/modules/test_mac_user.py b/tests/unit/modules/test_mac_user.py index 51402e6cd0..c639f022da 100644 --- a/tests/unit/modules/test_mac_user.py +++ b/tests/unit/modules/test_mac_user.py @@ -2,10 +2,13 @@ ''' :codeauthor: :email:`Nicole Thomas ` ''' - # Import python libs from __future__ import absolute_import -import pwd +HAS_PWD = True +try: + import pwd +except ImportError: + HAS_PWD = False # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin @@ -17,6 +20,7 @@ import salt.modules.mac_user as mac_user from salt.exceptions import SaltInvocationError, CommandExecutionError +@skipIf(not HAS_PWD, "Missing required library 'pwd'") @skipIf(NO_MOCK, NO_MOCK_REASON) class MacUserTestCase(TestCase, LoaderModuleMockMixin): ''' @@ -26,14 +30,15 @@ class MacUserTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): return {mac_user: {}} - mock_pwall = [pwd.struct_passwd(('_amavisd', '*', 83, 83, 'AMaViS Daemon', - '/var/virusmails', '/usr/bin/false')), - 
pwd.struct_passwd(('_appleevents', '*', 55, 55, - 'AppleEvents Daemon', - '/var/empty', '/usr/bin/false')), - pwd.struct_passwd(('_appowner', '*', 87, 87, - 'Application Owner', - '/var/empty', '/usr/bin/false'))] + if HAS_PWD: + mock_pwall = [pwd.struct_passwd(('_amavisd', '*', 83, 83, 'AMaViS Daemon', + '/var/virusmails', '/usr/bin/false')), + pwd.struct_passwd(('_appleevents', '*', 55, 55, + 'AppleEvents Daemon', + '/var/empty', '/usr/bin/false')), + pwd.struct_passwd(('_appowner', '*', 87, 87, + 'Application Owner', + '/var/empty', '/usr/bin/false'))] mock_info_ret = {'shell': '/bin/bash', 'name': 'test', 'gid': 4376, 'groups': ['TEST_GROUP'], 'home': '/Users/foo', 'fullname': 'TEST USER', 'uid': 4376} diff --git a/tests/unit/modules/test_pip.py b/tests/unit/modules/test_pip.py index ec4b379928..8a5229b0e6 100644 --- a/tests/unit/modules/test_pip.py +++ b/tests/unit/modules/test_pip.py @@ -10,6 +10,7 @@ from tests.support.unit import skipIf, TestCase from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch # Import salt libs +import salt.utils import salt.modules.pip as pip from salt.exceptions import CommandExecutionError @@ -289,17 +290,23 @@ class PipTestCase(TestCase, LoaderModuleMockMixin): mock_path.isdir.return_value = True pkg = 'mock' - venv_path = '/test_env' def join(*args): - return '/'.join(args) + return os.sep.join(args) + mock_path.join = join mock = MagicMock(return_value={'retcode': 0, 'stdout': ''}) with patch.dict(pip.__salt__, {'cmd.run_all': mock}): + if salt.utils.is_windows(): + venv_path = 'c:\\test_env' + bin_path = os.path.join(venv_path, 'Scripts', 'pip.exe').encode('string-escape') + else: + venv_path = '/test_env' + bin_path = os.path.join(venv_path, 'bin', 'pip') pip.install(pkg, bin_env=venv_path) mock.assert_called_once_with( - [os.path.join(venv_path, 'bin', 'pip'), 'install', pkg], - env={'VIRTUAL_ENV': '/test_env'}, + [bin_path, 'install', pkg], + env={'VIRTUAL_ENV': venv_path}, saltenv='base', runas=None, 
use_vt=False, diff --git a/tests/unit/modules/test_reg_win.py b/tests/unit/modules/test_reg_win.py index 960c00e5d0..e83d89b127 100644 --- a/tests/unit/modules/test_reg_win.py +++ b/tests/unit/modules/test_reg_win.py @@ -27,7 +27,7 @@ except ImportError: PY2 = sys.version_info[0] == 2 # The following used to make sure we are not # testing already existing data -# Note strftime retunrns a str, so we need to make it unicode +# Note strftime returns a str, so we need to make it unicode TIMEINT = int(time.time()) if PY2: diff --git a/tests/unit/modules/test_timezone.py b/tests/unit/modules/test_timezone.py index 7cc6a18d19..0474ca9627 100644 --- a/tests/unit/modules/test_timezone.py +++ b/tests/unit/modules/test_timezone.py @@ -161,6 +161,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.modules.timezone._get_zone_aix', MagicMock(return_value=self.TEST_TZ)): assert timezone.get_zone() == self.TEST_TZ + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -175,6 +176,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0] assert args == ('/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="UTC"') + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -189,6 +191,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0] assert args == ('/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="UTC"') + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', MagicMock(return_value=False)) @patch('os.path.exists', 
MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -207,6 +210,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs = _fopen.return_value.__enter__.return_value.write.mock_calls[0] assert args == ('UTC',) + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -225,6 +229,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs = _fopen.return_value.__enter__.return_value.write.mock_calls[0] assert args == ('UTC',) + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', MagicMock(return_value=True)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -239,6 +244,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.modules.timezone._timedatectl', MagicMock(return_value={'stdout': 'rtc in local tz:yes'})): assert timezone.get_hwclock() == 'localtime' + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -254,6 +260,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): assert args == (['tail', '-n', '1', '/etc/adjtime'],) assert kwarg == {'python_shell': False} + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -284,6 +291,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): assert args == (['tail', '-n', '1', '/etc/adjtime'],) assert kwarg == {'python_shell': False} + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') 
@patch('salt.utils.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -299,6 +307,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.utils.fopen', mock_open()): assert timezone.get_hwclock() == 'localtime' + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -315,6 +324,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(timezone.__grains__, {'os_family': ['AIX']}): assert timezone.get_hwclock() == hwclock + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -329,6 +339,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): assert timezone.set_hwclock('forty two') assert timezone.set_hwclock('UTC') + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -348,6 +359,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): assert args == (['rtc', '-z', 'GMT'],) assert kwargs == {'python_shell': False} + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -364,6 +376,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): assert args == (['timezonectl', 'set-local-rtc', 'false'],) assert kwargs == {'python_shell': False} + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', 
MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -379,6 +392,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0] assert args == ('/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="TEST_TIMEZONE"') + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -394,6 +408,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0] assert args == ('/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="TEST_TIMEZONE"') + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @@ -413,6 +428,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[1] assert args == ('/etc/default/rcS', '^UTC=.*', 'UTC=no') + @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) diff --git a/tests/unit/modules/test_useradd.py b/tests/unit/modules/test_useradd.py index 2bcbee38d1..7fd457425f 100644 --- a/tests/unit/modules/test_useradd.py +++ b/tests/unit/modules/test_useradd.py @@ -72,6 +72,7 @@ class UserAddTestCase(TestCase, LoaderModuleMockMixin): # 'getent' function tests: 2 + @skipIf(HAS_PWD is False, 'The pwd module is not available') def test_getent(self): ''' Test if user.getent already have a value @@ -359,6 +360,7 @@ class UserAddTestCase(TestCase, LoaderModuleMockMixin): # 'list_users' function tests: 1 + 
@skipIf(HAS_PWD is False, 'The pwd module is not available') def test_list_users(self): ''' Test if it returns a list of all users diff --git a/tests/unit/modules/test_win_system.py b/tests/unit/modules/test_win_system.py index 3506a4ddf6..8b8a05356f 100644 --- a/tests/unit/modules/test_win_system.py +++ b/tests/unit/modules/test_win_system.py @@ -67,30 +67,31 @@ class WinSystemTestCase(TestCase, LoaderModuleMockMixin): ''' Test to reboot the system ''' - mock = MagicMock(return_value='salt') - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertEqual(win_system.reboot(), 'salt') - mock.assert_called_once_with(['shutdown', '/r', '/t', '300'], python_shell=False) + with patch('salt.modules.win_system.shutdown', + MagicMock(return_value=True)) as shutdown: + self.assertEqual(win_system.reboot(), True) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') def test_reboot_with_timeout_in_minutes(self): ''' Test to reboot the system with a timeout ''' - mock = MagicMock(return_value='salt') - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertEqual(win_system.reboot(5, in_seconds=False), 'salt') - mock.assert_called_once_with(['shutdown', '/r', '/t', '300'], python_shell=False) + with patch('salt.modules.win_system.shutdown', + MagicMock(return_value=True)) as shutdown: + self.assertEqual(win_system.reboot(5, in_seconds=False), True) + shutdown.assert_called_with(timeout=5, in_seconds=False, reboot=True, + only_on_pending_reboot=False) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') def test_reboot_with_timeout_in_seconds(self): ''' Test to reboot the system with a timeout ''' - mock = MagicMock(return_value='salt') - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertEqual(win_system.reboot(5, in_seconds=True), 'salt') - mock.assert_called_once_with(['shutdown', '/r', '/t', '5'], python_shell=False) + with patch('salt.modules.win_system.shutdown', + 
MagicMock(return_value=True)) as shutdown: + self.assertEqual(win_system.reboot(5, in_seconds=True), True) + shutdown.assert_called_with(timeout=5, in_seconds=True, reboot=True, + only_on_pending_reboot=False) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') def test_reboot_with_wait(self): @@ -98,50 +99,49 @@ class WinSystemTestCase(TestCase, LoaderModuleMockMixin): Test to reboot the system with a timeout and wait for it to finish ''' - mock = MagicMock(return_value='salt') - sleep_mock = MagicMock(return_value='salt') - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - with patch('time.sleep', sleep_mock): - self.assertEqual(win_system.reboot(wait_for_reboot=True), 'salt') - mock.assert_called_once_with(['shutdown', '/r', '/t', '300'], python_shell=False) - sleep_mock.assert_called_once_with(330) + with patch('salt.modules.win_system.shutdown', + MagicMock(return_value=True)), \ + patch('salt.modules.win_system.time.sleep', + MagicMock()) as time: + self.assertEqual(win_system.reboot(wait_for_reboot=True), True) + time.assert_called_with(330) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') def test_shutdown(self): ''' Test to shutdown a running system ''' - mock = MagicMock(return_value='salt') - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertEqual(win_system.shutdown(), 'salt') + with patch('salt.modules.win_system.win32api.InitiateSystemShutdown', + MagicMock()): + self.assertEqual(win_system.shutdown(), True) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') def test_shutdown_hard(self): ''' Test to shutdown a running system with no timeout or warning ''' - mock = MagicMock(return_value='salt') - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertEqual(win_system.shutdown_hard(), 'salt') + with patch('salt.modules.win_system.shutdown', + MagicMock(return_value=True)) as shutdown: + 
self.assertEqual(win_system.shutdown_hard(), True) + shutdown.assert_called_with(timeout=0) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') def test_set_computer_name(self): ''' Test to set the Windows computer name ''' - mock = MagicMock(side_effect=[{'Computer Name': {'Current': ""}, - 'ReturnValue = 0;': True}, - {'Computer Name': {'Current': 'salt'}}]) - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - mock = MagicMock(return_value='salt') - with patch.object(win_system, 'get_computer_name', mock): - mock = MagicMock(return_value=True) - with patch.object(win_system, - 'get_pending_computer_name', mock): - self.assertDictEqual(win_system.set_computer_name("salt"), + with patch('salt.modules.win_system.windll.kernel32.SetComputerNameExW', + MagicMock(return_value=True)): + with patch.object(win_system, 'get_computer_name', + MagicMock(return_value='salt')): + with patch.object(win_system, 'get_pending_computer_name', + MagicMock(return_value='salt_new')): + self.assertDictEqual(win_system.set_computer_name("salt_new"), {'Computer Name': {'Current': 'salt', - 'Pending': True}}) - + 'Pending': 'salt_new'}}) + # Test set_computer_name failure + with patch('salt.modules.win_system.windll.kernel32.SetComputerNameExW', + MagicMock(return_value=False)): self.assertFalse(win_system.set_computer_name("salt")) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') @@ -149,25 +149,25 @@ class WinSystemTestCase(TestCase, LoaderModuleMockMixin): ''' Test to get a pending computer name. 
''' - mock = MagicMock(return_value='salt') - with patch.object(win_system, 'get_computer_name', mock): - mock = MagicMock(side_effect=['salt0', - 'ComputerName REG_SZ (salt)']) - with patch.dict(win_system.__salt__, {'cmd.run': mock}): + with patch.object(win_system, 'get_computer_name', + MagicMock(return_value='salt')): + reg_mock = MagicMock(return_value={'vdata': 'salt'}) + with patch.dict(win_system.__salt__, {'reg.read_value': reg_mock}): self.assertFalse(win_system.get_pending_computer_name()) + reg_mock = MagicMock(return_value={'vdata': 'salt_pending'}) + with patch.dict(win_system.__salt__, {'reg.read_value': reg_mock}): self.assertEqual(win_system.get_pending_computer_name(), - '(salt)') + 'salt_pending') @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') def test_get_computer_name(self): ''' Test to get the Windows computer name ''' - mock = MagicMock(side_effect=['Server Name Salt', 'Salt']) - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertEqual(win_system.get_computer_name(), 'Salt') - + with patch('salt.modules.win_system.win32api.GetComputerNameEx', + MagicMock(side_effect=['computer name', ''])): + self.assertEqual(win_system.get_computer_name(), 'computer name') self.assertFalse(win_system.get_computer_name()) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') @@ -189,10 +189,10 @@ class WinSystemTestCase(TestCase, LoaderModuleMockMixin): ''' Test to get the Windows computer description ''' - mock = MagicMock(side_effect=['Server Comment Salt', 'Salt']) - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertEqual(win_system.get_computer_desc(), 'Salt') - + with patch('salt.modules.win_system.get_system_info', + MagicMock(side_effect=[{'description': 'salt description'}, + {'description': None}])): + self.assertEqual(win_system.get_computer_desc(), 'salt description') self.assertFalse(win_system.get_computer_desc()) @skipIf(not 
win_system.HAS_WIN32NET_MODS, 'this test needs w32net and other windows libraries') @@ -200,17 +200,20 @@ class WinSystemTestCase(TestCase, LoaderModuleMockMixin): ''' Test to join a computer to an Active Directory domain ''' - mock = MagicMock(side_effect=[{'ReturnValue = 0;': True}, - {'Salt': True}]) - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertDictEqual(win_system.join_domain("saltstack", - "salt", - "salt@123"), - {'Domain': 'saltstack'}) + with patch('salt.modules.win_system._join_domain', + MagicMock(return_value=0)): + with patch('salt.modules.win_system.get_domain_workgroup', + MagicMock(return_value={'Workgroup': 'Workgroup'})): + self.assertDictEqual( + win_system.join_domain( + "saltstack", "salt", "salt@123"), + {'Domain': 'saltstack', 'Restart': False}) - self.assertFalse(win_system.join_domain("saltstack", - "salt", - "salt@123")) + with patch('salt.modules.win_system.get_domain_workgroup', + MagicMock(return_value={'Domain': 'saltstack'})): + self.assertEqual( + win_system.join_domain("saltstack", "salt", "salt@123"), + 'Already joined to saltstack') def test_get_system_time(self): ''' @@ -230,13 +233,10 @@ class WinSystemTestCase(TestCase, LoaderModuleMockMixin): ''' Test to set system time ''' - mock = MagicMock(side_effect=[False, True]) - with patch.object(win_system, '_validate_time', mock): + with patch('salt.modules.win_system.set_system_date_time', + MagicMock(side_effect=[False, True])): self.assertFalse(win_system.set_system_time("11:31:15 AM")) - - mock = MagicMock(return_value=True) - with patch.dict(win_system.__salt__, {'cmd.retcode': mock}): - self.assertFalse(win_system.set_system_time("11:31:15 AM")) + self.assertTrue(win_system.set_system_time("11:31:15 AM")) def test_get_system_date(self): ''' @@ -250,13 +250,10 @@ class WinSystemTestCase(TestCase, LoaderModuleMockMixin): ''' Test to set system date ''' - mock = MagicMock(side_effect=[False, True]) - with patch.object(win_system, '_validate_date', mock): 
+ with patch('salt.modules.win_system.set_system_date_time', + MagicMock(side_effect=[False, True])): self.assertFalse(win_system.set_system_date("03-28-13")) - - mock = MagicMock(return_value=True) - with patch.dict(win_system.__salt__, {'cmd.retcode': mock}): - self.assertFalse(win_system.set_system_date("03-28-13")) + self.assertTrue(win_system.set_system_date("03-28-13")) def test_start_time_service(self): ''' diff --git a/tests/unit/pillar/test_git.py b/tests/unit/pillar/test_git.py index 7c55175416..f46011d6da 100644 --- a/tests/unit/pillar/test_git.py +++ b/tests/unit/pillar/test_git.py @@ -16,6 +16,7 @@ import tempfile import shutil import subprocess import yaml +import stat # Import Salt Testing libs from tests.integration import AdaptedConfigurationTestCaseMixin @@ -72,9 +73,13 @@ class GitPillarTestCase(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModul git_pillar._update('master', 'file://{0}'.format(self.repo_path)) def tearDown(self): - shutil.rmtree(self.tmpdir) + shutil.rmtree(self.tmpdir, onerror=self._rmtree_error) super(GitPillarTestCase, self).tearDown() + def _rmtree_error(self, func, path, excinfo): + os.chmod(path, stat.S_IWRITE) + func(path) + def _create_repo(self): 'create source Git repo in temp directory' repo = os.path.join(self.tmpdir, 'repo_pillar') diff --git a/tests/unit/states/test_archive.py b/tests/unit/states/test_archive.py index 2823711fe7..30d256773d 100644 --- a/tests/unit/states/test_archive.py +++ b/tests/unit/states/test_archive.py @@ -102,16 +102,17 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): 'cmd.run_all': mock_run, 'archive.list': list_mock, 'file.source_list': mock_source_list}): - with patch.object(os.path, 'isfile', isfile_mock): - for test_opts, ret_opts in zip(test_tar_opts, ret_tar_opts): - ret = archive.extracted(tmp_dir, - source, - options=test_opts, - enforce_toplevel=False) - ret_opts.append(source) - mock_run.assert_called_with(ret_opts, - cwd=tmp_dir + os.sep, - python_shell=False) + 
with patch.dict(archive.__states__, {'file.directory': mock_true}): + with patch.object(os.path, 'isfile', isfile_mock): + for test_opts, ret_opts in zip(test_tar_opts, ret_tar_opts): + ret = archive.extracted(tmp_dir, + source, + options=test_opts, + enforce_toplevel=False) + ret_opts.append(source) + mock_run.assert_called_with(ret_opts, + cwd=tmp_dir + os.sep, + python_shell=False) def test_tar_gnutar(self): ''' @@ -142,13 +143,14 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): 'cmd.run_all': run_all, 'archive.list': list_mock, 'file.source_list': mock_source_list}): - with patch.object(os.path, 'isfile', isfile_mock): - ret = archive.extracted('/tmp/out', - source, - options='xvzf', - enforce_toplevel=False, - keep=True) - self.assertEqual(ret['changes']['extracted_files'], 'stdout') + with patch.dict(archive.__states__, {'file.directory': mock_true}): + with patch.object(os.path, 'isfile', isfile_mock): + ret = archive.extracted('/tmp/out', + source, + options='xvzf', + enforce_toplevel=False, + keep=True) + self.assertEqual(ret['changes']['extracted_files'], 'stdout') def test_tar_bsdtar(self): ''' @@ -179,10 +181,11 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): 'cmd.run_all': run_all, 'archive.list': list_mock, 'file.source_list': mock_source_list}): - with patch.object(os.path, 'isfile', isfile_mock): - ret = archive.extracted('/tmp/out', - source, - options='xvzf', - enforce_toplevel=False, - keep=True) - self.assertEqual(ret['changes']['extracted_files'], 'stderr') + with patch.dict(archive.__states__, {'file.directory': mock_true}): + with patch.object(os.path, 'isfile', isfile_mock): + ret = archive.extracted('/tmp/out', + source, + options='xvzf', + enforce_toplevel=False, + keep=True) + self.assertEqual(ret['changes']['extracted_files'], 'stderr') diff --git a/tests/unit/states/test_boto_vpc.py b/tests/unit/states/test_boto_vpc.py index e632b1ec2e..1d5e334a2f 100644 --- a/tests/unit/states/test_boto_vpc.py +++ 
b/tests/unit/states/test_boto_vpc.py @@ -11,14 +11,11 @@ from tests.support.unit import skipIf, TestCase from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch # Import Salt libs -import salt.config -import salt.loader import salt.utils.boto +from salt.ext import six from salt.utils.versions import LooseVersion import salt.states.boto_vpc as boto_vpc -# Import test suite libs - # pylint: disable=import-error,unused-import from tests.unit.modules.test_boto_vpc import BotoVpcTestCaseMixin @@ -34,18 +31,18 @@ except ImportError: HAS_BOTO = False try: - from moto import mock_ec2 + from moto import mock_ec2_deprecated HAS_MOTO = True except ImportError: HAS_MOTO = False - def mock_ec2(self): + def mock_ec2_deprecated(self): ''' - if the mock_ec2 function is not available due to import failure + if the mock_ec2_deprecated function is not available due to import failure this replaces the decorated function with stub_function. - Allows boto_vpc unit tests to use the @mock_ec2 decorator - without a "NameError: name 'mock_ec2' is not defined" error. + Allows boto_vpc unit tests to use the @mock_ec2_deprecated decorator + without a "NameError: name 'mock_ec2_deprecated' is not defined" error. ''' def stub_function(self): @@ -131,7 +128,7 @@ class BotoVpcTestCase(BotoVpcStateTestCaseBase, BotoVpcTestCaseMixin): TestCase for salt.states.boto_vpc state.module ''' - @mock_ec2 + @mock_ec2_deprecated def test_present_when_vpc_does_not_exist(self): ''' Tests present on a VPC that does not exist. 
@@ -142,14 +139,14 @@ class BotoVpcTestCase(BotoVpcStateTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_present_result['result']) self.assertEqual(vpc_present_result['changes']['new']['vpc']['state'], 'available') - @mock_ec2 + @mock_ec2_deprecated def test_present_when_vpc_exists(self): vpc = self._create_vpc(name='test') vpc_present_result = self.salt_states['boto_vpc.present']('test', cidr_block) self.assertTrue(vpc_present_result['result']) self.assertEqual(vpc_present_result['changes'], {}) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_present_with_failure(self): with patch('moto.ec2.models.VPCBackend.create_vpc', side_effect=BotoServerError(400, 'Mocked error')): @@ -157,7 +154,7 @@ class BotoVpcTestCase(BotoVpcStateTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(vpc_present_result['result']) self.assertTrue('Mocked error' in vpc_present_result['comment']) - @mock_ec2 + @mock_ec2_deprecated def test_absent_when_vpc_does_not_exist(self): ''' Tests absent on a VPC that does not exist. 
@@ -167,7 +164,7 @@ class BotoVpcTestCase(BotoVpcStateTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_absent_result['result']) self.assertEqual(vpc_absent_result['changes'], {}) - @mock_ec2 + @mock_ec2_deprecated def test_absent_when_vpc_exists(self): vpc = self._create_vpc(name='test') with patch.dict(salt.utils.boto.__salt__, self.funcs): @@ -175,7 +172,7 @@ class BotoVpcTestCase(BotoVpcStateTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_absent_result['result']) self.assertEqual(vpc_absent_result['changes']['new']['vpc'], None) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_absent_with_failure(self): vpc = self._create_vpc(name='test') @@ -195,7 +192,7 @@ class BotoVpcResourceTestCaseMixin(BotoVpcTestCaseMixin): _create = getattr(self, '_create_' + self.resource_type) _create(vpc_id=vpc_id, name=name, **self.extra_kwargs) - @mock_ec2 + @mock_ec2_deprecated def test_present_when_resource_does_not_exist(self): ''' Tests present on a resource that does not exist. 
@@ -210,7 +207,7 @@ class BotoVpcResourceTestCaseMixin(BotoVpcTestCaseMixin): exists = self.funcs['boto_vpc.resource_exists'](self.resource_type, 'test').get('exists') self.assertTrue(exists) - @mock_ec2 + @mock_ec2_deprecated def test_present_when_resource_exists(self): vpc = self._create_vpc(name='test') resource = self._create_resource(vpc_id=vpc.id, name='test') @@ -220,7 +217,7 @@ class BotoVpcResourceTestCaseMixin(BotoVpcTestCaseMixin): self.assertTrue(resource_present_result['result']) self.assertEqual(resource_present_result['changes'], {}) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_present_with_failure(self): vpc = self._create_vpc(name='test') @@ -231,7 +228,7 @@ class BotoVpcResourceTestCaseMixin(BotoVpcTestCaseMixin): self.assertFalse(resource_present_result['result']) self.assertTrue('Mocked error' in resource_present_result['comment']) - @mock_ec2 + @mock_ec2_deprecated def test_absent_when_resource_does_not_exist(self): ''' Tests absent on a resource that does not exist. 
@@ -241,7 +238,7 @@ class BotoVpcResourceTestCaseMixin(BotoVpcTestCaseMixin): self.assertTrue(resource_absent_result['result']) self.assertEqual(resource_absent_result['changes'], {}) - @mock_ec2 + @mock_ec2_deprecated def test_absent_when_resource_exists(self): vpc = self._create_vpc(name='test') self._create_resource(vpc_id=vpc.id, name='test') @@ -253,7 +250,7 @@ class BotoVpcResourceTestCaseMixin(BotoVpcTestCaseMixin): exists = self.funcs['boto_vpc.resource_exists'](self.resource_type, 'test').get('exists') self.assertFalse(exists) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_absent_with_failure(self): vpc = self._create_vpc(name='test') @@ -291,6 +288,9 @@ class BotoVpcInternetGatewayTestCase(BotoVpcStateTestCaseBase, BotoVpcResourceTe @skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(six.PY3, 'Disabled for Python 3 due to upstream bugs: ' + 'https://github.com/spulec/moto/issues/548 and ' + 'https://github.com/gabrielfalcao/HTTPretty/issues/325') @skipIf(HAS_BOTO is False, 'The boto module must be installed.') @skipIf(HAS_MOTO is False, 'The moto module must be installed.') @skipIf(_has_required_boto() is False, 'The boto module must be greater than' @@ -301,7 +301,7 @@ class BotoVpcRouteTableTestCase(BotoVpcStateTestCaseBase, BotoVpcResourceTestCas backend_create = 'RouteTableBackend.create_route_table' backend_delete = 'RouteTableBackend.delete_route_table' - @mock_ec2 + @mock_ec2_deprecated def test_present_with_subnets(self): vpc = self._create_vpc(name='test') subnet1 = self._create_subnet(vpc_id=vpc.id, name='test1') @@ -326,7 +326,7 @@ class BotoVpcRouteTableTestCase(BotoVpcStateTestCaseBase, BotoVpcResourceTestCas new_subnets = changes['new']['subnets_associations'] self.assertEqual(new_subnets[0]['subnet_id'], subnet2.id) - @mock_ec2 + @mock_ec2_deprecated def test_present_with_routes(self): vpc = self._create_vpc(name='test') igw = self._create_internet_gateway(name='test', 
vpc_id=vpc.id) diff --git a/tests/unit/states/test_docker_image.py b/tests/unit/states/test_docker_image.py index 4d94c2e239..e96dbc9f9d 100644 --- a/tests/unit/states/test_docker_image.py +++ b/tests/unit/states/test_docker_image.py @@ -10,14 +10,13 @@ from __future__ import absolute_import from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import skipIf, TestCase from tests.support.mock import ( - Mock, + MagicMock, NO_MOCK, NO_MOCK_REASON, patch ) # Import Salt Libs -from salt.exceptions import CommandExecutionError import salt.modules.dockermod as docker_mod import salt.states.docker_image as docker_state @@ -50,21 +49,19 @@ class DockerImageTestCase(TestCase, LoaderModuleMockMixin): if ``image:latest`` is already downloaded locally the state should not report changes. ''' - docker_inspect_image = Mock( - return_value={'Id': 'abcdefghijk'}) - docker_pull = Mock( + docker_inspect_image = MagicMock(return_value={'Id': 'abcdefghijkl'}) + docker_pull = MagicMock( return_value={'Layers': - {'Already_Pulled': ['abcdefghijk'], + {'Already_Pulled': ['abcdefghijkl'], 'Pulled': []}, 'Status': 'Image is up to date for image:latest', 'Time_Elapsed': 1.1}) - docker_list_tags = Mock( - return_value=['image:latest'] - ) + docker_list_tags = MagicMock(return_value=['image:latest']) + docker_resolve_tag = MagicMock(return_value='image:latest') __salt__ = {'docker.list_tags': docker_list_tags, 'docker.pull': docker_pull, 'docker.inspect_image': docker_inspect_image, - } + 'docker.resolve_tag': docker_resolve_tag} with patch.dict(docker_state.__dict__, {'__salt__': __salt__}): ret = docker_state.present('image:latest', force=True) @@ -89,29 +86,24 @@ class DockerImageTestCase(TestCase, LoaderModuleMockMixin): if ``image:latest`` is not downloaded and force is true should pull a new image successfuly. 
''' - docker_inspect_image = Mock( - side_effect=CommandExecutionError( - 'Error 404: No such image/container: image:latest')) - docker_pull = Mock( + docker_inspect_image = MagicMock(return_value={'Id': '1234567890ab'}) + docker_pull = MagicMock( return_value={'Layers': - {'Already_Pulled': ['abcdefghijk'], - 'Pulled': ['abcdefghijk']}, - 'Status': "Image 'image:latest' was pulled", - 'Time_Elapsed': 1.1}) - docker_list_tags = Mock( - side_effect=[[], ['image:latest']] - ) + {'Pulled': ['abcdefghijkl']}, + 'Status': "Image 'image:latest' was pulled", + 'Time_Elapsed': 1.1}) + docker_list_tags = MagicMock(side_effect=[[], ['image:latest']]) + docker_resolve_tag = MagicMock(return_value='image:latest') __salt__ = {'docker.list_tags': docker_list_tags, 'docker.pull': docker_pull, 'docker.inspect_image': docker_inspect_image, - } + 'docker.resolve_tag': docker_resolve_tag} with patch.dict(docker_state.__dict__, {'__salt__': __salt__}): ret = docker_state.present('image:latest', force=True) self.assertEqual(ret, {'changes': { - 'Layers': {'Already_Pulled': ['abcdefghijk'], - 'Pulled': ['abcdefghijk']}, + 'Layers': {'Pulled': ['abcdefghijkl']}, 'Status': "Image 'image:latest' was pulled", 'Time_Elapsed': 1.1}, 'result': True, diff --git a/tests/unit/states/test_docker_network.py b/tests/unit/states/test_docker_network.py index 7b0037a7d4..552170bdfb 100644 --- a/tests/unit/states/test_docker_network.py +++ b/tests/unit/states/test_docker_network.py @@ -43,10 +43,18 @@ class DockerNetworkTestCase(TestCase, LoaderModuleMockMixin): docker_create_network = Mock(return_value='created') docker_connect_container_to_network = Mock(return_value='connected') docker_inspect_container = Mock(return_value={'Id': 'abcd'}) + # Get docker.networks to return a network with a name which is a superset of the name of + # the network which is to be created, despite this network existing we should still expect + # that the new network will be created. + # Regression test for #41982. 
+ docker_networks = Mock(return_value=[{ + 'Name': 'network_foobar', + 'Containers': {'container': {}} + }]) __salt__ = {'docker.create_network': docker_create_network, 'docker.inspect_container': docker_inspect_container, 'docker.connect_container_to_network': docker_connect_container_to_network, - 'docker.networks': Mock(return_value=[]), + 'docker.networks': docker_networks, } with patch.dict(docker_state.__dict__, {'__salt__': __salt__}): @@ -69,10 +77,14 @@ class DockerNetworkTestCase(TestCase, LoaderModuleMockMixin): ''' docker_remove_network = Mock(return_value='removed') docker_disconnect_container_from_network = Mock(return_value='disconnected') + docker_networks = Mock(return_value=[{ + 'Name': 'network_foo', + 'Containers': {'container': {}} + }]) __salt__ = { 'docker.remove_network': docker_remove_network, 'docker.disconnect_container_from_network': docker_disconnect_container_from_network, - 'docker.networks': Mock(return_value=[{'Containers': {'container': {}}}]), + 'docker.networks': docker_networks, } with patch.dict(docker_state.__dict__, {'__salt__': __salt__}): @@ -85,3 +97,32 @@ class DockerNetworkTestCase(TestCase, LoaderModuleMockMixin): 'changes': {'disconnected': 'disconnected', 'removed': 'removed'}, 'result': True}) + + def test_absent_with_matching_network(self): + ''' + Test docker_network.absent when the specified network does not exist, + but another network with a name which is a superset of the specified + name does exist. In this case we expect there to be no attempt to remove + any network. + Regression test for #41982. 
+ ''' + docker_remove_network = Mock(return_value='removed') + docker_disconnect_container_from_network = Mock(return_value='disconnected') + docker_networks = Mock(return_value=[{ + 'Name': 'network_foobar', + 'Containers': {'container': {}} + }]) + __salt__ = { + 'docker.remove_network': docker_remove_network, + 'docker.disconnect_container_from_network': docker_disconnect_container_from_network, + 'docker.networks': docker_networks, + } + with patch.dict(docker_state.__dict__, + {'__salt__': __salt__}): + ret = docker_state.absent('network_foo') + docker_disconnect_container_from_network.assert_not_called() + docker_remove_network.assert_not_called() + self.assertEqual(ret, {'name': 'network_foo', + 'comment': 'Network \'network_foo\' already absent', + 'changes': {}, + 'result': True}) diff --git a/tests/unit/states/test_environ.py b/tests/unit/states/test_environ.py index 28c35c525f..45a8e5fb1a 100644 --- a/tests/unit/states/test_environ.py +++ b/tests/unit/states/test_environ.py @@ -96,11 +96,14 @@ class TestEnvironState(TestCase, LoaderModuleMockMixin): ret = envstate.setenv('notimportant', {'foo': 'bar'}) self.assertEqual(ret['changes'], {'foo': 'bar'}) - ret = envstate.setenv('notimportant', {'test': False, 'foo': 'baz'}, false_unsets=True) + with patch.dict(envstate.__salt__, {'reg.read_value': MagicMock()}): + ret = envstate.setenv( + 'notimportant', {'test': False, 'foo': 'baz'}, false_unsets=True) self.assertEqual(ret['changes'], {'test': None, 'foo': 'baz'}) self.assertEqual(envstate.os.environ, {'INITIAL': 'initial', 'foo': 'baz'}) - ret = envstate.setenv('notimportant', {'test': False, 'foo': 'bax'}) + with patch.dict(envstate.__salt__, {'reg.read_value': MagicMock()}): + ret = envstate.setenv('notimportant', {'test': False, 'foo': 'bax'}) self.assertEqual(ret['changes'], {'test': '', 'foo': 'bax'}) self.assertEqual(envstate.os.environ, {'INITIAL': 'initial', 'foo': 'bax', 'test': ''}) diff --git a/tests/unit/states/test_file.py 
b/tests/unit/states/test_file.py index dc58ca2500..e106423c81 100644 --- a/tests/unit/states/test_file.py +++ b/tests/unit/states/test_file.py @@ -100,7 +100,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin): def test_contents_pillar_doesnt_add_more_newlines(self): # make sure the newline - pillar_value = 'i am the pillar value\n' + pillar_value = 'i am the pillar value{0}'.format(os.linesep) self.run_contents_pillar(pillar_value, expected=pillar_value) @@ -167,7 +167,9 @@ class TestFileState(TestCase, LoaderModuleMockMixin): with patch.dict(filestate.__salt__, {'config.manage_mode': mock_t, 'file.user_to_uid': mock_empty, - 'file.group_to_gid': mock_empty}): + 'file.group_to_gid': mock_empty, + 'user.info': mock_empty, + 'user.current': mock_user}): comt = ('User {0} does not exist. Group {1} does not exist.'.format(user, group)) ret.update({'comment': comt, 'name': name}) self.assertDictEqual(filestate.symlink(name, target, user=user, @@ -176,7 +178,9 @@ class TestFileState(TestCase, LoaderModuleMockMixin): with patch.dict(filestate.__salt__, {'config.manage_mode': mock_t, 'file.user_to_uid': mock_uid, 'file.group_to_gid': mock_gid, - 'file.is_link': mock_f}): + 'file.is_link': mock_f, + 'user.info': mock_empty, + 'user.current': mock_user}): with patch.dict(filestate.__opts__, {'test': True}): with patch.object(os.path, 'exists', mock_f): comt = ('Symlink {0} to {1}' @@ -191,7 +195,9 @@ class TestFileState(TestCase, LoaderModuleMockMixin): with patch.dict(filestate.__salt__, {'config.manage_mode': mock_t, 'file.user_to_uid': mock_uid, 'file.group_to_gid': mock_gid, - 'file.is_link': mock_f}): + 'file.is_link': mock_f, + 'user.info': mock_empty, + 'user.current': mock_user}): with patch.dict(filestate.__opts__, {'test': False}): with patch.object(os.path, 'isdir', mock_f): with patch.object(os.path, 'exists', mock_f): @@ -207,7 +213,9 @@ class TestFileState(TestCase, LoaderModuleMockMixin): 'file.user_to_uid': mock_uid, 'file.group_to_gid': mock_gid, 
'file.is_link': mock_t, - 'file.readlink': mock_target}): + 'file.readlink': mock_target, + 'user.info': mock_empty, + 'user.current': mock_user}): with patch.dict(filestate.__opts__, {'test': False}): with patch.object(os.path, 'isdir', mock_t): with patch.object(salt.states.file, '_check_symlink_ownership', mock_t): @@ -224,7 +232,9 @@ class TestFileState(TestCase, LoaderModuleMockMixin): 'file.user_to_uid': mock_uid, 'file.group_to_gid': mock_gid, 'file.is_link': mock_f, - 'file.readlink': mock_target}): + 'file.readlink': mock_target, + 'user.info': mock_empty, + 'user.current': mock_user}): with patch.dict(filestate.__opts__, {'test': False}): with patch.object(os.path, 'isdir', mock_t): with patch.object(os.path, 'exists', mock_f): @@ -243,7 +253,9 @@ class TestFileState(TestCase, LoaderModuleMockMixin): 'file.user_to_uid': mock_uid, 'file.group_to_gid': mock_gid, 'file.is_link': mock_f, - 'file.readlink': mock_target}): + 'file.readlink': mock_target, + 'user.info': mock_empty, + 'user.current': mock_user}): with patch.dict(filestate.__opts__, {'test': False}): with patch.object(os.path, 'isdir', mock_t): with patch.object(os.path, 'exists', mock_f): @@ -538,7 +550,10 @@ class TestFileState(TestCase, LoaderModuleMockMixin): 'G12', 'G12', 'G12', 'G12', 'G12']) mock_if = MagicMock(side_effect=[True, False, False, False, False, False, False, False]) - mock_ret = MagicMock(return_value=(ret, None)) + if salt.utils.is_windows(): + mock_ret = MagicMock(return_value=ret) + else: + mock_ret = MagicMock(return_value=(ret, None)) mock_dict = MagicMock(return_value={}) mock_cp = MagicMock(side_effect=[Exception, True]) mock_ex = MagicMock(side_effect=[Exception, {'changes': {name: name}}, @@ -573,8 +588,14 @@ class TestFileState(TestCase, LoaderModuleMockMixin): self.assertDictEqual(filestate.managed(name, create=False), ret) - comt = ('User salt is not available Group saltstack' - ' is not available') + # Group argument is ignored on Windows systems. 
Group is set to + # user + if salt.utils.is_windows(): + comt = ('User salt is not available Group salt' + ' is not available') + else: + comt = ('User salt is not available Group saltstack' + ' is not available') ret.update({'comment': comt, 'result': False}) self.assertDictEqual(filestate.managed(name, user=user, group=group), ret) @@ -711,17 +732,28 @@ class TestFileState(TestCase, LoaderModuleMockMixin): mock_t = MagicMock(return_value=True) mock_f = MagicMock(return_value=False) - mock_perms = MagicMock(return_value=(ret, '')) + if salt.utils.is_windows(): + mock_perms = MagicMock(return_value=ret) + else: + mock_perms = MagicMock(return_value=(ret, '')) mock_uid = MagicMock(side_effect=['', 'U12', 'U12', 'U12', 'U12', 'U12', 'U12', 'U12', 'U12', 'U12', 'U12']) mock_gid = MagicMock(side_effect=['', 'G12', 'G12', 'G12', 'G12', 'G12', 'G12', 'G12', 'G12', 'G12', 'G12']) + mock_check = MagicMock(return_value=( + None, + 'The directory "{0}" will be changed'.format(name), + {'directory': 'new'})) + mock_error = CommandExecutionError with patch.dict(filestate.__salt__, {'config.manage_mode': mock_t, 'file.user_to_uid': mock_uid, 'file.group_to_gid': mock_gid, 'file.stats': mock_f, 'file.check_perms': mock_perms, - 'file.mkdir': mock_t}): + 'file.mkdir': mock_t}), \ + patch('salt.utils.win_dacl.get_sid', mock_error), \ + patch('os.path.isdir', mock_t), \ + patch('salt.states.file._check_directory_win', mock_check): if salt.utils.is_windows(): comt = ('User salt is not available Group salt' ' is not available') @@ -729,8 +761,8 @@ class TestFileState(TestCase, LoaderModuleMockMixin): comt = ('User salt is not available Group saltstack' ' is not available') ret.update({'comment': comt, 'name': name}) - self.assertDictEqual(filestate.directory(name, user=user, - group=group), ret) + self.assertDictEqual( + filestate.directory(name, user=user, group=group), ret) with patch.object(os.path, 'isabs', mock_f): comt = ('Specified file {0} is not an absolute path' @@ -771,9 
+803,19 @@ class TestFileState(TestCase, LoaderModuleMockMixin): with patch.object(os.path, 'isfile', mock_f): with patch.dict(filestate.__opts__, {'test': True}): - comt = ('The following files will be changed:\n{0}:' - ' directory - new\n'.format(name)) - ret.update({'comment': comt, 'result': None, 'pchanges': {'/etc/grub.conf': {'directory': 'new'}}}) + if salt.utils.is_windows(): + comt = 'The directory "{0}" will be changed' \ + ''.format(name) + p_chg = {'directory': 'new'} + else: + comt = ('The following files will be changed:\n{0}:' + ' directory - new\n'.format(name)) + p_chg = {'/etc/grub.conf': {'directory': 'new'}} + ret.update({ + 'comment': comt, + 'result': None, + 'pchanges': p_chg + }) self.assertDictEqual(filestate.directory(name, user=user, group=group), @@ -845,8 +887,14 @@ class TestFileState(TestCase, LoaderModuleMockMixin): 'file.source_list': mock_lst, 'cp.list_master_dirs': mock_emt, 'cp.list_master': mock_l}): - comt = ('User salt is not available Group saltstack' - ' is not available') + + # Group argument is ignored on Windows systems. 
Group is set to user + if salt.utils.is_windows(): + comt = ('User salt is not available Group salt' + ' is not available') + else: + comt = ('User salt is not available Group saltstack' + ' is not available') ret.update({'comment': comt}) self.assertDictEqual(filestate.recurse(name, source, user=user, group=group), ret) @@ -954,7 +1002,8 @@ class TestFileState(TestCase, LoaderModuleMockMixin): ret.update({'comment': comt, 'name': name}) self.assertDictEqual(filestate.blockreplace(name), ret) - with patch.object(os.path, 'isabs', mock_t): + with patch.object(os.path, 'isabs', mock_t), \ + patch.object(os.path, 'exists', mock_t): with patch.dict(filestate.__salt__, {'file.blockreplace': mock_t}): with patch.dict(filestate.__opts__, {'test': True}): comt = ('Changes would be made') @@ -1312,8 +1361,15 @@ class TestFileState(TestCase, LoaderModuleMockMixin): 'file.get_group': mock_grp, 'file.get_mode': mock_grp, 'file.check_perms': mock_t}): - comt = ('User salt is not available Group ' - 'saltstack is not available') + + # Group argument is ignored on Windows systems. 
Group is set + # to user + if salt.utils.is_windows(): + comt = ('User salt is not available Group salt' + ' is not available') + else: + comt = ('User salt is not available Group saltstack' + ' is not available') ret.update({'comment': comt, 'result': False}) self.assertDictEqual(filestate.copy(name, source, user=user, group=group), ret) diff --git a/tests/unit/states/test_module.py b/tests/unit/states/test_module.py index 54f3768d37..4588e20ca8 100644 --- a/tests/unit/states/test_module.py +++ b/tests/unit/states/test_module.py @@ -122,7 +122,7 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin): with patch.dict(module.__salt__, {CMD: _mocked_func_named}): with patch.dict(module.__opts__, {'use_superseded': ['module.run']}): ret = module.run(**{CMD: None}) - assert ret['comment'] == "'{0}' failed: Missing arguments: name".format(CMD) + assert ret['comment'] == "'{0}' failed: Function expects 1 parameters, got only 0".format(CMD) def test_run_correct_arg(self): ''' @@ -131,7 +131,7 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin): ''' with patch.dict(module.__salt__, {CMD: _mocked_func_named}): with patch.dict(module.__opts__, {'use_superseded': ['module.run']}): - ret = module.run(**{CMD: [{'name': 'Fred'}]}) + ret = module.run(**{CMD: ['Fred']}) assert ret['comment'] == '{0}: Success'.format(CMD) assert ret['result'] diff --git a/tests/unit/states/test_selinux.py b/tests/unit/states/test_selinux.py index 36474b95c4..f6ce3bfb8f 100644 --- a/tests/unit/states/test_selinux.py +++ b/tests/unit/states/test_selinux.py @@ -118,9 +118,11 @@ class SelinuxTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(selinux.__opts__, {'test': False}): comt = ('Boolean samba_create_home_dirs has been set to on') ret.update({'comment': comt, 'result': True}) + ret.update({'changes': {'State': {'old': 'off', 'new': 'on'}}}) self.assertDictEqual(selinux.boolean(name, value), ret) comt = ('Failed to set the boolean ' 'samba_create_home_dirs to on') - 
ret.update({'comment': comt, 'result': True}) + ret.update({'comment': comt, 'result': False}) + ret.update({'changes': {}}) self.assertDictEqual(selinux.boolean(name, value), ret) diff --git a/tests/unit/states/test_winrepo.py b/tests/unit/states/test_winrepo.py index a702bbd2de..d5e4321620 100644 --- a/tests/unit/states/test_winrepo.py +++ b/tests/unit/states/test_winrepo.py @@ -67,9 +67,8 @@ class WinrepoTestCase(TestCase, LoaderModuleMockMixin): 'changes': {}, 'result': False, 'comment': ''} - ret.update({'comment': - '{file_roots}/win/repo is ' - 'missing'.format(file_roots=BASE_FILE_ROOTS_DIR)}) + ret.update({'comment': '{0} is missing'.format( + os.sep.join([BASE_FILE_ROOTS_DIR, 'win', 'repo']))}) self.assertDictEqual(winrepo.genrepo('salt'), ret) mock = MagicMock(return_value={'winrepo_dir': 'salt', diff --git a/tests/unit/templates/test_jinja.py b/tests/unit/templates/test_jinja.py index 471d626540..7a96608825 100644 --- a/tests/unit/templates/test_jinja.py +++ b/tests/unit/templates/test_jinja.py @@ -486,7 +486,7 @@ class TestCustomExtensions(TestCase): env = Environment(extensions=[SerializerExtension]) if six.PY3: rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset).strip("'{}").split("', '") - self.assertEqual(rendered, list(unique)) + self.assertEqual(sorted(rendered), sorted(list(unique))) else: rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset) self.assertEqual(rendered, u"{0}".format(unique)) @@ -497,7 +497,7 @@ class TestCustomExtensions(TestCase): env = Environment(extensions=[SerializerExtension]) if six.PY3: rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset).strip("'{}").split("', '") - self.assertEqual(rendered, list(unique)) + self.assertEqual(sorted(rendered), sorted(list(unique))) else: rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset) self.assertEqual(rendered, u"{0}".format(unique)) diff --git a/tests/unit/test_crypt.py 
b/tests/unit/test_crypt.py index a26b7583f8..e5713fb641 100644 --- a/tests/unit/test_crypt.py +++ b/tests/unit/test_crypt.py @@ -88,13 +88,13 @@ SIG = ( class CryptTestCase(TestCase): def test_gen_keys(self): - with patch.multiple(os, umask=MagicMock(), chmod=MagicMock(), chown=MagicMock, + with patch.multiple(os, umask=MagicMock(), chmod=MagicMock(), access=MagicMock(return_value=True)): with patch('salt.utils.fopen', mock_open()): - open_priv_wb = call('/keydir/keyname.pem', 'wb+') - open_pub_wb = call('/keydir/keyname.pub', 'wb+') + open_priv_wb = call('/keydir{0}keyname.pem'.format(os.sep), 'wb+') + open_pub_wb = call('/keydir{0}keyname.pub'.format(os.sep), 'wb+') with patch('os.path.isfile', return_value=True): - self.assertEqual(crypt.gen_keys('/keydir', 'keyname', 2048), '/keydir/keyname.pem') + self.assertEqual(crypt.gen_keys('/keydir', 'keyname', 2048), '/keydir{0}keyname.pem'.format(os.sep)) self.assertNotIn(open_priv_wb, salt.utils.fopen.mock_calls) self.assertNotIn(open_pub_wb, salt.utils.fopen.mock_calls) with patch('os.path.isfile', return_value=False): diff --git a/tests/unit/test_doc.py b/tests/unit/test_doc.py index 336833a46c..52311d2f9e 100644 --- a/tests/unit/test_doc.py +++ b/tests/unit/test_doc.py @@ -6,6 +6,7 @@ # Import Python libs from __future__ import absolute_import +import os # Import Salt Testing libs from tests.support.unit import TestCase @@ -13,6 +14,7 @@ from tests.support.unit import TestCase # Import Salt libs import tests.integration as integration import salt.modules.cmdmod +import salt.utils class DocTestCase(TestCase): @@ -32,8 +34,15 @@ class DocTestCase(TestCase): https://github.com/saltstack/salt/issues/12788 ''' salt_dir = integration.CODE_DIR - salt_dir += '/' - cmd = 'grep -r :doc: ' + salt_dir + + if salt.utils.is_windows(): + # No grep in Windows, use findstr + # findstr in windows doesn't prepend 'Binary` to binary files, so + # use the '/P' switch to skip files with unprintable characters + cmd = 'findstr /C:":doc:" 
/S /P {0}\\*'.format(salt_dir) + else: + salt_dir += '/' + cmd = 'grep -r :doc: ' + salt_dir grep_call = salt.modules.cmdmod.run_stdout(cmd=cmd).split('\n') @@ -43,25 +52,30 @@ class DocTestCase(TestCase): if line.startswith('Binary'): continue - key, val = line.split(':', 1) + if salt.utils.is_windows(): + # Need the space after the colon so it doesn't split the drive + # letter + key, val = line.split(': ', 1) + else: + key, val = line.split(':', 1) # Don't test man pages, this file, # the page that documents to not use ":doc:", or # the doc/conf.py file if 'man' in key \ or key.endswith('test_doc.py') \ - or key.endswith('doc/conf.py') \ - or key.endswith('/conventions/documentation.rst') \ - or key.endswith('doc/topics/releases/2016.11.2.rst') \ - or key.endswith('doc/topics/releases/2016.11.3.rst') \ - or key.endswith('doc/topics/releases/2016.3.5.rst'): + or key.endswith(os.sep.join(['doc', 'conf.py'])) \ + or key.endswith(os.sep.join(['conventions', 'documentation.rst'])) \ + or key.endswith(os.sep.join(['doc', 'topics', 'releases', '2016.11.2.rst'])) \ + or key.endswith(os.sep.join(['doc', 'topics', 'releases', '2016.11.3.rst'])) \ + or key.endswith(os.sep.join(['doc', 'topics', 'releases', '2016.3.5.rst'])): continue # Set up test return dict if test_ret.get(key) is None: - test_ret[key] = [val.lstrip()] + test_ret[key] = [val.strip()] else: - test_ret[key].append(val.lstrip()) + test_ret[key].append(val.strip()) # Allow test results to show files with :doc: ref, rather than truncating self.maxDiff = None diff --git a/tests/unit/test_fileclient.py b/tests/unit/test_fileclient.py index 077630693a..b6bd2207e0 100644 --- a/tests/unit/test_fileclient.py +++ b/tests/unit/test_fileclient.py @@ -6,6 +6,7 @@ # Import Python libs from __future__ import absolute_import import errno +import os # Import Salt Testing libs from tests.support.mock import patch, Mock @@ -38,7 +39,7 @@ class FileclientTestCase(TestCase): for exists in range(2): with patch('os.makedirs', 
self._fake_makedir()): with Client(self.opts)._cache_loc('testfile') as c_ref_itr: - assert c_ref_itr == '/__test__/files/base/testfile' + assert c_ref_itr == os.sep + os.sep.join(['__test__', 'files', 'base', 'testfile']) def test_cache_raises_exception_on_non_eexist_ioerror(self): ''' diff --git a/tests/unit/test_stateconf.py b/tests/unit/test_stateconf.py index 5396d0dac0..9edb7d1eca 100644 --- a/tests/unit/test_stateconf.py +++ b/tests/unit/test_stateconf.py @@ -102,8 +102,9 @@ test: - name: echo sls_dir={{sls_dir}} - cwd: / ''', sls='path.to.sls') - self.assertEqual(result['test']['cmd.run'][0]['name'], - 'echo sls_dir=path/to') + self.assertEqual( + result['test']['cmd.run'][0]['name'], + 'echo sls_dir=path{0}to'.format(os.sep)) def test_states_declared_with_shorthand_no_args(self): result = self._render_sls(''' diff --git a/tests/unit/test_test_module_names.py b/tests/unit/test_test_module_names.py index a7b2bf94ad..485f737685 100644 --- a/tests/unit/test_test_module_names.py +++ b/tests/unit/test_test_module_names.py @@ -13,33 +13,33 @@ from tests.support.unit import TestCase from tests.support.paths import CODE_DIR EXCLUDED_DIRS = [ - 'tests/pkg', - 'tests/perf', - 'tests/support', - 'tests/unit/utils/cache_mods', - 'tests/unit/modules/inspectlib', - 'tests/unit/modules/zypp/', - 'tests/unit/templates/files', - 'tests/integration/files/', - 'tests/integration/cloud/helpers', + os.path.join('tests', 'pkg'), + os.path.join('tests', 'perf'), + os.path.join('tests', 'support'), + os.path.join('tests', 'unit', 'utils', 'cache_mods'), + os.path.join('tests', 'unit', 'modules', 'inspectlib'), + os.path.join('tests', 'unit', 'modules', 'zypp'), + os.path.join('tests', 'unit', 'templates', 'files'), + os.path.join('tests', 'integration', 'files'), + os.path.join('tests', 'integration', 'cloud', 'helpers'), ] EXCLUDED_FILES = [ - 'tests/eventlisten.py', - 'tests/buildpackage.py', - 'tests/saltsh.py', - 'tests/minionswarm.py', - 'tests/wheeltest.py', - 
'tests/runtests.py', - 'tests/jenkins.py', - 'tests/salt-tcpdump.py', - 'tests/conftest.py', - 'tests/packdump.py', - 'tests/consist.py', - 'tests/modparser.py', - 'tests/committer_parser.py', - 'tests/zypp_plugin.py', - 'tests/unit/transport/mixins.py', - 'tests/integration/utils/testprogram.py', + os.path.join('tests', 'eventlisten.py'), + os.path.join('tests', 'buildpackage.py'), + os.path.join('tests', 'saltsh.py'), + os.path.join('tests', 'minionswarm.py'), + os.path.join('tests', 'wheeltest.py'), + os.path.join('tests', 'runtests.py'), + os.path.join('tests', 'jenkins.py'), + os.path.join('tests', 'salt-tcpdump.py'), + os.path.join('tests', 'conftest.py'), + os.path.join('tests', 'packdump.py'), + os.path.join('tests', 'consist.py'), + os.path.join('tests', 'modparser.py'), + os.path.join('tests', 'committer_parser.py'), + os.path.join('tests', 'zypp_plugin.py'), + os.path.join('tests', 'unit', 'transport', 'mixins.py'), + os.path.join('tests', 'integration', 'utils', 'testprogram.py'), ] diff --git a/tests/unit/utils/test_decorators.py b/tests/unit/utils/test_decorators.py index a729ec024c..35ea6da502 100644 --- a/tests/unit/utils/test_decorators.py +++ b/tests/unit/utils/test_decorators.py @@ -59,6 +59,7 @@ class DecoratorsTest(TestCase): self.globs = { '__virtualname__': 'test', '__opts__': {}, + '__pillar__': {}, 'old_function': self.old_function, 'new_function': self.new_function, '_new_function': self._new_function, @@ -149,6 +150,23 @@ class DecoratorsTest(TestCase): ['The function "test.new_function" is using its deprecated ' 'version and will expire in version "Beryllium".']) + def test_with_deprecated_notfound_in_pillar(self): + ''' + Test with_deprecated should raise an exception, if a same name + function with the "_" prefix not implemented. 
+ + :return: + ''' + del self.globs['_new_function'] + self.globs['__pillar__']['use_deprecated'] = ['test.new_function'] + depr = decorators.with_deprecated(self.globs, "Beryllium") + depr._curr_version = self._mk_version("Helium")[1] + with self.assertRaises(CommandExecutionError): + depr(self.new_function)() + self.assertEqual(self.messages, + ['The function "test.new_function" is using its deprecated ' + 'version and will expire in version "Beryllium".']) + def test_with_deprecated_found(self): ''' Test with_deprecated should not raise an exception, if a same name @@ -166,6 +184,23 @@ class DecoratorsTest(TestCase): 'and will expire in version "Beryllium".'] self.assertEqual(self.messages, log_msg) + def test_with_deprecated_found_in_pillar(self): + ''' + Test with_deprecated should not raise an exception, if a same name + function with the "_" prefix is implemented, but should use + an old version instead, if "use_deprecated" is requested. + + :return: + ''' + self.globs['__pillar__']['use_deprecated'] = ['test.new_function'] + self.globs['_new_function'] = self.old_function + depr = decorators.with_deprecated(self.globs, "Beryllium") + depr._curr_version = self._mk_version("Helium")[1] + self.assertEqual(depr(self.new_function)(), self.old_function()) + log_msg = ['The function "test.new_function" is using its deprecated version ' + 'and will expire in version "Beryllium".'] + self.assertEqual(self.messages, log_msg) + def test_with_deprecated_found_eol(self): ''' Test with_deprecated should raise an exception, if a same name @@ -185,6 +220,25 @@ class DecoratorsTest(TestCase): 'is configured as its deprecated version. The lifetime of the function ' '"new_function" expired. Please use its successor "new_function" instead.']) + def test_with_deprecated_found_eol_in_pillar(self): + ''' + Test with_deprecated should raise an exception, if a same name + function with the "_" prefix is implemented, "use_deprecated" is requested + and EOL is reached. 
+ + :return: + ''' + self.globs['__pillar__']['use_deprecated'] = ['test.new_function'] + self.globs['_new_function'] = self.old_function + depr = decorators.with_deprecated(self.globs, "Helium") + depr._curr_version = self._mk_version("Beryllium")[1] + with self.assertRaises(CommandExecutionError): + depr(self.new_function)() + self.assertEqual(self.messages, + ['Although function "new_function" is called, an alias "new_function" ' + 'is configured as its deprecated version. The lifetime of the function ' + '"new_function" expired. Please use its successor "new_function" instead.']) + def test_with_deprecated_no_conf(self): ''' Test with_deprecated should not raise an exception, if a same name @@ -260,6 +314,19 @@ class DecoratorsTest(TestCase): assert depr(self.new_function)() == self.new_function() assert not self.messages + def test_with_deprecated_opt_in_use_superseded_in_pillar(self): + ''' + Test with_deprecated using opt-in policy, + where newer function is used as per configuration. + + :return: + ''' + self.globs['__pillar__']['use_superseded'] = ['test.new_function'] + depr = decorators.with_deprecated(self.globs, "Beryllium", policy=decorators._DeprecationDecorator.OPT_IN) + depr._curr_version = self._mk_version("Helium")[1] + assert depr(self.new_function)() == self.new_function() + assert not self.messages + def test_with_deprecated_opt_in_use_superseded_and_deprecated(self): ''' Test with_deprecated misconfiguration. @@ -272,3 +339,16 @@ class DecoratorsTest(TestCase): depr._curr_version = self._mk_version("Helium")[1] with self.assertRaises(SaltConfigurationError): assert depr(self.new_function)() == self.new_function() + + def test_with_deprecated_opt_in_use_superseded_and_deprecated_in_pillar(self): + ''' + Test with_deprecated misconfiguration. 
+ + :return: + ''' + self.globs['__pillar__']['use_deprecated'] = ['test.new_function'] + self.globs['__pillar__']['use_superseded'] = ['test.new_function'] + depr = decorators.with_deprecated(self.globs, "Beryllium") + depr._curr_version = self._mk_version("Helium")[1] + with self.assertRaises(SaltConfigurationError): + assert depr(self.new_function)() == self.new_function() diff --git a/tests/unit/utils/test_docker.py b/tests/unit/utils/test_docker.py index 5b0587f18c..5ee6edc236 100644 --- a/tests/unit/utils/test_docker.py +++ b/tests/unit/utils/test_docker.py @@ -820,7 +820,7 @@ class TranslateInputTestCase(TestCase): expected ) - @assert_stringlist + @assert_string def test_domainname(self): ''' Should be a list of strings or converted to one diff --git a/tests/unit/utils/test_find.py b/tests/unit/utils/test_find.py index 86af790b0e..a6a48fb473 100644 --- a/tests/unit/utils/test_find.py +++ b/tests/unit/utils/test_find.py @@ -13,7 +13,6 @@ from tests.support.unit import skipIf, TestCase from tests.support.paths import TMP # Import salt libs -import salt.ext.six as six import salt.utils import salt.utils.find @@ -121,19 +120,11 @@ class TestFind(TestCase): min_size, max_size = salt.utils.find._parse_size('+1m') self.assertEqual(min_size, 1048576) - # sys.maxint has been removed in Python3. Use maxsize instead. - if six.PY3: - self.assertEqual(max_size, sys.maxsize) - else: - self.assertEqual(max_size, sys.maxint) + self.assertEqual(max_size, sys.maxsize) min_size, max_size = salt.utils.find._parse_size('+1M') self.assertEqual(min_size, 1048576) - # sys.maxint has been removed in Python3. Use maxsize instead. 
- if six.PY3: - self.assertEqual(max_size, sys.maxsize) - else: - self.assertEqual(max_size, sys.maxint) + self.assertEqual(max_size, sys.maxsize) def test_option_requires(self): option = salt.utils.find.Option() @@ -217,7 +208,7 @@ class TestFind(TestCase): option = salt.utils.find.TypeOption('type', 's') self.assertEqual(option.match('', '', [stat.S_IFSOCK]), True) - @skipIf(sys.platform.startswith('win'), 'No /dev/null on Windows') + @skipIf(sys.platform.startswith('win'), 'pwd not available on Windows') def test_owner_option_requires(self): self.assertRaises( ValueError, salt.utils.find.OwnerOption, 'owner', 'notexist' @@ -226,7 +217,7 @@ class TestFind(TestCase): option = salt.utils.find.OwnerOption('owner', 'root') self.assertEqual(option.requires(), salt.utils.find._REQUIRES_STAT) - @skipIf(sys.platform.startswith('win'), 'No /dev/null on Windows') + @skipIf(sys.platform.startswith('win'), 'pwd not available on Windows') def test_owner_option_match(self): option = salt.utils.find.OwnerOption('owner', 'root') self.assertEqual(option.match('', '', [0] * 5), True) @@ -234,7 +225,7 @@ class TestFind(TestCase): option = salt.utils.find.OwnerOption('owner', '500') self.assertEqual(option.match('', '', [500] * 5), True) - @skipIf(sys.platform.startswith('win'), 'No /dev/null on Windows') + @skipIf(sys.platform.startswith('win'), 'grp not available on Windows') def test_group_option_requires(self): self.assertRaises( ValueError, salt.utils.find.GroupOption, 'group', 'notexist' @@ -247,7 +238,7 @@ class TestFind(TestCase): option = salt.utils.find.GroupOption('group', group_name) self.assertEqual(option.requires(), salt.utils.find._REQUIRES_STAT) - @skipIf(sys.platform.startswith('win'), 'No /dev/null on Windows') + @skipIf(sys.platform.startswith('win'), 'grp not available on Windows') def test_group_option_match(self): if sys.platform.startswith(('darwin', 'freebsd', 'openbsd')): group_name = 'wheel' @@ -412,7 +403,7 @@ class TestPrintOption(TestCase): 
option.execute('test_name', [0] * 9), [0, 'test_name'] ) - @skipIf(sys.platform.startswith('Windows'), "no /dev/null on windows") + @skipIf(sys.platform.startswith('win'), "pwd not available on Windows") def test_print_user(self): option = salt.utils.find.PrintOption('print', 'user') self.assertEqual(option.execute('', [0] * 10), 'root') @@ -420,7 +411,7 @@ class TestPrintOption(TestCase): option = salt.utils.find.PrintOption('print', 'user') self.assertEqual(option.execute('', [2 ** 31] * 10), 2 ** 31) - @skipIf(sys.platform.startswith('Windows'), "no /dev/null on windows") + @skipIf(sys.platform.startswith('win'), "grp not available on Windows") def test_print_group(self): option = salt.utils.find.PrintOption('print', 'group') if sys.platform.startswith(('darwin', 'freebsd', 'openbsd')): @@ -433,7 +424,7 @@ class TestPrintOption(TestCase): #option = salt.utils.find.PrintOption('print', 'group') #self.assertEqual(option.execute('', [2 ** 31] * 10), 2 ** 31) - @skipIf(sys.platform.startswith('Windows'), "no /dev/null on windows") + @skipIf(sys.platform.startswith('win'), "no /dev/null on windows") def test_print_md5(self): option = salt.utils.find.PrintOption('print', 'md5') self.assertEqual(option.execute('/dev/null', os.stat('/dev/null')), '') diff --git a/tests/unit/utils/test_gitfs.py b/tests/unit/utils/test_gitfs.py index c8942e4695..070a46fe75 100644 --- a/tests/unit/utils/test_gitfs.py +++ b/tests/unit/utils/test_gitfs.py @@ -66,7 +66,7 @@ class TestGitFSProvider(TestCase): ('git_pillar', salt.utils.gitfs.GitPillar), ('winrepo', salt.utils.gitfs.WinRepo)): key = '{0}_provider'.format(role_name) - for provider in salt.utils.gitfs.VALID_PROVIDERS: + for provider in salt.utils.gitfs.GIT_PROVIDERS: verify = 'verify_gitpython' mock1 = _get_mock(verify, provider) with patch.object(role_class, verify, mock1): diff --git a/tests/unit/utils/test_network.py b/tests/unit/utils/test_network.py index 16b88ec77d..4b5f88fe5b 100644 --- a/tests/unit/utils/test_network.py 
+++ b/tests/unit/utils/test_network.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Import Python libs from __future__ import absolute_import +import socket # Import Salt Testing libs from tests.support.unit import skipIf @@ -109,8 +110,31 @@ class NetworkTestCase(TestCase): changed. In these cases, we just need to update the IP address in the assertion. ''' - ret = network.host_to_ips('www.saltstack.com') - self.assertEqual(ret, ['104.197.168.128']) + def _side_effect(host, *args): + try: + return { + 'github.com': [ + (2, 1, 6, '', ('192.30.255.112', 0)), + (2, 1, 6, '', ('192.30.255.113', 0)), + ], + 'ipv6host.foo': [ + (socket.AF_INET6, 1, 6, '', ('2001:a71::1', 0, 0, 0)), + ], + }[host] + except KeyError: + raise socket.gaierror(-2, 'Name or service not known') + + getaddrinfo_mock = MagicMock(side_effect=_side_effect) + with patch.object(socket, 'getaddrinfo', getaddrinfo_mock): + # Test host that can be resolved + ret = network.host_to_ips('github.com') + self.assertEqual(ret, ['192.30.255.112', '192.30.255.113']) + # Test ipv6 + ret = network.host_to_ips('ipv6host.foo') + self.assertEqual(ret, ['2001:a71::1']) + # Test host that can't be resolved + ret = network.host_to_ips('someothersite.com') + self.assertEqual(ret, None) def test_generate_minion_id(self): self.assertTrue(network.generate_minion_id()) diff --git a/tests/unit/utils/test_url.py b/tests/unit/utils/test_url.py index 95caa0253e..af7efba323 100644 --- a/tests/unit/utils/test_url.py +++ b/tests/unit/utils/test_url.py @@ -38,6 +38,8 @@ class UrlTestCase(TestCase): ''' path = '?funny/path with {interesting|chars}' url = 'salt://' + path + if salt.utils.is_windows(): + path = '_funny/path with {interesting_chars}' self.assertEqual(salt.utils.url.parse(url), (path, None)) @@ -48,6 +50,8 @@ class UrlTestCase(TestCase): saltenv = 'ambience' path = '?funny/path&with {interesting|chars}' url = 'salt://' + path + '?saltenv=' + saltenv + if salt.utils.is_windows(): + path = '_funny/path&with 
{interesting_chars}' self.assertEqual(salt.utils.url.parse(url), (path, saltenv)) @@ -59,6 +63,8 @@ class UrlTestCase(TestCase): ''' path = '? interesting/&path.filetype' url = 'salt://' + path + if salt.utils.is_windows(): + url = 'salt://_ interesting/&path.filetype' self.assertEqual(salt.utils.url.create(path), url) @@ -68,6 +74,8 @@ class UrlTestCase(TestCase): ''' saltenv = 'raumklang' path = '? interesting/&path.filetype' + if salt.utils.is_windows(): + path = '_ interesting/&path.filetype' url = 'salt://' + path + '?saltenv=' + saltenv @@ -149,6 +157,8 @@ class UrlTestCase(TestCase): ''' path = 'dir/file.conf' escaped_path = '|' + path + if salt.utils.is_windows(): + escaped_path = path self.assertEqual(salt.utils.url.escape(path), escaped_path) @@ -167,6 +177,8 @@ class UrlTestCase(TestCase): path = 'dir/file.conf' url = 'salt://' + path escaped_url = 'salt://|' + path + if salt.utils.is_windows(): + escaped_url = url self.assertEqual(salt.utils.url.escape(url), escaped_url) diff --git a/tests/unit/utils/test_which.py b/tests/unit/utils/test_which.py index 9ab674791d..6bb4cf6e1a 100644 --- a/tests/unit/utils/test_which.py +++ b/tests/unit/utils/test_which.py @@ -44,18 +44,21 @@ class TestWhich(TestCase): # The second, iterating through $PATH, should also return False, # still checking for Linux False, + # We will now also return False once so we get a .EXE back from + # the function, see PATHEXT below. + False, # Lastly return True, this is the windows check. 
True ] # Let's patch os.environ to provide a custom PATH variable - with patch.dict(os.environ, {'PATH': '/bin'}): + with patch.dict(os.environ, {'PATH': '/bin', + 'PATHEXT': '.COM;.EXE;.BAT;.CMD'}): # Let's also patch is_windows to return True with patch('salt.utils.is_windows', lambda: True): with patch('os.path.isfile', lambda x: True): self.assertEqual( salt.utils.which('this-binary-exists-under-windows'), - # The returned path should return the .exe suffix - '/bin/this-binary-exists-under-windows.EXE' + os.path.join('/bin', 'this-binary-exists-under-windows.EXE') ) def test_missing_binary_in_windows(self): @@ -106,6 +109,5 @@ class TestWhich(TestCase): with patch('os.path.isfile', lambda x: True): self.assertEqual( salt.utils.which('this-binary-exists-under-windows'), - # The returned path should return the .exe suffix - '/bin/this-binary-exists-under-windows.CMD' + os.path.join('/bin', 'this-binary-exists-under-windows.CMD') ) diff --git a/tests/unit/utils/test_yamlloader.py b/tests/unit/utils/test_yamlloader.py index 69a2724f5d..11228ff7e0 100644 --- a/tests/unit/utils/test_yamlloader.py +++ b/tests/unit/utils/test_yamlloader.py @@ -5,6 +5,7 @@ # Import python libs from __future__ import absolute_import +import textwrap # Import Salt Libs from yaml.constructor import ConstructorError @@ -36,12 +37,11 @@ class YamlLoaderTestCase(TestCase): ''' Test parsing an ordinary path ''' - self.assertEqual( - self._render_yaml(b''' -p1: - - alpha - - beta'''), + self._render_yaml(textwrap.dedent('''\ + p1: + - alpha + - beta''')), {'p1': ['alpha', 'beta']} ) @@ -49,38 +49,37 @@ p1: ''' Test YAML anchors ''' - # Simple merge test self.assertEqual( - self._render_yaml(b''' -p1: &p1 - v1: alpha -p2: - <<: *p1 - v2: beta'''), + self._render_yaml(textwrap.dedent('''\ + p1: &p1 + v1: alpha + p2: + <<: *p1 + v2: beta''')), {'p1': {'v1': 'alpha'}, 'p2': {'v1': 'alpha', 'v2': 'beta'}} ) # Test that keys/nodes are overwritten self.assertEqual( - self._render_yaml(b''' -p1: &p1 - 
v1: alpha -p2: - <<: *p1 - v1: new_alpha'''), + self._render_yaml(textwrap.dedent('''\ + p1: &p1 + v1: alpha + p2: + <<: *p1 + v1: new_alpha''')), {'p1': {'v1': 'alpha'}, 'p2': {'v1': 'new_alpha'}} ) # Test merging of lists self.assertEqual( - self._render_yaml(b''' -p1: &p1 - v1: &v1 - - t1 - - t2 -p2: - v2: *v1'''), + self._render_yaml(textwrap.dedent('''\ + p1: &p1 + v1: &v1 + - t1 + - t2 + p2: + v2: *v1''')), {"p2": {"v2": ["t1", "t2"]}, "p1": {"v1": ["t1", "t2"]}} ) @@ -89,15 +88,27 @@ p2: Test that duplicates still throw an error ''' with self.assertRaises(ConstructorError): - self._render_yaml(b''' -p1: alpha -p1: beta''') + self._render_yaml(textwrap.dedent('''\ + p1: alpha + p1: beta''')) with self.assertRaises(ConstructorError): - self._render_yaml(b''' -p1: &p1 - v1: alpha -p2: - <<: *p1 - v2: beta - v2: betabeta''') + self._render_yaml(textwrap.dedent('''\ + p1: &p1 + v1: alpha + p2: + <<: *p1 + v2: beta + v2: betabeta''')) + + def test_yaml_with_unicode_literals(self): + ''' + Test proper loading of unicode literals + ''' + self.assertEqual( + self._render_yaml(textwrap.dedent('''\ + foo: + a: Д + b: {'a': u'\\u0414'}''')), + {'foo': {'a': u'\u0414', 'b': {'a': u'\u0414'}}} + ) diff --git a/tests/unit/utils/vmware_test/test_connection.py b/tests/unit/utils/vmware_test/test_connection.py index 812cb1231c..da5e5ae63f 100644 --- a/tests/unit/utils/vmware_test/test_connection.py +++ b/tests/unit/utils/vmware_test/test_connection.py @@ -40,6 +40,11 @@ if sys.version_info[:3] > (2, 7, 8): else: SSL_VALIDATION = False +if hasattr(ssl, '_create_unverified_context'): + ssl_context = 'ssl._create_unverified_context' +else: + ssl_context = 'ssl._create_stdlib_context' + # Get Logging Started log = logging.getLogger(__name__) @@ -337,14 +342,14 @@ class PrivateGetServiceInstanceTestCase(TestCase): @skipIf(not SSL_VALIDATION, 'SSL validation is not enabled') def test_second_attempt_successful_connection(self): with patch('ssl.SSLContext', MagicMock()), \ - 
patch('ssl._create_unverified_context', MagicMock()): + patch(ssl_context, MagicMock()): exc = vim.fault.HostConnectFault() exc.msg = '[SSL: CERTIFICATE_VERIFY_FAILED]' mock_sc = MagicMock(side_effect=[exc, None]) mock_ssl = MagicMock() with patch('salt.utils.vmware.SmartConnect', mock_sc): - with patch('ssl._create_unverified_context', + with patch(ssl_context, mock_ssl): salt.utils.vmware._get_service_instance( @@ -378,7 +383,7 @@ class PrivateGetServiceInstanceTestCase(TestCase): @skipIf(not SSL_VALIDATION, 'SSL validation is not enabled') def test_third_attempt_successful_connection(self): with patch('ssl.SSLContext', MagicMock()), \ - patch('ssl._create_unverified_context', MagicMock()): + patch(ssl_context, MagicMock()): exc = vim.fault.HostConnectFault() exc.msg = '[SSL: CERTIFICATE_VERIFY_FAILED]' exc2 = Exception('certificate verify failed') @@ -387,9 +392,7 @@ class PrivateGetServiceInstanceTestCase(TestCase): mock_ssl_context = MagicMock() with patch('salt.utils.vmware.SmartConnect', mock_sc): - with patch('ssl._create_unverified_context', - mock_ssl_unverif): - + with patch(ssl_context, mock_ssl_unverif): with patch('ssl.SSLContext', mock_ssl_context): salt.utils.vmware._get_service_instance( @@ -473,7 +476,7 @@ class PrivateGetServiceInstanceTestCase(TestCase): @skipIf(not SSL_VALIDATION, 'SSL validation is not enabled') def test_second_attempt_unsuccsessful_connection_default_error(self): with patch('ssl.SSLContext', MagicMock()), \ - patch('ssl._create_unverified_context', MagicMock()): + patch(ssl_context, MagicMock()): exc = vim.fault.HostConnectFault() exc.msg = '[SSL: CERTIFICATE_VERIFY_FAILED]' exc2 = Exception('Exception') @@ -498,7 +501,7 @@ class PrivateGetServiceInstanceTestCase(TestCase): @skipIf(not SSL_VALIDATION, 'SSL validation is not enabled') def test_second_attempt_unsuccsessful_connection_vim_fault(self): with patch('ssl.SSLContext', MagicMock()), \ - patch('ssl._create_unverified_context', MagicMock()): + patch(ssl_context, 
MagicMock()): exc = vim.fault.HostConnectFault() exc.msg = '[SSL: CERTIFICATE_VERIFY_FAILED]' exc2 = vim.fault.VimFault() @@ -523,7 +526,7 @@ class PrivateGetServiceInstanceTestCase(TestCase): @skipIf(not SSL_VALIDATION, 'SSL validation is not enabled') def test_third_attempt_unsuccessful_connection_detault_error(self): with patch('ssl.SSLContext', MagicMock()), \ - patch('ssl._create_unverified_context', MagicMock()): + patch(ssl_context, MagicMock()): exc = vim.fault.HostConnectFault() exc.msg = '[SSL: CERTIFICATE_VERIFY_FAILED]' exc2 = Exception('certificate verify failed') @@ -548,7 +551,7 @@ class PrivateGetServiceInstanceTestCase(TestCase): @skipIf(not SSL_VALIDATION, 'SSL validation is not enabled') def test_third_attempt_unsuccessful_connection_vim_fault(self): with patch('ssl.SSLContext', MagicMock()), \ - patch('ssl._create_unverified_context', MagicMock()): + patch(ssl_context, MagicMock()): exc = vim.fault.HostConnectFault() exc.msg = '[SSL: CERTIFICATE_VERIFY_FAILED]' exc2 = Exception('certificate verify failed')