diff --git a/.github/stale.yml b/.github/stale.yml index 4b858b51f7..cb260d8f5d 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -1,8 +1,8 @@ # Probot Stale configuration file # Number of days of inactivity before an issue becomes stale -# 1145 is approximately 3 years and 2 months -daysUntilStale: 1145 +# 1115 is approximately 3 years and 1 month +daysUntilStale: 1115 # Number of days of inactivity before a stale issue is closed daysUntilClose: 7 diff --git a/.mention-bot b/.mention-bot index 56be9ab9e6..b6ed4f2b24 100644 --- a/.mention-bot +++ b/.mention-bot @@ -1,4 +1,11 @@ { + "alwaysNotifyForPaths": [ + { + "name": "ryan-lane", + "files": ["salt/**/*boto*.py"], + "skipTeamPrs": false + } + ], "skipTitle": "Merge forward", "userBlacklist": ["cvrebert", "markusgattol", "olliewalsh"] } diff --git a/conf/cloud b/conf/cloud index 921cc04848..035cfea101 100644 --- a/conf/cloud +++ b/conf/cloud @@ -97,3 +97,14 @@ # #delete_sshkeys: False +# Whether or not to include grains information in the /etc/salt/minion file +# which is generated when the minion is provisioned. For example... +# grains: +# salt-cloud: +# driver: ec2 +# provider: my_ec2:ec2 +# profile: micro_ec2 +# +# Default: 'True' +# +#enable_cloud_grains: 'True' diff --git a/conf/master b/conf/master index 9ff6f3831e..3aa9742651 100644 --- a/conf/master +++ b/conf/master @@ -301,6 +301,22 @@ ##### Security settings ##### ########################################## +# Enable passphrase protection of Master private key. Although a string value +# is acceptable; passwords should be stored in an external vaulting mechanism +# and retrieved via sdb. See https://docs.saltstack.com/en/latest/topics/sdb/. +# Passphrase protection is off by default but an example of an sdb profile and +# query is as follows. +# masterkeyring: +# driver: keyring +# service: system +# +# key_pass: sdb://masterkeyring/key_pass + +# Enable passphrase protection of the Master signing_key. This only applies if +# master_sign_pubkey is set to True. This is disabled by default. +# master_sign_pubkey: True +# signing_key_pass: sdb://masterkeyring/signing_pass + # Enable "open mode", this mode still maintains encryption, but turns off # authentication, this is only intended for highly secure environments or for # the situation where your keys end up in a bad state. If you run in open mode @@ -311,6 +327,9 @@ # public keys from the minions. Note that this is insecure. #auto_accept: False +# The size of key that should be generated when creating new keys. +#keysize: 2048 + # Time in minutes that an incoming public key with a matching name found in # pki_dir/minion_autosign/keyid is automatically accepted. Expired autosign keys # are removed when the master checks the minion_autosign directory. @@ -968,6 +987,21 @@ #pillar_cache_backend: disk +###### Reactor Settings ##### +########################################### +# Define a salt reactor. See https://docs.saltstack.com/en/latest/topics/reactor/ +#reactor: [] + +#Set the TTL for the cache of the reactor configuration. +#reactor_refresh_interval: 60 + +#Configure the number of workers for the runner/wheel in the reactor. +#reactor_worker_threads: 10 + +#Define the queue size for workers in the reactor. 
+#reactor_worker_hwm: 10000 + + ##### Syndic settings ##### ########################################## # The Salt syndic is used to pass commands through a master from a higher diff --git a/conf/minion b/conf/minion index b1122c9e52..6cae043295 100644 --- a/conf/minion +++ b/conf/minion @@ -620,6 +620,9 @@ # you do so at your own risk! #open_mode: False +# The size of key that should be generated when creating new keys. +#keysize: 2048 + # Enable permissive access to the salt keys. This allows you to run the # master or minion as root, but have a non-root group be given access to # your pki_dir. To make the access explicit, root must belong to the group @@ -661,6 +664,21 @@ # ssl_version: PROTOCOL_TLSv1_2 +###### Reactor Settings ##### +########################################### +# Define a salt reactor. See https://docs.saltstack.com/en/latest/topics/reactor/ +#reactor: [] + +#Set the TTL for the cache of the reactor configuration. +#reactor_refresh_interval: 60 + +#Configure the number of workers for the runner/wheel in the reactor. +#reactor_worker_threads: 10 + +#Define the queue size for workers in the reactor. +#reactor_worker_hwm: 10000 + + ###### Thread settings ##### ########################################### # Disable multiprocessing support, by default when a minion receives a diff --git a/doc/conf.py b/doc/conf.py index 5961ffa299..f7c329db11 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -245,8 +245,8 @@ on_saltstack = 'SALT_ON_SALTSTACK' in os.environ project = 'Salt' version = salt.version.__version__ -latest_release = '2017.7.0' # latest release -previous_release = '2016.11.6' # latest release from previous branch +latest_release = '2017.7.1' # latest release +previous_release = '2016.11.7' # latest release from previous branch previous_release_dir = '2016.11' # path on web server for previous branch next_release = '' # next release next_release_dir = '' # path on web server for next release branch @@ -320,11 +320,21 @@ rst_prolog = """\ .. _`salt-packagers`: https://groups.google.com/forum/#!forum/salt-packagers .. |windownload| raw:: html -

-     x86: Salt-Minion-{release}-x86-Setup.exe
-      | md5
+
+     Python2 x86: Salt-Minion-{release}-x86-Setup.exe
+      | md5
+
+     Python2 AMD64: Salt-Minion-{release}-AMD64-Setup.exe
+      | md5
+
+     Python3 x86: Salt-Minion-{release}-x86-Setup.exe
+      | md5
+
+     Python3 AMD64: Salt-Minion-{release}-AMD64-Setup.exe
+      | md5
+
-     AMD64: Salt-Minion-{release}-AMD64-Setup.exe
-      | md5

.. |osxdownload| raw:: html diff --git a/doc/faq.rst b/doc/faq.rst index 46c332966d..18674b370b 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -321,7 +321,27 @@ Restart using states ******************** Now we can apply the workaround to restart the Minion in reliable way. -The following example works on both UNIX-like and Windows operating systems: +The following example works on UNIX-like operating systems: + +.. code-block:: jinja + + {%- if grains['os'] != 'Windows' % + Restart Salt Minion: + cmd.run: + - name: 'salt-call --local service.restart salt-minion' + - bg: True + - onchanges: + - pkg: Upgrade Salt Minion + {%- endif %} + +Note that restarting the ``salt-minion`` service on Windows operating systems is +not always necessary when performing an upgrade. The installer stops the +``salt-minion`` service, removes it, deletes the contents of the ``\salt\bin`` +directory, installs the new code, re-creates the ``salt-minion`` service, and +starts it (by default). The restart step **would** be necessary during the +upgrade process, however, if the minion config was edited after the upgrade or +installation. If a minion restart is necessary, the state above can be edited +as follows: .. code-block:: jinja @@ -337,8 +357,8 @@ The following example works on both UNIX-like and Windows operating systems: - pkg: Upgrade Salt Minion However, it requires more advanced tricks to upgrade from legacy version of -Salt (before ``2016.3.0``), where executing commands in the background is not -supported: +Salt (before ``2016.3.0``) on UNIX-like operating systems, where executing +commands in the background is not supported: .. code-block:: jinja diff --git a/doc/ref/auth/all/index.rst b/doc/ref/auth/all/index.rst index f12d6f28bf..d421efbbe7 100644 --- a/doc/ref/auth/all/index.rst +++ b/doc/ref/auth/all/index.rst @@ -19,5 +19,4 @@ auth modules pki rest sharedsecret - stormpath yubico diff --git a/doc/ref/auth/all/salt.auth.stormpath.rst b/doc/ref/auth/all/salt.auth.stormpath.rst deleted file mode 100644 index ea5e365ffa..0000000000 --- a/doc/ref/auth/all/salt.auth.stormpath.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================== -salt.auth.stormpath -=================== - -.. automodule:: salt.auth.stormpath - :members: \ No newline at end of file diff --git a/doc/ref/cli/_includes/output-options.rst b/doc/ref/cli/_includes/output-options.rst index 0128a1cfb0..c679269e76 100644 --- a/doc/ref/cli/_includes/output-options.rst +++ b/doc/ref/cli/_includes/output-options.rst @@ -33,6 +33,10 @@ Output Options Write the output to the specified file. +.. option:: --out-file-append, --output-file-append + + Append the output to the specified file. + .. option:: --no-color Disable all colored output @@ -46,3 +50,14 @@ Output Options ``green`` denotes success, ``red`` denotes failure, ``blue`` denotes changes and success and ``yellow`` denotes a expected future change in configuration. + +.. option:: --state-output=STATE_OUTPUT, --state_output=STATE_OUTPUT + + Override the configured state_output value for minion + output. One of 'full', 'terse', 'mixed', 'changes' or + 'filter'. Default: 'none'. + +.. option:: --state-verbose=STATE_VERBOSE, --state_verbose=STATE_VERBOSE + + Override the configured state_verbose value for minion + output. Set to True or False. Default: none. diff --git a/doc/ref/cli/salt.rst b/doc/ref/cli/salt.rst index f2fb693a1c..6eaa9e56ff 100644 --- a/doc/ref/cli/salt.rst +++ b/doc/ref/cli/salt.rst @@ -81,7 +81,7 @@ Options Pass in an external authentication medium to validate against. 
The credentials will be prompted for. The options are `auto`, - `keystone`, `ldap`, `pam`, and `stormpath`. Can be used with the -T + `keystone`, `ldap`, and `pam`. Can be used with the -T option. .. option:: -T, --make-token diff --git a/doc/ref/clouds/all/salt.cloud.clouds.oneandone.rst b/doc/ref/clouds/all/salt.cloud.clouds.oneandone.rst new file mode 100644 index 0000000000..4355d7570a --- /dev/null +++ b/doc/ref/clouds/all/salt.cloud.clouds.oneandone.rst @@ -0,0 +1,6 @@ +=========================== +salt.cloud.clouds.oneandone +=========================== + +.. automodule:: salt.cloud.clouds.oneandone + :members: diff --git a/doc/ref/configuration/master.rst b/doc/ref/configuration/master.rst index d91f2533e4..03b54ac42b 100644 --- a/doc/ref/configuration/master.rst +++ b/doc/ref/configuration/master.rst @@ -94,64 +94,6 @@ The user to run the Salt processes user: root -.. conf_master:: max_open_files - -``max_open_files`` ------------------- - -Default: ``100000`` - -Each minion connecting to the master uses AT LEAST one file descriptor, the -master subscription connection. If enough minions connect you might start -seeing on the console(and then salt-master crashes): - -.. code-block:: bash - - Too many open files (tcp_listener.cpp:335) - Aborted (core dumped) - -.. code-block:: yaml - - max_open_files: 100000 - -By default this value will be the one of `ulimit -Hn`, i.e., the hard limit for -max open files. - -To set a different value than the default one, uncomment, and configure this -setting. Remember that this value CANNOT be higher than the hard limit. Raising -the hard limit depends on the OS and/or distribution, a good way to find the -limit is to search the internet for something like this: - -.. code-block:: text - - raise max open files hard limit debian - -.. conf_master:: worker_threads - -``worker_threads`` ------------------- - -Default: ``5`` - -The number of threads to start for receiving commands and replies from minions. -If minions are stalling on replies because you have many minions, raise the -worker_threads value. - -Worker threads should not be put below 3 when using the peer system, but can -drop down to 1 worker otherwise. - -.. note:: - When the master daemon starts, it is expected behaviour to see - multiple salt-master processes, even if 'worker_threads' is set to '1'. At - a minimum, a controlling process will start along with a Publisher, an - EventPublisher, and a number of MWorker processes will be started. The - number of MWorker processes is tuneable by the 'worker_threads' - configuration value while the others are not. - -.. code-block:: yaml - - worker_threads: 5 - .. conf_master:: ret_port ``ret_port`` @@ -946,6 +888,74 @@ to socket concurrently. sock_pool_size: 15 +.. conf_master:: ipc_mode + +``ipc_mode`` +------------ + +Default: ``ipc`` + +The ipc strategy. (i.e., sockets versus tcp, etc.) Windows platforms lack +POSIX IPC and must rely on TCP based inter-process communications. ``ipc_mode`` +is set to ``tcp`` by default on Windows. + +.. code-block:: yaml + + ipc_mode: ipc + +.. conf_master:: + +``tcp_master_pub_port`` +----------------------- + +Default: ``4512`` + +The TCP port on which events for the master should be published if ``ipc_mode`` is TCP. + +.. code-block:: yaml + + tcp_master_pub_port: 4512 + +.. conf_master:: tcp_master_pull_port + +``tcp_master_pull_port`` +------------------------ + +Default: ``4513`` + +The TCP port on which events for the master should be pulled if ``ipc_mode`` is TCP. + +.. 
code-block:: yaml + + tcp_master_pull_port: 4513 + +.. conf_master:: tcp_master_publish_pull + +``tcp_master_publish_pull`` +--------------------------- + +Default: ``4514`` + +The TCP port on which events for the master should be pulled fom and then republished onto +the event bus on the master. + +.. code-block:: yaml + + tcp_master_publish_pull: 4514 + +.. conf_master:: tcp_master_workers + +``tcp_master_workers`` +---------------------- + +Default: ``4515`` + +The TCP port for ``mworkers`` to connect to on the master. + +.. code-block:: yaml + + tcp_master_workers: 4515 + .. _salt-ssh-configuration: @@ -1192,6 +1202,19 @@ public keys from minions. auto_accept: False +.. conf_master:: keysize + +``keysize`` +----------- + +Default: ``2048`` + +The size of key that should be generated when creating new keys. + +.. code-block:: yaml + + keysize: 2048 + .. conf_master:: autosign_timeout ``autosign_timeout`` @@ -1236,6 +1259,24 @@ minion IDs for which keys will automatically be rejected. Will override both membership in the :conf_master:`autosign_file` and the :conf_master:`auto_accept` setting. +.. conf_master:: permissive_pki_access + +``permissive_pki_access`` +------------------------- + +Default: ``False`` + +Enable permissive access to the salt keys. This allows you to run the +master or minion as root, but have a non-root group be given access to +your pki_dir. To make the access explicit, root must belong to the group +you've given access to. This is potentially quite insecure. If an autosign_file +is specified, enabling permissive_pki_access will allow group access to that +specific file. + +.. code-block:: yaml + + permissive_pki_access: False + .. conf_master:: publisher_acl ``publisher_acl`` @@ -1278,6 +1319,20 @@ This is completely disabled by default. - cmd.* - test.echo +.. conf_master:: sudo_acl + +``sudo_acl`` +------------ + +Default: ``False`` + +Enforce ``publisher_acl`` and ``publisher_acl_blacklist`` when users have sudo +access to the salt command. + +.. code-block:: yaml + + sudo_acl: False + .. conf_master:: external_auth ``external_auth`` @@ -1462,6 +1517,19 @@ Do not disable this unless it is absolutely clear what this does. rotate_aes_key: True +.. conf_master:: publish_session + +``publish_session`` +------------------- + +Default: ``86400`` + +The number of seconds between AES key rotations on the master. + +.. code-block:: yaml + + publish_session: Default: 86400 + .. conf_master:: ssl ``ssl`` @@ -1492,6 +1560,24 @@ constant names without ssl module prefix: ``CERT_REQUIRED`` or ``PROTOCOL_SSLv23 ``allow_minion_key_revoke`` --------------------------- +Default: ``False`` + +By default, the master deletes its cache of minion data when the key for that +minion is removed. To preserve the cache after key deletion, set +``preserve_minion_cache`` to True. + +WARNING: This may have security implications if compromised minions auth with +a previous deleted minion ID. + +.. code-block:: yaml + + preserve_minion_cache: False + +.. conf_master:: allow_minion_key_revoke + +``allow_minion_key_revoke`` +--------------------------- + Default: ``True`` Controls whether a minion can request its own key revocation. When True @@ -1504,6 +1590,127 @@ the master will drop the request and the minion's key will remain accepted. rotate_aes_key: True +Master Large Scale Tuning Settings +================================== + +.. 
conf_master:: max_open_files + +``max_open_files`` +------------------ + +Default: ``100000`` + +Each minion connecting to the master uses AT LEAST one file descriptor, the +master subscription connection. If enough minions connect you might start +seeing on the console(and then salt-master crashes): + +.. code-block:: bash + + Too many open files (tcp_listener.cpp:335) + Aborted (core dumped) + +.. code-block:: yaml + + max_open_files: 100000 + +By default this value will be the one of `ulimit -Hn`, i.e., the hard limit for +max open files. + +To set a different value than the default one, uncomment, and configure this +setting. Remember that this value CANNOT be higher than the hard limit. Raising +the hard limit depends on the OS and/or distribution, a good way to find the +limit is to search the internet for something like this: + +.. code-block:: text + + raise max open files hard limit debian + +.. conf_master:: worker_threads + +``worker_threads`` +------------------ + +Default: ``5`` + +The number of threads to start for receiving commands and replies from minions. +If minions are stalling on replies because you have many minions, raise the +worker_threads value. + +Worker threads should not be put below 3 when using the peer system, but can +drop down to 1 worker otherwise. + +.. note:: + When the master daemon starts, it is expected behaviour to see + multiple salt-master processes, even if 'worker_threads' is set to '1'. At + a minimum, a controlling process will start along with a Publisher, an + EventPublisher, and a number of MWorker processes will be started. The + number of MWorker processes is tuneable by the 'worker_threads' + configuration value while the others are not. + +.. code-block:: yaml + + worker_threads: 5 + +.. conf_master:: pub_hwm + +``pub_hwm`` +----------- + +Default: ``1000`` + +The zeromq high water mark on the publisher interface. + +.. code-block:: yaml + + pub_hwm: 1000 + +.. conf_master:: zmq_backlog + +``zmq_backlog`` +--------------- + +Default: ``1000`` + +The listen queue size of the ZeroMQ backlog. + +.. code-block:: yaml + + zmq_backlog: 1000 + +.. conf_master:: salt_event_pub_hwm +.. conf_master:: event_publisher_pub_hwm + +``salt_event_pub_hwm`` and ``event_publisher_pub_hwm`` +------------------------------------------------------ + +These two ZeroMQ High Water Mark settings, ``salt_event_pub_hwm`` and +``event_publisher_pub_hwm`` are significant for masters with thousands of +minions. When these are insufficiently high it will manifest in random +responses missing in the CLI and even missing from the job cache. Masters +that have fast CPUs and many cores with appropriate ``worker_threads`` +will not need these set as high. + +The ZeroMQ high-water-mark for the ``SaltEvent`` pub socket default is: + +.. code-block:: yaml + + salt_event_pub_hwm: 20000 + +The ZeroMQ high-water-mark for the ``EventPublisher`` pub socket default is: + +.. code-block:: yaml + + event_publisher_pub_hwm: 10000 + +As an example, on single master deployment with 8,000 minions, 2.4GHz CPUs, +24 cores, and 32GiB memory has these settings: + +.. code-block:: yaml + + salt_event_pub_hwm: 128000 + event_publisher_pub_hwm: 64000 + + .. _master-module-management: Master Module Management @@ -1552,7 +1759,8 @@ Default: ``top.sls`` The state system uses a "top" file to tell the minions what environment to use and what modules to use. The state_top file is defined relative to the -root of the base environment. +root of the base environment. 
The value of "state_top" is also used for the +pillar top file .. code-block:: yaml @@ -3245,6 +3453,26 @@ configuration. pillar_opts: False +.. conf_master:: pillar_safe_render_error + +``pillar_safe_render_error`` +---------------------------- + +Default: ``True`` + +The pillar_safe_render_error option prevents the master from passing pillar +render errors to the minion. This is set on by default because the error could +contain templating data which would give that minion information it shouldn't +have, like a password! When set ``True`` the error message will only show: + +.. code-block:: shell + + Rendering SLS 'my.sls' failed. Please see master log for details. + +.. code-block:: yaml + + pillar_safe_render_error: True + .. _master-configuration-ext-pillar: .. conf_master:: ext_pillar @@ -3915,6 +4143,62 @@ can be utilized: pillar_cache_backend: disk +Master Reactor Settings +======================= + +.. conf_master:: reactor + +``reactor`` +----------- + +Default: ``[]`` + +Defines a salt reactor. See the :ref:`Reactor ` documentation for more +information. + +.. code-block:: yaml + + reactor: [] + +.. conf_master:: reactor_refresh_interval + +``reactor_refresh_interval`` +---------------------------- + +Default: ``60`` + +The TTL for the cache of the reactor configuration. + +.. code-block:: yaml + + reactor_refresh_interval: 60 + +.. conf_master:: reactor_worker_threads + +``reactor_worker_threads`` +-------------------------- + +Default: ``10`` + +The number of workers for the runner/wheel in the reactor. + +.. code-block:: yaml + reactor_worker_threads: 10 + +.. conf_master:: reactor_worker_hwm + +``reactor_worker_hwm`` +---------------------- + +Default: ``10000`` + +The queue size for workers in the reactor. + +.. code-block:: yaml + + reactor_worker_hwm: 10000 + + .. _syndic-server-settings: Syndic Server Settings @@ -4381,6 +4665,63 @@ option then the master will log a warning message. - /etc/roles/webserver +Keepalive Settings +================== + +.. conf_master:: tcp_keepalive + +``tcp_keepalive`` +----------------- + +Default: ``True`` + +The tcp keepalive interval to set on TCP ports. This setting can be used to tune Salt +connectivity issues in messy network environments with misbehaving firewalls. + +.. code-block:: yaml + + tcp_keepalive: True + +.. conf_master:: tcp_keepalive_cnt + +``tcp_keepalive_cnt`` +--------------------- + +Default: ``-1`` + +Sets the ZeroMQ TCP keepalive count. May be used to tune issues with minion disconnects. + +.. code-block:: yaml + + tcp_keepalive_cnt: -1 + +.. conf_master:: tcp_keepalive_idle + +``tcp_keepalive_idle`` +---------------------- + +Default: ``300`` + +Sets ZeroMQ TCP keepalive idle. May be used to tune issues with minion disconnects. + +.. code-block:: yaml + + tcp_keepalive_idle: 300 + +.. conf_master:: tcp_keepalive_intvl + +``tcp_keepalive_intvl`` +----------------------- + +Default: ``-1`` + +Sets ZeroMQ TCP keepalive interval. May be used to tune issues with minion disconnects. + +.. code-block:: yaml + + tcp_keepalive_intvl': -1 + + .. _winrepo-master-config-opts: Windows Software Repo Settings @@ -4519,7 +4860,7 @@ URL of the repository: .. code-block:: yaml - winrepo_remotes: + winrepo_remotes_ng: - ' https://github.com/saltstack/salt-winrepo-ng.git' Replace ```` with the SHA1 hash of a commit ID. 
Specifying a commit diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst index 4d26e5dd7a..039764c6b2 100644 --- a/doc/ref/configuration/minion.rst +++ b/doc/ref/configuration/minion.rst @@ -782,6 +782,20 @@ seconds each iteration. acceptance_wait_time_max: 0 +.. conf_minion:: rejected_retry + +``rejected_retry`` +------------------ + +Default: ``False`` + +If the master rejects the minion's public key, retry instead of exiting. +Rejected keys will be handled the same as waiting on acceptance. + +.. code-block:: yaml + + rejected_retry: False + .. conf_minion:: random_reauth_delay ``random_reauth_delay`` @@ -1212,7 +1226,7 @@ If certain returners should be disabled, this is the place .. conf_minion:: enable_whitelist_modules ``whitelist_modules`` ----------------------------- +--------------------- Default: ``[]`` (Module whitelisting is disabled. Adding anything to the config option will cause only the listed modules to be enabled. Modules not in the list will @@ -1304,6 +1318,20 @@ A list of extra directories to search for Salt renderers render_dirs: - /var/lib/salt/renderers +.. conf_minion:: utils_dirs + +``utils_dirs`` +-------------- + +Default: ``[]`` + +A list of extra directories to search for Salt utilities + +.. code-block:: yaml + + utils_dirs: + - /var/lib/salt/utils + .. conf_minion:: cython_enable ``cython_enable`` @@ -1352,6 +1380,20 @@ below. providers: service: systemd +.. conf_minion:: modules_max_memory + +``modules_max_memory`` +---------------------- + +Default: ``-1`` + +Specify a max size (in bytes) for modules on import. This feature is currently +only supported on *nix operating systems and requires psutil. + +.. code-block:: yaml + + modules_max_memory: -1 + .. conf_minion:: extmod_whitelist .. conf_minion:: extmod_blacklist @@ -1377,8 +1419,8 @@ whitelist an empty list. modules: - specific_module - Valid options: + - beacons - clouds - sdb @@ -1524,6 +1566,52 @@ environment lacks one. default_top: dev +.. conf_minion:: startup_states + +``startup_states`` +------------------ + +Default: ``''`` + +States to run when the minion daemon starts. To enable, set ``startup_states`` to: + +- ``highstate``: Execute state.highstate +- ``sls``: Read in the sls_list option and execute the named sls files +- ``top``: Read top_file option and execute based on that file on the Master + +.. code-block:: yaml + + startup_states: '' + +.. conf_minion:: sls_list + +``sls_list`` +------------ + +Default: ``[]`` + +List of states to run when the minion starts up if ``startup_states`` is set to ``sls``. + +.. code-block:: yaml + + sls_list: + - edit.vim + - hyper + +.. conf_minion:: top_file + +``top_file`` +------------ + +Default: ``''`` + +Top file to execute if ``startup_states`` is set to ``top``. + +.. code-block:: yaml + + top_file: '' + + State Management Settings ========================= @@ -1540,7 +1628,7 @@ The default renderer used for local state executions renderer: yaml_jinja -.. conf_master:: test +.. conf_minion:: test ``test`` -------- @@ -2058,6 +2146,35 @@ before the initial key exchange. The master fingerprint can be found by running master_finger: 'ba:30:65:2a:d6:9e:20:4f:d8:b2:f3:a7:d4:65:11:13' +.. conf_minion:: keysize + +``keysize`` +----------- + +Default: ``2048`` + +The size of key that should be generated when creating new keys. + +.. code-block:: yaml + + keysize: 2048 + +.. 
conf_minion:: permissive_pki_access + +``permissive_pki_access`` +------------------------- + +Default: ``False`` + +Enable permissive access to the salt keys. This allows you to run the +master or minion as root, but have a non-root group be given access to +your pki_dir. To make the access explicit, root must belong to the group +you've given access to. This is potentially quite insecure. + +.. code-block:: yaml + + permissive_pki_access: False + .. conf_minion:: verify_master_pubkey_sign ``verify_master_pubkey_sign`` @@ -2165,7 +2282,7 @@ blocked. If `cmd_whitelist_glob` is NOT SET, then all shell commands are permitt - 'cat /etc/fstab' -.. conf_master:: ssl +.. conf_minion:: ssl ``ssl`` ------- @@ -2191,6 +2308,62 @@ constant names without ssl module prefix: ``CERT_REQUIRED`` or ``PROTOCOL_SSLv23 ssl_version: PROTOCOL_TLSv1_2 +Reactor Settings +================ + +.. conf_minion:: reactor + +``reactor`` +----------- + +Default: ``[]`` + +Defines a salt reactor. See the :ref:`Reactor ` documentation for more +information. + +.. code-block:: yaml + + reactor: [] + +.. conf_minion:: reactor_refresh_interval + +``reactor_refresh_interval`` +---------------------------- + +Default: ``60`` + +The TTL for the cache of the reactor configuration. + +.. code-block:: yaml + + reactor_refresh_interval: 60 + +.. conf_minion:: reactor_worker_threads + +``reactor_worker_threads`` +-------------------------- + +Default: ``10`` + +The number of workers for the runner/wheel in the reactor. + +.. code-block:: yaml + reactor_worker_threads: 10 + +.. conf_minion:: reactor_worker_hwm + +``reactor_worker_hwm`` +---------------------- + +Default: ``10000`` + +The queue size for workers in the reactor. + +.. code-block:: yaml + + reactor_worker_hwm: 10000 + + Thread Settings =============== @@ -2461,6 +2634,62 @@ option then the minion will log a warning message. - /etc/roles/webserver +Keepalive Settings +================== + +.. conf_minion:: tcp_keepalive + +``tcp_keepalive`` +----------------- + +Default: ``True`` + +The tcp keepalive interval to set on TCP ports. This setting can be used to tune Salt +connectivity issues in messy network environments with misbehaving firewalls. + +.. code-block:: yaml + + tcp_keepalive: True + +.. conf_minion:: tcp_keepalive_cnt + +``tcp_keepalive_cnt`` +--------------------- + +Default: ``-1`` + +Sets the ZeroMQ TCP keepalive count. May be used to tune issues with minion disconnects. + +.. code-block:: yaml + + tcp_keepalive_cnt: -1 + +.. conf_minion:: tcp_keepalive_idle + +``tcp_keepalive_idle`` +---------------------- + +Default: ``300`` + +Sets ZeroMQ TCP keepalive idle. May be used to tune issues with minion disconnects. + +.. code-block:: yaml + + tcp_keepalive_idle: 300 + +.. conf_minion:: tcp_keepalive_intvl + +``tcp_keepalive_intvl`` +----------------------- + +Default: ``-1`` + +Sets ZeroMQ TCP keepalive interval. May be used to tune issues with minion disconnects. + +.. code-block:: yaml + + tcp_keepalive_intvl': -1 + Frozen Build Update Settings ============================ @@ -2562,6 +2791,36 @@ out. winrepo_dir: 'D:\winrepo' +.. conf_minion:: winrepo_dir_ng + +``winrepo_dir_ng`` +------------------ + +.. versionadded:: 2015.8.0 + A new :ref:`ng ` repo was added. + +Default: ``/srv/salt/win/repo-ng`` + +Location on the minion where the :conf_minion:`winrepo_remotes_ng` are checked +out for 2015.8.0 and later minions. + +.. code-block:: yaml + + winrepo_dir_ng: /srv/salt/win/repo-ng + +.. 
conf_minion:: winrepo_source_dir + +``winrepo_source_dir`` +---------------------- + +Default: ``salt://win/repo-ng/`` + +The source location for the winrepo sls files. + +.. code-block:: yaml + + winrepo_source_dir: salt://win/repo-ng/ + .. conf_minion:: winrepo_cachefile .. conf_minion:: win_repo_cachefile @@ -2614,3 +2873,33 @@ URL of the repository: Replace ```` with the SHA1 hash of a commit ID. Specifying a commit ID is useful in that it allows one to revert back to a previous version in the event that an error is introduced in the latest revision of the repo. + +.. conf_minion:: winrepo_remotes_ng + +``winrepo_remotes_ng`` +---------------------- + +.. versionadded:: 2015.8.0 + A new :ref:`ng ` repo was added. + +Default: ``['https://github.com/saltstack/salt-winrepo-ng.git']`` + +List of git repositories to checkout and include in the winrepo for +2015.8.0 and later minions. + +.. code-block:: yaml + + winrepo_remotes_ng: + - https://github.com/saltstack/salt-winrepo-ng.git + +To specify a specific revision of the repository, prepend a commit ID to the +URL of the repository: + +.. code-block:: yaml + + winrepo_remotes_ng: + - ' https://github.com/saltstack/salt-winrepo-ng.git' + +Replace ```` with the SHA1 hash of a commit ID. Specifying a commit +ID is useful in that it allows one to revert back to a previous version in the +event that an error is introduced in the latest revision of the repo. diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst index af4a19fbdb..1365f38453 100644 --- a/doc/ref/modules/all/index.rst +++ b/doc/ref/modules/all/index.rst @@ -97,6 +97,7 @@ execution modules cytest daemontools data + datadog_api ddns deb_apache deb_postgres @@ -197,6 +198,7 @@ execution modules keyboard keystone kmod + kubernetes launchctl layman ldap3 @@ -398,7 +400,6 @@ execution modules state status statuspage - stormpath supervisord suse_apache svn diff --git a/doc/ref/modules/all/salt.modules.datadog_api.rst b/doc/ref/modules/all/salt.modules.datadog_api.rst new file mode 100644 index 0000000000..9f6afa953e --- /dev/null +++ b/doc/ref/modules/all/salt.modules.datadog_api.rst @@ -0,0 +1,6 @@ +======================== +salt.modules.datadog_api +======================== + +.. automodule:: salt.modules.datadog_api + :members: \ No newline at end of file diff --git a/doc/ref/modules/all/salt.modules.kubernetes.rst b/doc/ref/modules/all/salt.modules.kubernetes.rst new file mode 100644 index 0000000000..a0f715d617 --- /dev/null +++ b/doc/ref/modules/all/salt.modules.kubernetes.rst @@ -0,0 +1,6 @@ +======================= +salt.modules.kubernetes +======================= + +.. automodule:: salt.modules.kubernetes + :members: diff --git a/doc/ref/modules/all/salt.modules.stormpath.rst b/doc/ref/modules/all/salt.modules.stormpath.rst deleted file mode 100644 index 708cfb2ab6..0000000000 --- a/doc/ref/modules/all/salt.modules.stormpath.rst +++ /dev/null @@ -1,6 +0,0 @@ -====================== -salt.modules.stormpath -====================== - -.. automodule:: salt.modules.stormpath - :members: diff --git a/doc/ref/modules/all/salt.modules.test.rst b/doc/ref/modules/all/salt.modules.test.rst index 5163f48e43..0aa6ccf6a8 100644 --- a/doc/ref/modules/all/salt.modules.test.rst +++ b/doc/ref/modules/all/salt.modules.test.rst @@ -3,4 +3,5 @@ salt.modules.test ================= .. 
automodule:: salt.modules.test - :members: \ No newline at end of file + :members: + :exclude-members: rand_str diff --git a/doc/ref/modules/index.rst b/doc/ref/modules/index.rst index ecffb224a0..e68eb94507 100644 --- a/doc/ref/modules/index.rst +++ b/doc/ref/modules/index.rst @@ -429,10 +429,33 @@ similar to the following: Confine this module to Mac OS with Homebrew. ''' - if salt.utils.which('brew') and __grains__['os'] == 'MacOS': + if salt.utils.path.which('brew') and __grains__['os'] == 'MacOS': return __virtualname__ return False +The ``__virtual__()`` function can return a ``True`` or ``False`` boolean, a tuple, +or a string. If it returns a ``True`` value, this ``__virtualname__`` module-level +attribute can be set as seen in the above example. This is the string that the module +should be referred to as. + +When ``__virtual__()`` returns a tuple, the first item should be a boolean and the +second should be a string. This is typically done when the module should not load. The +first value of the tuple is ``False`` and the second is the error message to display +for why the module did not load. + +For example: + +.. code-block:: python + + def __virtual__(): + ''' + Only load if git exists on the system + ''' + if salt.utils.which('git') is None: + return (False, + 'The git execution module cannot be loaded: git unavailable.') + else: + return True Documentation ============= diff --git a/doc/ref/renderers/index.rst b/doc/ref/renderers/index.rst index bea2b7798f..8399f7f604 100644 --- a/doc/ref/renderers/index.rst +++ b/doc/ref/renderers/index.rst @@ -146,8 +146,10 @@ Here is a simple YAML renderer example: import yaml from salt.utils.yamlloader import SaltYamlSafeLoader + from salt.ext import six + def render(yaml_data, saltenv='', sls='', **kws): - if not isinstance(yaml_data, basestring): + if not isinstance(yaml_data, six.string_types): yaml_data = yaml_data.read() data = yaml.load( yaml_data, diff --git a/doc/ref/states/all/index.rst b/doc/ref/states/all/index.rst index 1344bbc026..4803648006 100644 --- a/doc/ref/states/all/index.rst +++ b/doc/ref/states/all/index.rst @@ -136,6 +136,7 @@ state modules keyboard keystone kmod + kubernetes layman ldap libcloud_dns @@ -249,7 +250,6 @@ state modules stateconf status statuspage - stormpath_account supervisord svn sysctl diff --git a/doc/ref/states/all/salt.states.kubernetes.rst b/doc/ref/states/all/salt.states.kubernetes.rst new file mode 100644 index 0000000000..55bd416492 --- /dev/null +++ b/doc/ref/states/all/salt.states.kubernetes.rst @@ -0,0 +1,6 @@ +====================== +salt.states.kubernetes +====================== + +.. automodule:: salt.states.kubernetes + :members: diff --git a/doc/ref/states/all/salt.states.stormpath_account.rst b/doc/ref/states/all/salt.states.stormpath_account.rst deleted file mode 100644 index f361ec2bb7..0000000000 --- a/doc/ref/states/all/salt.states.stormpath_account.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================= -salt.states.stormpath_account -============================= - -.. automodule:: salt.states.stormpath_account - :members: diff --git a/doc/ref/states/requisites.rst b/doc/ref/states/requisites.rst index 6b00b3e856..3e3d29213c 100644 --- a/doc/ref/states/requisites.rst +++ b/doc/ref/states/requisites.rst @@ -519,7 +519,8 @@ runas .. versionadded:: 2017.7.0 -The ``runas`` global option is used to set the user which will be used to run the command in the ``cmd.run`` module. 
+The ``runas`` global option is used to set the user which will be used to run +the command in the ``cmd.run`` module. .. code-block:: yaml @@ -532,6 +533,26 @@ The ``runas`` global option is used to set the user which will be used to run th In the above state, the pip command run by ``cmd.run`` will be run by the daniel user. +runas_password +~~~~~~~~~~~~~~ + +.. versionadded:: 2017.7.2 + +The ``runas_password`` global option is used to set the password used by the +runas global option. This is required by ``cmd.run`` on Windows when ``runas`` +is specified. It will be set when ``runas_password`` is defined in the state. + +.. code-block:: yaml + + run_script: + cmd.run: + - name: Powershell -NonInteractive -ExecutionPolicy Bypass -File C:\\Temp\\script.ps1 + - runas: frank + - runas_password: supersecret + +In the above state, the Powershell script run by ``cmd.run`` will be run by the +frank user with the password ``supersecret``. + .. _requisites-require-in: .. _requisites-watch-in: .. _requisites-onchanges-in: diff --git a/doc/ref/states/writing.rst b/doc/ref/states/writing.rst index 365e1b581b..f278df5294 100644 --- a/doc/ref/states/writing.rst +++ b/doc/ref/states/writing.rst @@ -135,19 +135,23 @@ A State Module must return a dict containing the following keys/values: ``test=True``, and changes would have been made if the state was not run in test mode. - +--------------------+-----------+-----------+ - | | live mode | test mode | - +====================+===========+===========+ - | no changes | ``True`` | ``True`` | - +--------------------+-----------+-----------+ - | successful changes | ``True`` | ``None`` | - +--------------------+-----------+-----------+ - | failed changes | ``False`` | ``None`` | - +--------------------+-----------+-----------+ + +--------------------+-----------+------------------------+ + | | live mode | test mode | + +====================+===========+========================+ + | no changes | ``True`` | ``True`` | + +--------------------+-----------+------------------------+ + | successful changes | ``True`` | ``None`` | + +--------------------+-----------+------------------------+ + | failed changes | ``False`` | ``False`` or ``None`` | + +--------------------+-----------+------------------------+ .. note:: - Test mode does not predict if the changes will be successful or not. + Test mode does not predict if the changes will be successful or not, + and hence the result for pending changes is usually ``None``. + + However, if a state is going to fail and this can be determined + in test mode without applying the change, ``False`` can be returned. - **comment:** A string containing a summary of the result. diff --git a/doc/spelling_wordlist.txt b/doc/spelling_wordlist.txt index f2eb2a614b..c097ef1c59 100644 --- a/doc/spelling_wordlist.txt +++ b/doc/spelling_wordlist.txt @@ -777,8 +777,6 @@ Stateconf stderr stdin stdout -stormpath -Stormpath str strftime subfolder diff --git a/doc/topics/cloud/action.rst b/doc/topics/cloud/action.rst index 0e10a95653..f36211d01a 100644 --- a/doc/topics/cloud/action.rst +++ b/doc/topics/cloud/action.rst @@ -21,7 +21,7 @@ Or you may specify a map which includes all VMs to perform the action on: $ salt-cloud -a reboot -m /path/to/mapfile -The following is a list of actions currently supported by salt-cloud: +The following is an example list of actions currently supported by ``salt-cloud``: .. 
code-block:: yaml @@ -36,5 +36,5 @@ The following is a list of actions currently supported by salt-cloud: - start - stop -Another useful reference for viewing more salt-cloud actions is the -:ref:Salt Cloud Feature Matrix +Another useful reference for viewing more ``salt-cloud`` actions is the +:ref:`Salt Cloud Feature Matrix `. diff --git a/doc/topics/cloud/cloud.rst b/doc/topics/cloud/cloud.rst index 0ca59b83f3..c88a5eccb7 100644 --- a/doc/topics/cloud/cloud.rst +++ b/doc/topics/cloud/cloud.rst @@ -146,24 +146,24 @@ library. The following two lines set up the imports: .. code-block:: python from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 - from salt.utils import namespaced_function + import salt.utils And then a series of declarations will make the necessary functions available within the cloud module. .. code-block:: python - get_size = namespaced_function(get_size, globals()) - get_image = namespaced_function(get_image, globals()) - avail_locations = namespaced_function(avail_locations, globals()) - avail_images = namespaced_function(avail_images, globals()) - avail_sizes = namespaced_function(avail_sizes, globals()) - script = namespaced_function(script, globals()) - destroy = namespaced_function(destroy, globals()) - list_nodes = namespaced_function(list_nodes, globals()) - list_nodes_full = namespaced_function(list_nodes_full, globals()) - list_nodes_select = namespaced_function(list_nodes_select, globals()) - show_instance = namespaced_function(show_instance, globals()) + get_size = salt.utils.namespaced_function(get_size, globals()) + get_image = salt.utils.namespaced_function(get_image, globals()) + avail_locations = salt.utils.namespaced_function(avail_locations, globals()) + avail_images = salt.utils.namespaced_function(avail_images, globals()) + avail_sizes = salt.utils.namespaced_function(avail_sizes, globals()) + script = salt.utils.namespaced_function(script, globals()) + destroy = salt.utils.namespaced_function(destroy, globals()) + list_nodes = salt.utils.namespaced_function(list_nodes, globals()) + list_nodes_full = salt.utils.namespaced_function(list_nodes_full, globals()) + list_nodes_select = salt.utils.namespaced_function(list_nodes_select, globals()) + show_instance = salt.utils.namespaced_function(show_instance, globals()) If necessary, these functions may be replaced by removing the appropriate declaration line, and then adding the function as normal. diff --git a/doc/topics/cloud/config.rst b/doc/topics/cloud/config.rst index d5986340d4..8028aa414f 100644 --- a/doc/topics/cloud/config.rst +++ b/doc/topics/cloud/config.rst @@ -56,6 +56,24 @@ settings can be placed in the provider or profile: sls_list: - web + +When salt cloud creates a new minon, it can automatically add grain information +to the minion configuration file identifying the sources originally used +to define it. + +The generated grain information will appear similar to: + +.. code-block:: yaml + + grains: + salt-cloud: + driver: ec2 + provider: my_ec2:ec2 + profile: ec2-web + +The generation of the salt-cloud grain can be surpressed by the +option ``enable_cloud_grains: 'False'`` in the cloud configuration file. 
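As a minimal, illustrative sketch, the grain generation can be turned off by placing the option in the main cloud configuration file (typically ``/etc/salt/cloud``); only the ``enable_cloud_grains`` line comes from this change, the comments are illustrative:

.. code-block:: yaml

    # /etc/salt/cloud -- illustrative sketch
    # Do not write the salt-cloud grains into newly provisioned minion configs
    enable_cloud_grains: 'False'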
+ Cloud Configuration Syntax ========================== diff --git a/doc/topics/cloud/function.rst b/doc/topics/cloud/function.rst index 3368cd820b..c83487d36b 100644 --- a/doc/topics/cloud/function.rst +++ b/doc/topics/cloud/function.rst @@ -26,5 +26,5 @@ gathering information about instances on a provider basis: $ salt-cloud -f list_nodes_full linode $ salt-cloud -f list_nodes_select linode -Another useful reference for viewing salt-cloud functions is the +Another useful reference for viewing ``salt-cloud`` functions is the :ref:`Salt Cloud Feature Matrix `. diff --git a/doc/topics/cloud/index.rst b/doc/topics/cloud/index.rst index 7af8447ba5..eff8e2aa8f 100644 --- a/doc/topics/cloud/index.rst +++ b/doc/topics/cloud/index.rst @@ -119,6 +119,7 @@ Cloud Provider Specifics Getting Started With Libvirt Getting Started With Linode Getting Started With LXC + Getting Started With OneAndOne Getting Started With OpenNebula Getting Started With OpenStack Getting Started With Parallels diff --git a/doc/topics/cloud/joyent.rst b/doc/topics/cloud/joyent.rst index 8d4ca2c100..a56d533840 100644 --- a/doc/topics/cloud/joyent.rst +++ b/doc/topics/cloud/joyent.rst @@ -49,7 +49,7 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or in the .. code-block:: yaml - joyent_512 + joyent_512: provider: my-joyent-config size: g4-highcpu-512M image: ubuntu-16.04 diff --git a/doc/topics/cloud/libvirt.rst b/doc/topics/cloud/libvirt.rst index 48ba5ba8d4..7fb0ff25ef 100644 --- a/doc/topics/cloud/libvirt.rst +++ b/doc/topics/cloud/libvirt.rst @@ -8,10 +8,14 @@ libvirt with qemu-kvm. http://www.libvirt.org/ -Dependencies +Host Dependencies ============ * libvirt >= 1.2.18 (older might work) +Salt-Cloud Dependencies +============ +* libvirt-python + Provider Configuration ====================== diff --git a/doc/topics/cloud/misc.rst b/doc/topics/cloud/misc.rst index 3ef1518923..2e44aa5612 100644 --- a/doc/topics/cloud/misc.rst +++ b/doc/topics/cloud/misc.rst @@ -386,3 +386,42 @@ script, a cloud profile using ``file_map`` might look like: file_map: /local/path/to/custom/script: /remote/path/to/use/custom/script /local/path/to/package: /remote/path/to/store/package + +Running Pre-Flight Commands +=========================== + +.. versionadded:: Oxygen + +To execute specified preflight shell commands on a VM before the deploy script is +run, use the ``preflight_cmds`` option. These must be defined as a list in a cloud +configuration file. For example: + +.. code-block:: yaml + + my-cloud-profile: + provider: linode-config + image: Ubuntu 16.04 LTS + size: Linode 2048 + preflight_cmds: + - whoami + - echo 'hello world!' + +These commands will run in sequence **before** the bootstrap script is executed. + +Force Minion Config +=================== + +.. versionadded:: Oxygen + +The ``force_minion_config`` option requests the bootstrap process to overwrite +an existing minion configuration file and public/private key files. +Default: False + +This might be important for drivers (such as ``saltify``) which are expected to +take over a connection from a former salt master. + +.. code-block:: yaml + + my_saltify_provider: + driver: saltify + force_minion_config: true diff --git a/doc/topics/cloud/oneandone.rst b/doc/topics/cloud/oneandone.rst new file mode 100644 index 0000000000..e0e52e739a --- /dev/null +++ b/doc/topics/cloud/oneandone.rst @@ -0,0 +1,146 @@ +========================== +Getting Started With 1and1 +========================== + +1&1 is one of the world’s leading Web hosting providers. 
1&1 currently offers +a wide range of Web hosting products, including email solutions and high-end +servers in 10 different countries including Germany, Spain, Great Britain +and the United States. From domains to 1&1 MyWebsite to eBusiness solutions +like Cloud Hosting and Web servers for complex tasks, 1&1 is well placed to deliver +a high quality service to its customers. All 1&1 products are hosted in +1&1‘s high-performance, green data centers in the USA and Europe. + +Dependencies +============ + +* 1and1 >= 1.2.0 + +Configuration +============= + +* Using the new format, set up the cloud configuration at + ``/etc/salt/cloud.providers`` or + ``/etc/salt/cloud.providers.d/oneandone.conf``: + +.. code-block:: yaml + + my-oneandone-config: + driver: oneandone + + # Set the location of the salt-master + # + minion: + master: saltmaster.example.com + + # Configure oneandone authentication credentials + # + api_token: + ssh_private_key: /path/to/id_rsa + ssh_public_key: /path/to/id_rsa.pub + +Authentication +============== + +The ``api_key`` is used for API authorization. This token can be obtained +from the CloudPanel in the Management section below Users. + +Profiles +======== + +Here is an example of a profile: + +.. code-block:: yaml + + oneandone_fixed_size: + provider: my-oneandone-config + description: Small instance size server + fixed_instance_size: S + appliance_id: 8E3BAA98E3DFD37857810E0288DD8FBA + + oneandone_custom_size: + provider: my-oneandone-config + description: Custom size server + vcore: 2 + cores_per_processor: 2 + ram: 8 + appliance_id: 8E3BAA98E3DFD37857810E0288DD8FBA + hdds: + - + is_main: true + size: 20 + - + is_main: false + size: 20 + +The following list explains some of the important properties. + +fixed_instance_size_id + When creating a server, either ``fixed_instance_size_id`` or custom hardware params + containing ``vcore``, ``cores_per_processor``, ``ram``, and ``hdds`` must be provided. + Can be one of the IDs listed among the output of the following command: + +.. code-block:: bash + + salt-cloud --list-sizes oneandone + +vcore + Total amount of processors. + +cores_per_processor + Number of cores per processor. + +ram + RAM memory size in GB. + +hdds + Hard disks. + +appliance_id + ID of the image that will be installed on server. + Can be one of the IDs listed in the output of the following command: + +.. code-block:: bash + + salt-cloud --list-images oneandone + +datacenter_id + ID of the datacenter where the server will be created. + Can be one of the IDs listed in the output of the following command: + +.. code-block:: bash + + salt-cloud --list-locations oneandone + +description + Description of the server. + +password + Password of the server. Password must contain more than 8 characters + using uppercase letters, numbers and other special symbols. + +power_on + Power on server after creation. Default is set to true. + +firewall_policy_id + Firewall policy ID. If it is not provided, the server will assign + the best firewall policy, creating a new one if necessary. If the parameter + is sent with a 0 value, the server will be created with all ports blocked. + +ip_id + IP address ID. + +load_balancer_id + Load balancer ID. + +monitoring_policy_id + Monitoring policy ID. + +deploy + Set to False if Salt should not be installed on the node. + +wait_for_timeout + The timeout to wait in seconds for provisioning resources such as servers. + The default wait_for_timeout is 15 minutes. + +For more information concerning cloud profiles, see :ref:`here +`. 
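To tie several of the optional properties above together, a hypothetical profile sketch is shown below. Only the property names come from the documentation above; the profile name is arbitrary and the ``datacenter_id`` value is a placeholder to be replaced with a real ID from ``salt-cloud --list-locations oneandone``:

.. code-block:: yaml

    oneandone_located_size:
      provider: my-oneandone-config
      description: Small server pinned to a specific datacenter
      fixed_instance_size: S
      appliance_id: 8E3BAA98E3DFD37857810E0288DD8FBA
      # Placeholder: replace with an ID from `salt-cloud --list-locations oneandone`
      datacenter_id: <datacenter-id>
      # Power the server on after creation (the documented default)
      power_on: true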
diff --git a/doc/topics/cloud/saltify.rst b/doc/topics/cloud/saltify.rst index 2e8fa15661..dda9801522 100644 --- a/doc/topics/cloud/saltify.rst +++ b/doc/topics/cloud/saltify.rst @@ -16,7 +16,7 @@ The Saltify driver has no external dependencies. Configuration ============= -Because the Saltify driver does not use an actual cloud provider host, it has a +Because the Saltify driver does not use an actual cloud provider host, it can have a simple provider configuration. The only thing that is required to be set is the driver name, and any other potentially useful information, like the location of the salt-master: @@ -31,6 +31,12 @@ the salt-master: master: 111.222.333.444 provider: saltify +However, if you wish to use the more advanced capabilities of salt-cloud, such as +rebooting, listing, and disconnecting machines, then the salt master must fill +the role usually performed by a vendor's cloud management system. In order to do +that, you must configure your salt master as a salt-api server, and supply credentials +to use it. (See ``salt-api setup`` below.) + Profiles ======== @@ -72,6 +78,30 @@ to it can be verified with Salt: salt my-machine test.ping +Destroy Options +--------------- + +For obvious reasons, the ``destroy`` action does not actually vaporize hardware. +If the salt master is connected using salt-api, it can tear down parts of +the client machines. It will remove the client's key from the salt master, +and will attempt the following options: + +.. code-block:: yaml + + - remove_config_on_destroy: true + # default: true + # Deactivate salt-minion on reboot and + # delete the minion config and key files from its ``/etc/salt`` directory, + # NOTE: If deactivation is unsuccessful (older Ubuntu machines) then when + # salt-minion restarts it will automatically create a new, unwanted, set + # of key files. The ``force_minion_config`` option must be used in that case. + + - shutdown_on_destroy: false + # default: false + # send a ``shutdown`` command to the client. + +.. versionadded:: Oxygen + Using Map Files --------------- The settings explained in the section above may also be set in a map file. An @@ -135,3 +165,67 @@ Return values: - ``True``: Credential verification succeeded - ``False``: Credential verification succeeded - ``None``: Credential verification was not attempted. + +Provisioning salt-api +===================== + +In order to query or control minions it created, saltify needs to send commands +to the salt master. It does that using the network interface to salt-api. + +The salt-api is not enabled by default. The following example will provide a +simple installation. + +.. code-block:: yaml + + # file /etc/salt/cloud.profiles.d/my_saltify_profiles.conf + hw_41: # a theoretical example hardware machine + ssh_host: 10.100.9.41 # the hard address of your target + ssh_username: vagrant # a user name which has passwordless sudo + password: vagrant # on your target machine + provider: my_saltify_provider + + +.. code-block:: yaml + + # file /etc/salt/cloud.providers.d/saltify_provider.conf + my_saltify_provider: + driver: saltify + eauth: pam + username: vagrant # supply some sudo-group-member's name + password: vagrant # and password on the salt master + minion: + master: 10.100.9.5 # the hard address of the master + + +.. code-block:: yaml + + # file /etc/salt/master.d/auth.conf + # using salt-api ... members of the 'sudo' group can do anything ... + external_auth: + pam: + sudo%: + - .* + - '@wheel' + - '@runner' + - '@jobs' + + +.. 
code-block:: yaml + + # file /etc/salt/master.d/api.conf + # see https://docs.saltstack.com/en/latest/ref/netapi/all/salt.netapi.rest_cherrypy.html + rest_cherrypy: + host: localhost + port: 8000 + ssl_crt: /etc/pki/tls/certs/localhost.crt + ssl_key: /etc/pki/tls/certs/localhost.key + thread_pool: 30 + socket_queue_size: 10 + + +Start your target machine as a Salt minion named "node41" by: + +.. code-block:: bash + + $ sudo salt-cloud -p hw_41 node41 + diff --git a/doc/topics/development/conventions/formulas.rst b/doc/topics/development/conventions/formulas.rst index 29f968faf9..4fc8f722f3 100644 --- a/doc/topics/development/conventions/formulas.rst +++ b/doc/topics/development/conventions/formulas.rst @@ -214,18 +214,34 @@ Writing Formulas Each Formula is a separate repository in the `saltstack-formulas`_ organization on GitHub. -.. note:: Get involved creating new Formulas +Get involved creating new Formulas +---------------------------------- - The best way to create new Formula repositories for now is to create a - repository in your own account on GitHub and notify a SaltStack employee - when it is ready. We will add you to the contributors team on the - `saltstack-formulas`_ organization and help you transfer the repository - over. Ping a SaltStack employee on IRC (``#salt`` on Freenode) or send an - email to the `salt-users`_ mailing list. +The best way to create new Formula repositories for now is to create a +repository in your own account on GitHub and notify a SaltStack employee when +it is ready. We will add you to the Contributors team on the +`saltstack-formulas`_ organization and help you transfer the repository over. +Ping a SaltStack employee on IRC (``#salt`` on Freenode) or send an email to +the `salt-users`_ mailing list. - There are a lot of repositories in that organization! Team members can - manage which repositories they are subscribed to on GitHub's watching page: - https://github.com/watching. +There are a lot of repositories in that organization! Team members can manage +which repositories they are subscribed to on GitHub's watching page: +https://github.com/watching. + +Members of the Contributors team are welcome to participate in reviewing pull +requests across the Organization. Some repositories will have regular +contributors and some repositories will not. As you get involved in a +repository be sure to communicate with any other contributors there on pull +requests that are large or have breaking changes. + +In general it is best to have another Contributor review and merge any pull +requests that you open. Feel free to `at-mention`__ other regular contributors +to a repository and request a review. However, there are a lot of formula +repositories so if a repository does not yet have regular contributors or if +your pull request has stayed open for more than a couple days feel free to +"selfie-merge" your own pull request. + +__: https://help.github.com/articles/basic-writing-and-formatting-syntax/#mentioning-users-and-teams Style ----- diff --git a/doc/topics/development/deprecations.rst b/doc/topics/development/deprecations.rst index c13a5b32cc..fc89c106bd 100644 --- a/doc/topics/development/deprecations.rst +++ b/doc/topics/development/deprecations.rst @@ -18,10 +18,10 @@ on the significance and complexity of the changes required by the user. Salt feature releases are based on the Periodic Table. Any new features going into the develop branch will be named after the next element in the Periodic -Table. 
For example, Beryllium was the feature release name of the develop branch -before the 2015.8 branch was tagged. At that point in time, any new features going -into the develop branch after 2015.8 was branched were part of the Boron feature -release. +Table. For example, Beryllium was the feature release name of the develop +branch before the 2015.8 branch was tagged. At that point in time, any new +features going into the develop branch after 2015.8 was branched were part of +the Boron feature release. A deprecation warning should be in place for at least two major releases before the deprecated code and its accompanying deprecation warning are removed. More @@ -29,14 +29,14 @@ time should be given for more complex changes. For example, if the current release under development is ``Sodium``, the deprecated code and associated warnings should remain in place and warn for at least ``Aluminum``. -To help in this deprecation task, salt provides :func:`salt.utils.warn_until -`. The idea behind this helper function is to show the -deprecation warning to the user until salt reaches the provided version. Once -that provided version is equaled :func:`salt.utils.warn_until -` will raise a :py:exc:`RuntimeError` making salt stop -its execution. This stoppage is unpleasant and will remind the developer that -the deprecation limit has been reached and that the code can then be safely -removed. +To help in this deprecation task, salt provides +:func:`salt.utils.versions.warn_until `. The +idea behind this helper function is to show the deprecation warning to the user +until salt reaches the provided version. Once that provided version is equaled +:func:`salt.utils.versions.warn_until ` will +raise a :py:exc:`RuntimeError` making salt stop its execution. This stoppage is +unpleasant and will remind the developer that the deprecation limit has been +reached and that the code can then be safely removed. Consider the following example: @@ -44,7 +44,7 @@ Consider the following example: def some_function(bar=False, foo=None): if foo is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Aluminum', 'The \'foo\' argument has been deprecated and its ' 'functionality removed, as such, its usage is no longer ' diff --git a/doc/topics/development/tests/unit.rst b/doc/topics/development/tests/unit.rst index d90c770178..595a14d636 100644 --- a/doc/topics/development/tests/unit.rst +++ b/doc/topics/development/tests/unit.rst @@ -319,7 +319,7 @@ function into ``__salt__`` that's actually a MagicMock instance. def show_patch(self): with patch.dict(my_module.__salt__, - {'function.to_replace': MagicMock()}: + {'function.to_replace': MagicMock()}): # From this scope, carry on with testing, with a modified __salt__! diff --git a/doc/topics/installation/eos.rst b/doc/topics/installation/eos.rst new file mode 100644 index 0000000000..c8c7787be7 --- /dev/null +++ b/doc/topics/installation/eos.rst @@ -0,0 +1,154 @@ +========================================= +Arista EOS Salt minion installation guide +========================================= + +The Salt minion for Arista EOS is distributed as a SWIX extension and can be installed directly on the switch. The EOS network operating system is based on old Fedora distributions and the installation of the ``salt-minion`` requires backports. This SWIX extension contains the necessary backports, together with the Salt basecode. + +.. note:: + + This SWIX extension has been tested on Arista DCS-7280SE-68-R, running EOS 4.17.5M and vEOS 4.18.3F. 
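Before copying the extension to the switch, it can be worth confirming that the device is running one of the EOS releases mentioned in the note above. The snippet below is only an illustrative pre-check added by the editor, not part of the official install steps; the exact ``show version`` output wording may vary between EOS releases.

.. code-block:: bash

    # from the EOS CLI, filter the software image line out of "show version"
    veos#show version | include Software
    Software image version: 4.17.5M
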
+ +Important Notes +=============== + +This package is in beta, make sure to test it carefully before running it in production. + +If confirmed working correctly, please report and add a note on this page with the platform model and EOS version. + +If you want to uninstall this package, please refer to the uninstalling_ section. + +Installation from the Official SaltStack Repository +=================================================== + +Download the swix package and save it to flash. + +.. code-block:: bash + + veos#copy https://salt-eos.netops.life/salt-eos-latest.swix flash: + veos#copy https://salt-eos.netops.life/startup.sh flash: + +Install the Extension +===================== + +Copy the Salt package to extension + +.. code-block:: bash + + veos#copy flash:salt-eos-latest.swix extension: + +Install the SWIX + +.. code-block:: bash + + veos#extension salt-eos-latest.swix force + +Verify the installation + +.. code-block:: bash + + veos#show extensions | include salt-eos + salt-eos-2017-07-19.swix 1.0.11/1.fc25 A, F 27 + +Change the Salt master IP address or FQDN, by edit the variable (SALT_MASTER) + +.. code-block:: bash + + veos#bash vi /mnt/flash/startup.sh + +Make sure you enable the eAPI with unix-socket + +.. code-block:: bash + + veos(config)#management api http-commands + protocol unix-socket + no shutdown + +Post-installation tasks +======================= + +Generate Keys and host record and start Salt minion + +.. code-block:: bash + + veos#bash + #sudo /mnt/flash/startup.sh + +``salt-minion`` should be running + +Copy the installed extensions to boot-extensions + +.. code-block:: bash + + veos#copy installed-extensions boot-extensions + +Apply event-handler to let EOS start salt-minion during boot-up + +.. code-block:: bash + + veos(config)#event-handler boot-up-script + trigger on-boot + action bash sudo /mnt/flash/startup.sh + +For more specific installation details of the ``salt-minion``, please refer to :ref:`Configuring Salt`. + +.. _uninstalling: + +Uninstalling +============ + +If you decide to uninstall this package, the following steps are recommended for safety: + +1. Remove the extension from boot-extensions + +.. code-block:: bash + + veos#bash rm /mnt/flash/boot-extensions + +2. Remove the extension from extensions folder + +.. code-block:: bash + + veos#bash rm /mnt/flash/.extensions/salt-eos-latest.swix + +2. Remove boot-up script + +.. code-block:: bash + + veos(config)#no event-handler boot-up-script + +Additional Information +====================== + +This SWIX extension contains the following RPM packages: + +.. 
code-block:: text + + libsodium-1.0.11-1.fc25.i686.rpm + libstdc++-6.2.1-2.fc25.i686.rpm + openpgm-5.2.122-6.fc24.i686.rpm + python-Jinja2-2.8-0.i686.rpm + python-PyYAML-3.12-0.i686.rpm + python-babel-0.9.6-5.fc18.noarch.rpm + python-backports-1.0-3.fc18.i686.rpm + python-backports-ssl_match_hostname-3.4.0.2-1.fc18.noarch.rpm + python-backports_abc-0.5-0.i686.rpm + python-certifi-2016.9.26-0.i686.rpm + python-chardet-2.0.1-5.fc18.noarch.rpm + python-crypto-1.4.1-1.noarch.rpm + python-crypto-2.6.1-1.fc18.i686.rpm + python-futures-3.1.1-1.noarch.rpm + python-jtextfsm-0.3.1-0.noarch.rpm + python-kitchen-1.1.1-2.fc18.noarch.rpm + python-markupsafe-0.18-1.fc18.i686.rpm + python-msgpack-python-0.4.8-0.i686.rpm + python-napalm-base-0.24.3-1.noarch.rpm + python-napalm-eos-0.6.0-1.noarch.rpm + python-netaddr-0.7.18-0.noarch.rpm + python-pyeapi-0.7.0-0.noarch.rpm + python-salt-2017.7.0_1414_g2fb986f-1.noarch.rpm + python-singledispatch-3.4.0.3-0.i686.rpm + python-six-1.10.0-0.i686.rpm + python-tornado-4.4.2-0.i686.rpm + python-urllib3-1.5-7.fc18.noarch.rpm + python2-zmq-15.3.0-2.fc25.i686.rpm + zeromq-4.1.4-5.fc25.i686.rpm diff --git a/doc/topics/installation/index.rst b/doc/topics/installation/index.rst index 0ee4b38d7a..2f96830c4e 100644 --- a/doc/topics/installation/index.rst +++ b/doc/topics/installation/index.rst @@ -46,6 +46,7 @@ These guides go into detail how to install Salt on a given platform. arch debian + eos fedora freebsd gentoo diff --git a/doc/topics/jinja/index.rst b/doc/topics/jinja/index.rst index 33001e137a..d57fbcbb04 100644 --- a/doc/topics/jinja/index.rst +++ b/doc/topics/jinja/index.rst @@ -351,7 +351,7 @@ Returns: .. versionadded:: 2017.7.0 -Wraps a text around quoutes. +This text will be wrapped in quotes. .. jinja_ref:: regex_search @@ -766,19 +766,43 @@ Returns: Check a whitelist and/or blacklist to see if the value matches it. -Example: +This filter can be used with either a whitelist or a blacklist individually, +or a whitelist and a blacklist can be passed simultaneously. + +If whitelist is used alone, value membership is checked against the +whitelist only. If the value is found, the function returns ``True``. +Otherwise, it returns ``False``. + +If blacklist is used alone, value membership is checked against the +blacklist only. If the value is found, the function returns ``False``. +Otherwise, it returns ``True``. + +If both a whitelist and a blacklist are provided, value membership in the +blacklist will be examined first. If the value is not found in the blacklist, +then the whitelist is checked. If the value isn't found in the whitelist, +the function returns ``False``. + +Whitelist Example: .. code-block:: jinja - {{ 5 | check_whitelist_blacklist(whitelist=[5, 6, 7]) }} - {{ 5 | check_whitelist_blacklist(blacklist=[5, 6, 7]) }} + {{ 5 | check_whitelist_blacklist(whitelist=[5, 6, 7]) }} Returns: .. code-block:: python - True + True +Blacklist Example: + +.. code-block:: jinja + + {{ 5 | check_whitelist_blacklist(blacklist=[5, 6, 7]) }} + +.. code-block:: python + + False .. jinja_ref:: date_format @@ -804,12 +828,14 @@ Returns: 08.03.2017 17:00 -.. jinja_ref:: str_to_num +.. jinja_ref:: to_num -``str_to_num`` --------------- +``to_num`` +---------- .. versionadded:: 2017.7.0 +.. versionadded:: Oxygen + Renamed from ``str_to_num`` to ``to_num``. Converts a string to its numerical value. @@ -817,7 +843,7 @@ Example: .. code-block:: jinja - {{ '5' | str_to_num }} + {{ '5' | to_num }} Returns: @@ -841,6 +867,13 @@ Example: {{ 'wall of text' | to_bytes }} +.. 
note:: + + This option may have adverse effects when using the default renderer, ``yaml_jinja``. + This is due to the fact that YAML requires proper handling in regard to special + characters. Please see the section on :ref:`YAML ASCII support ` + in the :ref:`YAML Idiosyncracies ` documentation for more + information. .. jinja_ref:: json_decode_list @@ -886,22 +919,28 @@ Returns: {'a': 'b'} -.. jinja_ref:: rand_str +.. jinja_ref:: random_hash -``rand_str`` ------------- +``random_hash`` +--------------- .. versionadded:: 2017.7.0 +.. versionadded:: Oxygen + Renamed from ``rand_str`` to ``random_hash`` to more accurately describe + what the filter does. -Generate a random string and applies a hash. Default hashing: md5. +Generates a random number between 1 and the number passed to the filter, and +then hashes it. The default hash type is the one specified by the minion's +:conf_minion:`hash_type` config option, but an alternate hash type can be +passed to the filter as an argument. Example: .. code-block:: jinja - {% set passwd_length = 17 %} - {{ passwd_length | rand_str }} - {{ passwd_length | rand_str('sha512') }} + {% set num_range = 99999999 %} + {{ num_range | random_hash }} + {{ num_range | random_hash('sha512') }} Returns: @@ -1202,7 +1241,7 @@ Example: .. code-block:: jinja - {{ ['192.168.0.1', 'foo', 'bar', 'fe80::'] | ipv4 }} + {{ ['192.168.0.1', 'foo', 'bar', 'fe80::'] | ipv6 }} Returns: @@ -1245,7 +1284,7 @@ Returns: .. versionadded:: 2017.7.0 -Return the size of the network. +Return the size of the network. This utility works for both IPv4 and IPv6. Example: @@ -1305,6 +1344,13 @@ Example: {{ '00:11:22:33:44:55' | mac_str_to_bytes }} +.. note:: + + This option may have adverse effects when using the default renderer, ``yaml_jinja``. + This is due to the fact that YAML requires proper handling in regard to special + characters. Please see the section on :ref:`YAML ASCII support ` + in the :ref:`YAML Idiosyncracies ` documentation for more + information. .. jinja_ref:: dns_check diff --git a/doc/topics/proxyminion/index.rst b/doc/topics/proxyminion/index.rst index 80806c646a..bfa5383155 100644 --- a/doc/topics/proxyminion/index.rst +++ b/doc/topics/proxyminion/index.rst @@ -89,7 +89,7 @@ they are being loaded for the correct proxytype, example below: Only work on proxy ''' try: - if salt.utils.is_proxy() and \ + if salt.utils.platform.is_proxy() and \ __opts__['proxy']['proxytype'] == 'ssh_sample': return __virtualname__ except KeyError: @@ -156,20 +156,23 @@ will need to be restarted to pick up any changes. A corresponding utility funct ``saltutil.sync_proxymodules``, has been added to sync these modules to minions. In addition, a salt.utils helper function called `is_proxy()` was added to make -it easier to tell when the running minion is a proxy minion. +it easier to tell when the running minion is a proxy minion. **NOTE: This +function was renamed to salt.utils.platform.is_proxy() for the Oxygen release** New in 2015.8 ------------- -Starting with the 2015.8 release of Salt, proxy processes are no longer forked off from a controlling minion. -Instead, they have their own script ``salt-proxy`` which takes mostly the same arguments that the -standard Salt minion does with the addition of ``--proxyid``. This is the id that the salt-proxy will -use to identify itself to the master. Proxy configurations are still best kept in Pillar and their format -has not changed. +Starting with the 2015.8 release of Salt, proxy processes are no longer forked +off from a controlling minion. 
Instead, they have their own script +``salt-proxy`` which takes mostly the same arguments that the standard Salt +minion does with the addition of ``--proxyid``. This is the id that the +salt-proxy will use to identify itself to the master. Proxy configurations are +still best kept in Pillar and their format has not changed. -This change allows for better process control and logging. Proxy processes can now be listed with standard -process management utilities (``ps`` from the command line). Also, a full Salt minion is no longer -required (though it is still strongly recommended) on machines hosting proxies. +This change allows for better process control and logging. Proxy processes can +now be listed with standard process management utilities (``ps`` from the +command line). Also, a full Salt minion is no longer required (though it is +still strongly recommended) on machines hosting proxies. Getting Started @@ -619,9 +622,10 @@ in the proxymodule itself. This might be useful if a proxymodule author wants t all the code for the proxy interface in the same place instead of splitting it between the proxy and grains directories. -This function will only be called automatically if the configuration variable ``proxy_merge_grains_in_module`` -is set to True in the proxy configuration file (default ``/etc/salt/proxy``). This -variable defaults to ``True`` in the release code-named *2017.7.0*. +This function will only be called automatically if the configuration variable +``proxy_merge_grains_in_module`` is set to True in the proxy configuration file +(default ``/etc/salt/proxy``). This variable defaults to ``True`` in the +release code-named *2017.7.0*. .. code: python:: @@ -640,7 +644,7 @@ variable defaults to ``True`` in the release code-named *2017.7.0*. def __virtual__(): try: - if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample': + if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample': return __virtualname__ except KeyError: pass @@ -708,7 +712,7 @@ Example from ``salt/grains/rest_sample.py``: def __virtual__(): try: - if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample': + if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample': return __virtualname__ except KeyError: pass diff --git a/doc/topics/releases/2016.11.7.rst b/doc/topics/releases/2016.11.7.rst index 6ef9587133..2ad821ff22 100644 --- a/doc/topics/releases/2016.11.7.rst +++ b/doc/topics/releases/2016.11.7.rst @@ -3,3 +3,13 @@ Salt 2016.11.7 Release Notes ============================ Version 2016.11.7 is a bugfix release for :ref:`2016.11.0 `. + +Changes for v2016.11.6..v2016.11.7 +---------------------------------- + +Security Fix +============ + +CVE-2017-12791 Maliciously crafted minion IDs can cause unwanted directory traversals on the Salt-master + +Correct a flaw in minion id validation which could allow certain minions to authenticate to a master despite not having the correct credentials. To exploit the vulnerability, an attacker must create a salt-minion with an ID containing characters that will cause a directory traversal. Credit for discovering the security flaw goes to: Vernhk@qq.com diff --git a/doc/topics/releases/2016.3.0.rst b/doc/topics/releases/2016.3.0.rst index 01e1a5751c..b28fed2c66 100644 --- a/doc/topics/releases/2016.3.0.rst +++ b/doc/topics/releases/2016.3.0.rst @@ -175,6 +175,10 @@ they are being loaded for the correct proxytype, example below: return False +.. 
note:: + ``salt.utils.is_proxy()`` has been renamed to + ``salt.utils.platform.is_proxy`` as of the Oxygen release. + The try/except block above exists because grains are processed very early in the proxy minion startup process, sometimes earlier than the proxy key in the ``__opts__`` dictionary is populated. diff --git a/doc/topics/releases/2017.7.0.rst b/doc/topics/releases/2017.7.0.rst index 52e026e490..a0e257b347 100644 --- a/doc/topics/releases/2017.7.0.rst +++ b/doc/topics/releases/2017.7.0.rst @@ -28,8 +28,6 @@ The following salt-cloud drivers have known issues running with Python 3. These - Joyent -- Any driver that relies on the `apache-libcloud` library such as cloudstack, dimenstiondata, gce, nova, and openstack - - When running under Python 3, users who require Unicode support should ensure that a locale is set on their machines. Users using the `C` locale are advised to switch to a UTF-aware locale to ensure proper functionality with Salt with Python 3. @@ -124,13 +122,12 @@ State Module Changes # After run_something: module.run: - mymodule.something: + - mymodule.something: - name: some name - first_arg: one - second_arg: two - do_stuff: True - Since a lot of users are already using :py:func:`module.run ` states, this new behavior must currently be explicitly turned on, to allow users to take their time updating their SLS @@ -138,6 +135,36 @@ State Module Changes the next feature release of Salt (Oxygen) and the old usage will no longer be supported at that time. + Another feature of the new :py:func:`module.run ` is that + it allows calling many functions in a single batch, such as: + + .. code-block:: yaml + + run_something: + module.run: + - mymodule.function_without_parameters: + - mymodule.another_function: + - myparam + - my_other_param + + In a rare case that you have a function that needs to be called several times but + with the different parameters, an additional feature of "tagging" is to the + rescue. In order to tag a function, use a colon delimeter. For example: + + .. code-block:: yaml + + run_something: + module.run: + - mymodule.same_function:1: + - mymodule.same_function:2: + - myparam + - my_other_param + - mymodule.same_function:3: + - foo: bar + + The example above will run `mymodule.same_function` three times with the + different parameters. + To enable the new behavior for :py:func:`module.run `, add the following to the minion config file: @@ -145,6 +172,7 @@ State Module Changes use_superseded: - module.run + - The default for the ``fingerprint_hash_type`` option used in the ``present`` function in the :mod:`ssh ` state changed from ``md5`` to ``sha256``. @@ -678,6 +706,7 @@ Execution modules - :mod:`salt.modules.grafana4 ` - :mod:`salt.modules.heat ` - :mod:`salt.modules.icinga2 ` +- :mod:`salt.modules.kubernetes ` - :mod:`salt.modules.logmod ` - :mod:`salt.modules.mattermost ` - :mod:`salt.modules.namecheap_dns ` @@ -756,6 +785,7 @@ States - :mod:`salt.states.icinga2 ` - :mod:`salt.states.influxdb_continuous_query ` - :mod:`salt.states.influxdb_retention_policy ` +- :mod:`salt.states.kubernetes ` - :mod:`salt.states.logadm ` - :mod:`salt.states.logrotate ` - :mod:`salt.states.msteams ` @@ -945,3 +975,13 @@ The ``glusterfs`` state had the following function removed: The ``openvswitch_port`` state had the following change: - The ``type`` option was removed from the ``present`` function. Please use ``tunnel_type`` instead. 
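As an illustration of the ``openvswitch_port`` change noted above, a state that previously passed ``type`` would now pass ``tunnel_type``. This is only a sketch; the port name, bridge, tunnel id, and remote address below are placeholders, not values taken from this release.

.. code-block:: yaml

    gre_tunnel_port:
      openvswitch_port.present:
        - bridge: br0
        - tunnel_type: gre
        - id: 101
        - remote: 10.0.0.10
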
+ +Build Notes +=========== + +Windows Installer Packages +-------------------------- + +Windows Installer packages have been patched with the following PR: 42347_ + +.. _42347: https://github.com/saltstack/salt/pull/42347 diff --git a/doc/topics/releases/2017.7.1.rst b/doc/topics/releases/2017.7.1.rst new file mode 100644 index 0000000000..7cc616c94b --- /dev/null +++ b/doc/topics/releases/2017.7.1.rst @@ -0,0 +1,190 @@ +============================ +Salt 2017.7.1 Release Notes +============================ + +Version 2017.7.1 is a bugfix release for :ref:`2017.7.0 `. + +Security Fix +============ + +CVE-2017-12791 Maliciously crafted minion IDs can cause unwanted directory traversals on the Salt-master + +Correct a flaw in minion id validation which could allow certain minions to authenticate to a master despite not having the correct credentials. To exploit the vulnerability, an attacker must create a salt-minion with an ID containing characters that will cause a directory traversal. Credit for discovering the security flaw goes to: Vernhk@qq.com + +Changes for v2017.7.0..v2017.7.1 +-------------------------------- + +Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs): + +*Generated at: 2017-07-26T01:09:40Z* + +Statistics: + +- Total Merges: **11** +- Total Issue references: **9** +- Total PR references: **22** + +Changes: + + +- **PR** `#42548`_: (*gtmanfred*) pass in empty kwarg for reactor + @ *2017-07-26T00:41:20Z* + + - **ISSUE** `#460`_: (*whiteinge*) Add a topic and a ref for modules/states/returners/renderers/runners + | refs: `#42548`_ + * 711b742c54 Merge pull request `#42548`_ from gtmanfred/2017.7.1 + * 0257c1dc32 pass in empty kwarg for reactor + + * b948e980d2 update chunk, not kwarg in chunk + +- **PR** `#42522`_: (*gtmanfred*) pacman wildcard is only for repository installs + @ *2017-07-24T20:51:05Z* + + - **ISSUE** `#42519`_: (*xuhcc*) Error when installing package from file under Arch Linux + | refs: `#42522`_ + * 50c1635dcc Merge pull request `#42522`_ from gtmanfred/2017.7.1 + * 7787fb9e1b pacman wildcard is only for repository installs + +- **PR** `#42508`_: (*rallytime*) Back-port `#42474`_ to 2017.7.1 + @ *2017-07-24T20:49:51Z* + + - **PR** `#42474`_: (*whiteinge*) Cmd arg kwarg parsing test + | refs: `#42508`_ + - **PR** `#39646`_: (*terminalmage*) Handle deprecation of passing string args to load_args_and_kwargs + | refs: `#42474`_ + * 05c07ac049 Merge pull request `#42508`_ from rallytime/`bp-42474`_ + * 76fb074433 Add a test.arg variant that cleans the pub kwargs by default + + * 624f63648e Lint fixes + + * d246a5fc61 Add back support for string kwargs + + * 854e098aa0 Add LocalClient.cmd test for arg/kwarg parsing + +- **PR** `#42472`_: (*rallytime*) Back-port `#42435`_ to 2017.7.1 + @ *2017-07-24T15:11:13Z* + + - **ISSUE** `#42427`_: (*grichmond-salt*) Issue Passing Variables created from load_json as Inline Pillar Between States + | refs: `#42435`_ + - **PR** `#42435`_: (*terminalmage*) Modify our custom YAML loader to treat unicode literals as unicode strings + | refs: `#42472`_ + * 95fe2558e4 Merge pull request `#42472`_ from rallytime/`bp-42435`_ + * 5c47af5b98 Modify our custom YAML loader to treat unicode literals as unicode strings + +- **PR** `#42473`_: (*rallytime*) Back-port `#42436`_ to 2017.7.1 + @ *2017-07-24T15:10:29Z* + + - **ISSUE** `#42374`_: (*tyhunt99*) [2017.7.0] salt-run mange.versions throws exception if minion is offline or unresponsive + | refs: `#42436`_ + - **PR** `#42436`_: 
(*garethgreenaway*) Fixes to versions function in manage runner + | refs: `#42473`_ + * 5b99d45f54 Merge pull request `#42473`_ from rallytime/`bp-42436`_ + * 82ed919803 Updating the versions function inside the manage runner to account for when a minion is offline and we are unable to determine it's version. + +- **PR** `#42471`_: (*rallytime*) Back-port `#42399`_ to 2017.7.1 + @ *2017-07-24T15:09:50Z* + + - **ISSUE** `#42381`_: (*zebooka*) Git.detached broken in 2017.7.0 + | refs: `#42399`_ + - **ISSUE** `#38878`_: (*tomlaredo*) [Naming consistency] git.latest "rev" option VS git.detached "ref" option + | refs: `#38898`_ + - **PR** `#42399`_: (*rallytime*) Update old "ref" references to "rev" in git.detached state + | refs: `#42471`_ + - **PR** `#38898`_: (*terminalmage*) git.detached: rename ref to rev for consistency + | refs: `#42399`_ + * 3d1a2d3f9f Merge pull request `#42471`_ from rallytime/`bp-42399`_ + * b9a4669e5a Update old "ref" references to "rev" in git.detached state + +- **PR** `#42470`_: (*rallytime*) Back-port `#42031`_ to 2017.7.1 + @ *2017-07-24T15:09:30Z* + + - **ISSUE** `#42400`_: (*Enquier*) Conflict in execution of passing pillar data to orch/reactor event executions 2017.7.0 + | refs: `#42031`_ + - **PR** `#42031`_: (*skizunov*) Fix: Reactor emits critical error + | refs: `#42470`_ + * 09766bccbc Merge pull request `#42470`_ from rallytime/`bp-42031`_ + * 0a0c6287a4 Fix: Reactor emits critical error + +- **PR** `#42469`_: (*rallytime*) Back-port `#42027`_ to 2017.7.1 + @ *2017-07-21T22:41:02Z* + + - **ISSUE** `#41949`_: (*jrporcaro*) Event returner doesn't work with Windows Master + | refs: `#42027`_ + - **PR** `#42027`_: (*gtmanfred*) import salt.minion for EventReturn for Windows + | refs: `#42469`_ + * d7b172a15b Merge pull request `#42469`_ from rallytime/`bp-42027`_ + * ed612b4ee7 import salt.minion for EventReturn for Windows + +- **PR** `#42466`_: (*rallytime*) Back-port `#42452`_ to 2017.7.1 + @ *2017-07-21T19:41:24Z* + + - **PR** `#42452`_: (*Ch3LL*) update windows urls to new py2/py3 naming scheme + | refs: `#42466`_ + * 8777b1a825 Merge pull request `#42466`_ from rallytime/`bp-42452`_ + * c10196f68c update windows urls to new py2/py3 naming scheme + +- **PR** `#42439`_: (*rallytime*) Back-port `#42409`_ to 2017.7.1 + @ *2017-07-21T17:38:10Z* + + - **PR** `#42409`_: (*twangboy*) Add Scripts to build Py3 on Mac + | refs: `#42439`_ + * fceaaf41d0 Merge pull request `#42439`_ from rallytime/`bp-42409`_ + * 8176964b41 Remove build and dist, sign pkgs + + * 2c14d92a07 Fix hard coded pip path + + * 82fdd7c2e1 Add support for Py3 + + * 2478447246 Update Python and other reqs + +- **PR** `#42441`_: (*rallytime*) Back-port `#42433`_ to 2017.7.1 + @ *2017-07-21T17:37:01Z* + + - **ISSUE** `#42403`_: (*astronouth7303*) [2017.7] Pillar empty when state is applied from orchestrate + | refs: `#42433`_ + - **PR** `#42433`_: (*terminalmage*) Only force saltenv/pillarenv to be a string when not None + | refs: `#42441`_ + * 660400560b Merge pull request `#42441`_ from rallytime/`bp-42433`_ + * 17f347123a Only force saltenv/pillarenv to be a string when not None + + +.. _`#38878`: https://github.com/saltstack/salt/issues/38878 +.. _`#38898`: https://github.com/saltstack/salt/pull/38898 +.. _`#39646`: https://github.com/saltstack/salt/pull/39646 +.. _`#41949`: https://github.com/saltstack/salt/issues/41949 +.. _`#42027`: https://github.com/saltstack/salt/pull/42027 +.. _`#42031`: https://github.com/saltstack/salt/pull/42031 +.. 
_`#42374`: https://github.com/saltstack/salt/issues/42374 +.. _`#42381`: https://github.com/saltstack/salt/issues/42381 +.. _`#42399`: https://github.com/saltstack/salt/pull/42399 +.. _`#42400`: https://github.com/saltstack/salt/issues/42400 +.. _`#42403`: https://github.com/saltstack/salt/issues/42403 +.. _`#42409`: https://github.com/saltstack/salt/pull/42409 +.. _`#42427`: https://github.com/saltstack/salt/issues/42427 +.. _`#42433`: https://github.com/saltstack/salt/pull/42433 +.. _`#42435`: https://github.com/saltstack/salt/pull/42435 +.. _`#42436`: https://github.com/saltstack/salt/pull/42436 +.. _`#42439`: https://github.com/saltstack/salt/pull/42439 +.. _`#42441`: https://github.com/saltstack/salt/pull/42441 +.. _`#42452`: https://github.com/saltstack/salt/pull/42452 +.. _`#42466`: https://github.com/saltstack/salt/pull/42466 +.. _`#42469`: https://github.com/saltstack/salt/pull/42469 +.. _`#42470`: https://github.com/saltstack/salt/pull/42470 +.. _`#42471`: https://github.com/saltstack/salt/pull/42471 +.. _`#42472`: https://github.com/saltstack/salt/pull/42472 +.. _`#42473`: https://github.com/saltstack/salt/pull/42473 +.. _`#42474`: https://github.com/saltstack/salt/pull/42474 +.. _`#42508`: https://github.com/saltstack/salt/pull/42508 +.. _`#42519`: https://github.com/saltstack/salt/issues/42519 +.. _`#42522`: https://github.com/saltstack/salt/pull/42522 +.. _`#42548`: https://github.com/saltstack/salt/pull/42548 +.. _`#460`: https://github.com/saltstack/salt/issues/460 +.. _`bp-42027`: https://github.com/saltstack/salt/pull/42027 +.. _`bp-42031`: https://github.com/saltstack/salt/pull/42031 +.. _`bp-42399`: https://github.com/saltstack/salt/pull/42399 +.. _`bp-42409`: https://github.com/saltstack/salt/pull/42409 +.. _`bp-42433`: https://github.com/saltstack/salt/pull/42433 +.. _`bp-42435`: https://github.com/saltstack/salt/pull/42435 +.. _`bp-42436`: https://github.com/saltstack/salt/pull/42436 +.. _`bp-42452`: https://github.com/saltstack/salt/pull/42452 +.. _`bp-42474`: https://github.com/saltstack/salt/pull/42474 diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index 1fb464d4db..64eecb5a3f 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -25,6 +25,27 @@ by any master tops matches that are not matched via a top file. To make master tops matches execute first, followed by top file matches, set the new :conf_minion:`master_tops_first` minion config option to ``True``. +LDAP via External Authentication Changes +---------------------------------------- +In this release of Salt, if LDAP Bind Credentials are supplied, then +these credentials will be used for all LDAP access except the first +authentication when a job is submitted. The first authentication will +use the user's credentials as passed on the CLI. This behavior is to +accommodate certain two-factor authentication schemes where the authentication +token can only be used once. + +In previous releases the bind credentials would only be used to determine +the LDAP user's existence and group membership. The user's LDAP credentials +were used from then on. + +Stormpath External Authentication Removed +----------------------------------------- + +Per Stormpath's announcement, their API will be shutting down on 8/17/2017 at +noon PST so the Stormpath external authentication module has been removed. + +https://stormpath.com/oktaplusstormpath + New GitFS Features ------------------ @@ -49,8 +70,30 @@ environments (i.e. 
``saltenvs``) have been added: ignore all tags and use branches only, and also to keep SHAs from being made available as saltenvs. -Salt Cloud and Newer PyWinRM Versions -------------------------------------- +Salt Cloud Features +------------------- + +Pre-Flight Commands +=================== + +Support has been added for specified "preflight commands" to run on a VM before +the deploy script is run. These must be defined as a list in a cloud configuration +file. For example: + +.. code-block:: yaml + + my-cloud-profile: + provider: linode-config + image: Ubuntu 16.04 LTS + size: Linode 2048 + preflight_cmds: + - whoami + - echo 'hello world!' + +These commands will run in sequence **before** the bootstrap script is executed. + +Newer PyWinRM Versions +---------------------- Versions of ``pywinrm>=0.2.1`` are finally able to disable validation of self signed certificates. :ref:`Here` for more information. @@ -63,23 +106,514 @@ running on T-Series SPARC hardware. The ``virtual_subtype`` grain is populated as a list of domain roles. +Beacon configuration changes +---------------------------------------- + +In order to remain consistent and to align with other Salt components such as states, +support for configuring beacons using dictionary based configuration has been deprecated +in favor of list based configuration. All beacons have a validation function which will +check the configuration for the correct format and only load if the validation passes. + +- ``avahi_announce`` beacon + + Old behavior: + ``` + beacons: + avahi_announce: + run_once: True + servicetype: _demo._tcp + port: 1234 + txt: + ProdName: grains.productname + SerialNo: grains.serialnumber + Comments: 'this is a test' + ``` + + New behavior: + ``` + beacons: + avahi_announce: + - run_once: True + - servicetype: _demo._tcp + - port: 1234 + - txt: + ProdName: grains.productname + SerialNo: grains.serialnumber + Comments: 'this is a test' + ``` + + - ``bonjour_announce`` beacon + + Old behavior: + ``` + beacons: + bonjour_announce: + run_once: True + servicetype: _demo._tcp + port: 1234 + txt: + ProdName: grains.productname + SerialNo: grains.serialnumber + Comments: 'this is a test' + ``` + + New behavior: + ``` + beacons: + bonjour_announce: + - run_once: True + - servicetype: _demo._tcp + - port: 1234 + - txt: + ProdName: grains.productname + SerialNo: grains.serialnumber + Comments: 'this is a test' + ``` + +- ``btmp`` beacon + + Old behavior: + ``` + beacons: + btmp: {} + ``` + + New behavior: + ``` + beacons: + btmp: [] + + ``` + +- ``glxinfo`` beacon + + Old behavior: + ``` + beacons: + glxinfo: + user: frank + screen_event: True + ``` + + New behavior: + ``` + beacons: + glxinfo: + - user: frank + - screen_event: True + ``` + +- ``haproxy`` beacon + + Old behavior: + ``` + beacons: + haproxy: + - www-backend: + threshold: 45 + servers: + - web1 + - web2 + - interval: 120 + ``` + + New behavior: + ``` + beacons: + haproxy: + - backends: + www-backend: + threshold: 45 + servers: + - web1 + - web2 + - interval: 120 + ``` + +- ``inotify`` beacon + + Old behavior: + ``` + beacons: + inotify: + /path/to/file/or/dir: + mask: + - open + - create + - close_write + recurse: True + auto_add: True + exclude: + - /path/to/file/or/dir/exclude1 + - /path/to/file/or/dir/exclude2 + - /path/to/file/or/dir/regex[a-m]*$: + regex: True + coalesce: True + ``` + + New behavior: + ``` + beacons: + inotify: + - files: + /path/to/file/or/dir: + mask: + - open + - create + - close_write + recurse: True + auto_add: True + exclude: + - 
/path/to/file/or/dir/exclude1 + - /path/to/file/or/dir/exclude2 + - /path/to/file/or/dir/regex[a-m]*$: + regex: True + - coalesce: True +``` + +- ``journald`` beacon + + Old behavior: + ``` + beacons: + journald: + sshd: + SYSLOG_IDENTIFIER: sshd + PRIORITY: 6 + ``` + + New behavior: + ``` + beacons: + journald: + - services: + sshd: + SYSLOG_IDENTIFIER: sshd + PRIORITY: 6 + ``` + +- ``load`` beacon + + Old behavior: + ``` + beacons: + load: + 1m: + - 0.0 + - 2.0 + 5m: + - 0.0 + - 1.5 + 15m: + - 0.1 + - 1.0 + emitatstartup: True + onchangeonly: False + ``` + + New behavior: + ``` + beacons: + load: + - averages: + 1m: + - 0.0 + - 2.0 + 5m: + - 0.0 + - 1.5 + 15m: + - 0.1 + - 1.0 + - emitatstartup: True + - onchangeonly: False + ``` + +- ``log`` beacon + + Old behavior: + ``` + beacons: + log: + file: + : + regex: + ``` + + New behavior: + ``` + beacons: + log: + - file: + - tags: + : + regex: + ``` + +- ``network_info`` beacon + + Old behavior: + ``` + beacons: + network_info: + - eth0: + type: equal + bytes_sent: 100000 + bytes_recv: 100000 + packets_sent: 100000 + packets_recv: 100000 + errin: 100 + errout: 100 + dropin: 100 + dropout: 100 + ``` + + New behavior: + ``` + beacons: + network_info: + - interfaces: + eth0: + type: equal + bytes_sent: 100000 + bytes_recv: 100000 + packets_sent: 100000 + packets_recv: 100000 + errin: 100 + errout: 100 + dropin: 100 + dropout: 100 + ``` + +- ``network_settings`` beacon + + Old behavior: + ``` + beacons: + network_settings: + eth0: + ipaddr: + promiscuity: + onvalue: 1 + eth1: + linkmode: + ``` + + New behavior: + ``` + beacons: + network_settings: + - interfaces: + - eth0: + ipaddr: + promiscuity: + onvalue: 1 + - eth1: + linkmode: + ``` + +- ``proxy_example`` beacon + + Old behavior: + ``` + beacons: + proxy_example: + endpoint: beacon + ``` + + New behavior: + ``` + beacons: + proxy_example: + - endpoint: beacon + ``` + +- ``ps`` beacon + + Old behavior: + ``` + beacons: + ps: + - salt-master: running + - mysql: stopped + ``` + + New behavior: + ``` + beacons: + ps: + - processes: + salt-master: running + mysql: stopped + ``` + +- ``salt_proxy`` beacon + + Old behavior: + ``` + beacons: + salt_proxy: + - p8000: {} + - p8001: {} + ``` + + New behavior: + ``` + beacons: + salt_proxy: + - proxies: + p8000: {} + p8001: {} + ``` + +- ``sensehat`` beacon + + Old behavior: + ``` + beacons: + sensehat: + humidity: 70% + temperature: [20, 40] + temperature_from_pressure: 40 + pressure: 1500 + ``` + + New behavior: + ``` + beacons: + sensehat: + - sensors: + humidity: 70% + temperature: [20, 40] + temperature_from_pressure: 40 + pressure: 1500 + ``` + +- ``service`` beacon + + Old behavior: + ``` + beacons: + service: + salt-master: + mysql: + + ``` + + New behavior: + ``` + beacons: + service: + - services: + nginx: + onchangeonly: True + delay: 30 + uncleanshutdown: /run/nginx.pid + ``` + +- ``sh`` beacon + + Old behavior: + ``` + beacons: + sh: {} + ``` + + New behavior: + ``` + beacons: + sh: [] + ``` + +- ``status`` beacon + + Old behavior: + ``` + beacons: + status: {} + ``` + + New behavior: + ``` + beacons: + status: [] + ``` + +- ``telegram_bot_msg`` beacon + + Old behavior: + ``` + beacons: + telegram_bot_msg: + token: "" + accept_from: + - "" + interval: 10 + ``` + + New behavior: + ``` + beacons: + telegram_bot_msg: + - token: "" + - accept_from: + - "" + - interval: 10 + ``` + +- ``twilio_txt_msg`` beacon + + Old behavior: + ``` + beacons: + twilio_txt_msg: + account_sid: "" + auth_token: "" + twilio_number: "+15555555555" + interval: 10 + 
``` + + New behavior: + ``` + beacons: + twilio_txt_msg: + - account_sid: "" + - auth_token: "" + - twilio_number: "+15555555555" + - interval: 10 + ``` + +- ``wtmp`` beacon + + Old behavior: + ``` + beacons: + wtmp: {} + ``` + + New behavior: + ``` + beacons: + wtmp: [] + ``` + Deprecations -============ +------------ Configuration Option Deprecations ---------------------------------- +================================= - The ``requests_lib`` configuration option has been removed. Please use ``backend`` instead. Profitbricks Cloud Updated Dependency -------------------------------------- +===================================== -The minimum version of the `profitbrick` python package for the `profitbricks` +The minimum version of the ``profitbrick`` python package for the ``profitbricks`` cloud driver has changed from 3.0.0 to 3.1.0. Module Deprecations -------------------- +=================== The ``blockdev`` execution module has been removed. Its functions were merged with the ``disk`` module. Please use the ``disk`` execution module instead. @@ -119,7 +653,7 @@ The ``win_service`` module had the following changes: ``service_type`` instead. Runner Deprecations -------------------- +=================== The ``manage`` runner had the following changes: @@ -127,7 +661,7 @@ The ``manage`` runner had the following changes: use ``salt-ssh`` roster entries for the host instead. State Deprecations ------------------- +================== The ``archive`` state had the following changes: @@ -150,15 +684,27 @@ The ``file`` state had the following changes: - The ``show_diff`` option was removed. Please use ``show_changes`` instead. Grain Deprecations ------------------- +================== For ``smartos`` some grains have been deprecated. These grains will be removed in Neon. - The ``hypervisor_uuid`` has been replaced with ``mdata:sdc:server_uuid`` grain. - The ``datacenter`` has been replaced with ``mdata:sdc:datacenter_name`` grain. +Minion Blackout +--------------- + +During a blackout, minions will not execute any remote execution commands, +except for :mod:`saltutil.refresh_pillar `. +Previously, support was added so that blackouts are enabled using a special +pillar key, ``minion_blackout`` set to ``True`` and an optional pillar key +``minion_blackout_whitelist`` to specify additional functions that are permitted +during blackout. This release adds support for using this feature in the grains +as well, by using special grains keys ``minion_blackout`` and +``minion_blackout_whitelist``. + Utils Deprecations ------------------- +================== The ``salt.utils.cloud.py`` file had the following change: @@ -166,7 +712,7 @@ The ``salt.utils.cloud.py`` file had the following change: optional. Other Miscellaneous Deprecations --------------------------------- +================================ The ``version.py`` file had the following changes: diff --git a/doc/topics/releases/releasecandidate.rst b/doc/topics/releases/releasecandidate.rst index e2a628920a..60918d6ac3 100644 --- a/doc/topics/releases/releasecandidate.rst +++ b/doc/topics/releases/releasecandidate.rst @@ -32,6 +32,8 @@ Builds for a few platforms are available as part of the RC at https://repo.salts Available builds: +- Ubuntu16 +- Redhat7 - Windows .. 
FreeBSD diff --git a/doc/topics/ssh/index.rst b/doc/topics/ssh/index.rst index 8920e9e648..95a2bdecdb 100644 --- a/doc/topics/ssh/index.rst +++ b/doc/topics/ssh/index.rst @@ -64,7 +64,8 @@ Deploy ssh key for salt-ssh =========================== By default, salt-ssh will generate key pairs for ssh, the default path will be -/etc/salt/pki/master/ssh/salt-ssh.rsa +``/etc/salt/pki/master/ssh/salt-ssh.rsa``. The key generation happens when you run +``salt-ssh`` for the first time. You can use ssh-copy-id, (the OpenSSH key deployment tool) to deploy keys to your servers. diff --git a/doc/topics/troubleshooting/yaml_idiosyncrasies.rst b/doc/topics/troubleshooting/yaml_idiosyncrasies.rst index e1c6383459..f5a9fed916 100644 --- a/doc/topics/troubleshooting/yaml_idiosyncrasies.rst +++ b/doc/topics/troubleshooting/yaml_idiosyncrasies.rst @@ -28,6 +28,7 @@ hit `Enter`. Also, you can convert tabs to 2 spaces by these commands in Vim: Indentation =========== + The suggested syntax for YAML files is to use 2 spaces for indentation, but YAML will follow whatever indentation system that the individual file uses. Indentation of two spaces works very well for SLS files given the @@ -112,8 +113,24 @@ PyYAML will load these values as boolean ``True`` or ``False``. Un-capitalized versions will also be loaded as booleans (``true``, ``false``, ``yes``, ``no``, ``on``, and ``off``). This can be especially problematic when constructing Pillar data. Make sure that your Pillars which need to use the string versions -of these values are enclosed in quotes. Pillars will be parsed twice by salt, -so you'll need to wrap your values in multiple quotes, for example '"false"'. +of these values are enclosed in quotes. Pillars will be parsed twice by salt, +so you'll need to wrap your values in multiple quotes, including double quotation +marks (``" "``) and single quotation marks (``' '``). Note that spaces are included +in the quotation type examples for clarity. + +Multiple quoting examples looks like this: + +.. code-block:: yaml + + - '"false"' + - "'True'" + - "'YES'" + - '"No"' + +.. note:: + + When using multiple quotes in this manner, they must be different. Using ``"" ""`` + or ``'' ''`` won't work in this case (spaces are included in examples for clarity). The '%' Sign ============ @@ -248,8 +265,10 @@ Alternatively, they can be defined the "old way", or with multiple - require: - user: fred -YAML support only plain ASCII -============================= +.. _yaml_plain_ascii: + +YAML supports only plain ASCII +============================== According to YAML specification, only ASCII characters can be used. diff --git a/doc/topics/tutorials/gitfs.rst b/doc/topics/tutorials/gitfs.rst index b81d9eca14..10b4c4339e 100644 --- a/doc/topics/tutorials/gitfs.rst +++ b/doc/topics/tutorials/gitfs.rst @@ -166,13 +166,15 @@ Ubuntu 14.04 LTS and Debian Wheezy (7.x) also have a compatible version packaged # apt-get install python-git -If your master is running an older version (such as Ubuntu 12.04 LTS or Debian -Squeeze), then you will need to install GitPython using either pip_ or -easy_install (it is recommended to use pip). Version 0.3.2.RC1 is now marked as -the stable release in PyPI, so it should be a simple matter of running ``pip -install GitPython`` (or ``easy_install GitPython``) as root. +GitPython_ requires the ``git`` CLI utility to work. If installed from a system +package, then git should already be installed, but if installed via pip_ then +it may still be necessary to install git separately. 
For MacOS users, +GitPython_ comes bundled in with the Salt installer, but git must still be +installed for it to work properly. Git can be installed in several ways, +including by installing XCode_. -.. _`pip`: http://www.pip-installer.org/ +.. _pip: http://www.pip-installer.org/ +.. _XCode: https://developer.apple.com/xcode/ .. warning:: diff --git a/doc/topics/tutorials/http.rst b/doc/topics/tutorials/http.rst index 1eaee62071..e6b20c62a2 100644 --- a/doc/topics/tutorials/http.rst +++ b/doc/topics/tutorials/http.rst @@ -110,7 +110,7 @@ To pass through a file that contains jinja + yaml templating (the default): method='POST', data_file='/srv/salt/somefile.jinja', data_render=True, - template_data={'key1': 'value1', 'key2': 'value2'} + template_dict={'key1': 'value1', 'key2': 'value2'} ) To pass through a file that contains mako templating: @@ -123,7 +123,7 @@ To pass through a file that contains mako templating: data_file='/srv/salt/somefile.mako', data_render=True, data_renderer='mako', - template_data={'key1': 'value1', 'key2': 'value2'} + template_dict={'key1': 'value1', 'key2': 'value2'} ) Because this function uses Salt's own rendering system, any Salt renderer can @@ -140,7 +140,7 @@ However, this can be changed to ``master`` if necessary. method='POST', data_file='/srv/salt/somefile.jinja', data_render=True, - template_data={'key1': 'value1', 'key2': 'value2'}, + template_dict={'key1': 'value1', 'key2': 'value2'}, opts=__opts__ ) @@ -149,7 +149,7 @@ However, this can be changed to ``master`` if necessary. method='POST', data_file='/srv/salt/somefile.jinja', data_render=True, - template_data={'key1': 'value1', 'key2': 'value2'}, + template_dict={'key1': 'value1', 'key2': 'value2'}, node='master' ) @@ -170,11 +170,11 @@ a Python dict. header_file='/srv/salt/headers.jinja', header_render=True, header_renderer='jinja', - template_data={'key1': 'value1', 'key2': 'value2'} + template_dict={'key1': 'value1', 'key2': 'value2'} ) Because much of the data that would be templated between headers and data may be -the same, the ``template_data`` is the same for both. Correcting possible +the same, the ``template_dict`` is the same for both. Correcting possible variable name collisions is up to the user. Authentication diff --git a/doc/topics/tutorials/index.rst b/doc/topics/tutorials/index.rst index cba3acd739..df668bc395 100644 --- a/doc/topics/tutorials/index.rst +++ b/doc/topics/tutorials/index.rst @@ -28,9 +28,8 @@ Tutorials Index * :ref:`States tutorial, part 3 - Templating, Includes, Extends ` * :ref:`States tutorial, part 4 ` * :ref:`How to Convert Jinja Logic to an Execution Module ` -* :ref:`Using Salt with Stormpath ` * :ref:`Syslog-ng usage ` * :ref:`The macOS (Maverick) Developer Step By Step Guide To Salt Installation ` * :ref:`SaltStack Walk-through ` * :ref:`Writing Salt Tests ` -* :ref:`Multi-cloud orchestration with Apache Libcloud ` \ No newline at end of file +* :ref:`Multi-cloud orchestration with Apache Libcloud ` diff --git a/doc/topics/tutorials/pillar.rst b/doc/topics/tutorials/pillar.rst index e0c97f26bd..3ec2c1ddea 100644 --- a/doc/topics/tutorials/pillar.rst +++ b/doc/topics/tutorials/pillar.rst @@ -75,7 +75,7 @@ The default location for the pillar is in /srv/pillar. .. note:: - The pillar location can be configured via the `pillar_roots` option inside + The pillar location can be configured via the ``pillar_roots`` option inside the master configuration file. It must not be in a subdirectory of the state tree or file_roots. 
If the pillar is under file_roots, any pillar targeting can be bypassed by minions. @@ -242,7 +242,7 @@ set in the minion's pillar, then the default of ``httpd`` will be used. .. note:: Under the hood, pillar is just a Python dict, so Python dict methods such - as `get` and `items` can be used. + as ``get`` and ``items`` can be used. Pillar Makes Simple States Grow Easily ====================================== @@ -303,6 +303,18 @@ Where the vimrc source location can now be changed via pillar: Ensuring that the right vimrc is sent out to the correct minions. +The pillar top file must include a reference to the new sls pillar file: + +``/srv/pillar/top.sls``: + +.. code-block:: yaml + + base: + '*': + - pkg + - edit.vim + + Setting Pillar Data on the Command Line ======================================= diff --git a/doc/topics/tutorials/stormpath.rst b/doc/topics/tutorials/stormpath.rst deleted file mode 100644 index 34003d5f99..0000000000 --- a/doc/topics/tutorials/stormpath.rst +++ /dev/null @@ -1,198 +0,0 @@ -.. _tutorial-stormpath: - -========================= -Using Salt with Stormpath -========================= - -`Stormpath `_ is a user management and authentication -service. This tutorial covers using SaltStack to manage and take advantage of -Stormpath's features. - -External Authentication ------------------------ -Stormpath can be used for Salt's external authentication system. In order to do -this, the master should be configured with an ``apiid``, ``apikey``, and the ID -of the ``application`` that is associated with the users to be authenticated: - -.. code-block:: yaml - - stormpath: - apiid: 367DFSF4FRJ8767FSF4G34FGH - apikey: FEFREF43t3FEFRe/f323fwer4FWF3445gferWRWEer1 - application: 786786FREFrefreg435fr1 - -.. note:: - These values can be found in the `Stormpath dashboard - `_`. - -Users that are to be authenticated should be set up under the ``stormpath`` -dict under ``external_auth``: - -.. code-block:: yaml - - external_auth: - stormpath: - larry: - - .* - - '@runner' - - '@wheel' - -Keep in mind that while Stormpath defaults the username associated with the -account to the email address, it is better to use a username without an ``@`` -sign in it. - - -Configuring Stormpath Modules ------------------------------ -Stormpath accounts can be managed via either an execution or state module. In -order to use either, a minion must be configured with an API ID and key. - -.. code-block:: yaml - - stormpath: - apiid: 367DFSF4FRJ8767FSF4G34FGH - apikey: FEFREF43t3FEFRe/f323fwer4FWF3445gferWRWEer1 - directory: efreg435fr1786786FREFr - application: 786786FREFrefreg435fr1 - -Some functions in the ``stormpath`` modules can make use of other options. The -following options are also available. - -directory -````````` -The ID of the directory that is to be used with this minion. Many functions -require an ID to be specified to do their work. However, if the ID of a -``directory`` is specified, then Salt can often look up the resource in -question. - -application -``````````` -The ID of the application that is to be used with this minion. Many functions -require an ID to be specified to do their work. However, if the ID of a -``application`` is specified, then Salt can often look up the resource in -question. - - -Managing Stormpath Accounts ---------------------------- -With the ``stormpath`` configuration in place, Salt can be used to configure -accounts (which may be thought of as users) on the Stormpath service. The -following functions are available. 
- -stormpath.create_account -```````````````````````` -Create an account on the Stormpath service. This requires a ``directory_id`` as -the first argument; it will not be retrieved from the minion configuration. An -``email`` address, ``password``, first name (``givenName``) and last name -(``surname``) are also required. For the full list of other parameters that may -be specified, see: - -http://docs.stormpath.com/rest/product-guide/#account-resource - -When executed with no errors, this function will return the information about -the account, from Stormpath. - -.. code-block:: bash - - salt myminion stormpath.create_account shemp@example.com letmein Shemp Howard - - -stormpath.list_accounts -``````````````````````` -Show all accounts on the Stormpath service. This will return all accounts, -regardless of directory, application, or group. - -.. code-block:: bash - - salt myminion stormpath.list_accounts - ''' - -stormpath.show_account -`````````````````````` -Show the details for a specific Stormpath account. An ``account_id`` is normally -required. However, if am ``email`` is provided instead, along with either a -``directory_id``, ``application_id``, or ``group_id``, then Salt will search the -specified resource to try and locate the ``account_id``. - -.. code-block:: bash - - salt myminion stormpath.show_account - salt myminion stormpath.show_account email= directory_id= - - -stormpath.update_account -```````````````````````` -Update one or more items for this account. Specifying an empty value will clear -it for that account. This function may be used in one of two ways. In order to -update only one key/value pair, specify them in order: - -.. code-block:: bash - - salt myminion stormpath.update_account givenName shemp - salt myminion stormpath.update_account middleName '' - -In order to specify multiple items, they need to be passed in as a dict. From -the command line, it is best to do this as a JSON string: - -.. code-block:: bash - - salt myminion stormpath.update_account items='{"givenName": "Shemp"} - salt myminion stormpath.update_account items='{"middlename": ""} - -When executed with no errors, this function will return the information about -the account, from Stormpath. - - -stormpath.delete_account -```````````````````````` -Delete an account from Stormpath. - -.. code-block:: bash - - salt myminion stormpath.delete_account - - -stormpath.list_directories -`````````````````````````` -Show all directories associated with this tenant. - -.. code-block:: bash - - salt myminion stormpath.list_directories - - -Using Stormpath States ----------------------- -Stormpath resources may be managed using the state system. The following states -are available. - -stormpath_account.present -````````````````````````` -Ensure that an account exists on the Stormpath service. All options that are -available with the ``stormpath.create_account`` function are available here. -If an account needs to be created, then this function will require the same -fields that ``stormpath.create_account`` requires, including the ``password``. -However, if a password changes for an existing account, it will NOT be updated -by this state. - -.. code-block:: yaml - - curly@example.com: - stormpath_account.present: - - directory_id: efreg435fr1786786FREFr - - password: badpass - - firstName: Curly - - surname: Howard - - nickname: curly - -It is advisable to always set a ``nickname`` that is not also an email address, -so that it can be used by Salt's external authentication module. 
- -stormpath_account.absent -```````````````````````` -Ensure that an account does not exist on Stormpath. As with -``stormpath_account.present``, the ``name`` supplied to this state is the -``email`` address associated with this account. Salt will use this, with or -without the ``directory`` ID that is configured for the minion. However, lookups -will be much faster with a directory ID specified. - diff --git a/doc/topics/utils/index.rst b/doc/topics/utils/index.rst index 48728174f5..44380f3541 100644 --- a/doc/topics/utils/index.rst +++ b/doc/topics/utils/index.rst @@ -54,7 +54,7 @@ types like so: salt '*' mymodule.observe_the_awesomeness ''' - print __utils__['foo.bar']() + return __utils__['foo.bar']() Utility modules, like any other kind of Salt extension, support using a :ref:`__virtual__ function ` to conditionally load them, @@ -81,11 +81,56 @@ the ``foo`` utility module with a ``__virtual__`` function. def bar(): return 'baz' +Also you could even write your utility modules in object oriented fashion: + +.. code-block:: python + + # -*- coding: utf-8 -*- + ''' + My utils module + --------------- + + This module contains common functions for use in my other custom types. + ''' + + class Foo(object): + + def __init__(self): + pass + + def bar(self): + return 'baz' + +And import them into other custom modules: + +.. code-block:: python + + # -*- coding: utf-8 -*- + ''' + My awesome execution module + --------------------------- + ''' + + import mymodule + + def observe_the_awesomeness(): + ''' + Prints information from my utility module + + CLI Example: + + .. code-block:: bash + + salt '*' mymodule.observe_the_awesomeness + ''' + foo = mymodule.Foo() + return foo.bar() + These are, of course, contrived examples, but they should serve to show some of the possibilities opened up by writing utility modules. Keep in mind though -that States still have access to all of the execution modules, so it is not +that states still have access to all of the execution modules, so it is not necessary to write a utility module to make a function available to both a -state and an execution module. One good use case for utililty modules is one +state and an execution module. One good use case for utility modules is one where it is necessary to invoke the same function from a custom :ref:`outputter `/returner, as well as an execution module. diff --git a/pkg/osx/build.sh b/pkg/osx/build.sh index e304248328..7850d48cd8 100755 --- a/pkg/osx/build.sh +++ b/pkg/osx/build.sh @@ -19,14 +19,16 @@ # $1 : : the version of salt to build # (a git tag, not a branch) # (defaults to git-repo state) -# $2 : : the staging area for the package +# $2 : : The version of Python to use in the +# build. 
Default is 2 +# $3 : : the staging area for the package # defaults to /tmp/salt_pkg # # Example: -# The following will build Salt v2015.8.3 and stage all files -# in /tmp/custom_pkg: +# The following will build Salt v2015.8.3 with Python 2 and +# stage all files in /tmp/custom_pkg: # -# ./build.sh v2015.8.3 /tmp/custom_pkg +# ./build.sh v2015.8.3 2 /tmp/custom_pkg # ############################################################################ echo -n -e "\033]0;Build: Variables\007" @@ -41,9 +43,15 @@ else fi if [ "$2" == "" ]; then + PYVER=2 +else + PYVER=$2 +fi + +if [ "$3" == "" ]; then PKGDIR=/tmp/salt_pkg else - PKGDIR=$2 + PKGDIR=$3 fi ############################################################################ @@ -51,6 +59,12 @@ fi ############################################################################ SRCDIR=`git rev-parse --show-toplevel` PKGRESOURCES=$SRCDIR/pkg/osx +if [ "$PYVER" == "2" ]; then + PYTHON=/opt/salt/bin/python +else + PYTHON=/opt/salt/bin/python3 +fi +CPUARCH=`uname -m` ############################################################################ # Make sure this is the Salt Repository @@ -66,16 +80,23 @@ fi # Create the Build Environment ############################################################################ echo -n -e "\033]0;Build: Build Environment\007" -sudo $PKGRESOURCES/build_env.sh +sudo $PKGRESOURCES/build_env.sh $PYVER ############################################################################ # Install Salt ############################################################################ echo -n -e "\033]0;Build: Install Salt\007" -sudo /opt/salt/bin/python $SRCDIR/setup.py install +sudo rm -rf $SRCDIR/build +sudo rm -rf $SRCDIR/dist +sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s" install ############################################################################ # Build Package ############################################################################ echo -n -e "\033]0;Build: Package Salt\007" -sudo $PKGRESOURCES/build_pkg.sh $VERSION $PKGDIR +sudo $PKGRESOURCES/build_pkg.sh $VERSION $PYVER $PKGDIR + +############################################################################ +# Sign Package +############################################################################ +sudo $PKGRESOURCES/build_sig.sh salt-$VERSION-py$PYVER-$CPUARCH.pkg salt-$VERSION-py$PYVER-$CPUARCH-signed.pkg diff --git a/pkg/osx/build_env.sh b/pkg/osx/build_env.sh index f886bd59ad..5d7f1bac5a 100755 --- a/pkg/osx/build_env.sh +++ b/pkg/osx/build_env.sh @@ -6,18 +6,21 @@ # Authors: CR Oldham, Shane Lee # Date: December 2015 # -# Description: This script sets up a build environment for salt on macOS. +# Description: This script sets up a build environment for Salt on macOS. # # Requirements: # - XCode Command Line Tools (xcode-select --install) # # Usage: -# This script is not passed any parameters +# This script can be passed 1 parameter +# $1 : : the version of Python to use for the +# build environment. 
Default is 2 # # Example: -# The following will set up a build environment for salt on macOS +# The following will set up a Python 3 build environment for Salt +# on macOS # -# ./dev_env.sh +# ./dev_env.sh 3 # ############################################################################ @@ -31,6 +34,15 @@ quit_on_error() { exit -1 } +############################################################################ +# Check passed parameters, set defaults +############################################################################ +if [ "$1" == "" ]; then + PYVER=2 +else + PYVER=$1 +fi + ############################################################################ # Parameters Required for the script to function properly ############################################################################ @@ -45,6 +57,15 @@ SHADIR=$SCRIPTDIR/shasums PKG_CONFIG_PATH=/opt/salt/lib/pkgconfig CFLAGS="-I/opt/salt/include" LDFLAGS="-L/opt/salt/lib" +if [ "$PYVER" == "2" ]; then + PYDIR=/opt/salt/lib/python2.7 + PYTHON=/opt/salt/bin/python + PIP=/opt/salt/bin/pip +else + PYDIR=/opt/salt/lib/python3.5 + PYTHON=/opt/salt/bin/python3 + PIP=/opt/salt/bin/pip3 +fi ############################################################################ # Determine Which XCode is being used (XCode or XCode Command Line Tools) @@ -121,8 +142,8 @@ BUILDDIR=$SCRIPTDIR/build ############################################################################ echo -n -e "\033]0;Build_Env: pkg-config\007" -PKGURL="http://pkgconfig.freedesktop.org/releases/pkg-config-0.29.tar.gz" -PKGDIR="pkg-config-0.29" +PKGURL="http://pkgconfig.freedesktop.org/releases/pkg-config-0.29.2.tar.gz" +PKGDIR="pkg-config-0.29.2" download $PKGURL @@ -140,8 +161,8 @@ sudo -H $MAKE install ############################################################################ echo -n -e "\033]0;Build_Env: libsodium\007" -PKGURL="https://download.libsodium.org/libsodium/releases/libsodium-1.0.12.tar.gz" -PKGDIR="libsodium-1.0.12" +PKGURL="https://download.libsodium.org/libsodium/releases/libsodium-1.0.13.tar.gz" +PKGDIR="libsodium-1.0.13" download $PKGURL @@ -159,8 +180,8 @@ sudo -H $MAKE install ############################################################################ echo -n -e "\033]0;Build_Env: zeromq\007" -PKGURL="http://download.zeromq.org/zeromq-4.1.3.tar.gz" -PKGDIR="zeromq-4.1.3" +PKGURL="http://download.zeromq.org/zeromq-4.1.4.tar.gz" +PKGDIR="zeromq-4.1.4" download $PKGURL @@ -178,13 +199,13 @@ sudo -H $MAKE install ############################################################################ echo -n -e "\033]0;Build_Env: OpenSSL\007" -PKGURL="http://openssl.org/source/openssl-1.0.2f.tar.gz" -PKGDIR="openssl-1.0.2f" +PKGURL="http://openssl.org/source/openssl-1.0.2l.tar.gz" +PKGDIR="openssl-1.0.2l" download $PKGURL echo "################################################################################" -echo "Building OpenSSL 1.0.2f" +echo "Building OpenSSL" echo "################################################################################" cd $PKGDIR ./Configure darwin64-x86_64-cc --prefix=/opt/salt --openssldir=/opt/salt/openssl @@ -197,13 +218,18 @@ sudo -H $MAKE install ############################################################################ echo -n -e "\033]0;Build_Env: Python\007" -PKGURL="https://www.python.org/ftp/python/2.7.12/Python-2.7.12.tar.xz" -PKGDIR="Python-2.7.12" +if [ "$PYVER" == "2" ]; then + PKGURL="https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tar.xz" + PKGDIR="Python-2.7.13" +else + 
PKGURL="https://www.python.org/ftp/python/3.5.3/Python-3.5.3.tar.xz" + PKGDIR="Python-3.5.3" +fi download $PKGURL echo "################################################################################" -echo "Building Python 2.7.12" +echo "Building Python" echo "################################################################################" echo "Note there are some test failures" cd $PKGDIR @@ -215,7 +241,7 @@ sudo -H $MAKE install ############################################################################ # upgrade pip ############################################################################ -sudo -H /opt/salt/bin/pip install --upgrade pip +sudo -H $PIP install --upgrade pip ############################################################################ # Download and install salt python dependencies @@ -227,23 +253,23 @@ cd $BUILDDIR echo "################################################################################" echo "Installing Salt Dependencies with pip (normal)" echo "################################################################################" -sudo -H /opt/salt/bin/pip install \ - -r $SRCDIR/pkg/osx/req.txt \ - --no-cache-dir +sudo -H $PIP install \ + -r $SRCDIR/pkg/osx/req.txt \ + --no-cache-dir echo "################################################################################" echo "Installing Salt Dependencies with pip (build_ext)" echo "################################################################################" -sudo -H /opt/salt/bin/pip install \ - -r $SRCDIR/pkg/osx/req_ext.txt \ - --global-option=build_ext \ - --global-option="-I/opt/salt/include" \ - --no-cache-dir +sudo -H $PIP install \ + -r $SRCDIR/pkg/osx/req_ext.txt \ + --global-option=build_ext \ + --global-option="-I/opt/salt/include" \ + --no-cache-dir echo "--------------------------------------------------------------------------------" echo "Create Symlink to certifi for openssl" echo "--------------------------------------------------------------------------------" -sudo ln -s /opt/salt/lib/python2.7/site-packages/certifi/cacert.pem /opt/salt/openssl/cert.pem +sudo ln -s $PYDIR/site-packages/certifi/cacert.pem /opt/salt/openssl/cert.pem echo -n -e "\033]0;Build_Env: Finished\007" diff --git a/pkg/osx/build_pkg.sh b/pkg/osx/build_pkg.sh index 86ad9aa450..80f5c4f734 100755 --- a/pkg/osx/build_pkg.sh +++ b/pkg/osx/build_pkg.sh @@ -15,13 +15,16 @@ # This script can be passed 2 parameters # $1 : : the version name to give the package (overrides # version of the git repo) (Defaults to the git repo version) -# $2 : : the staging area for the package defaults to +# $2 : : the version of python that was built (defaults +# to 2) +# $3 : : the staging area for the package defaults to # /tmp/salt_pkg # # Example: -# The following will build Salt and stage all files in /tmp/salt_pkg: +# The following will build Salt version 2017.7.0 with Python 3 and +# stage all files in /tmp/salt_pkg: # -# ./build.sh +# ./build.sh 2017.7.0 3 # ############################################################################ @@ -45,11 +48,18 @@ else VERSION=$1 fi -# Get/Set temp directory +# Get/Set Python Version if [ "$2" == "" ]; then + PYVER=2 +else + PYVER=$2 +fi + +# Get/Set temp directory +if [ "$3" == "" ]; then PKGDIR=/tmp/salt_pkg else - PKGDIR=$2 + PKGDIR=$3 fi CPUARCH=`uname -m` @@ -114,7 +124,11 @@ sudo rm -rdf $PKGDIR/opt/salt/lib/engines sudo rm -rdf $PKGDIR/opt/salt/share/aclocal sudo rm -rdf $PKGDIR/opt/salt/share/doc sudo rm -rdf $PKGDIR/opt/salt/share/man/man1/pkg-config.1 -sudo rm -rdf 
$PKGDIR/opt/salt/lib/python2.7/test +if [ "$PYVER" == "2" ]; then + sudo rm -rdf $PKGDIR/opt/salt/lib/python2.7/test +else + sudo rm -rdf $PKGDIR/opt/salt/lib/python3.5/test +fi echo -n -e "\033]0;Build_Pkg: Remove compiled python files\007" sudo find $PKGDIR/opt/salt -name '*.pyc' -type f -delete @@ -133,15 +147,30 @@ cp $SRCDIR/conf/master $PKGDIR/etc/salt/master.dist ############################################################################ echo -n -e "\033]0;Build_Pkg: Add Version to .xml\007" +if [ "$PYVER" == "2" ]; then + TITLE="Salt $VERSION" + DESC="Salt $VERSION with Python 2" +else + TITLE="Salt $VERSION (Python 3)" + DESC="Salt $VERSION with Python 3" +fi + cd $PKGRESOURCES cp distribution.xml.dist distribution.xml -SEDSTR="s/@VERSION@/$VERSION/" -echo $SEDSTR -sed -i '' $SEDSTR distribution.xml +SEDSTR="s/@TITLE@/$TITLE/g" +sed -E -i '' "$SEDSTR" distribution.xml -SEDSTR="s/@CPUARCH@/$CPUARCH/" -echo $SEDSTR -sed -i '' $SEDSTR distribution.xml +SEDSTR="s/@DESC@/$DESC/g" +sed -E -i '' "$SEDSTR" distribution.xml + +SEDSTR="s/@VERSION@/$VERSION/g" +sed -E -i '' "$SEDSTR" distribution.xml + +SEDSTR="s/@PYVER@/$PYVER/g" +sed -E -i '' "$SEDSTR" distribution.xml + +SEDSTR="s/@CPUARCH@/$CPUARCH/g" +sed -i '' "$SEDSTR" distribution.xml ############################################################################ # Build the Package @@ -152,10 +181,10 @@ pkgbuild --root=$PKGDIR \ --scripts=pkg-scripts \ --identifier=com.saltstack.salt \ --version=$VERSION \ - --ownership=recommended salt-src-$VERSION-$CPUARCH.pkg + --ownership=recommended salt-src-$VERSION-py$PYVER-$CPUARCH.pkg productbuild --resources=pkg-resources \ --distribution=distribution.xml \ - --package-path=salt-src-$VERSION-$CPUARCH.pkg \ - --version=$VERSION salt-$VERSION-$CPUARCH.pkg + --package-path=salt-src-$VERSION-py$PYVER-$CPUARCH.pkg \ + --version=$VERSION salt-$VERSION-py$PYVER-$CPUARCH.pkg diff --git a/pkg/osx/distribution.xml.dist b/pkg/osx/distribution.xml.dist index 083fef44f6..d31063f5f4 100644 --- a/pkg/osx/distribution.xml.dist +++ b/pkg/osx/distribution.xml.dist @@ -1,6 +1,6 @@ - Salt @VERSION@ + @TITLE@ com.saltstack.salt @@ -25,7 +25,7 @@ salt-src-@VERSION@-@CPUARCH@.pkg + auth="root">salt-src-@VERSION@-py@PYVER@-@CPUARCH@.pkg @@ -34,8 +34,8 @@ diff --git a/pkg/osx/req.txt b/pkg/osx/req.txt index 3bee76f5b1..338454f456 100644 --- a/pkg/osx/req.txt +++ b/pkg/osx/req.txt @@ -1,34 +1,31 @@ -apache-libcloud==0.20.1 +apache-libcloud==2.1.0 backports.ssl_match_hostname==3.5.0.1 -backports_abc==0.4 +backports_abc==0.5 certifi -cffi==1.5.0 -CherryPy==4.0.0 -click==6.2 -enum34==1.1.2 +cffi==1.10.0 +CherryPy==11.0.0 +click==6.7 +enum34==1.1.6 gitdb==0.6.4 -GitPython==1.0.1 -idna==2.0 -ioflo==1.5.0 -ipaddress==1.0.16 -Jinja2==2.9.4 -libnacl==1.4.4 +GitPython==2.1.1 +idna==2.5 +ipaddress==1.0.18 +Jinja2==2.9.6 linode-python==1.1.1 -Mako==1.0.3 -MarkupSafe==0.23 -msgpack-python==0.4.7 -pyasn1==0.1.9 -pycparser==2.14 +Mako==1.0.7 +MarkupSafe==1.0 +msgpack-python==0.4.8 +pyasn1==0.2.3 +pycparser==2.18 pycrypto==2.6.1 -python-dateutil==2.4.2 -python-gnupg==0.3.8 -PyYAML==3.11 -pyzmq==15.2.0 -raet==0.6.5 -requests==2.9.1 +python-dateutil==2.6.1 +python-gnupg==0.4.1 +PyYAML==3.12 +pyzmq==16.0.2 +requests==2.18.1 singledispatch==3.4.0.3 six==1.10.0 smmap==0.9.0 timelib==0.2.4 -tornado==4.3 -vultr==0.1.2 +tornado==4.5.1 +vultr==1.0rc1 diff --git a/pkg/osx/req_ext.txt b/pkg/osx/req_ext.txt index b31fff2b1d..fac429a942 100644 --- a/pkg/osx/req_ext.txt +++ b/pkg/osx/req_ext.txt @@ -1,2 +1,2 @@ -cryptography==1.2.2 
-pyOpenSSL==0.15.1 +cryptography==2.0 +pyOpenSSL==17.1.0 diff --git a/pkg/osx/shasums/Python-2.7.12.tar.xz.sha512 b/pkg/osx/shasums/Python-2.7.12.tar.xz.sha512 deleted file mode 100644 index 513cf3de5a..0000000000 --- a/pkg/osx/shasums/Python-2.7.12.tar.xz.sha512 +++ /dev/null @@ -1 +0,0 @@ -6ddbbce47cc49597433d98ca05c2f62f07ed1070807b645602a8e9e9b996adc6fa66fa20a33cd7d23d4e7e925e25071d7301d288149fbe4e8c5f06d5438dda1f ./Python-2.7.12.tar.xz diff --git a/pkg/osx/shasums/Python-2.7.13.tar.xz.sha512 b/pkg/osx/shasums/Python-2.7.13.tar.xz.sha512 new file mode 100644 index 0000000000..a57f8db9b9 --- /dev/null +++ b/pkg/osx/shasums/Python-2.7.13.tar.xz.sha512 @@ -0,0 +1 @@ +f37c9a28ce129d01e63c84d7db627a06402854578f62d17927334ea21ede318e04bbf66e890e3f47c85333e6b19f6e5581fb3f3e27efd24be27017d1b6529c4b ./Python-2.7.13.tar.xz diff --git a/pkg/osx/shasums/Python-3.5.3.tar.xz.sha512 b/pkg/osx/shasums/Python-3.5.3.tar.xz.sha512 new file mode 100644 index 0000000000..d4ab61d5f7 --- /dev/null +++ b/pkg/osx/shasums/Python-3.5.3.tar.xz.sha512 @@ -0,0 +1 @@ +bbcc20e315c63dbc8901d7e7bfa29d4dbdad9335720757d8d679730319fd1d9fcfdb55cf62d620c9b052134170f162c28d653a8af60923185b8932524d827864 ./Python-3.5.3.tar.xz diff --git a/pkg/osx/shasums/libsodium-1.0.12.tar.gz.sha512 b/pkg/osx/shasums/libsodium-1.0.12.tar.gz.sha512 deleted file mode 100644 index 948a5da6b6..0000000000 --- a/pkg/osx/shasums/libsodium-1.0.12.tar.gz.sha512 +++ /dev/null @@ -1 +0,0 @@ -1e63960da42bcc90945463ae1f5b1355849881dce5bba6d293391f8d6f0932063a5bfd433a071cb184af90ebeab469acc34710587116922144d61f3d7661901b ./libsodium-1.0.12.tar.gz diff --git a/pkg/osx/shasums/libsodium-1.0.13.tar.gz.sha512 b/pkg/osx/shasums/libsodium-1.0.13.tar.gz.sha512 new file mode 100644 index 0000000000..1b5270adbf --- /dev/null +++ b/pkg/osx/shasums/libsodium-1.0.13.tar.gz.sha512 @@ -0,0 +1 @@ +c619b12fdf0b2e59174b6e383a62d5499ebcd720fdbb2c1a41a98a46c285df075202423454b294fefee185432441e943805397d7656f7cd7837de425da623929 ./libsodium-1.0.13.tar.gz diff --git a/pkg/osx/shasums/openssl-1.0.2f.tar.gz.sha512 b/pkg/osx/shasums/openssl-1.0.2f.tar.gz.sha512 deleted file mode 100644 index b107e52b8a..0000000000 --- a/pkg/osx/shasums/openssl-1.0.2f.tar.gz.sha512 +++ /dev/null @@ -1 +0,0 @@ -50abf6dc94cafd06e7fd20770808bdc675c88daa369e4f752bd584ab17f72a57357c1ca1eca3c83e6745b5a3c9c73c99dce70adaa904d73f6df4c75bc7138351 ./openssl-1.0.2f.tar.gz diff --git a/pkg/osx/shasums/openssl-1.0.2l.tar.gz.sha512 b/pkg/osx/shasums/openssl-1.0.2l.tar.gz.sha512 new file mode 100644 index 0000000000..9dac0d205f --- /dev/null +++ b/pkg/osx/shasums/openssl-1.0.2l.tar.gz.sha512 @@ -0,0 +1 @@ +047d964508ad6025c79caabd8965efd2416dc026a56183d0ef4de7a0a6769ce8e0b4608a3f8393d326f6d03b26a2b067e6e0c750f35b20be190e595e8290c0e3 ./openssl-1.0.2l.tar.gz diff --git a/pkg/osx/shasums/pkg-config-0.29.2.tar.gz.sha512 b/pkg/osx/shasums/pkg-config-0.29.2.tar.gz.sha512 new file mode 100644 index 0000000000..beb4354b5a --- /dev/null +++ b/pkg/osx/shasums/pkg-config-0.29.2.tar.gz.sha512 @@ -0,0 +1 @@ +4861ec6428fead416f5cbbbb0bbad10b9152967e481d4b0ff2eb396a9f297f552984c9bb72f6864a37dcd8fca1d9ccceda3ef18d8f121938dbe4fdf2b870fe75 ./pkg-config-0.29.2.tar.gz diff --git a/pkg/osx/shasums/pkg-config-0.29.tar.gz.sha512 b/pkg/osx/shasums/pkg-config-0.29.tar.gz.sha512 deleted file mode 100644 index 8e19abaabb..0000000000 --- a/pkg/osx/shasums/pkg-config-0.29.tar.gz.sha512 +++ /dev/null @@ -1 +0,0 @@ 
-c2857cd67801c0db5d204912453ff6bdc7da3ea61f8b1c6b38983d48dffb958725e7723f909abbc057c7b34a85c27290eec6943808312a75909306076064aa63 ./pkg-config-0.29.tar.gz diff --git a/pkg/osx/shasums/zeromq-4.1.3.tar.gz.sha512 b/pkg/osx/shasums/zeromq-4.1.3.tar.gz.sha512 deleted file mode 100644 index ea85a1581d..0000000000 --- a/pkg/osx/shasums/zeromq-4.1.3.tar.gz.sha512 +++ /dev/null @@ -1 +0,0 @@ -2c993d18ea44e1cba890e024176af65b85b842ca4f8a22d319be4ace8388ab8828dd706b065f02754025bf271b1d7aa878c3f6655878248f7826452cb2a6134c ./zeromq-4.1.3.tar.gz diff --git a/pkg/osx/shasums/zeromq-4.1.4.tar.gz.sha512 b/pkg/osx/shasums/zeromq-4.1.4.tar.gz.sha512 new file mode 100644 index 0000000000..4f8932f829 --- /dev/null +++ b/pkg/osx/shasums/zeromq-4.1.4.tar.gz.sha512 @@ -0,0 +1 @@ +8a8cf4f52ad78dddfff104bfba0f80bbc12566920906a0fafb9fc340aa92f5577c2923cb2e5346c69835cd2ea1609647a8893c2883cd22c1f0340a720511460c ./zeromq-4.1.4.tar.gz diff --git a/pkg/windows/build.bat b/pkg/windows/build.bat index 9dde8b2e72..0117718539 100644 --- a/pkg/windows/build.bat +++ b/pkg/windows/build.bat @@ -110,6 +110,13 @@ if not %errorLevel%==0 ( ) @echo. +:: Remove build and dist directories +@echo %0 :: Remove build and dist directories... +@echo --------------------------------------------------------------------- +rd /s /q "%SrcDir%\build" +rd /s /q "%SrcDir%\dist" +@echo. + :: Install Current Version of salt @echo %0 :: Install Current Version of salt... @echo --------------------------------------------------------------------- diff --git a/pkg/windows/build_pkg.bat b/pkg/windows/build_pkg.bat index b6b52f3bda..0d30f047ac 100644 --- a/pkg/windows/build_pkg.bat +++ b/pkg/windows/build_pkg.bat @@ -108,9 +108,9 @@ xcopy /E /Q "%PyDir%" "%BinDir%\" @echo Copying configs to buildenv\conf... @echo ---------------------------------------------------------------------- @echo xcopy /E /Q "%SrcDir%\conf\master" "%CnfDir%\" -xcopy /Q "%SrcDir%\conf\master" "%CnfDir%\" +xcopy /Q /Y "%SrcDir%\conf\master" "%CnfDir%\" @echo xcopy /E /Q "%SrcDir%\conf\minion" "%CnfDir%\" -xcopy /Q "%SrcDir%\conf\minion" "%CnfDir%\" +xcopy /Q /Y "%SrcDir%\conf\minion" "%CnfDir%\" @echo. @echo Copying VCRedist to Prerequisites @@ -582,6 +582,10 @@ If Exist "%BinDir%\Scripts\salt-run*"^ If Exist "%BldDir%\salt-run.bat"^ del /Q "%BldDir%\salt-run.bat" 1>nul +:: Remove the master config file +if Exist "%CnfDir%\master"^ + del /Q "%CnfDir%\master" 1>nul + :: Make the Salt Minion Installer makensis.exe /DSaltVersion=%Version% /DPythonVersion=%Python% "%InsDir%\Salt-Minion-Setup.nsi" @echo. 
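
Reviewer note (not part of the patch): the Windows launcher and installer hunks that follow change the bundled entry points from ``"%Python%" "%Script%"`` to ``"%Python%" -E -s "%Script%"``. The sketch below only illustrates what those two CPython flags control; the file name ``isolation_check.py`` is made up for this example.

.. code-block:: python

    # isolation_check.py -- hypothetical helper, not part of this patch.
    # Run it once with and once without the flags to compare:
    #
    #     %SaltDir%\bin\python.exe isolation_check.py
    #     %SaltDir%\bin\python.exe -E -s isolation_check.py
    #
    # -E : ignore PYTHON* environment variables such as PYTHONPATH and
    #      PYTHONHOME (reported as sys.flags.ignore_environment == 1).
    # -s : do not add the per-user site-packages directory to sys.path
    #      (reported as sys.flags.no_user_site == 1).
    import sys

    print('ignore_environment:', sys.flags.ignore_environment)
    print('no_user_site:', sys.flags.no_user_site)
    print('entries on sys.path:', len(sys.path))

With ``-E`` on the service command line, a system-wide ``PYTHONHOME`` can no longer leak into the bundled runtime, which appears to be why the ``AppEnvironmentExtra PYTHONHOME=`` workaround is dropped from the NSIS installer further down.
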
diff --git a/pkg/windows/buildenv/salt-call.bat b/pkg/windows/buildenv/salt-call.bat index 095f51e4c1..55f7cfac3b 100644 --- a/pkg/windows/buildenv/salt-call.bat +++ b/pkg/windows/buildenv/salt-call.bat @@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe Set Script=%SaltDir%\bin\Scripts\salt-call :: Launch Script -"%Python%" "%Script%" %* - +"%Python%" -E -s "%Script%" %* diff --git a/pkg/windows/buildenv/salt-cp.bat b/pkg/windows/buildenv/salt-cp.bat index 29274320b1..61fd1ce444 100644 --- a/pkg/windows/buildenv/salt-cp.bat +++ b/pkg/windows/buildenv/salt-cp.bat @@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe Set Script=%SaltDir%\bin\Scripts\salt-cp :: Launch Script -"%Python%" "%Script%" %* - +"%Python%" -E -s "%Script%" %* diff --git a/pkg/windows/buildenv/salt-key.bat b/pkg/windows/buildenv/salt-key.bat index 471e3626b2..4928b696d5 100644 --- a/pkg/windows/buildenv/salt-key.bat +++ b/pkg/windows/buildenv/salt-key.bat @@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe Set Script=%SaltDir%\bin\Scripts\salt-key :: Launch Script -"%Python%" "%Script%" %* - +"%Python%" -E -s "%Script%" %* diff --git a/pkg/windows/buildenv/salt-master.bat b/pkg/windows/buildenv/salt-master.bat index 9a124ffd46..134875c072 100644 --- a/pkg/windows/buildenv/salt-master.bat +++ b/pkg/windows/buildenv/salt-master.bat @@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe Set Script=%SaltDir%\bin\Scripts\salt-master :: Launch Script -"%Python%" "%Script%" %* - +"%Python%" -E -s "%Script%" %* diff --git a/pkg/windows/buildenv/salt-minion-debug.bat b/pkg/windows/buildenv/salt-minion-debug.bat index ad0ebafee0..2e6b5c5bcf 100644 --- a/pkg/windows/buildenv/salt-minion-debug.bat +++ b/pkg/windows/buildenv/salt-minion-debug.bat @@ -12,5 +12,4 @@ Set Script=%SaltDir%\bin\Scripts\salt-minion net stop salt-minion :: Launch Script -"%Python%" "%Script%" -l debug - +"%Python%" -E -s "%Script%" -l debug diff --git a/pkg/windows/buildenv/salt-minion.bat b/pkg/windows/buildenv/salt-minion.bat index 0a1aafa0c0..0eb041219c 100644 --- a/pkg/windows/buildenv/salt-minion.bat +++ b/pkg/windows/buildenv/salt-minion.bat @@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe Set Script=%SaltDir%\bin\Scripts\salt-minion :: Launch Script -"%Python%" "%Script%" %* - +"%Python%" -E -s "%Script%" %* diff --git a/pkg/windows/buildenv/salt-run.bat b/pkg/windows/buildenv/salt-run.bat index 5d62775d5e..a8766fc9b0 100644 --- a/pkg/windows/buildenv/salt-run.bat +++ b/pkg/windows/buildenv/salt-run.bat @@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe Set Script=%SaltDir%\bin\Scripts\salt-run :: Launch Script -"%Python%" "%Script%" %* - +"%Python%" -E -s "%Script%" %* diff --git a/pkg/windows/buildenv/salt.bat b/pkg/windows/buildenv/salt.bat index 125005b23e..5b07c73281 100644 --- a/pkg/windows/buildenv/salt.bat +++ b/pkg/windows/buildenv/salt.bat @@ -9,5 +9,4 @@ Set Python=%SaltDir%\bin\python.exe Set Script=%SaltDir%\bin\Scripts\salt :: Launch Script -"%Python%" "%Script%" %* - +"%Python%" -E -s "%Script%" %* diff --git a/pkg/windows/installer/Salt-Minion-Setup.nsi b/pkg/windows/installer/Salt-Minion-Setup.nsi index 39f55852c0..46fb821fb8 100644 --- a/pkg/windows/installer/Salt-Minion-Setup.nsi +++ b/pkg/windows/installer/Salt-Minion-Setup.nsi @@ -379,13 +379,12 @@ Section -Post WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "Path" "$INSTDIR\bin\" ; Register the Salt-Minion Service - nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet" - nsExec::Exec "nssm.exe set salt-minion 
AppEnvironmentExtra PYTHONHOME=" + nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe -E -s $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet" nsExec::Exec "nssm.exe set salt-minion Description Salt Minion from saltstack.com" nsExec::Exec "nssm.exe set salt-minion Start SERVICE_AUTO_START" nsExec::Exec "nssm.exe set salt-minion AppNoConsole 1" - - RMDir /R "$INSTDIR\var\cache\salt" ; removing cache from old version + nsExec::Exec "nssm.exe set salt-minion AppStopMethodConsole 24000" + nsExec::Exec "nssm.exe set salt-minion AppStopMethodWindow 2000" Call updateMinionConfig diff --git a/requirements/opt.txt b/requirements/opt.txt index e4c9181db7..8b73815859 100644 --- a/requirements/opt.txt +++ b/requirements/opt.txt @@ -5,3 +5,4 @@ yappi>=0.8.2 --allow-unverified python-neutronclient>2.3.6 python-gnupg cherrypy>=3.2.2 +libnacl diff --git a/salt/__init__.py b/salt/__init__.py index 84e5d84b51..21302c9605 100644 --- a/salt/__init__.py +++ b/salt/__init__.py @@ -7,6 +7,7 @@ Salt package from __future__ import absolute_import import warnings +# future lint: disable=non-unicode-string # All salt related deprecation warnings should be shown once each! warnings.filterwarnings( 'once', # Show once @@ -14,18 +15,19 @@ warnings.filterwarnings( DeprecationWarning, # This filter is for DeprecationWarnings r'^(salt|salt\.(.*))$' # Match module(s) 'salt' and 'salt.' ) +# future lint: enable=non-unicode-string # While we are supporting Python2.6, hide nested with-statements warnings warnings.filterwarnings( - 'ignore', - 'With-statements now directly support multiple context managers', + u'ignore', + u'With-statements now directly support multiple context managers', DeprecationWarning ) # Filter the backports package UserWarning about being re-imported warnings.filterwarnings( - 'ignore', - '^Module backports was already imported from (.*), but (.*) is being added to sys.path$', + u'ignore', + u'^Module backports was already imported from (.*), but (.*) is being added to sys.path$', UserWarning ) @@ -37,7 +39,7 @@ def __define_global_system_encoding_variable__(): # and reset to None encoding = None - if not sys.platform.startswith('win') and sys.stdin is not None: + if not sys.platform.startswith(u'win') and sys.stdin is not None: # On linux we can rely on sys.stdin for the encoding since it # most commonly matches the filesystem encoding. This however # does not apply to windows @@ -63,16 +65,16 @@ def __define_global_system_encoding_variable__(): # the way back to ascii encoding = sys.getdefaultencoding() if not encoding: - if sys.platform.startswith('darwin'): + if sys.platform.startswith(u'darwin'): # Mac OS X uses UTF-8 - encoding = 'utf-8' - elif sys.platform.startswith('win'): + encoding = u'utf-8' + elif sys.platform.startswith(u'win'): # Windows uses a configurable encoding; on Windows, Python uses the name “mbcs” # to refer to whatever the currently configured encoding is. - encoding = 'mbcs' + encoding = u'mbcs' else: # On linux default to ascii as a last resort - encoding = 'ascii' + encoding = u'ascii' # We can't use six.moves.builtins because these builtins get deleted sooner # than expected. 
See: @@ -83,7 +85,7 @@ def __define_global_system_encoding_variable__(): import builtins # pylint: disable=import-error # Define the detected encoding as a built-in variable for ease of use - setattr(builtins, '__salt_system_encoding__', encoding) + setattr(builtins, u'__salt_system_encoding__', encoding) # This is now garbage collectable del sys diff --git a/salt/_compat.py b/salt/_compat.py index 9b10646ace..052e68c050 100644 --- a/salt/_compat.py +++ b/salt/_compat.py @@ -46,7 +46,7 @@ else: if HAS_XML: - if not hasattr(ElementTree, 'ParseError'): + if not hasattr(ElementTree, u'ParseError'): class ParseError(Exception): ''' older versions of ElementTree do not have ParseError @@ -56,7 +56,7 @@ if HAS_XML: ElementTree.ParseError = ParseError -def text_(s, encoding='latin-1', errors='strict'): +def text_(s, encoding=u'latin-1', errors=u'strict'): ''' If ``s`` is an instance of ``binary_type``, return ``s.decode(encoding, errors)``, otherwise return ``s`` @@ -66,7 +66,7 @@ def text_(s, encoding='latin-1', errors='strict'): return s -def bytes_(s, encoding='latin-1', errors='strict'): +def bytes_(s, encoding=u'latin-1', errors=u'strict'): ''' If ``s`` is an instance of ``text_type``, return ``s.encode(encoding, errors)``, otherwise return ``s`` @@ -79,25 +79,25 @@ def bytes_(s, encoding='latin-1', errors='strict'): if PY3: def ascii_native_(s): if isinstance(s, text_type): - s = s.encode('ascii') - return str(s, 'ascii', 'strict') + s = s.encode(u'ascii') + return str(s, u'ascii', u'strict') else: def ascii_native_(s): if isinstance(s, text_type): - s = s.encode('ascii') + s = s.encode(u'ascii') return str(s) ascii_native_.__doc__ = ''' Python 3: If ``s`` is an instance of ``text_type``, return -``s.encode('ascii')``, otherwise return ``str(s, 'ascii', 'strict')`` +``s.encode(u'ascii')``, otherwise return ``str(s, 'ascii', 'strict')`` Python 2: If ``s`` is an instance of ``text_type``, return -``s.encode('ascii')``, otherwise return ``str(s)`` -''' +``s.encode(u'ascii')``, otherwise return ``str(s)`` +''' # future lint: disable=non-unicode-string if PY3: - def native_(s, encoding='latin-1', errors='strict'): + def native_(s, encoding=u'latin-1', errors=u'strict'): ''' If ``s`` is an instance of ``text_type``, return ``s``, otherwise return ``str(s, encoding, errors)`` @@ -106,7 +106,7 @@ if PY3: return s return str(s, encoding, errors) else: - def native_(s, encoding='latin-1', errors='strict'): + def native_(s, encoding=u'latin-1', errors=u'strict'): ''' If ``s`` is an instance of ``text_type``, return ``s.encode(encoding, errors)``, otherwise return ``str(s)`` @@ -121,7 +121,7 @@ return ``str(s, encoding, errors)`` Python 2: If ``s`` is an instance of ``text_type``, return ``s.encode(encoding, errors)``, otherwise return ``str(s)`` -''' +''' # future lint: disable=non-unicode-string def string_io(data=None): # cStringIO can't handle unicode diff --git a/salt/acl/__init__.py b/salt/acl/__init__.py index dcaddcb43a..00efe2e40c 100644 --- a/salt/acl/__init__.py +++ b/salt/acl/__init__.py @@ -10,8 +10,13 @@ found by reading the salt documentation: # Import python libraries from __future__ import absolute_import + +# Import salt libs import salt.utils +# Import 3rd-party libs +from salt.ext import six + class PublisherACL(object): ''' @@ -30,7 +35,7 @@ class PublisherACL(object): def cmd_is_blacklisted(self, cmd): # If this is a regular command, it is a single function - if isinstance(cmd, str): + if isinstance(cmd, six.string_types): cmd = [cmd] for fun in cmd: if not 
salt.utils.check_whitelist_blacklist(fun, blacklist=self.blacklist.get('modules', [])): diff --git a/salt/auth/django.py b/salt/auth/django.py index 726c550b8d..2b77b87723 100644 --- a/salt/auth/django.py +++ b/salt/auth/django.py @@ -55,7 +55,7 @@ import sys # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error try: import django diff --git a/salt/auth/file.py b/salt/auth/file.py index bd6c3582f5..1abaece4ca 100644 --- a/salt/auth/file.py +++ b/salt/auth/file.py @@ -101,8 +101,8 @@ import logging import os # Import salt utils -import salt.utils import salt.utils.files +import salt.utils.versions log = logging.getLogger(__name__) @@ -200,7 +200,7 @@ def _htpasswd(username, password, **kwargs): pwfile = HtpasswdFile(kwargs['filename']) # passlib below version 1.6 uses 'verify' function instead of 'check_password' - if salt.utils.version_cmp(kwargs['passlib_version'], '1.6') < 0: + if salt.utils.versions.version_cmp(kwargs['passlib_version'], '1.6') < 0: return pwfile.verify(username, password) else: return pwfile.check_password(username, password) @@ -222,7 +222,7 @@ def _htdigest(username, password, **kwargs): pwfile = HtdigestFile(kwargs['filename']) # passlib below version 1.6 uses 'verify' function instead of 'check_password' - if salt.utils.version_cmp(kwargs['passlib_version'], '1.6') < 0: + if salt.utils.versions.version_cmp(kwargs['passlib_version'], '1.6') < 0: return pwfile.verify(username, realm, password) else: return pwfile.check_password(username, realm, password) diff --git a/salt/auth/ldap.py b/salt/auth/ldap.py index 396c1d00a2..f5a44cba04 100644 --- a/salt/auth/ldap.py +++ b/salt/auth/ldap.py @@ -8,7 +8,7 @@ Provide authentication using simple LDAP binds # Import python libs from __future__ import absolute_import import logging -import salt.ext.six as six +from salt.ext import six # Import salt libs from salt.exceptions import CommandExecutionError, SaltInvocationError @@ -280,8 +280,14 @@ def auth(username, password): ''' Simple LDAP auth ''' - if _bind(username, password, anonymous=_config('auth_by_group_membership_only', mandatory=False) and - _config('anonymous', mandatory=False)): + #If bind credentials are configured, use them instead of user's + if _config('binddn', mandatory=False) and _config('bindpw', mandatory=False): + bind = _bind_for_search(anonymous=_config('anonymous', mandatory=False)) + else: + bind = _bind(username, password, anonymous=_config('auth_by_group_membership_only', mandatory=False) and + _config('anonymous', mandatory=False)) + + if bind: log.debug('LDAP authentication successful') return True else: @@ -306,8 +312,9 @@ def groups(username, **kwargs): ''' group_list = [] - bind = _bind(username, kwargs['password'], - anonymous=_config('anonymous', mandatory=False)) + # Perform un-authenticated bind to determine group membership + bind = _bind_for_search(anonymous=_config('anonymous', mandatory=False)) + if bind: log.debug('ldap bind to determine group membership succeeded!') @@ -381,7 +388,11 @@ def groups(username, **kwargs): group_list.append(group.split(',')[0].split('=')[-1]) log.debug('User {0} is a member of groups: {1}'.format(username, group_list)) - if not auth(username, kwargs['password']): + # Only test user auth on first call for job. + # 'show_jid' only exists on first payload so we can use that for the conditional. 
+ if 'show_jid' in kwargs and not _bind(username, kwargs['password'], + anonymous=_config('auth_by_group_membership_only', mandatory=False) and + _config('anonymous', mandatory=False)): log.error('LDAP username and password do not match') return [] else: diff --git a/salt/auth/pam.py b/salt/auth/pam.py index 861cb99eed..8387e910f8 100644 --- a/salt/auth/pam.py +++ b/salt/auth/pam.py @@ -42,11 +42,11 @@ from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int from ctypes.util import find_library # Import Salt libs -from salt.utils import get_group_list +import salt.utils # Can be removed once get_group_list is moved from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six LIBPAM = CDLL(find_library('pam')) LIBC = CDLL(find_library('c')) @@ -214,4 +214,4 @@ def groups(username, *args, **kwargs): Uses system groups ''' - return get_group_list(username) + return salt.utils.get_group_list(username) diff --git a/salt/auth/stormpath.py b/salt/auth/stormpath.py deleted file mode 100644 index 0a710db46c..0000000000 --- a/salt/auth/stormpath.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -''' -Provide authentication using Stormpath. - -This driver requires some extra configuration beyond that which Stormpath -normally requires. - -.. code-block:: yaml - - stormpath: - apiid: 1234567890 - apikey: 1234567890/ABCDEF - # Can use an application ID - application: 6789012345 - # Or can use a directory ID - directory: 3456789012 - # But not both - -.. versionadded:: 2015.8.0 -''' - -from __future__ import absolute_import -import json -import base64 -import urllib -import salt.utils.http -import logging - -log = logging.getLogger(__name__) - - -def auth(username, password): - ''' - Authenticate using a Stormpath directory or application - ''' - apiid = __opts__.get('stormpath', {}).get('apiid', None) - apikey = __opts__.get('stormpath', {}).get('apikey', None) - application = __opts__.get('stormpath', {}).get('application', None) - path = 'https://api.stormpath.com/v1' - - if application is not None: - path = '{0}/applications/{1}/loginAttempts'.format(path, application) - else: - return False - - username = urllib.quote(username) - data = { - 'type': 'basic', - 'value': base64.b64encode('{0}:{1}'.format(username, password)) - } - log.debug('{0}:{1}'.format(username, password)) - log.debug(path) - log.debug(data) - log.debug(json.dumps(data)) - - result = salt.utils.http.query( - path, - method='POST', - username=apiid, - password=apikey, - data=json.dumps(data), - header_dict={'Content-type': 'application/json;charset=UTF-8'}, - decode=False, - status=True, - opts=__opts__, - ) - log.debug(result) - if result.get('status', 403) == 200: - return True - - return False diff --git a/salt/beacons/__init__.py b/salt/beacons/__init__.py index 994f2c02af..0b27bfbe67 100644 --- a/salt/beacons/__init__.py +++ b/salt/beacons/__init__.py @@ -37,8 +37,9 @@ class Beacon(object): .. 
code_block:: yaml beacons: inotify: - - /etc/fstab: {} - - /var/cache/foo: {} + - files: + - /etc/fstab: {} + - /var/cache/foo: {} ''' ret = [] b_config = copy.deepcopy(config) @@ -69,6 +70,7 @@ class Beacon(object): log.trace('Beacon processing: {0}'.format(mod)) fun_str = '{0}.beacon'.format(mod) + validate_str = '{0}.validate'.format(mod) if fun_str in self.beacons: runonce = self._determine_beacon_config(current_beacon_config, 'run_once') interval = self._determine_beacon_config(current_beacon_config, 'interval') @@ -95,6 +97,17 @@ class Beacon(object): continue # Update __grains__ on the beacon self.beacons[fun_str].__globals__['__grains__'] = grains + + # Run the validate function if it's available, + # otherwise there is a warning about it being missing + if validate_str in self.beacons: + valid, vcomment = self.beacons[validate_str](b_config[mod]) + + if not valid: + log.info('Beacon %s configuration invalid, ' + 'not running.\n%s', mod, vcomment) + continue + # Fire the beacon! raw = self.beacons[fun_str](b_config[mod]) for data in raw: @@ -193,6 +206,8 @@ class Beacon(object): # Fire the complete event back along with the list of beacons evt = salt.utils.event.get_event('minion', opts=self.opts) b_conf = self.functions['config.merge']('beacons') + if not isinstance(self.opts['beacons'], dict): + self.opts['beacons'] = {} self.opts['beacons'].update(b_conf) evt.fire_event({'complete': True, 'beacons': self.opts['beacons']}, tag='/salt/minion/minion_beacons_list_complete') diff --git a/salt/beacons/adb.py b/salt/beacons/adb.py index 003f2d6206..1fc1c2a3b6 100644 --- a/salt/beacons/adb.py +++ b/salt/beacons/adb.py @@ -10,7 +10,8 @@ from __future__ import absolute_import import logging # Salt libs -import salt.utils +import salt.utils.path +from salt.ext.six.moves import map log = logging.getLogger(__name__) @@ -21,32 +22,41 @@ last_state_extra = {'value': False, 'no_devices': False} def __virtual__(): - which_result = salt.utils.which('adb') + which_result = salt.utils.path.which('adb') if which_result is None: return False else: return __virtualname__ -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' # Configuration for adb beacon should be a dictionary with states array - if not isinstance(config, dict): - log.info('Configuration for adb beacon must be a dict.') - return False, ('Configuration for adb beacon must be a dict.') - elif 'states' not in config.keys(): + if not isinstance(config, list): + log.info('Configuration for adb beacon must be a list.') + return False, ('Configuration for adb beacon must be a list.') + + _config = {} + list(map(_config.update, config)) + + if 'states' not in _config: log.info('Configuration for adb beacon must include a states array.') return False, ('Configuration for adb beacon must include a states array.') else: - states = ['offline', 'bootloader', 'device', 'host', 'recovery', 'no permissions', - 'sideload', 'unauthorized', 'unknown', 'missing'] - if any(s not in states for s in config['states']): - log.info('Need a one of the following adb ' - 'states: {0}'.format(', '.join(states))) - return False, ('Need a one of the following adb ' - 'states: {0}'.format(', '.join(states))) + if not isinstance(_config['states'], list): + log.info('Configuration for adb beacon must include a states array.') + return False, ('Configuration for adb beacon must include a states array.') + else: + states = ['offline', 'bootloader', 'device', 'host', + 'recovery', 'no permissions', + 'sideload', 'unauthorized', 
'unknown', 'missing'] + if any(s not in states for s in _config['states']): + log.info('Need a one of the following adb ' + 'states: {0}'.format(', '.join(states))) + return False, ('Need a one of the following adb ' + 'states: {0}'.format(', '.join(states))) return True, 'Valid beacon configuration' @@ -74,11 +84,10 @@ def beacon(config): log.trace('adb beacon starting') ret = [] - _validate = __validate__(config) - if not _validate[0]: - return ret + _config = {} + list(map(_config.update, config)) - out = __salt__['cmd.run']('adb devices', runas=config.get('user', None)) + out = __salt__['cmd.run']('adb devices', runas=_config.get('user', None)) lines = out.split('\n')[1:] last_state_devices = list(last_state.keys()) @@ -90,21 +99,21 @@ def beacon(config): found_devices.append(device) if device not in last_state_devices or \ ('state' in last_state[device] and last_state[device]['state'] != state): - if state in config['states']: + if state in _config['states']: ret.append({'device': device, 'state': state, 'tag': state}) last_state[device] = {'state': state} - if 'battery_low' in config: + if 'battery_low' in _config: val = last_state.get(device, {}) cmd = 'adb -s {0} shell cat /sys/class/power_supply/*/capacity'.format(device) - battery_levels = __salt__['cmd.run'](cmd, runas=config.get('user', None)).split('\n') + battery_levels = __salt__['cmd.run'](cmd, runas=_config.get('user', None)).split('\n') for l in battery_levels: battery_level = int(l) if 0 < battery_level < 100: if 'battery' not in val or battery_level != val['battery']: - if ('battery' not in val or val['battery'] > config['battery_low']) and \ - battery_level <= config['battery_low']: + if ('battery' not in val or val['battery'] > _config['battery_low']) and \ + battery_level <= _config['battery_low']: ret.append({'device': device, 'battery_level': battery_level, 'tag': 'battery_low'}) if device not in last_state: @@ -118,13 +127,13 @@ def beacon(config): # Find missing devices and remove them / send an event for device in last_state_devices: if device not in found_devices: - if 'missing' in config['states']: + if 'missing' in _config['states']: ret.append({'device': device, 'state': 'missing', 'tag': 'missing'}) del last_state[device] # Maybe send an event if we don't have any devices - if 'no_devices_event' in config and config['no_devices_event'] is True: + if 'no_devices_event' in _config and _config['no_devices_event'] is True: if len(found_devices) == 0 and not last_state_extra['no_devices']: ret.append({'tag': 'no_devices'}) diff --git a/salt/beacons/avahi_announce.py b/salt/beacons/avahi_announce.py index ee4d9a7bb7..382660a2f3 100644 --- a/salt/beacons/avahi_announce.py +++ b/salt/beacons/avahi_announce.py @@ -15,6 +15,7 @@ Dependencies from __future__ import absolute_import import logging import time +from salt.ext.six.moves import map # Import 3rd Party libs try: @@ -54,17 +55,23 @@ def __virtual__(): '\'python-avahi\' dependency is missing.'.format(__virtualname__) -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' - if not isinstance(config, dict): - return False, ('Configuration for avahi_announcement ' - 'beacon must be a dictionary') - elif not all(x in list(config.keys()) for x in ('servicetype', 'port', 'txt')): + _config = {} + list(map(_config.update, config)) + + if not isinstance(config, list): + return False, ('Configuration for avahi_announce ' + 'beacon must be a list.') + + elif not all(x in _config for x in ('servicetype', + 'port', + 'txt')): return 
False, ('Configuration for avahi_announce beacon ' - 'must contain servicetype, port and txt items') - return True, 'Valid beacon configuration' + 'must contain servicetype, port and txt items.') + return True, 'Valid beacon configuration.' def _enforce_txt_record_maxlen(key, value): @@ -138,13 +145,13 @@ def beacon(config): beacons: avahi_announce: - run_once: True - servicetype: _demo._tcp - port: 1234 - txt: - ProdName: grains.productname - SerialNo: grains.serialnumber - Comments: 'this is a test' + - run_once: True + - servicetype: _demo._tcp + - port: 1234 + - txt: + ProdName: grains.productname + SerialNo: grains.serialnumber + Comments: 'this is a test' ''' ret = [] changes = {} @@ -152,30 +159,27 @@ def beacon(config): global LAST_GRAINS - _validate = __validate__(config) - if not _validate[0]: - log.warning('Beacon {0} configuration invalid, ' - 'not adding. {1}'.format(__virtualname__, _validate[1])) - return ret + _config = {} + list(map(_config.update, config)) - if 'servicename' in config: - servicename = config['servicename'] + if 'servicename' in _config: + servicename = _config['servicename'] else: servicename = __grains__['host'] # Check for hostname change if LAST_GRAINS and LAST_GRAINS['host'] != servicename: changes['servicename'] = servicename - if LAST_GRAINS and config.get('reset_on_change', False): + if LAST_GRAINS and _config.get('reset_on_change', False): # Check for IP address change in the case when we reset on change if LAST_GRAINS.get('ipv4', []) != __grains__.get('ipv4', []): changes['ipv4'] = __grains__.get('ipv4', []) if LAST_GRAINS.get('ipv6', []) != __grains__.get('ipv6', []): changes['ipv6'] = __grains__.get('ipv6', []) - for item in config['txt']: - if config['txt'][item].startswith('grains.'): - grain = config['txt'][item][7:] + for item in _config['txt']: + if _config['txt'][item].startswith('grains.'): + grain = _config['txt'][item][7:] grain_index = None square_bracket = grain.find('[') if square_bracket != -1 and grain[-1] == ']': @@ -192,7 +196,7 @@ def beacon(config): if LAST_GRAINS and (LAST_GRAINS.get(grain, '') != __grains__.get(grain, '')): changes[str('txt.' + item)] = txt[item] else: - txt[item] = _enforce_txt_record_maxlen(item, config['txt'][item]) + txt[item] = _enforce_txt_record_maxlen(item, _config['txt'][item]) if not LAST_GRAINS: changes[str('txt.' 
+ item)] = txt[item] @@ -200,33 +204,33 @@ def beacon(config): if changes: if not LAST_GRAINS: changes['servicename'] = servicename - changes['servicetype'] = config['servicetype'] - changes['port'] = config['port'] + changes['servicetype'] = _config['servicetype'] + changes['port'] = _config['port'] changes['ipv4'] = __grains__.get('ipv4', []) changes['ipv6'] = __grains__.get('ipv6', []) GROUP.AddService(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0), - servicename, config['servicetype'], '', '', - dbus.UInt16(config['port']), avahi.dict_to_txt_array(txt)) + servicename, _config['servicetype'], '', '', + dbus.UInt16(_config['port']), avahi.dict_to_txt_array(txt)) GROUP.Commit() - elif config.get('reset_on_change', False) or 'servicename' in changes: + elif _config.get('reset_on_change', False) or 'servicename' in changes: # A change in 'servicename' requires a reset because we can only # directly update TXT records GROUP.Reset() - reset_wait = config.get('reset_wait', 0) + reset_wait = _config.get('reset_wait', 0) if reset_wait > 0: time.sleep(reset_wait) GROUP.AddService(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0), - servicename, config['servicetype'], '', '', - dbus.UInt16(config['port']), avahi.dict_to_txt_array(txt)) + servicename, _config['servicetype'], '', '', + dbus.UInt16(_config['port']), avahi.dict_to_txt_array(txt)) GROUP.Commit() else: GROUP.UpdateServiceTxt(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0), - servicename, config['servicetype'], '', + servicename, _config['servicetype'], '', avahi.dict_to_txt_array(txt)) ret.append({'tag': 'result', 'changes': changes}) - if config.get('copy_grains', False): + if _config.get('copy_grains', False): LAST_GRAINS = __grains__.copy() else: LAST_GRAINS = __grains__ diff --git a/salt/beacons/bonjour_announce.py b/salt/beacons/bonjour_announce.py index fce5211464..f225c6211d 100644 --- a/salt/beacons/bonjour_announce.py +++ b/salt/beacons/bonjour_announce.py @@ -9,6 +9,7 @@ import atexit import logging import select import time +from salt.ext.six.moves import map # Import 3rd Party libs try: @@ -47,17 +48,23 @@ def _register_callback(sdRef, flags, errorCode, name, regtype, domain): # pylin log.error('Bonjour registration failed with error code {0}'.format(errorCode)) -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' - if not isinstance(config, dict): - return False, ('Configuration for bonjour_announcement ' - 'beacon must be a dictionary') - elif not all(x in list(config.keys()) for x in ('servicetype', 'port', 'txt')): + _config = {} + list(map(_config.update, config)) + + if not isinstance(config, list): + return False, ('Configuration for bonjour_announce ' + 'beacon must be a list.') + + elif not all(x in _config for x in ('servicetype', + 'port', + 'txt')): return False, ('Configuration for bonjour_announce beacon ' - 'must contain servicetype, port and txt items') - return True, 'Valid beacon configuration' + 'must contain servicetype, port and txt items.') + return True, 'Valid beacon configuration.' 
def _enforce_txt_record_maxlen(key, value): @@ -131,13 +138,13 @@ def beacon(config): beacons: bonjour_announce: - run_once: True - servicetype: _demo._tcp - port: 1234 - txt: - ProdName: grains.productname - SerialNo: grains.serialnumber - Comments: 'this is a test' + - run_once: True + - servicetype: _demo._tcp + - port: 1234 + - txt: + ProdName: grains.productname + SerialNo: grains.serialnumber + Comments: 'this is a test' ''' ret = [] changes = {} @@ -146,30 +153,27 @@ def beacon(config): global LAST_GRAINS global SD_REF - _validate = __validate__(config) - if not _validate[0]: - log.warning('Beacon {0} configuration invalid, ' - 'not adding. {1}'.format(__virtualname__, _validate[1])) - return ret + _config = {} + list(map(_config.update, config)) - if 'servicename' in config: - servicename = config['servicename'] + if 'servicename' in _config: + servicename = _config['servicename'] else: servicename = __grains__['host'] # Check for hostname change if LAST_GRAINS and LAST_GRAINS['host'] != servicename: changes['servicename'] = servicename - if LAST_GRAINS and config.get('reset_on_change', False): + if LAST_GRAINS and _config.get('reset_on_change', False): # Check for IP address change in the case when we reset on change if LAST_GRAINS.get('ipv4', []) != __grains__.get('ipv4', []): changes['ipv4'] = __grains__.get('ipv4', []) if LAST_GRAINS.get('ipv6', []) != __grains__.get('ipv6', []): changes['ipv6'] = __grains__.get('ipv6', []) - for item in config['txt']: - if config['txt'][item].startswith('grains.'): - grain = config['txt'][item][7:] + for item in _config['txt']: + if _config['txt'][item].startswith('grains.'): + grain = _config['txt'][item][7:] grain_index = None square_bracket = grain.find('[') if square_bracket != -1 and grain[-1] == ']': @@ -186,7 +190,7 @@ def beacon(config): if LAST_GRAINS and (LAST_GRAINS.get(grain, '') != __grains__.get(grain, '')): changes[str('txt.' + item)] = txt[item] else: - txt[item] = _enforce_txt_record_maxlen(item, config['txt'][item]) + txt[item] = _enforce_txt_record_maxlen(item, _config['txt'][item]) if not LAST_GRAINS: changes[str('txt.' 
+ item)] = txt[item] @@ -195,32 +199,32 @@ def beacon(config): txt_record = pybonjour.TXTRecord(items=txt) if not LAST_GRAINS: changes['servicename'] = servicename - changes['servicetype'] = config['servicetype'] - changes['port'] = config['port'] + changes['servicetype'] = _config['servicetype'] + changes['port'] = _config['port'] changes['ipv4'] = __grains__.get('ipv4', []) changes['ipv6'] = __grains__.get('ipv6', []) SD_REF = pybonjour.DNSServiceRegister( name=servicename, - regtype=config['servicetype'], - port=config['port'], + regtype=_config['servicetype'], + port=_config['port'], txtRecord=txt_record, callBack=_register_callback) atexit.register(_close_sd_ref) ready = select.select([SD_REF], [], []) if SD_REF in ready[0]: pybonjour.DNSServiceProcessResult(SD_REF) - elif config.get('reset_on_change', False) or 'servicename' in changes: + elif _config.get('reset_on_change', False) or 'servicename' in changes: # A change in 'servicename' requires a reset because we can only # directly update TXT records SD_REF.close() SD_REF = None - reset_wait = config.get('reset_wait', 0) + reset_wait = _config.get('reset_wait', 0) if reset_wait > 0: time.sleep(reset_wait) SD_REF = pybonjour.DNSServiceRegister( name=servicename, - regtype=config['servicetype'], - port=config['port'], + regtype=_config['servicetype'], + port=_config['port'], txtRecord=txt_record, callBack=_register_callback) ready = select.select([SD_REF], [], []) @@ -236,7 +240,7 @@ def beacon(config): ret.append({'tag': 'result', 'changes': changes}) - if config.get('copy_grains', False): + if _config.get('copy_grains', False): LAST_GRAINS = __grains__.copy() else: LAST_GRAINS = __grains__ diff --git a/salt/beacons/btmp.py b/salt/beacons/btmp.py index 19f421e41c..40b50470d8 100644 --- a/salt/beacons/btmp.py +++ b/salt/beacons/btmp.py @@ -5,7 +5,7 @@ Beacon to fire events at failed login of users .. code-block:: yaml beacons: - btmp: {} + btmp: [] ''' # Import python libs @@ -16,6 +16,9 @@ import struct # Import Salt Libs import salt.utils.files +# Import 3rd-party libs +from salt.ext import six + __virtualname__ = 'btmp' BTMP = '/var/log/btmp' FMT = 'hi32s4s32s256shhiii4i20x' @@ -49,14 +52,14 @@ def _get_loc(): return __context__[LOC_KEY] -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' # Configuration for load beacon should be a list of dicts - if not isinstance(config, dict): + if not isinstance(config, list): return False, ('Configuration for btmp beacon must ' - 'be a list of dictionaries.') + 'be a list.') return True, 'Valid beacon configuration' @@ -68,7 +71,7 @@ def beacon(config): .. 
code-block:: yaml beacons: - btmp: {} + btmp: [] ''' ret = [] with salt.utils.files.fopen(BTMP, 'rb') as fp_: @@ -88,7 +91,7 @@ def beacon(config): event = {} for ind, field in enumerate(FIELDS): event[field] = pack[ind] - if isinstance(event[field], str): + if isinstance(event[field], six.string_types): event[field] = event[field].strip('\x00') ret.append(event) return ret diff --git a/salt/beacons/diskusage.py b/salt/beacons/diskusage.py index 635f13954b..99d71ed0a9 100644 --- a/salt/beacons/diskusage.py +++ b/salt/beacons/diskusage.py @@ -31,14 +31,14 @@ def __virtual__(): return __virtualname__ -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' # Configuration for diskusage beacon should be a list of dicts - if not isinstance(config, dict): + if not isinstance(config, list): return False, ('Configuration for diskusage beacon ' - 'must be a dictionary.') + 'must be a list.') return True, 'Valid beacon configuration' @@ -86,25 +86,24 @@ def beacon(config): parts = psutil.disk_partitions(all=False) ret = [] for mounts in config: - mount = mounts.keys()[0] + mount = next(iter(mounts)) - try: - _current_usage = psutil.disk_usage(mount) - except OSError: - # Ensure a valid mount point - log.warning('{0} is not a valid mount point, try regex.'.format(mount)) - for part in parts: - if re.match(mount, part.mountpoint): - row = {} - row[part.mountpoint] = mounts[mount] - config.append(row) - continue + for part in parts: + if re.match(mount, part.mountpoint): + _mount = part.mountpoint - current_usage = _current_usage.percent - monitor_usage = mounts[mount] - if '%' in monitor_usage: - monitor_usage = re.sub('%', '', monitor_usage) - monitor_usage = float(monitor_usage) - if current_usage >= monitor_usage: - ret.append({'diskusage': current_usage, 'mount': mount}) + try: + _current_usage = psutil.disk_usage(mount) + except OSError: + log.warning('{0} is not a valid mount point.'.format(mount)) + continue + + current_usage = _current_usage.percent + monitor_usage = mounts[mount] + log.info('current_usage {}'.format(current_usage)) + if '%' in monitor_usage: + monitor_usage = re.sub('%', '', monitor_usage) + monitor_usage = float(monitor_usage) + if current_usage >= monitor_usage: + ret.append({'diskusage': current_usage, 'mount': _mount}) return ret diff --git a/salt/beacons/glxinfo.py b/salt/beacons/glxinfo.py index a1961abcc0..005c429474 100644 --- a/salt/beacons/glxinfo.py +++ b/salt/beacons/glxinfo.py @@ -10,7 +10,8 @@ from __future__ import absolute_import import logging # Salt libs -import salt.utils +import salt.utils.path +from salt.ext.six.moves import map log = logging.getLogger(__name__) @@ -21,21 +22,25 @@ last_state = {} def __virtual__(): - which_result = salt.utils.which('glxinfo') + which_result = salt.utils.path.which('glxinfo') if which_result is None: return False else: return __virtualname__ -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' # Configuration for glxinfo beacon should be a dictionary - if not isinstance(config, dict): - return False, ('Configuration for glxinfo beacon must be a dict.') - if 'user' not in config: + if not isinstance(config, list): + return False, ('Configuration for glxinfo beacon must be a list.') + + _config = {} + list(map(_config.update, config)) + + if 'user' not in _config: return False, ('Configuration for glxinfo beacon must ' 'include a user as glxinfo is not available to root.') return True, 'Valid beacon configuration' @@ -45,27 +50,28 @@ def 
beacon(config): ''' Emit the status of a connected display to the minion - Mainly this is used to detect when the display fails to connect for whatever reason. + Mainly this is used to detect when the display fails to connect + for whatever reason. .. code-block:: yaml beacons: glxinfo: - user: frank - screen_event: True + - user: frank + - screen_event: True ''' log.trace('glxinfo beacon starting') ret = [] - _validate = __validate__(config) - if not _validate[0]: - return ret + _config = {} + list(map(_config.update, config)) - retcode = __salt__['cmd.retcode']('DISPLAY=:0 glxinfo', runas=config['user'], python_shell=True) + retcode = __salt__['cmd.retcode']('DISPLAY=:0 glxinfo', + runas=_config['user'], python_shell=True) - if 'screen_event' in config and config['screen_event']: + if 'screen_event' in _config and _config['screen_event']: last_value = last_state.get('screen_available', False) screen_available = retcode == 0 if last_value != screen_available or 'screen_available' not in last_state: diff --git a/salt/beacons/haproxy.py b/salt/beacons/haproxy.py index 9873803d29..269c6e67dd 100644 --- a/salt/beacons/haproxy.py +++ b/salt/beacons/haproxy.py @@ -9,7 +9,7 @@ Fire an event when over a specified threshold. # Import Python libs from __future__ import absolute_import import logging - +from salt.ext.six.moves import map log = logging.getLogger(__name__) @@ -23,17 +23,39 @@ def __virtual__(): if 'haproxy.get_sessions' in __salt__: return __virtualname__ else: + log.debug('Not loading haproxy beacon') return False -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' - if not isinstance(config, dict): - return False, ('Configuration for haproxy beacon must be a dictionary.') - if 'haproxy' not in config: - return False, ('Configuration for haproxy beacon requires a list of backends and servers') + if not isinstance(config, list): + return False, ('Configuration for haproxy beacon must ' + 'be a list.') + else: + _config = {} + list(map(_config.update, config)) + + if 'backends' not in _config: + return False, ('Configuration for haproxy beacon ' + 'requires backends.') + else: + if not isinstance(_config['backends'], dict): + return False, ('Backends for haproxy beacon ' + 'must be a dictionary.') + else: + for backend in _config['backends']: + log.debug('_config {}'.format(_config['backends'][backend])) + if 'servers' not in _config['backends'][backend]: + return False, ('Backends for haproxy beacon ' + 'require servers.') + else: + _servers = _config['backends'][backend]['servers'] + if not isinstance(_servers, list): + return False, ('Servers for haproxy beacon ' + 'must be a list.') return True, 'Valid beacon configuration' @@ -46,22 +68,23 @@ def beacon(config): beacons: haproxy: - - www-backend: - threshold: 45 - servers: - - web1 - - web2 + - backends: + www-backend: + threshold: 45 + servers: + - web1 + - web2 - interval: 120 ''' - log.debug('haproxy beacon starting') ret = [] - _validate = __validate__(config) - if not _validate: - log.debug('haproxy beacon unable to validate') - return ret - for backend in config: - threshold = config[backend]['threshold'] - for server in config[backend]['servers']: + + _config = {} + list(map(_config.update, config)) + + for backend in _config.get('backends', ()): + backend_config = _config['backends'][backend] + threshold = backend_config['threshold'] + for server in backend_config['servers']: scur = __salt__['haproxy.get_sessions'](server, backend) if scur: if int(scur) > int(threshold): @@ -69,6 
+92,10 @@ def beacon(config): 'scur': scur, 'threshold': threshold, } - log.debug('Emit because {0} > {1} for {2} in {3}'.format(scur, threshold, server, backend)) + log.debug('Emit because {0} > {1}' + ' for {2} in {3}'.format(scur, + threshold, + server, + backend)) ret.append(_server) return ret diff --git a/salt/beacons/inotify.py b/salt/beacons/inotify.py index 728a89efc7..ee1dfa4f78 100644 --- a/salt/beacons/inotify.py +++ b/salt/beacons/inotify.py @@ -23,6 +23,7 @@ import re # Import salt libs import salt.ext.six +from salt.ext.six.moves import map # Import third party libs try: @@ -79,7 +80,7 @@ def _get_notifier(config): return __context__['inotify.notifier'] -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' @@ -105,37 +106,45 @@ def __validate__(config): ] # Configuration for inotify beacon should be a dict of dicts - log.debug('config {0}'.format(config)) - if not isinstance(config, dict): - return False, 'Configuration for inotify beacon must be a dictionary.' + if not isinstance(config, list): + return False, 'Configuration for inotify beacon must be a list.' else: - for config_item in config: - if not isinstance(config[config_item], dict): - return False, ('Configuration for inotify beacon must ' - 'be a dictionary of dictionaries.') - else: - if not any(j in ['mask', 'recurse', 'auto_add'] for j in config[config_item]): + _config = {} + list(map(_config.update, config)) + + if 'files' not in _config: + return False, 'Configuration for inotify beacon must include files.' + else: + for path in _config.get('files'): + + if not isinstance(_config['files'][path], dict): return False, ('Configuration for inotify beacon must ' - 'contain mask, recurse or auto_add items.') + 'be a list of dictionaries.') + else: + if not any(j in ['mask', + 'recurse', + 'auto_add'] for j in _config['files'][path]): + return False, ('Configuration for inotify beacon must ' + 'contain mask, recurse or auto_add items.') - if 'auto_add' in config[config_item]: - if not isinstance(config[config_item]['auto_add'], bool): - return False, ('Configuration for inotify beacon ' - 'auto_add must be boolean.') + if 'auto_add' in _config['files'][path]: + if not isinstance(_config['files'][path]['auto_add'], bool): + return False, ('Configuration for inotify beacon ' + 'auto_add must be boolean.') - if 'recurse' in config[config_item]: - if not isinstance(config[config_item]['recurse'], bool): - return False, ('Configuration for inotify beacon ' - 'recurse must be boolean.') + if 'recurse' in _config['files'][path]: + if not isinstance(_config['files'][path]['recurse'], bool): + return False, ('Configuration for inotify beacon ' + 'recurse must be boolean.') - if 'mask' in config[config_item]: - if not isinstance(config[config_item]['mask'], list): - return False, ('Configuration for inotify beacon ' - 'mask must be list.') - for mask in config[config_item]['mask']: - if mask not in VALID_MASK: - return False, ('Configuration for inotify beacon ' - 'invalid mask option {0}.'.format(mask)) + if 'mask' in _config['files'][path]: + if not isinstance(_config['files'][path]['mask'], list): + return False, ('Configuration for inotify beacon ' + 'mask must be list.') + for mask in _config['files'][path]['mask']: + if mask not in VALID_MASK: + return False, ('Configuration for inotify beacon ' + 'invalid mask option {0}.'.format(mask)) return True, 'Valid beacon configuration' @@ -149,19 +158,20 @@ def beacon(config): beacons: inotify: - /path/to/file/or/dir: - mask: - - open - - 
create - - close_write - recurse: True - auto_add: True - exclude: - - /path/to/file/or/dir/exclude1 - - /path/to/file/or/dir/exclude2 - - /path/to/file/or/dir/regex[a-m]*$: - regex: True - coalesce: True + - files: + /path/to/file/or/dir: + mask: + - open + - create + - close_write + recurse: True + auto_add: True + exclude: + - /path/to/file/or/dir/exclude1 + - /path/to/file/or/dir/exclude2 + - /path/to/file/or/dir/regex[a-m]*$: + regex: True + - coalesce: True The mask list can contain the following events (the default mask is create, delete, and modify): @@ -203,8 +213,11 @@ def beacon(config): affects all paths that are being watched. This is due to this option being at the Notifier level in pyinotify. ''' + _config = {} + list(map(_config.update, config)) + ret = [] - notifier = _get_notifier(config) + notifier = _get_notifier(_config) wm = notifier._watch_manager # Read in existing events @@ -219,11 +232,13 @@ def beacon(config): # Find the matching path in config path = event.path while path != '/': - if path in config: + if path in _config.get('files', {}): break path = os.path.dirname(path) - excludes = config[path].get('exclude', '') + for path in _config.get('files', {}): + excludes = _config['files'][path].get('exclude', '') + if excludes and isinstance(excludes, list): for exclude in excludes: if isinstance(exclude, dict): @@ -257,9 +272,10 @@ def beacon(config): # Update existing watches and add new ones # TODO: make the config handle more options - for path in config: - if isinstance(config[path], dict): - mask = config[path].get('mask', DEFAULT_MASK) + for path in _config.get('files', ()): + + if isinstance(_config['files'][path], dict): + mask = _config['files'][path].get('mask', DEFAULT_MASK) if isinstance(mask, list): r_mask = 0 for sub in mask: @@ -269,8 +285,8 @@ def beacon(config): else: r_mask = mask mask = r_mask - rec = config[path].get('recurse', False) - auto_add = config[path].get('auto_add', False) + rec = _config['files'][path].get('recurse', False) + auto_add = _config['files'][path].get('auto_add', False) else: mask = DEFAULT_MASK rec = False @@ -287,7 +303,7 @@ def beacon(config): if update: wm.update_watch(wd, mask=mask, rec=rec, auto_add=auto_add) elif os.path.exists(path): - excludes = config[path].get('exclude', '') + excludes = _config['files'][path].get('exclude', '') excl = None if isinstance(excludes, list): excl = [] diff --git a/salt/beacons/journald.py b/salt/beacons/journald.py index d5cfdca9b4..9b566d587d 100644 --- a/salt/beacons/journald.py +++ b/salt/beacons/journald.py @@ -10,6 +10,7 @@ from __future__ import absolute_import import salt.utils import salt.utils.locales import salt.ext.six +from salt.ext.six.moves import map # Import third party libs try: @@ -43,18 +44,21 @@ def _get_journal(): return __context__['systemd.journald'] -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' # Configuration for journald beacon should be a list of dicts - if not isinstance(config, dict): - return False + if not isinstance(config, list): + return (False, 'Configuration for journald beacon must be a list.') else: - for item in config: - if not isinstance(config[item], dict): - return False, ('Configuration for journald beacon must ' - 'be a dictionary of dictionaries.') + _config = {} + list(map(_config.update, config)) + + for name in _config.get('services', {}): + if not isinstance(_config['services'][name], dict): + return False, ('Services configuration for journald beacon ' + 'must be a list of 
dictionaries.') return True, 'Valid beacon configuration' @@ -69,25 +73,31 @@ def beacon(config): beacons: journald: - sshd: - SYSLOG_IDENTIFIER: sshd - PRIORITY: 6 + - services: + sshd: + SYSLOG_IDENTIFIER: sshd + PRIORITY: 6 ''' ret = [] journal = _get_journal() + + _config = {} + list(map(_config.update, config)) + while True: cur = journal.get_next() if not cur: break - for name in config: + + for name in _config.get('services', {}): n_flag = 0 - for key in config[name]: + for key in _config['services'][name]: if isinstance(key, salt.ext.six.string_types): key = salt.utils.locales.sdecode(key) if key in cur: - if config[name][key] == cur[key]: + if _config['services'][name][key] == cur[key]: n_flag += 1 - if n_flag == len(config[name]): + if n_flag == len(_config['services'][name]): # Match! sub = salt.utils.simple_types_filter(cur) sub.update({'tag': name}) diff --git a/salt/beacons/load.py b/salt/beacons/load.py index a3cba14fcb..8b57904b84 100644 --- a/salt/beacons/load.py +++ b/salt/beacons/load.py @@ -9,7 +9,8 @@ import logging import os # Import Salt libs -import salt.utils +import salt.utils.platform +from salt.ext.six.moves import map # Import Py3 compat from salt.ext.six.moves import zip @@ -22,13 +23,13 @@ LAST_STATUS = {} def __virtual__(): - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return False else: return __virtualname__ -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' @@ -37,25 +38,26 @@ def __validate__(config): if not isinstance(config, list): return False, ('Configuration for load beacon must be a list.') else: - for config_item in config: - if not isinstance(config_item, dict): - return False, ('Configuration for load beacon must ' - 'be a list of dictionaries.') - else: - if not all(j in ['1m', '5m', '15m'] for j in config_item.keys()): - return False, ('Configuration for load beacon must ' - 'contain 1m, 5m or 15m items.') + _config = {} + list(map(_config.update, config)) + + if 'averages' not in _config: + return False, ('Averages configuration is required' + ' for load beacon.') + else: + + if not any(j in ['1m', '5m', '15m'] for j + in _config.get('averages', {})): + return False, ('Averages configuration for load beacon ' + 'must contain 1m, 5m or 15m items.') for item in ['1m', '5m', '15m']: - if item not in config_item: - continue - - if not isinstance(config_item[item], list): - return False, ('Configuration for load beacon: ' + if not isinstance(_config['averages'][item], list): + return False, ('Averages configuration for load beacon: ' '1m, 5m and 15m items must be ' 'a list of two items.') else: - if len(config_item[item]) != 2: + if len(_config['averages'][item]) != 2: return False, ('Configuration for load beacon: ' '1m, 5m and 15m items must be ' 'a list of two items.') @@ -82,33 +84,37 @@ def beacon(config): beacons: load: - 1m: - - 0.0 - - 2.0 - 5m: - - 0.0 - - 1.5 - 15m: - - 0.1 - - 1.0 - emitatstartup: True - onchangeonly: False + - averages: + 1m: + - 0.0 + - 2.0 + 5m: + - 0.0 + - 1.5 + 15m: + - 0.1 + - 1.0 + - emitatstartup: True + - onchangeonly: False ''' log.trace('load beacon starting') + _config = {} + list(map(_config.update, config)) + # Default config if not present - if 'emitatstartup' not in config: - config['emitatstartup'] = True - if 'onchangeonly' not in config: - config['onchangeonly'] = False + if 'emitatstartup' not in _config: + _config['emitatstartup'] = True + if 'onchangeonly' not in _config: + _config['onchangeonly'] = False ret = [] avgs = 
os.getloadavg() avg_keys = ['1m', '5m', '15m'] avg_dict = dict(zip(avg_keys, avgs)) - if config['onchangeonly']: + if _config['onchangeonly']: if not LAST_STATUS: for k in ['1m', '5m', '15m']: LAST_STATUS[k] = avg_dict[k] @@ -120,27 +126,40 @@ def beacon(config): # Check each entry for threshold for k in ['1m', '5m', '15m']: - if k in config: - if config['onchangeonly']: - # Emit if current is more that threshold and old value less that threshold - if float(avg_dict[k]) > float(config[k][1]) and float(LAST_STATUS[k]) < float(config[k][1]): - log.debug('Emit because {0} > {1} and last was {2}'.format(float(avg_dict[k]), float(config[k][1]), float(LAST_STATUS[k]))) + if k in _config.get('averages', {}): + if _config['onchangeonly']: + # Emit if current is more that threshold and old value less + # that threshold + if float(avg_dict[k]) > float(_config['averages'][k][1]) and \ + float(LAST_STATUS[k]) < float(_config['averages'][k][1]): + log.debug('Emit because {0} > {1} and last was ' + '{2}'.format(float(avg_dict[k]), + float(_config['averages'][k][1]), + float(LAST_STATUS[k]))) send_beacon = True break - # Emit if current is less that threshold and old value more that threshold - if float(avg_dict[k]) < float(config[k][0]) and float(LAST_STATUS[k]) > float(config[k][0]): - log.debug('Emit because {0} < {1} and last was {2}'.format(float(avg_dict[k]), float(config[k][0]), float(LAST_STATUS[k]))) + # Emit if current is less that threshold and old value more + # that threshold + if float(avg_dict[k]) < float(_config['averages'][k][0]) and \ + float(LAST_STATUS[k]) > float(_config['averages'][k][0]): + log.debug('Emit because {0} < {1} and last was' + '{2}'.format(float(avg_dict[k]), + float(_config['averages'][k][0]), + float(LAST_STATUS[k]))) send_beacon = True break else: # Emit no matter LAST_STATUS - if float(avg_dict[k]) < float(config[k][0]) or \ - float(avg_dict[k]) > float(config[k][1]): - log.debug('Emit because {0} < {1} or > {2}'.format(float(avg_dict[k]), float(config[k][0]), float(config[k][1]))) + if float(avg_dict[k]) < float(_config['averages'][k][0]) or \ + float(avg_dict[k]) > float(_config['averages'][k][1]): + log.debug('Emit because {0} < {1} or > ' + '{2}'.format(float(avg_dict[k]), + float(_config['averages'][k][0]), + float(_config['averages'][k][1]))) send_beacon = True break - if config['onchangeonly']: + if _config['onchangeonly']: for k in ['1m', '5m', '15m']: LAST_STATUS[k] = avg_dict[k] diff --git a/salt/beacons/log.py b/salt/beacons/log.py index d275170d03..5ce4be3029 100644 --- a/salt/beacons/log.py +++ b/salt/beacons/log.py @@ -11,8 +11,9 @@ from __future__ import absolute_import import logging # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.platform +from salt.ext.six.moves import map try: @@ -35,7 +36,7 @@ log = logging.getLogger(__name__) def __virtual__(): - if not salt.utils.is_windows() and HAS_REGEX: + if not salt.utils.platform.is_windows() and HAS_REGEX: return __virtualname__ return False @@ -48,13 +49,20 @@ def _get_loc(): return __context__[LOC_KEY] -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' + _config = {} + list(map(_config.update, config)) + # Configuration for log beacon should be a list of dicts - if not isinstance(config, dict): - return False, ('Configuration for log beacon must be a dictionary.') + if not isinstance(config, list): + return False, ('Configuration for log beacon must be a list.') + + if 'file' not in _config: + return False, ('Configuration for 
log beacon ' + 'must contain file option.') return True, 'Valid beacon configuration' @@ -67,20 +75,24 @@ def beacon(config): beacons: log: - file: - : - regex: + - file: + - tags: + : + regex: ''' + _config = {} + list(map(_config.update, config)) + ret = [] - if 'file' not in config: + if 'file' not in _config: event = SKEL.copy() event['tag'] = 'global' event['error'] = 'file not defined in config' ret.append(event) return ret - with salt.utils.files.fopen(config['file'], 'r') as fp_: + with salt.utils.files.fopen(_config['file'], 'r') as fp_: loc = __context__.get(LOC_KEY, 0) if loc == 0: fp_.seek(0, 2) @@ -92,16 +104,17 @@ def beacon(config): fp_.seek(loc) txt = fp_.read() + log.info('txt {}'.format(txt)) d = {} - for tag in config: - if 'regex' not in config[tag]: + for tag in _config.get('tags', {}): + if 'regex' not in _config['tags'][tag]: continue - if len(config[tag]['regex']) < 1: + if len(_config['tags'][tag]['regex']) < 1: continue try: - d[tag] = re.compile(r'{0}'.format(config[tag]['regex'])) - except Exception: + d[tag] = re.compile(r'{0}'.format(_config['tags'][tag]['regex'])) + except Exception as e: event = SKEL.copy() event['tag'] = tag event['error'] = 'bad regex' diff --git a/salt/beacons/memusage.py b/salt/beacons/memusage.py index d779f9c58e..a64dc0794d 100644 --- a/salt/beacons/memusage.py +++ b/salt/beacons/memusage.py @@ -11,6 +11,7 @@ Beacon to monitor memory usage. from __future__ import absolute_import import logging import re +from salt.ext.six.moves import map # Import Third Party Libs try: @@ -31,14 +32,22 @@ def __virtual__(): return __virtualname__ -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' # Configuration for memusage beacon should be a list of dicts - if not isinstance(config, dict): + if not isinstance(config, list): return False, ('Configuration for memusage ' - 'beacon must be a dictionary.') + 'beacon must be a list.') + else: + _config = {} + list(map(_config.update, config)) + + if 'percent' not in _config: + return False, ('Configuration for memusage beacon ' + 'requires percent.') + return True, 'Valid beacon configuration' @@ -46,7 +55,8 @@ def beacon(config): ''' Monitor the memory usage of the minion - Specify thresholds for percent used and only emit a beacon if it is exceeded. + Specify thresholds for percent used and only emit a beacon + if it is exceeded. .. 
code-block:: yaml @@ -55,15 +65,17 @@ def beacon(config): - percent: 63% ''' ret = [] - for memusage in config: - mount = memusage.keys()[0] - _current_usage = psutil.virtual_memory() - current_usage = _current_usage.percent - monitor_usage = memusage[mount] - if '%' in monitor_usage: - monitor_usage = re.sub('%', '', monitor_usage) - monitor_usage = float(monitor_usage) - if current_usage >= monitor_usage: - ret.append({'memusage': current_usage}) + _config = {} + list(map(_config.update, config)) + + _current_usage = psutil.virtual_memory() + + current_usage = _current_usage.percent + monitor_usage = _config['percent'] + if '%' in monitor_usage: + monitor_usage = re.sub('%', '', monitor_usage) + monitor_usage = float(monitor_usage) + if current_usage >= monitor_usage: + ret.append({'memusage': current_usage}) return ret diff --git a/salt/beacons/network_info.py b/salt/beacons/network_info.py index be3f82a30f..e259271339 100644 --- a/salt/beacons/network_info.py +++ b/salt/beacons/network_info.py @@ -16,6 +16,9 @@ try: HAS_PSUTIL = True except ImportError: HAS_PSUTIL = False + +from salt.ext.six.moves import map + # pylint: enable=import-error log = logging.getLogger(__name__) @@ -45,7 +48,7 @@ def __virtual__(): return __virtualname__ -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' @@ -57,15 +60,19 @@ def __validate__(config): ] # Configuration for load beacon should be a list of dicts - if not isinstance(config, dict): - return False, ('Configuration for load beacon must be a dictionary.') + if not isinstance(config, list): + return False, ('Configuration for network_info beacon must be a list.') else: - for item in config: - if not isinstance(config[item], dict): - return False, ('Configuration for load beacon must ' - 'be a dictionary of dictionaries.') + + _config = {} + list(map(_config.update, config)) + + for item in _config.get('interfaces', {}): + if not isinstance(_config['interfaces'][item], dict): + return False, ('Configuration for network_info beacon must ' + 'be a list of dictionaries.') else: - if not any(j in VALID_ITEMS for j in config[item]): + if not any(j in VALID_ITEMS for j in _config['interfaces'][item]): return False, ('Invalid configuration item in ' 'Beacon configuration.') return True, 'Valid beacon configuration' @@ -86,16 +93,17 @@ def beacon(config): beacons: network_info: - - eth0: - type: equal - bytes_sent: 100000 - bytes_recv: 100000 - packets_sent: 100000 - packets_recv: 100000 - errin: 100 - errout: 100 - dropin: 100 - dropout: 100 + - interfaces: + eth0: + type: equal + bytes_sent: 100000 + bytes_recv: 100000 + packets_sent: 100000 + packets_recv: 100000 + errin: 100 + errout: 100 + dropin: 100 + dropout: 100 Emit beacon when any values are greater than configured values. 
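Throughout this changeset the beacon configuration moves from a dictionary to a list of single-key dictionaries, and every reworked validate()/beacon() pair reads it through the same idiom: each item in the list is merged into one lookup dict. A minimal sketch of that idiom follows; the interface name, threshold and interval values are invented for illustration and are not part of the change.

.. code-block:: python

    # Sketch only -- the values here are invented for illustration.
    config = [
        {'interfaces': {'eth0': {'type': 'greater', 'bytes_sent': 100000}}},
        {'interval': 60},
    ]

    _config = {}
    list(map(_config.update, config))  # merge each single-key dict into _config

    # _config is now a flat dict:
    # {'interfaces': {'eth0': {'type': 'greater', 'bytes_sent': 100000}},
    #  'interval': 60}

The list() call only forces the map() iterator under Python 3; the merge itself is plain dict.update().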
@@ -104,46 +112,53 @@ def beacon(config): beacons: network_info: - - eth0: - type: greater - bytes_sent: 100000 - bytes_recv: 100000 - packets_sent: 100000 - packets_recv: 100000 - errin: 100 - errout: 100 - dropin: 100 - dropout: 100 + - interfaces: + eth0: + type: greater + bytes_sent: 100000 + bytes_recv: 100000 + packets_sent: 100000 + packets_recv: 100000 + errin: 100 + errout: 100 + dropin: 100 + dropout: 100 ''' ret = [] + _config = {} + list(map(_config.update, config)) + + log.debug('psutil.net_io_counters {}'.format(psutil.net_io_counters)) + _stats = psutil.net_io_counters(pernic=True) - for interface_config in config: - interface = interface_config.keys()[0] + log.debug('_stats {}'.format(_stats)) + for interface in _config.get('interfaces', {}): if interface in _stats: + interface_config = _config['interfaces'][interface] _if_stats = _stats[interface] _diff = False for attr in __attrs: - if attr in interface_config[interface]: - if 'type' in interface_config[interface] and \ - interface_config[interface]['type'] == 'equal': + if attr in interface_config: + if 'type' in interface_config and \ + interface_config['type'] == 'equal': if getattr(_if_stats, attr, None) == \ - int(interface_config[interface][attr]): + int(interface_config[attr]): _diff = True - elif 'type' in interface_config[interface] and \ - interface_config[interface]['type'] == 'greater': + elif 'type' in interface_config and \ + interface_config['type'] == 'greater': if getattr(_if_stats, attr, None) > \ - int(interface_config[interface][attr]): + int(interface_config[attr]): _diff = True else: log.debug('attr {}'.format(getattr(_if_stats, attr, None))) else: if getattr(_if_stats, attr, None) == \ - int(interface_config[interface][attr]): + int(interface_config[attr]): _diff = True if _diff: ret.append({'interface': interface, diff --git a/salt/beacons/network_settings.py b/salt/beacons/network_settings.py index 78c387b2f2..ad545f19a3 100644 --- a/salt/beacons/network_settings.py +++ b/salt/beacons/network_settings.py @@ -18,6 +18,8 @@ import ast import re import salt.loader import logging +from salt.ext.six.moves import map + log = logging.getLogger(__name__) __virtual_name__ = 'network_settings' @@ -45,22 +47,23 @@ def __virtual__(): return False -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' - if not isinstance(config, dict): + if not isinstance(config, list): return False, ('Configuration for network_settings ' - 'beacon must be a dictionary.') + 'beacon must be a list.') else: - for item in config: - if item == 'coalesce': - continue - if not isinstance(config[item], dict): - return False, ('Configuration for network_settings beacon must be a ' - 'dictionary of dictionaries.') + _config = {} + list(map(_config.update, config)) + + for item in _config.get('interfaces', {}): + if not isinstance(_config['interfaces'][item], dict): + return False, ('Configuration for network_settings beacon ' + ' must be a list of dictionaries.') else: - if not all(j in ATTRS for j in config[item]): + if not all(j in ATTRS for j in _config['interfaces'][item]): return False, ('Invalid configuration item in Beacon ' 'configuration.') return True, 'Valid beacon configuration' @@ -75,9 +78,10 @@ def _copy_interfaces_info(interfaces): for interface in interfaces: _interface_attrs_cpy = set() for attr in ATTRS: - attr_dict = Hashabledict() - attr_dict[attr] = repr(interfaces[interface][attr]) - _interface_attrs_cpy.add(attr_dict) + if attr in interfaces[interface]: + attr_dict = 
Hashabledict() + attr_dict[attr] = repr(interfaces[interface][attr]) + _interface_attrs_cpy.add(attr_dict) ret[interface] = _interface_attrs_cpy return ret @@ -89,8 +93,8 @@ def beacon(config): By default, the beacon will emit when there is a value change on one of the settings on watch. The config also support the onvalue parameter for each - setting, which instruct the beacon to only emit if the setting changed to the - value defined. + setting, which instruct the beacon to only emit if the setting changed to + the value defined. Example Config @@ -98,12 +102,13 @@ def beacon(config): beacons: network_settings: - eth0: - ipaddr: - promiscuity: - onvalue: 1 - eth1: - linkmode: + - interfaces: + - eth0: + ipaddr: + promiscuity: + onvalue: 1 + - eth1: + linkmode: The config above will check for value changes on eth0 ipaddr and eth1 linkmode. It will also emit if the promiscuity value changes to 1. @@ -118,12 +123,16 @@ def beacon(config): beacons: network_settings: - coalesce: True - eth0: - ipaddr: - promiscuity: + - coalesce: True + - interfaces: + - eth0: + ipaddr: + promiscuity: ''' + _config = {} + list(map(_config.update, config)) + ret = [] interfaces = [] expanded_config = {} @@ -137,45 +146,51 @@ def beacon(config): if not LAST_STATS: LAST_STATS = _stats - if 'coalesce' in config and config['coalesce']: + if 'coalesce' in _config and _config['coalesce']: coalesce = True changes = {} + log.debug('_stats {}'.format(_stats)) # Get list of interfaces included in config that are registered in the # system, including interfaces defined by wildcards (eth*, wlan*) - for item in config: - if item == 'coalesce': - continue - if item in _stats: - interfaces.append(item) + for interface in _config.get('interfaces', {}): + if interface in _stats: + interfaces.append(interface) else: # No direct match, try with * wildcard regexp - interface_regexp = item.replace('*', '[0-9]+') + interface_regexp = interface.replace('*', '[0-9]+') for interface in _stats: match = re.search(interface_regexp, interface) if match: interfaces.append(match.group()) - expanded_config[match.group()] = config[item] + expanded_config[match.group()] = config['interfaces'][interface] if expanded_config: config.update(expanded_config) + # config updated so update _config + list(map(_config.update, config)) + + log.debug('interfaces {}'.format(interfaces)) for interface in interfaces: _send_event = False _diff_stats = _stats[interface] - LAST_STATS[interface] _ret_diff = {} + interface_config = _config['interfaces'][interface] + log.debug('_diff_stats {}'.format(_diff_stats)) if _diff_stats: _diff_stats_dict = {} LAST_STATS[interface] = _stats[interface] for item in _diff_stats: _diff_stats_dict.update(item) - for attr in config[interface]: + for attr in interface_config: if attr in _diff_stats_dict: config_value = None - if config[interface][attr] and 'onvalue' in config[interface][attr]: - config_value = config[interface][attr]['onvalue'] + if interface_config[attr] and \ + 'onvalue' in interface_config[attr]: + config_value = interface_config[attr]['onvalue'] new_value = ast.literal_eval(_diff_stats_dict[attr]) if not config_value or config_value == new_value: _send_event = True @@ -185,7 +200,9 @@ def beacon(config): if coalesce: changes[interface] = _ret_diff else: - ret.append({'tag': interface, 'interface': interface, 'change': _ret_diff}) + ret.append({'tag': interface, + 'interface': interface, + 'change': _ret_diff}) if coalesce and changes: grains_info = salt.loader.grains(__opts__, True) diff --git 
a/salt/beacons/pkg.py b/salt/beacons/pkg.py index 25cbddc9e7..b57b61f6a0 100644 --- a/salt/beacons/pkg.py +++ b/salt/beacons/pkg.py @@ -21,15 +21,25 @@ def __virtual__(): return __virtualname__ if 'pkg.upgrade_available' in __salt__ else False -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' # Configuration for pkg beacon should be a list - if not isinstance(config, dict): - return False, ('Configuration for pkg beacon must be a dictionary.') - if 'pkgs' not in config: - return False, ('Configuration for pkg beacon requires list of pkgs.') + if not isinstance(config, list): + return False, ('Configuration for pkg beacon must be a list.') + + # Configuration for pkg beacon should contain pkgs + pkgs_found = False + pkgs_not_list = False + for config_item in config: + if 'pkgs' in config_item: + pkgs_found = True + if isinstance(config_item['pkgs'], list): + pkgs_not_list = True + + if not pkgs_found or not pkgs_not_list: + return False, 'Configuration for pkg beacon requires list of pkgs.' return True, 'Valid beacon configuration' @@ -48,14 +58,16 @@ def beacon(config): - refresh: True ''' ret = [] - _validate = __validate__(config) - if not _validate[0]: - return ret _refresh = False - if 'refresh' in config and config['refresh']: - _refresh = True - for pkg in config['pkgs']: + pkgs = [] + for config_item in config: + if 'pkgs' in config_item: + pkgs += config_item['pkgs'] + if 'refresh' in config and config['refresh']: + _refresh = True + + for pkg in pkgs: _installed = __salt__['pkg.version'](pkg) _latest = __salt__['pkg.latest_version'](pkg, refresh=_refresh) if _installed and _latest: diff --git a/salt/beacons/proxy_example.py b/salt/beacons/proxy_example.py index 329dae8671..1a93db93c8 100644 --- a/salt/beacons/proxy_example.py +++ b/salt/beacons/proxy_example.py @@ -15,6 +15,7 @@ import logging # Import salt libs import salt.utils.http +from salt.ext.six.moves import map # Important: If used with salt-proxy # this is required for the beacon to load!!! @@ -33,12 +34,12 @@ def __virtual__(): return True -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' - if not isinstance(config, dict): - return False, ('Configuration for rest_example beacon must be a dictionary.') + if not isinstance(config, list): + return False, ('Configuration for proxy_example beacon must be a list.') return True, 'Valid beacon configuration' @@ -51,7 +52,7 @@ def beacon(config): beacons: proxy_example: - endpoint: beacon + - endpoint: beacon ''' # Important!!! # Although this toy example makes an HTTP call @@ -59,8 +60,11 @@ def beacon(config): # please be advised that doing CPU or IO intensive # operations in this method will cause the beacon loop # to block. 
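In the proxy_example beacon below, that same flattening reduces the list-style config to a single endpoint entry, which is appended to the proxy URL. A small sketch with an invented proxy URL (illustration only, not part of the change):

.. code-block:: python

    # Sketch only -- the proxy URL is a placeholder.
    __opts__ = {'proxy': {'url': 'http://192.0.2.10/rest_sample/'}}
    config = [{'endpoint': 'beacon'}]

    _config = {}
    list(map(_config.update, config))

    beacon_url = '{0}{1}'.format(__opts__['proxy']['url'], _config['endpoint'])
    # beacon_url == 'http://192.0.2.10/rest_sample/beacon'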
+ _config = {} + list(map(_config.update, config)) + beacon_url = '{0}{1}'.format(__opts__['proxy']['url'], - config['endpoint']) + _config['endpoint']) ret = salt.utils.http.query(beacon_url, decode_type='json', decode=True) diff --git a/salt/beacons/ps.py b/salt/beacons/ps.py index 38b1a91b04..b1f18431f4 100644 --- a/salt/beacons/ps.py +++ b/salt/beacons/ps.py @@ -14,6 +14,9 @@ try: HAS_PSUTIL = True except ImportError: HAS_PSUTIL = False + +from salt.ext.six.moves import map + # pylint: enable=import-error log = logging.getLogger(__name__) # pylint: disable=invalid-name @@ -23,17 +26,27 @@ __virtualname__ = 'ps' def __virtual__(): if not HAS_PSUTIL: - return (False, 'cannot load network_info beacon: psutil not available') + return (False, 'cannot load ps beacon: psutil not available') return __virtualname__ -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' # Configuration for ps beacon should be a list of dicts - if not isinstance(config, dict): - return False, ('Configuration for ps beacon must be a dictionary.') + if not isinstance(config, list): + return False, ('Configuration for ps beacon must be a list.') + else: + _config = {} + list(map(_config.update, config)) + + if 'processes' not in _config: + return False, ('Configuration for ps beacon requires processes.') + else: + if not isinstance(_config['processes'], dict): + return False, ('Processes for ps beacon must be a dictionary.') + return True, 'Valid beacon configuration' @@ -47,8 +60,9 @@ def beacon(config): beacons: ps: - - salt-master: running - - mysql: stopped + - processes: + salt-master: running + mysql: stopped The config above sets up beacons to check that processes are running or stopped. @@ -60,19 +74,21 @@ def beacon(config): if _name not in procs: procs.append(_name) - for entry in config: - for process in entry: - ret_dict = {} - if entry[process] == 'running': - if process in procs: - ret_dict[process] = 'Running' - ret.append(ret_dict) - elif entry[process] == 'stopped': - if process not in procs: - ret_dict[process] = 'Stopped' - ret.append(ret_dict) - else: - if process not in procs: - ret_dict[process] = False - ret.append(ret_dict) + _config = {} + list(map(_config.update, config)) + + for process in _config.get('processes', {}): + ret_dict = {} + if _config['processes'][process] == 'running': + if process in procs: + ret_dict[process] = 'Running' + ret.append(ret_dict) + elif _config['processes'][process] == 'stopped': + if process not in procs: + ret_dict[process] = 'Stopped' + ret.append(ret_dict) + else: + if process not in procs: + ret_dict[process] = False + ret.append(ret_dict) return ret diff --git a/salt/beacons/salt_proxy.py b/salt/beacons/salt_proxy.py index 8ad6acd574..f542d8b0dd 100644 --- a/salt/beacons/salt_proxy.py +++ b/salt/beacons/salt_proxy.py @@ -9,6 +9,7 @@ # Import python libs from __future__ import absolute_import import logging +from salt.ext.six.moves import map log = logging.getLogger(__name__) @@ -20,9 +21,7 @@ def _run_proxy_processes(proxies): aren't running ''' ret = [] - for prox_ in proxies: - # prox_ is a dict - proxy = prox_.keys()[0] + for proxy in proxies: result = {} if not __salt__['salt_proxy.is_running'](proxy)['result']: __salt__['salt_proxy.configure_proxy'](proxy, start=True) @@ -35,7 +34,29 @@ def _run_proxy_processes(proxies): return ret -def beacon(proxies): +def validate(config): + ''' + Validate the beacon configuration + ''' + # Configuration for adb beacon should be a dictionary with states array + if not 
isinstance(config, list): + log.info('Configuration for salt_proxy beacon must be a list.') + return False, ('Configuration for salt_proxy beacon must be a list.') + + else: + _config = {} + list(map(_config.update, config)) + + if 'proxies' not in _config: + return False, ('Configuration for salt_proxy' + ' beacon requires proxies.') + else: + if not isinstance(_config['proxies'], dict): + return False, ('Proxies for salt_proxy ' + 'beacon must be a dictionary.') + + +def beacon(config): ''' Handle configured proxies @@ -43,9 +64,13 @@ def beacon(proxies): beacons: salt_proxy: - - p8000: {} - - p8001: {} + - proxies: + p8000: {} + p8001: {} ''' log.trace('salt proxy beacon called') - return _run_proxy_processes(proxies) + _config = {} + list(map(_config.update, config)) + + return _run_proxy_processes(_config['proxies']) diff --git a/salt/beacons/sensehat.py b/salt/beacons/sensehat.py index 65f2ff134b..0784bb2bfd 100644 --- a/salt/beacons/sensehat.py +++ b/salt/beacons/sensehat.py @@ -11,6 +11,7 @@ of a Raspberry Pi. from __future__ import absolute_import import logging import re +from salt.ext.six.moves import map log = logging.getLogger(__name__) @@ -19,14 +20,22 @@ def __virtual__(): return 'sensehat.get_pressure' in __salt__ -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' - # Configuration for sensehat beacon should be a dict - if not isinstance(config, dict): + # Configuration for sensehat beacon should be a list + if not isinstance(config, list): return False, ('Configuration for sensehat beacon ' - 'must be a dictionary.') + 'must be a list.') + else: + _config = {} + list(map(_config.update, config)) + + if 'sensors' not in _config: + return False, ('Configuration for sensehat' + ' beacon requires sensors.') + return True, 'Valid beacon configuration' @@ -48,10 +57,11 @@ def beacon(config): beacons: sensehat: - humidity: 70% - temperature: [20, 40] - temperature_from_pressure: 40 - pressure: 1500 + - sensors: + humidity: 70% + temperature: [20, 40] + temperature_from_pressure: 40 + pressure: 1500 ''' ret = [] min_default = { @@ -60,13 +70,16 @@ def beacon(config): 'temperature': '-273.15' } - for sensor in config: + _config = {} + list(map(_config.update, config)) + + for sensor in _config.get('sensors', {}): sensor_function = 'sensehat.get_{0}'.format(sensor) if sensor_function not in __salt__: log.error('No sensor for meassuring {0}. 
Skipping.'.format(sensor)) continue - sensor_config = config[sensor] + sensor_config = _config['sensors'][sensor] if isinstance(sensor_config, list): sensor_min = str(sensor_config[0]) sensor_max = str(sensor_config[1]) diff --git a/salt/beacons/service.py b/salt/beacons/service.py index 83ab484494..701f8e4cd0 100644 --- a/salt/beacons/service.py +++ b/salt/beacons/service.py @@ -9,19 +9,35 @@ from __future__ import absolute_import import os import logging import time +from salt.ext.six.moves import map log = logging.getLogger(__name__) # pylint: disable=invalid-name LAST_STATUS = {} +__virtualname__ = 'service' -def __validate__(config): + +def validate(config): ''' Validate the beacon configuration ''' # Configuration for service beacon should be a list of dicts - if not isinstance(config, dict): - return False, ('Configuration for service beacon must be a dictionary.') + if not isinstance(config, list): + return False, ('Configuration for service beacon must be a list.') + else: + _config = {} + list(map(_config.update, config)) + + if 'services' not in _config: + return False, ('Configuration for service beacon' + ' requires services.') + else: + for config_item in _config['services']: + if not isinstance(_config['services'][config_item], dict): + return False, ('Configuration for service beacon must ' + 'be a list of dictionaries.') + return True, 'Valid beacon configuration' @@ -35,8 +51,9 @@ def beacon(config): beacons: service: - salt-master: - mysql: + - services: + salt-master: + mysql: The config above sets up beacons to check for the salt-master and mysql services. @@ -79,14 +96,21 @@ def beacon(config): beacons: service: - nginx: - onchangeonly: True - delay: 30 - uncleanshutdown: /run/nginx.pid + - services: + nginx: + onchangeonly: True + delay: 30 + uncleanshutdown: /run/nginx.pid ''' ret = [] - for service in config: + _config = {} + list(map(_config.update, config)) + + for service in _config.get('services', {}): ret_dict = {} + + service_config = _config['services'][service] + ret_dict[service] = {'running': __salt__['service.status'](service)} ret_dict['service_name'] = service ret_dict['tag'] = service @@ -95,40 +119,43 @@ def beacon(config): # If no options is given to the service, we fall back to the defaults # assign a False value to oncleanshutdown and onchangeonly. Those # key:values are then added to the service dictionary. 
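As the comment above notes, a service listed with no options falls back to a default option set. Under the new services layout that resolves roughly as follows; the service name is a placeholder, and setdefault() is used here only for brevity -- the hunk below spells out the same membership checks explicitly.

.. code-block:: python

    # Sketch only -- 'nginx' is a placeholder service name.
    _config = {'services': {'nginx': None}}

    service_config = _config['services']['nginx'] or {}
    for key, default in (('oncleanshutdown', False),
                         ('emitatstartup', True),
                         ('onchangeonly', False),
                         ('delay', 0)):
        service_config.setdefault(key, default)

    # service_config == {'oncleanshutdown': False, 'emitatstartup': True,
    #                    'onchangeonly': False, 'delay': 0}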
- if 'oncleanshutdown' not in config[service]: - config[service]['oncleanshutdown'] = False - if 'emitatstartup' not in config[service]: - config[service]['emitatstartup'] = True - if 'onchangeonly' not in config[service]: - config[service]['onchangeonly'] = False - if 'delay' not in config[service]: - config[service]['delay'] = 0 + if not service_config: + service_config = {} + if 'oncleanshutdown' not in service_config: + service_config['oncleanshutdown'] = False + if 'emitatstartup' not in service_config: + service_config['emitatstartup'] = True + if 'onchangeonly' not in service_config: + service_config['onchangeonly'] = False + if 'delay' not in service_config: + service_config['delay'] = 0 # We only want to report the nature of the shutdown # if the current running status is False # as well as if the config for the beacon asks for it - if 'uncleanshutdown' in config[service] and not ret_dict[service]['running']: - filename = config[service]['uncleanshutdown'] + if 'uncleanshutdown' in service_config and not ret_dict[service]['running']: + filename = service_config['uncleanshutdown'] ret_dict[service]['uncleanshutdown'] = True if os.path.exists(filename) else False - if 'onchangeonly' in config[service] and config[service]['onchangeonly'] is True: + if 'onchangeonly' in service_config and service_config['onchangeonly'] is True: if service not in LAST_STATUS: LAST_STATUS[service] = ret_dict[service] - if config[service]['delay'] > 0: + if service_config['delay'] > 0: LAST_STATUS[service]['time'] = currtime - elif not config[service]['emitatstartup']: + elif not service_config['emitatstartup']: continue else: ret.append(ret_dict) + if LAST_STATUS[service]['running'] != ret_dict[service]['running']: LAST_STATUS[service] = ret_dict[service] - if config[service]['delay'] > 0: + if service_config['delay'] > 0: LAST_STATUS[service]['time'] = currtime else: ret.append(ret_dict) if 'time' in LAST_STATUS[service]: elapsedtime = int(round(currtime - LAST_STATUS[service]['time'])) - if elapsedtime > config[service]['delay']: + if elapsedtime > service_config['delay']: del LAST_STATUS[service]['time'] ret.append(ret_dict) else: diff --git a/salt/beacons/sh.py b/salt/beacons/sh.py index c55462bd5d..7375a1352a 100644 --- a/salt/beacons/sh.py +++ b/salt/beacons/sh.py @@ -9,6 +9,7 @@ import time # Import salt libs import salt.utils +import salt.utils.path import salt.utils.vt __virtualname__ = 'sh' @@ -21,7 +22,7 @@ def __virtual__(): ''' Only load if strace is installed ''' - return __virtualname__ if salt.utils.which('strace') else False + return __virtualname__ if salt.utils.path.which('strace') else False def _get_shells(): @@ -40,13 +41,13 @@ def _get_shells(): return __context__['sh.shells'] -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' # Configuration for sh beacon should be a list of dicts - if not isinstance(config, dict): - return False, ('Configuration for sh beacon must be a dictionary.') + if not isinstance(config, list): + return False, ('Configuration for sh beacon must be a list.') return True, 'Valid beacon configuration' @@ -57,7 +58,7 @@ def beacon(config): .. code-block:: yaml beacons: - sh: {} + sh: [] ''' ret = [] pkey = 'sh.vt' diff --git a/salt/beacons/status.py b/salt/beacons/status.py index bf1f5b3688..c06793a039 100644 --- a/salt/beacons/status.py +++ b/salt/beacons/status.py @@ -15,7 +15,7 @@ the minion config: .. 
code-block:: yaml beacons: - status: {} + status: [] By default, all of the information from the following execution module functions will be returned: @@ -96,19 +96,19 @@ import datetime import salt.exceptions # Import salt libs -import salt.utils +import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = 'status' -def __validate__(config): +def validate(config): ''' Validate the the config is a dict ''' - if not isinstance(config, dict): - return False, ('Configuration for status beacon must be a dictionary.') + if not isinstance(config, list): + return False, ('Configuration for status beacon must be a list.') return True, 'Valid beacon configuration' @@ -123,7 +123,7 @@ def beacon(config): log.debug(config) ctime = datetime.datetime.utcnow().isoformat() ret = {} - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return [{ 'tag': ctime, 'data': ret, diff --git a/salt/beacons/telegram_bot_msg.py b/salt/beacons/telegram_bot_msg.py index 2202641071..19ca7ea37b 100644 --- a/salt/beacons/telegram_bot_msg.py +++ b/salt/beacons/telegram_bot_msg.py @@ -1,11 +1,15 @@ # -*- coding: utf-8 -*- ''' Beacon to emit Telegram messages + +Requires the python-telegram-bot library + ''' # Import Python libs from __future__ import absolute_import import logging +from salt.ext.six.moves import map # Import 3rd Party libs try: @@ -28,20 +32,23 @@ def __virtual__(): return False -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' - if not isinstance(config, dict): + if not isinstance(config, list): return False, ('Configuration for telegram_bot_msg ' - 'beacon must be a dictionary.') + 'beacon must be a list.') - if not all(config.get(required_config) + _config = {} + list(map(_config.update, config)) + + if not all(_config.get(required_config) for required_config in ['token', 'accept_from']): return False, ('Not all required configuration for ' 'telegram_bot_msg are set.') - if not isinstance(config.get('accept_from'), list): + if not isinstance(_config.get('accept_from'), list): return False, ('Configuration for telegram_bot_msg, ' 'accept_from must be a list of usernames.') @@ -57,18 +64,22 @@ def beacon(config): beacons: telegram_bot_msg: - token: "" - accept_from: + - token: "" + - accept_from: - "" - interval: 10 + - interval: 10 ''' + + _config = {} + list(map(_config.update, config)) + log.debug('telegram_bot_msg beacon starting') ret = [] output = {} output['msgs'] = [] - bot = telegram.Bot(config['token']) + bot = telegram.Bot(_config['token']) updates = bot.get_updates(limit=100, timeout=0, network_delay=10) log.debug('Num updates: {0}'.format(len(updates))) @@ -83,7 +94,7 @@ def beacon(config): if update.update_id > latest_update_id: latest_update_id = update.update_id - if message.chat.username in config['accept_from']: + if message.chat.username in _config['accept_from']: output['msgs'].append(message.to_dict()) # mark in the server that previous messages are processed diff --git a/salt/beacons/twilio_txt_msg.py b/salt/beacons/twilio_txt_msg.py index 29bb6eab7f..3034add116 100644 --- a/salt/beacons/twilio_txt_msg.py +++ b/salt/beacons/twilio_txt_msg.py @@ -6,10 +6,17 @@ Beacon to emit Twilio text messages # Import Python libs from __future__ import absolute_import import logging +from salt.ext.six.moves import map # Import 3rd Party libs try: - from twilio.rest import TwilioRestClient + import twilio + # Grab version, ensure elements are ints + twilio_version = tuple([int(x) for x in twilio.__version_info__]) + if 
twilio_version > (5, ): + from twilio.rest import Client as TwilioRestClient + else: + from twilio.rest import TwilioRestClient HAS_TWILIO = True except ImportError: HAS_TWILIO = False @@ -26,14 +33,24 @@ def __virtual__(): return False -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' # Configuration for twilio_txt_msg beacon should be a list of dicts - if not isinstance(config, dict): + if not isinstance(config, list): return False, ('Configuration for twilio_txt_msg beacon ' - 'must be a dictionary.') + 'must be a list.') + else: + _config = {} + list(map(_config.update, config)) + + if not all(x in _config for x in ('account_sid', + 'auth_token', + 'twilio_number')): + return False, ('Configuration for twilio_txt_msg beacon ' + 'must contain account_sid, auth_token ' + 'and twilio_number items.') return True, 'Valid beacon configuration' @@ -46,20 +63,26 @@ def beacon(config): beacons: twilio_txt_msg: - account_sid: "" - auth_token: "" - twilio_number: "+15555555555" - interval: 10 + - account_sid: "" + - auth_token: "" + - twilio_number: "+15555555555" + - interval: 10 ''' log.trace('twilio_txt_msg beacon starting') + + _config = {} + list(map(_config.update, config)) + ret = [] - if not all([config['account_sid'], config['auth_token'], config['twilio_number']]): + if not all([_config['account_sid'], + _config['auth_token'], + _config['twilio_number']]): return ret output = {} output['texts'] = [] - client = TwilioRestClient(config['account_sid'], config['auth_token']) - messages = client.messages.list(to=config['twilio_number']) + client = TwilioRestClient(_config['account_sid'], _config['auth_token']) + messages = client.messages.list(to=_config['twilio_number']) log.trace('Num messages: {0}'.format(len(messages))) if len(messages) < 1: log.trace('Twilio beacon has no texts') diff --git a/salt/beacons/wtmp.py b/salt/beacons/wtmp.py index 1c0788d848..c10a335e0c 100644 --- a/salt/beacons/wtmp.py +++ b/salt/beacons/wtmp.py @@ -5,7 +5,7 @@ Beacon to fire events at login of users as registered in the wtmp file .. code-block:: yaml beacons: - wtmp: {} + wtmp: [] ''' # Import Python libs @@ -16,6 +16,9 @@ import struct # Import salt libs import salt.utils.files +# Import 3rd-party libs +from salt.ext import six + __virtualname__ = 'wtmp' WTMP = '/var/log/wtmp' FMT = 'hi32s4s32s256shhiii4i20x' @@ -52,13 +55,13 @@ def _get_loc(): return __context__[LOC_KEY] -def __validate__(config): +def validate(config): ''' Validate the beacon configuration ''' # Configuration for wtmp beacon should be a list of dicts - if not isinstance(config, dict): - return False, ('Configuration for wtmp beacon must be a dictionary.') + if not isinstance(config, list): + return False, ('Configuration for wtmp beacon must be a list.') return True, 'Valid beacon configuration' @@ -70,7 +73,7 @@ def beacon(config): .. code-block:: yaml beacons: - wtmp: {} + wtmp: [] ''' ret = [] with salt.utils.files.fopen(WTMP, 'rb') as fp_: @@ -90,7 +93,7 @@ def beacon(config): event = {} for ind, field in enumerate(FIELDS): event[field] = pack[ind] - if isinstance(event[field], str): + if isinstance(event[field], six.string_types): event[field] = event[field].strip('\x00') ret.append(event) return ret diff --git a/salt/cache/redis_cache.py b/salt/cache/redis_cache.py index fba83e9792..fc6c9da251 100644 --- a/salt/cache/redis_cache.py +++ b/salt/cache/redis_cache.py @@ -67,6 +67,29 @@ host: ``localhost`` port: ``6379`` The Redis server port. 
+cluster_mode: ``False``
+    Whether cluster_mode is enabled or not
+
+cluster.startup_nodes:
+    A list of host, port dictionaries pointing to cluster members. At least one is required
+    but multiple nodes are better
+
+    .. code-block:: yaml
+
+        cache.redis.cluster.startup_nodes
+          - host: redis-member-1
+            port: 6379
+          - host: redis-member-2
+            port: 6379
+
+cluster.skip_full_coverage_check: ``False``
+    Some cluster providers restrict certain redis commands such as CONFIG for enhanced security.
+    Set this option to true to skip checks that require advanced privileges.
+
+    .. note::
+
+        Most cloud hosted redis clusters will require this to be set to ``True``
+
 db: ``'0'``
     The database index.
@@ -89,6 +112,24 @@ Configuration Example:
     cache.redis.bank_keys_prefix: #BANKEYS
     cache.redis.key_prefix: #KEY
     cache.redis.separator: '@'
+
+Cluster Configuration Example:
+
+.. code-block:: yaml
+
+    cache.redis.cluster_mode: true
+    cache.redis.cluster.skip_full_coverage_check: true
+    cache.redis.cluster.startup_nodes:
+      - host: redis-member-1
+        port: 6379
+      - host: redis-member-2
+        port: 6379
+    cache.redis.db: '0'
+    cache.redis.password: my pass
+    cache.redis.bank_prefix: #BANK
+    cache.redis.bank_keys_prefix: #BANKEYS
+    cache.redis.key_prefix: #KEY
+    cache.redis.separator: '@'
 '''
 from __future__ import absolute_import
@@ -105,6 +146,12 @@ try:
 except ImportError:
     HAS_REDIS = False
+try:
+    from rediscluster import StrictRedisCluster
+    HAS_REDIS_CLUSTER = True
+except ImportError:
+    HAS_REDIS_CLUSTER = False
+
 # Import salt
 from salt.ext.six.moves import range
 from salt.exceptions import SaltCacheError
@@ -115,7 +162,7 @@ from salt.exceptions import SaltCacheError
 __virtualname__ = 'redis'
 __func_alias__ = {
-    'list_': 'list'
+    'ls': 'list'
 }
 log = logging.getLogger(__file__)
@@ -135,9 +182,13 @@ REDIS_SERVER = None
 def __virtual__():
     '''
     The redis library must be installed for this module to work.
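Pulling the new cluster support together: when cache.redis.cluster_mode is set, the connection helper later in this file builds its client from redis-py-cluster, otherwise it falls back to a plain StrictRedis connection built from the same option dict. A rough sketch of how those options drive the client choice; the node addresses mirror the docstring example above and the sketch is not a drop-in for the helper itself.

.. code-block:: python

    # Sketch only -- node addresses and credentials are placeholders.
    opts = {
        'cluster_mode': True,
        'startup_nodes': [{'host': 'redis-member-1', 'port': 6379},
                          {'host': 'redis-member-2', 'port': 6379}],
        'skip_full_coverage_check': True,
        'host': 'localhost', 'port': 6379, 'db': '0', 'password': '',
    }

    if opts['cluster_mode']:
        from rediscluster import StrictRedisCluster
        server = StrictRedisCluster(
            startup_nodes=opts['startup_nodes'],
            skip_full_coverage_check=opts['skip_full_coverage_check'],
            decode_responses=True)
    else:
        import redis
        server = redis.StrictRedis(opts['host'], opts['port'],
                                   db=opts['db'], password=opts['password'])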
+ + The redis redis cluster library must be installed if cluster_mode is True ''' if not HAS_REDIS: return (False, "Please install the python-redis package.") + if not HAS_REDIS_CLUSTER and _get_redis_cache_opts()['cluster_mode']: + return (False, "Please install the redis-py-cluster package.") return __virtualname__ @@ -145,6 +196,9 @@ def __virtual__(): # helper functions -- will not be exported # ----------------------------------------------------------------------------- +def init_kwargs(kwargs): + return {} + def _get_redis_cache_opts(): ''' @@ -154,7 +208,10 @@ def _get_redis_cache_opts(): 'host': __opts__.get('cache.redis.host', 'localhost'), 'port': __opts__.get('cache.redis.port', 6379), 'db': __opts__.get('cache.redis.db', '0'), - 'password': __opts__.get('cache.redis.password', '') + 'password': __opts__.get('cache.redis.password', ''), + 'cluster_mode': __opts__.get('cache.redis.cluster_mode', False), + 'startup_nodes': __opts__.get('cache.redis.cluster.startup_nodes', {}), + 'skip_full_coverage_check': __opts__.get('cache.redis.cluster.skip_full_coverage_check', False), } @@ -168,10 +225,16 @@ def _get_redis_server(opts=None): return REDIS_SERVER if not opts: opts = _get_redis_cache_opts() - REDIS_SERVER = redis.Redis(opts['host'], - opts['port'], - db=opts['db'], - password=opts['password']) + + if opts['cluster_mode']: + REDIS_SERVER = StrictRedisCluster(startup_nodes=opts['startup_nodes'], + skip_full_coverage_check=opts['skip_full_coverage_check'], + decode_responses=True) + else: + REDIS_SERVER = redis.StrictRedis(opts['host'], + opts['port'], + db=opts['db'], + password=opts['password']) return REDIS_SERVER @@ -415,7 +478,7 @@ def flush(bank, key=None): return True -def list_(bank): +def ls(bank): ''' Lists entries stored in the specified bank. ''' diff --git a/salt/cli/batch.py b/salt/cli/batch.py index 3617b717a5..55880c3b83 100644 --- a/salt/cli/batch.py +++ b/salt/cli/batch.py @@ -11,14 +11,14 @@ import copy from datetime import datetime, timedelta # Import salt libs +import salt.utils # Can be removed once print_cli is moved import salt.client import salt.output import salt.exceptions -from salt.utils import print_cli # Import 3rd-party libs # pylint: disable=import-error,no-name-in-module,redefined-builtin -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: enable=import-error,no-name-in-module,redefined-builtin import logging @@ -73,7 +73,7 @@ class Batch(object): m = next(six.iterkeys(ret)) except StopIteration: if not self.quiet: - print_cli('No minions matched the target.') + salt.utils.print_cli('No minions matched the target.') break if m is not None: fret.add(m) @@ -95,7 +95,7 @@ class Batch(object): return int(self.opts['batch']) except ValueError: if not self.quiet: - print_cli('Invalid batch data sent: {0}\nData must be in the ' + salt.utils.print_cli('Invalid batch data sent: {0}\nData must be in the ' 'form of %10, 10% or 3'.format(self.opts['batch'])) def __update_wait(self, wait): @@ -146,7 +146,7 @@ class Batch(object): # We already know some minions didn't respond to the ping, so inform # the user we won't be attempting to run a job on them for down_minion in self.down_minions: - print_cli('Minion {0} did not respond. No job will be sent.'.format(down_minion)) + salt.utils.print_cli('Minion {0} did not respond. 
No job will be sent.'.format(down_minion)) # Iterate while we still have things to execute while len(ret) < len(self.minions): @@ -171,7 +171,7 @@ class Batch(object): if next_: if not self.quiet: - print_cli('\nExecuting run on {0}\n'.format(sorted(next_))) + salt.utils.print_cli('\nExecuting run on {0}\n'.format(sorted(next_))) # create a new iterator for this batch of minions new_iter = self.local.cmd_iter_no_block( *args, @@ -218,14 +218,14 @@ class Batch(object): if part['data']['id'] in minion_tracker[queue]['minions']: minion_tracker[queue]['minions'].remove(part['data']['id']) else: - print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(part['id'])) + salt.utils.print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(part['id'])) else: parts.update(part) for id in part: if id in minion_tracker[queue]['minions']: minion_tracker[queue]['minions'].remove(id) else: - print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(id)) + salt.utils.print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(id)) except StopIteration: # if a iterator is done: # - set it to inactive diff --git a/salt/cli/call.py b/salt/cli/call.py index b7cc235deb..3fb49348e8 100644 --- a/salt/cli/call.py +++ b/salt/cli/call.py @@ -1,16 +1,15 @@ # -*- coding: utf-8 -*- -from __future__ import print_function -from __future__ import absolute_import +from __future__ import absolute_import, print_function import os -from salt.utils import parsers +import salt.utils.parsers from salt.utils.verify import verify_log from salt.config import _expand_glob_path import salt.cli.caller import salt.defaults.exitcodes -class SaltCall(parsers.SaltCallOptionParser): +class SaltCall(salt.utils.parsers.SaltCallOptionParser): ''' Used to locally execute a salt command ''' diff --git a/salt/cli/caller.py b/salt/cli/caller.py index 10938a34cb..75025ea473 100644 --- a/salt/cli/caller.py +++ b/salt/cli/caller.py @@ -20,19 +20,17 @@ import salt.minion import salt.output import salt.payload import salt.transport +import salt.utils # Can be removed once print_cli, activate_profile, and output_profile are moved import salt.utils.args import salt.utils.files import salt.utils.jid +import salt.utils.kinds as kinds import salt.utils.minion import salt.defaults.exitcodes -from salt.log import LOG_LEVELS -from salt.utils import is_windows -from salt.utils import print_cli -from salt.utils import kinds -from salt.utils import activate_profile -from salt.utils import output_profile -from salt.utils.process import MultiprocessingProcess from salt.cli import daemons +from salt.log import LOG_LEVELS +from salt.utils.platform import is_windows +from salt.utils.process import MultiprocessingProcess try: from raet import raeting, nacling @@ -47,7 +45,7 @@ except ImportError: pass # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Custom exceptions from salt.exceptions import ( @@ -115,7 +113,7 @@ class BaseCaller(object): docs[name] = func.__doc__ for name in sorted(docs): if name.startswith(self.opts.get('fun', '')): - print_cli('{0}:\n{1}\n'.format(name, docs[name])) + salt.utils.print_cli('{0}:\n{1}\n'.format(name, docs[name])) def print_grains(self): ''' @@ -130,14 +128,14 @@ class BaseCaller(object): ''' profiling_enabled = self.opts.get('profiling_enabled', False) try: - pr = activate_profile(profiling_enabled) + pr = salt.utils.activate_profile(profiling_enabled) try: ret = self.call() 
finally: - output_profile(pr, - stats_path=self.opts.get('profiling_path', - '/tmp/stats'), - stop=True) + salt.utils.output_profile( + pr, + stats_path=self.opts.get('profiling_path', '/tmp/stats'), + stop=True) out = ret.get('out', 'nested') if self.opts['print_metadata']: print_ret = ret @@ -165,6 +163,12 @@ class BaseCaller(object): ret['jid'] ) if fun not in self.minion.functions: + docs = self.minion.functions['sys.doc']('{0}*'.format(fun)) + if docs: + docs[fun] = self.minion.functions.missing_fun_string(fun) + ret['out'] = 'nested' + ret['return'] = docs + return ret sys.stderr.write(self.minion.functions.missing_fun_string(fun)) mod_name = fun.split('.')[0] if mod_name in self.minion.function_errors: @@ -205,7 +209,7 @@ class BaseCaller(object): ret['return'] = func(*args, **kwargs) except TypeError as exc: sys.stderr.write('\nPassed invalid arguments: {0}.\n\nUsage:\n'.format(exc)) - print_cli(func.__doc__) + salt.utils.print_cli(func.__doc__) active_level = LOG_LEVELS.get( self.opts['log_level'].lower(), logging.ERROR) if active_level <= logging.DEBUG: diff --git a/salt/cli/cp.py b/salt/cli/cp.py index dd1d17e5e8..d4259df45c 100644 --- a/salt/cli/cp.py +++ b/salt/cli/cp.py @@ -21,7 +21,9 @@ import salt.client import salt.utils.gzip_util import salt.utils.itertools import salt.utils.minions -from salt.utils import parsers, to_bytes +import salt.utils.parsers +import salt.utils.platform +import salt.utils.stringutils from salt.utils.verify import verify_log import salt.output @@ -31,7 +33,7 @@ from salt.ext import six log = logging.getLogger(__name__) -class SaltCPCli(parsers.SaltCPOptionParser): +class SaltCPCli(salt.utils.parsers.SaltCPOptionParser): ''' Run the salt-cp command line client ''' @@ -56,7 +58,7 @@ class SaltCP(object): ''' def __init__(self, opts): self.opts = opts - self.is_windows = salt.utils.is_windows() + self.is_windows = salt.utils.platform.is_windows() def _mode(self, path): if self.is_windows: @@ -152,7 +154,7 @@ class SaltCP(object): index = 1 failed = {} for chunk in reader(fn_, chunk_size=self.opts['salt_cp_chunk_size']): - chunk = base64.b64encode(to_bytes(chunk)) + chunk = base64.b64encode(salt.utils.stringutils.to_bytes(chunk)) append = index > 1 log.debug( 'Copying %s to %starget \'%s\' as %s%s', diff --git a/salt/cli/daemons.py b/salt/cli/daemons.py index a510ad60c1..82828c47bd 100644 --- a/salt/cli/daemons.py +++ b/salt/cli/daemons.py @@ -41,10 +41,11 @@ import salt.log.setup # the try block below bypasses an issue at build time so that modules don't # cause the build to fail from salt.utils import migrations -from salt.utils import kinds +import salt.utils.kinds as kinds try: - from salt.utils import parsers, ip_bracket + from salt.utils import ip_bracket + import salt.utils.parsers from salt.utils.verify import check_user, verify_env, verify_socket except ImportError as exc: if exc.args[0] != 'No module named _msgpack': @@ -109,7 +110,7 @@ class DaemonsMixin(object): # pylint: disable=no-init self.shutdown(error) -class Master(parsers.MasterOptionParser, DaemonsMixin): # pylint: disable=no-init +class Master(salt.utils.parsers.MasterOptionParser, DaemonsMixin): # pylint: disable=no-init ''' Creates a master server ''' @@ -220,7 +221,7 @@ class Master(parsers.MasterOptionParser, DaemonsMixin): # pylint: disable=no-in super(Master, self).shutdown(exitcode, exitmsg) -class Minion(parsers.MinionOptionParser, DaemonsMixin): # pylint: disable=no-init +class Minion(salt.utils.parsers.MinionOptionParser, DaemonsMixin): # pylint: disable=no-init ''' 
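The callers touched above in ``salt/cli/caller.py`` (and below in ``salt/cli/run.py``) keep using the same profiling helpers, only through their fully qualified ``salt.utils`` names now. A rough usage sketch of that pattern; ``do_work()`` is a stand-in for ``self.call()`` / ``runner.run()``, not a real Salt function:

.. code-block:: python

    import salt.utils

    def do_work():
        # placeholder for self.call() or runner.run()
        return {'retcode': 0}

    profiling_enabled = True  # normally self.opts.get('profiling_enabled', False)
    pr = salt.utils.activate_profile(profiling_enabled)
    try:
        ret = do_work()
    finally:
        # write out whatever profiling data was collected
        salt.utils.output_profile(pr, stats_path='/tmp/stats', stop=True)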
Create a minion server ''' @@ -398,7 +399,7 @@ class Minion(parsers.MinionOptionParser, DaemonsMixin): # pylint: disable=no-in # pylint: enable=no-member -class ProxyMinion(parsers.ProxyMinionOptionParser, DaemonsMixin): # pylint: disable=no-init +class ProxyMinion(salt.utils.parsers.ProxyMinionOptionParser, DaemonsMixin): # pylint: disable=no-init ''' Create a proxy minion server ''' @@ -549,7 +550,7 @@ class ProxyMinion(parsers.ProxyMinionOptionParser, DaemonsMixin): # pylint: dis # pylint: enable=no-member -class Syndic(parsers.SyndicOptionParser, DaemonsMixin): # pylint: disable=no-init +class Syndic(salt.utils.parsers.SyndicOptionParser, DaemonsMixin): # pylint: disable=no-init ''' Create a syndic server ''' diff --git a/salt/cli/key.py b/salt/cli/key.py index 1454a2b43c..42622c4078 100644 --- a/salt/cli/key.py +++ b/salt/cli/key.py @@ -3,11 +3,11 @@ from __future__ import print_function from __future__ import absolute_import -from salt.utils import parsers +import salt.utils.parsers from salt.utils.verify import check_user, verify_log -class SaltKey(parsers.SaltKeyOptionParser): +class SaltKey(salt.utils.parsers.SaltKeyOptionParser): ''' Initialize the Salt key manager ''' diff --git a/salt/cli/run.py b/salt/cli/run.py index ed8cf9324c..c780681368 100644 --- a/salt/cli/run.py +++ b/salt/cli/run.py @@ -2,15 +2,14 @@ from __future__ import print_function from __future__ import absolute_import -from salt.utils import parsers -from salt.utils import activate_profile -from salt.utils import output_profile +import salt.utils # Can be removed once activate_profile and output_profile are moved +import salt.utils.parsers from salt.utils.verify import check_user, verify_log from salt.exceptions import SaltClientError import salt.defaults.exitcodes # pylint: disable=W0611 -class SaltRun(parsers.SaltRunOptionParser): +class SaltRun(salt.utils.parsers.SaltRunOptionParser): ''' Used to execute Salt runners ''' @@ -36,7 +35,7 @@ class SaltRun(parsers.SaltRunOptionParser): # someone tries to use the runners via the python API try: if check_user(self.config['user']): - pr = activate_profile(profiling_enabled) + pr = salt.utils.activate_profile(profiling_enabled) try: ret = runner.run() # In older versions ret['data']['retcode'] was used @@ -50,7 +49,7 @@ class SaltRun(parsers.SaltRunOptionParser): elif isinstance(ret, dict) and 'retcode' in ret.get('data', {}): self.exit(ret['data']['retcode']) finally: - output_profile( + salt.utils.output_profile( pr, stats_path=self.options.profiling_path, stop=True) diff --git a/salt/cli/salt.py b/salt/cli/salt.py index bec851cfad..24e3a2ecd4 100644 --- a/salt/cli/salt.py +++ b/salt/cli/salt.py @@ -7,8 +7,8 @@ sys.modules['pkg_resources'] = None import os # Import Salt libs -from salt.ext.six import string_types -from salt.utils import parsers, print_cli +import salt.utils # Can be removed once print_cli is moved +import salt.utils.parsers from salt.utils.args import yamlify_arg from salt.utils.verify import verify_log from salt.exceptions import ( @@ -18,10 +18,10 @@ from salt.exceptions import ( ) # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six -class SaltCMD(parsers.SaltCMDOptionParser): +class SaltCMD(salt.utils.parsers.SaltCMDOptionParser): ''' The execution of a salt command happens here ''' @@ -93,7 +93,7 @@ class SaltCMD(parsers.SaltCMDOptionParser): # potentially switch to batch execution if self.options.batch_safe_limit > 1: if len(self._preview_target()) >= self.options.batch_safe_limit: - print_cli('\nNOTICE: Too many 
minions targeted, switching to batch execution.') + salt.utils.print_cli('\nNOTICE: Too many minions targeted, switching to batch execution.') self.options.batch = self.options.batch_safe_size self._run_batch() return @@ -140,7 +140,7 @@ class SaltCMD(parsers.SaltCMDOptionParser): if self.config['async']: jid = self.local_client.cmd_async(**kwargs) - print_cli('Executed command with job ID: {0}'.format(jid)) + salt.utils.print_cli('Executed command with job ID: {0}'.format(jid)) return # local will be None when there was an error @@ -279,12 +279,12 @@ class SaltCMD(parsers.SaltCMDOptionParser): def _print_errors_summary(self, errors): if errors: - print_cli('\n') - print_cli('---------------------------') - print_cli('Errors') - print_cli('---------------------------') + salt.utils.print_cli('\n') + salt.utils.print_cli('---------------------------') + salt.utils.print_cli('Errors') + salt.utils.print_cli('---------------------------') for error in errors: - print_cli(self._format_error(error)) + salt.utils.print_cli(self._format_error(error)) def _print_returns_summary(self, ret): ''' @@ -301,7 +301,7 @@ class SaltCMD(parsers.SaltCMDOptionParser): if isinstance(minion_ret, dict) and 'ret' in minion_ret: minion_ret = ret[each_minion].get('ret') if ( - isinstance(minion_ret, string_types) + isinstance(minion_ret, six.string_types) and minion_ret.startswith("Minion did not return") ): if "Not connected" in minion_ret: @@ -314,22 +314,22 @@ class SaltCMD(parsers.SaltCMDOptionParser): return_counter += 1 if self._get_retcode(ret[each_minion]): failed_minions.append(each_minion) - print_cli('\n') - print_cli('-------------------------------------------') - print_cli('Summary') - print_cli('-------------------------------------------') - print_cli('# of minions targeted: {0}'.format(return_counter + not_return_counter)) - print_cli('# of minions returned: {0}'.format(return_counter)) - print_cli('# of minions that did not return: {0}'.format(not_return_counter)) - print_cli('# of minions with errors: {0}'.format(len(failed_minions))) + salt.utils.print_cli('\n') + salt.utils.print_cli('-------------------------------------------') + salt.utils.print_cli('Summary') + salt.utils.print_cli('-------------------------------------------') + salt.utils.print_cli('# of minions targeted: {0}'.format(return_counter + not_return_counter)) + salt.utils.print_cli('# of minions returned: {0}'.format(return_counter)) + salt.utils.print_cli('# of minions that did not return: {0}'.format(not_return_counter)) + salt.utils.print_cli('# of minions with errors: {0}'.format(len(failed_minions))) if self.options.verbose: if not_connected_minions: - print_cli('Minions not connected: {0}'.format(" ".join(not_connected_minions))) + salt.utils.print_cli('Minions not connected: {0}'.format(" ".join(not_connected_minions))) if not_response_minions: - print_cli('Minions not responding: {0}'.format(" ".join(not_response_minions))) + salt.utils.print_cli('Minions not responding: {0}'.format(" ".join(not_response_minions))) if failed_minions: - print_cli('Minions with failures: {0}'.format(" ".join(failed_minions))) - print_cli('-------------------------------------------') + salt.utils.print_cli('Minions with failures: {0}'.format(" ".join(failed_minions))) + salt.utils.print_cli('-------------------------------------------') def _progress_end(self, out): import salt.output @@ -406,10 +406,10 @@ class SaltCMD(parsers.SaltCMDOptionParser): docs = {} if not ret: self.exit(2, 'No minions found to gather docs from\n') - if 
isinstance(ret, str): + if isinstance(ret, six.string_types): self.exit(2, '{0}\n'.format(ret)) for host in ret: - if isinstance(ret[host], string_types) \ + if isinstance(ret[host], six.string_types) \ and (ret[host].startswith("Minion did not return") or ret[host] == 'VALUE TRIMMED'): continue @@ -421,6 +421,6 @@ class SaltCMD(parsers.SaltCMDOptionParser): salt.output.display_output({fun: docs[fun]}, 'nested', self.config) else: for fun in sorted(docs): - print_cli('{0}:'.format(fun)) - print_cli(docs[fun]) - print_cli('') + salt.utils.print_cli('{0}:'.format(fun)) + salt.utils.print_cli(docs[fun]) + salt.utils.print_cli('') diff --git a/salt/cli/ssh.py b/salt/cli/ssh.py index 0d6bf6ea7b..58fb4c6083 100644 --- a/salt/cli/ssh.py +++ b/salt/cli/ssh.py @@ -2,17 +2,21 @@ from __future__ import print_function from __future__ import absolute_import +import sys import salt.client.ssh -from salt.utils import parsers +import salt.utils.parsers from salt.utils.verify import verify_log -class SaltSSH(parsers.SaltSSHOptionParser): +class SaltSSH(salt.utils.parsers.SaltSSHOptionParser): ''' Used to Execute the salt ssh routine ''' def run(self): + if '-H' in sys.argv or '--hosts' in sys.argv: + sys.argv += ['x', 'x'] # Hack: pass a mandatory two options + # that won't be used anyways with -H or --hosts self.parse_args() self.setup_logfile_logger() verify_log(self.config) diff --git a/salt/client/__init__.py b/salt/client/__init__.py index 1f3a262860..9859a37a11 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -37,7 +37,9 @@ import salt.utils.args import salt.utils.event import salt.utils.files import salt.utils.minions +import salt.utils.platform import salt.utils.verify +import salt.utils.versions import salt.utils.jid import salt.syspaths as syspaths from salt.exceptions import ( @@ -46,7 +48,7 @@ from salt.exceptions import ( ) # Import third party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error try: import zmq @@ -70,7 +72,7 @@ log = logging.getLogger(__name__) def get_local_client( - c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), + c_path=os.path.join(syspaths.CONFIG_DIR, u'master'), mopts=None, skip_perm_errors=False, io_loop=None, @@ -93,11 +95,11 @@ def get_local_client( # Late import to prevent circular import import salt.config opts = salt.config.client_config(c_path) - if opts['transport'] == 'raet': + if opts[u'transport'] == u'raet': import salt.client.raet return salt.client.raet.LocalClient(mopts=opts) # TODO: AIO core is separate from transport - elif opts['transport'] in ('zeromq', 'tcp'): + elif opts[u'transport'] in (u'zeromq', u'tcp'): return LocalClient( mopts=opts, skip_perm_errors=skip_perm_errors, @@ -134,7 +136,7 @@ class LocalClient(object): local.cmd('*', 'test.fib', [10]) ''' def __init__(self, - c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), + c_path=os.path.join(syspaths.CONFIG_DIR, u'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' @@ -149,10 +151,9 @@ class LocalClient(object): else: if os.path.isdir(c_path): log.warning( - '{0} expects a file path not a directory path({1}) to ' - 'it\'s \'c_path\' keyword argument'.format( - self.__class__.__name__, c_path - ) + u'%s expects a file path not a directory path(%s) to ' + u'its \'c_path\' keyword argument', + self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) @@ -161,9 +162,9 @@ class LocalClient(object): self.key = 
self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( - 'master', - self.opts['sock_dir'], - self.opts['transport'], + u'master', + self.opts[u'sock_dir'], + self.opts[u'transport'], opts=self.opts, listen=False, io_loop=io_loop, @@ -177,38 +178,38 @@ class LocalClient(object): Read in the rotating master authentication key ''' key_user = self.salt_user - if key_user == 'root': - if self.opts.get('user', 'root') != 'root': - key_user = self.opts.get('user', 'root') - if key_user.startswith('sudo_'): - key_user = self.opts.get('user', 'root') - if salt.utils.is_windows(): + if key_user == u'root': + if self.opts.get(u'user', u'root') != u'root': + key_user = self.opts.get(u'user', u'root') + if key_user.startswith(u'sudo_'): + key_user = self.opts.get(u'user', u'root') + if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. - key_user = key_user.replace('\\', '_') - keyfile = os.path.join(self.opts['cachedir'], - '.{0}_key'.format(key_user)) + key_user = key_user.replace(u'\\', u'_') + keyfile = os.path.join(self.opts[u'cachedir'], + u'.{0}_key'.format(key_user)) # Make sure all key parent directories are accessible - salt.utils.verify.check_path_traversal(self.opts['cachedir'], + salt.utils.verify.check_path_traversal(self.opts[u'cachedir'], key_user, self.skip_perm_errors) try: - with salt.utils.files.fopen(keyfile, 'r') as key: + with salt.utils.files.fopen(keyfile, u'r') as key: return key.read() except (OSError, IOError): # Fall back to eauth - return '' + return u'' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' - range_ = seco.range.Range(self.opts['range_server']) + range_ = seco.range.Range(self.opts[u'range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: - print('Range server exception: {0}'.format(err)) + print(u'Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): @@ -216,34 +217,34 @@ class LocalClient(object): Return the timeout to use ''' if timeout is None: - return self.opts['timeout'] + return self.opts[u'timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: - return self.opts['timeout'] + return self.opts[u'timeout'] # Looks like the timeout is invalid, use config - return self.opts['timeout'] + return self.opts[u'timeout'] def gather_job_info(self, jid, tgt, tgt_type, **kwargs): ''' Return the information about a given job ''' - log.debug('Checking whether jid {0} is still running'.format(jid)) - timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) + log.debug(u'Checking whether jid %s is still running', jid) + timeout = int(kwargs.get(u'gather_job_timeout', self.opts[u'gather_job_timeout'])) pub_data = self.run_job(tgt, - 'saltutil.find_job', + u'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, **kwargs ) - if 'jid' in pub_data: - self.event.subscribe(pub_data['jid']) + if u'jid' in pub_data: + self.event.subscribe(pub_data[u'jid']) return pub_data @@ -251,39 +252,39 @@ class LocalClient(object): ''' Common checks on the pub_data data structure returned from running pub ''' - if pub_data == '': + if pub_data == u'': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( - 'Failed to authenticate! 
This is most likely because this ' - 'user is not permitted to execute commands, but there is a ' - 'small possibility that a disk error occurred (check ' - 'disk/inode usage).' + u'Failed to authenticate! This is most likely because this ' + u'user is not permitted to execute commands, but there is a ' + u'small possibility that a disk error occurred (check ' + u'disk/inode usage).' ) # Failed to connect to the master and send the pub - if 'error' in pub_data: - print(pub_data['error']) - log.debug('_check_pub_data() error: {0}'.format(pub_data['error'])) + if u'error' in pub_data: + print(pub_data[u'error']) + log.debug(u'_check_pub_data() error: %s', pub_data[u'error']) return {} - elif 'jid' not in pub_data: + elif u'jid' not in pub_data: return {} - if pub_data['jid'] == '0': - print('Failed to connect to the Master, ' - 'is the Salt Master running?') + if pub_data[u'jid'] == u'0': + print(u'Failed to connect to the Master, ' + u'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found - if not self.opts.get('order_masters'): + if not self.opts.get(u'order_masters'): # Check for no minions - if not pub_data['minions']: - print('No minions matched the target. ' - 'No command was sent, no jid was assigned.') + if not pub_data[u'minions']: + print(u'No minions matched the target. ' + u'No command was sent, no jid was assigned.') return {} else: - self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') + self.event.subscribe(u'syndic/.*/{0}'.format(pub_data[u'jid']), u'regex') - self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) + self.event.subscribe(u'salt/job/{0}'.format(pub_data[u'jid'])) return pub_data @@ -292,10 +293,10 @@ class LocalClient(object): tgt, fun, arg=(), - tgt_type='glob', - ret='', + tgt_type=u'glob', + ret=u'', timeout=None, - jid='', + jid=u'', kwarg=None, listen=False, **kwargs): @@ -313,14 +314,14 @@ class LocalClient(object): >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') arg = salt.utils.args.condition_input(arg, kwarg) @@ -338,7 +339,7 @@ class LocalClient(object): except SaltClientError: # Re-raise error with specific message raise SaltClientError( - 'The salt master could not be contacted. Is master running?' + u'The salt master could not be contacted. Is master running?' 
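Every ``warn_until`` hunk in this file follows the same pattern shown above for ``run_job``: the deprecated ``expr_form`` keyword is popped and reassigned to ``tgt_type``, and a warning announces that support ends in the Fluorine release. A hedged usage sketch (target and function are illustrative):

.. code-block:: python

    import salt.client

    local = salt.client.LocalClient()

    # preferred: pass the target type via tgt_type
    ret = local.cmd('web*', 'test.ping', tgt_type='glob')

    # still accepted for now, but emits the Fluorine deprecation warning and
    # is converted to tgt_type internally
    ret = local.cmd('web*', 'test.ping', expr_form='glob')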
) except Exception as general_exception: # Convert to generic client error and pass along message @@ -355,10 +356,10 @@ class LocalClient(object): tgt, fun, arg=(), - tgt_type='glob', - ret='', + tgt_type=u'glob', + ret=u'', timeout=None, - jid='', + jid=u'', kwarg=None, listen=True, io_loop=None, @@ -377,14 +378,14 @@ class LocalClient(object): >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') arg = salt.utils.args.condition_input(arg, kwarg) @@ -403,7 +404,7 @@ class LocalClient(object): except SaltClientError: # Re-raise error with specific message raise SaltClientError( - 'The salt master could not be contacted. Is master running?' + u'The salt master could not be contacted. Is master running?' ) except Exception as general_exception: # Convert to generic client error and pass along message @@ -416,9 +417,9 @@ class LocalClient(object): tgt, fun, arg=(), - tgt_type='glob', - ret='', - jid='', + tgt_type=u'glob', + ret=u'', + jid=u'', kwarg=None, **kwargs): ''' @@ -434,14 +435,14 @@ class LocalClient(object): >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') arg = salt.utils.args.condition_input(arg, kwarg) pub_data = self.run_job(tgt, @@ -452,7 +453,7 @@ class LocalClient(object): jid=jid, **kwargs) try: - return pub_data['jid'] + return pub_data[u'jid'] except KeyError: return 0 @@ -461,8 +462,8 @@ class LocalClient(object): tgt, fun, arg=(), - tgt_type='glob', - ret='', + tgt_type=u'glob', + ret=u'', kwarg=None, sub=3, cli=False, @@ -481,17 +482,17 @@ class LocalClient(object): >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' 
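``cmd_async``, updated in the hunk above, only publishes the job and hands back its jid; results are collected later (for example with ``salt-run jobs.lookup_jid``, which the KeyboardInterrupt message further down in this file also points to). A small usage sketch:

.. code-block:: python

    import salt.client

    local = salt.client.LocalClient()
    jid = local.cmd_async('*', 'test.sleep', [300])
    print(jid)  # e.g. '20131219215921857715'; 0 means the publish failed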
) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') minion_ret = self.cmd(tgt, - 'sys.list_functions', + u'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) @@ -509,7 +510,7 @@ class LocalClient(object): f_tgt, fun, arg, - tgt_type='list', + tgt_type=u'list', ret=ret, kwarg=kwarg, progress=progress, @@ -520,10 +521,10 @@ class LocalClient(object): tgt, fun, arg=(), - tgt_type='glob', - ret='', + tgt_type=u'glob', + ret=u'', kwarg=None, - batch='10%', + batch=u'10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time @@ -544,42 +545,42 @@ class LocalClient(object): {'dave': {...}} {'stewart': {...}} ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') import salt.cli.batch arg = salt.utils.args.condition_input(arg, kwarg) - opts = {'tgt': tgt, - 'fun': fun, - 'arg': arg, - 'tgt_type': tgt_type, - 'ret': ret, - 'batch': batch, - 'failhard': kwargs.get('failhard', False), - 'raw': kwargs.get('raw', False)} + opts = {u'tgt': tgt, + u'fun': fun, + u'arg': arg, + u'tgt_type': tgt_type, + u'ret': ret, + u'batch': batch, + u'failhard': kwargs.get(u'failhard', False), + u'raw': kwargs.get(u'raw', False)} - if 'timeout' in kwargs: - opts['timeout'] = kwargs['timeout'] - if 'gather_job_timeout' in kwargs: - opts['gather_job_timeout'] = kwargs['gather_job_timeout'] - if 'batch_wait' in kwargs: - opts['batch_wait'] = int(kwargs['batch_wait']) + if u'timeout' in kwargs: + opts[u'timeout'] = kwargs[u'timeout'] + if u'gather_job_timeout' in kwargs: + opts[u'gather_job_timeout'] = kwargs[u'gather_job_timeout'] + if u'batch_wait' in kwargs: + opts[u'batch_wait'] = int(kwargs[u'batch_wait']) eauth = {} - if 'eauth' in kwargs: - eauth['eauth'] = kwargs.pop('eauth') - if 'username' in kwargs: - eauth['username'] = kwargs.pop('username') - if 'password' in kwargs: - eauth['password'] = kwargs.pop('password') - if 'token' in kwargs: - eauth['token'] = kwargs.pop('token') + if u'eauth' in kwargs: + eauth[u'eauth'] = kwargs.pop(u'eauth') + if u'username' in kwargs: + eauth[u'username'] = kwargs.pop(u'username') + if u'password' in kwargs: + eauth[u'password'] = kwargs.pop(u'password') + if u'token' in kwargs: + eauth[u'token'] = kwargs.pop(u'token') for key, val in six.iteritems(self.opts): if key not in opts: @@ -593,9 +594,9 @@ class LocalClient(object): fun, arg=(), timeout=None, - tgt_type='glob', - ret='', - jid='', + tgt_type=u'glob', + ret=u'', + jid=u'', full_return=False, kwarg=None, **kwargs): @@ -702,14 +703,14 @@ class LocalClient(object): minion ID. A compound command will return a sub-dictionary keyed by function name. ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' 
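``cmd_batch``, whose option handling is converted to unicode keys in the hunk above, is a generator: it runs the job on a slice of the matched minions at a time (10% by default) and yields the returns of each batch as it completes:

.. code-block:: python

    import salt.client

    local = salt.client.LocalClient()
    for batch_ret in local.cmd_batch('*', 'test.ping', batch='10%'):
        print(batch_ret)  # one {minion_id: return} dict per batch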
+ if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') arg = salt.utils.args.condition_input(arg, kwarg) was_listening = self.event.cpub @@ -730,8 +731,8 @@ class LocalClient(object): ret = {} for fn_ret in self.get_cli_event_returns( - pub_data['jid'], - pub_data['minions'], + pub_data[u'jid'], + pub_data[u'minions'], self._get_timeout(timeout), tgt, tgt_type, @@ -740,9 +741,9 @@ class LocalClient(object): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return - else data.get('ret', {})) + else data.get(u'ret', {})) - for failed in list(set(pub_data['minions']) ^ set(ret)): + for failed in list(set(pub_data[u'minions']) - set(ret)): ret[failed] = False return ret finally: @@ -755,8 +756,8 @@ class LocalClient(object): fun, arg=(), timeout=None, - tgt_type='glob', - ret='', + tgt_type=u'glob', + ret=u'', verbose=False, kwarg=None, progress=False, @@ -771,14 +772,14 @@ class LocalClient(object): :param verbose: Print extra information about the running command :returns: A generator ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') arg = salt.utils.args.condition_input(arg, kwarg) was_listening = self.event.cpub @@ -799,8 +800,8 @@ class LocalClient(object): else: try: for fn_ret in self.get_cli_event_returns( - self.pub_data['jid'], - self.pub_data['minions'], + self.pub_data[u'jid'], + self.pub_data[u'minions'], self._get_timeout(timeout), tgt, tgt_type, @@ -814,14 +815,14 @@ class LocalClient(object): yield fn_ret except KeyboardInterrupt: raise SystemExit( - '\n' - 'This job\'s jid is: {0}\n' - 'Exiting gracefully on Ctrl-c\n' - 'The minions may not have all finished running and any ' - 'remaining minions will return upon completion. To look ' - 'up the return data for this job later, run the following ' - 'command:\n\n' - 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid']) + u'\n' + u'This job\'s jid is: {0}\n' + u'Exiting gracefully on Ctrl-c\n' + u'The minions may not have all finished running and any ' + u'remaining minions will return upon completion. To look ' + u'up the return data for this job later, run the following ' + u'command:\n\n' + u'salt-run jobs.lookup_jid {0}'.format(self.pub_data[u'jid']) ) finally: if not was_listening: @@ -833,8 +834,8 @@ class LocalClient(object): fun, arg=(), timeout=None, - tgt_type='glob', - ret='', + tgt_type=u'glob', + ret=u'', kwarg=None, **kwargs): ''' @@ -854,14 +855,14 @@ class LocalClient(object): {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' 
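One behavioural fix in the ``cmd`` hunk above is easy to miss: the minions reported back as ``False`` are now computed with a plain set difference instead of a symmetric difference. With ``^``, a minion that answered without being in the published minion list would also have been flagged; with ``-``, only targeted minions that never returned are. A short illustration in plain Python:

.. code-block:: python

    targeted = {'web1', 'web2'}
    returned = {'web1', 'db1'}  # db1 answered although it was not targeted

    print(targeted ^ returned)  # contains 'web2' and 'db1' -- db1 wrongly marked failed
    print(targeted - returned)  # contains only 'web2'      -- the real no-show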
+ if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') arg = salt.utils.args.condition_input(arg, kwarg) was_listening = self.event.cpub @@ -880,10 +881,10 @@ class LocalClient(object): if not pub_data: yield pub_data else: - if kwargs.get('yield_pub_data'): + if kwargs.get(u'yield_pub_data'): yield pub_data - for fn_ret in self.get_iter_returns(pub_data['jid'], - pub_data['minions'], + for fn_ret in self.get_iter_returns(pub_data[u'jid'], + pub_data[u'minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, @@ -891,7 +892,7 @@ class LocalClient(object): if not fn_ret: continue yield fn_ret - self._clean_up_subscriptions(pub_data['jid']) + self._clean_up_subscriptions(pub_data[u'jid']) finally: if not was_listening: self.event.close_pub() @@ -902,8 +903,8 @@ class LocalClient(object): fun, arg=(), timeout=None, - tgt_type='glob', - ret='', + tgt_type=u'glob', + ret=u'', kwarg=None, show_jid=False, verbose=False, @@ -930,14 +931,14 @@ class LocalClient(object): None {'stewart': {'ret': True}} ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') arg = salt.utils.args.condition_input(arg, kwarg) was_listening = self.event.cpub @@ -956,8 +957,8 @@ class LocalClient(object): if not pub_data: yield pub_data else: - for fn_ret in self.get_iter_returns(pub_data['jid'], - pub_data['minions'], + for fn_ret in self.get_iter_returns(pub_data[u'jid'], + pub_data[u'minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, @@ -965,10 +966,10 @@ class LocalClient(object): **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: - fn_ret[minion]['jid'] = pub_data['jid'] + fn_ret[minion][u'jid'] = pub_data[u'jid'] yield fn_ret - self._clean_up_subscriptions(pub_data['jid']) + self._clean_up_subscriptions(pub_data[u'jid']) finally: if not was_listening: self.event.close_pub() @@ -979,22 +980,22 @@ class LocalClient(object): fun, arg=(), timeout=None, - tgt_type='glob', - ret='', + tgt_type=u'glob', + ret=u'', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' 
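``cmd_iter`` and ``cmd_iter_no_block``, both touched in the hunks above, stream results as minions answer rather than blocking until every minion has returned; the no-block variant additionally yields ``None`` while it is still waiting. A usage sketch:

.. code-block:: python

    import salt.client

    local = salt.client.LocalClient()

    for ret in local.cmd_iter('*', 'test.ping'):
        print(ret)        # e.g. {'jerry': {'ret': True}}

    for ret in local.cmd_iter_no_block('*', 'test.ping'):
        if ret is None:   # nothing has arrived yet
            continue
        print(ret)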
) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') arg = salt.utils.args.condition_input(arg, kwarg) was_listening = self.event.cpub @@ -1013,8 +1014,8 @@ class LocalClient(object): if not pub_data: return pub_data - return (self.get_cli_static_event_returns(pub_data['jid'], - pub_data['minions'], + return (self.get_cli_static_event_returns(pub_data[u'jid'], + pub_data[u'minions'], timeout, tgt, tgt_type, @@ -1028,8 +1029,8 @@ class LocalClient(object): jid, minions, timeout=None, - tgt='*', - tgt_type='glob', + tgt=u'*', + tgt_type=u'glob', verbose=False, show_jid=False, **kwargs): @@ -1038,23 +1039,23 @@ class LocalClient(object): :returns: all of the information for the JID ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') if verbose: - msg = 'Executing job with jid {0}'.format(jid) + msg = u'Executing job with jid {0}'.format(jid) print(msg) - print('-' * len(msg) + '\n') + print(u'-' * len(msg) + u'\n') elif show_jid: - print('jid: {0}'.format(jid)) + print(u'jid: {0}'.format(jid)) if timeout is None: - timeout = self.opts['timeout'] + timeout = self.opts[u'timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) minions = set(minions) @@ -1107,8 +1108,8 @@ class LocalClient(object): jid, minions, timeout=None, - tgt='*', - tgt_type='glob', + tgt=u'*', + tgt_type=u'glob', expect_minions=False, block=True, **kwargs): @@ -1117,14 +1118,14 @@ class LocalClient(object): :returns: all of the information for the JID ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' 
) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') if not isinstance(minions, set): if isinstance(minions, six.string_types): @@ -1133,8 +1134,8 @@ class LocalClient(object): minions = set(list(minions)) if timeout is None: - timeout = self.opts['timeout'] - gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) + timeout = self.opts[u'timeout'] + gather_job_timeout = int(kwargs.get(u'gather_job_timeout', self.opts[u'gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time @@ -1143,34 +1144,33 @@ class LocalClient(object): found = set() # Check to see if the jid is real, if not return the empty dict try: - if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: - log.warning('jid does not exist') + if self.returners[u'{0}.get_load'.format(self.opts[u'master_job_cache'])](jid) == {}: + log.warning(u'jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() except Exception as exc: - log.warning('Returner unavailable: {exc}'.format(exc=exc)) + log.warning(u'Returner unavailable: %s', exc) # Wait for the hosts to check in last_time = False # iterator for this job's return - if self.opts['order_masters']: + if self.opts[u'order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. - ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') + ret_iter = self.get_returns_no_block(u'(salt/job|syndic/.*)/{0}'.format(jid), u'regex') else: - ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) + ret_iter = self.get_returns_no_block(u'salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout - gather_syndic_wait = time.time() + self.opts['syndic_wait'] + gather_syndic_wait = time.time() + self.opts[u'syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( - 'get_iter_returns for jid {0} sent to {1} will timeout at {2}'.format( - jid, minions, datetime.fromtimestamp(timeout_at).time() - ) + u'get_iter_returns for jid %s sent to %s will timeout at %s', + jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned @@ -1178,34 +1178,34 @@ class LocalClient(object): # if we got None, then there were no events if raw is None: break - if 'minions' in raw.get('data', {}): - minions.update(raw['data']['minions']) + if u'minions' in raw.get(u'data', {}): + minions.update(raw[u'data'][u'minions']) continue - if 'return' not in raw['data']: + if u'return' not in raw[u'data']: continue - if kwargs.get('raw', False): - found.add(raw['data']['id']) + if kwargs.get(u'raw', False): + found.add(raw[u'data'][u'id']) yield raw else: - found.add(raw['data']['id']) - ret = {raw['data']['id']: {'ret': raw['data']['return']}} - if 'out' in raw['data']: - ret[raw['data']['id']]['out'] = raw['data']['out'] - if 'retcode' in raw['data']: - ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] - if 'jid' in raw['data']: - ret[raw['data']['id']]['jid'] = raw['data']['jid'] - if kwargs.get('_cmd_meta', False): - ret[raw['data']['id']].update(raw['data']) - log.debug('jid {0} return from {1}'.format(jid, raw['data']['id'])) + found.add(raw[u'data'][u'id']) + ret = {raw[u'data'][u'id']: 
{u'ret': raw[u'data'][u'return']}} + if u'out' in raw[u'data']: + ret[raw[u'data'][u'id']][u'out'] = raw[u'data'][u'out'] + if u'retcode' in raw[u'data']: + ret[raw[u'data'][u'id']][u'retcode'] = raw[u'data'][u'retcode'] + if u'jid' in raw[u'data']: + ret[raw[u'data'][u'id']][u'jid'] = raw[u'data'][u'jid'] + if kwargs.get(u'_cmd_meta', False): + ret[raw[u'data'][u'id']].update(raw[u'data']) + log.debug(u'jid %s return from %s', jid, raw[u'data'][u'id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy - if len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: + if len(found.intersection(minions)) >= len(minions) and not self.opts[u'order_masters']: # All minions have returned, break out of the loop - log.debug('jid {0} found all minions {1}'.format(jid, found)) + log.debug(u'jid %s found all minions %s', jid, found) break - elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: + elif len(found.intersection(minions)) >= len(minions) and self.opts[u'order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. @@ -1227,18 +1227,18 @@ class LocalClient(object): # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet - jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) + jinfo = self.gather_job_info(jid, list(minions - found), u'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send - if 'jid' not in jinfo: + if u'jid' not in jinfo: jinfo_iter = [] else: - jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) + jinfo_iter = self.get_returns_no_block(u'salt/job/{0}'.format(jinfo[u'jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer - if self.opts['order_masters']: - timeout_at += self.opts.get('syndic_wait', 1) + if self.opts[u'order_masters']: + timeout_at += self.opts.get(u'syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: @@ -1246,48 +1246,48 @@ class LocalClient(object): if raw is None: break try: - if raw['data']['retcode'] > 0: - log.error('saltutil returning errors on minion {0}'.format(raw['data']['id'])) - minions.remove(raw['data']['id']) + if raw[u'data'][u'retcode'] > 0: + log.error(u'saltutil returning errors on minion %s', raw[u'data'][u'id']) + minions.remove(raw[u'data'][u'id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. - missing_key = exc.__str__().strip('\'"') - if missing_key == 'retcode': - log.debug('retcode missing from client return') + missing_key = exc.__str__().strip(u'\'"') + if missing_key == u'retcode': + log.debug(u'retcode missing from client return') else: log.debug( - 'Passing on saltutil error. Key \'%s\' missing ' - 'from client return. This may be an error in ' - 'the client.', missing_key + u'Passing on saltutil error. Key \'%s\' missing ' + u'from client return. This may be an error in ' + u'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later - open_jids.add(jinfo['jid']) + open_jids.add(jinfo[u'jid']) # TODO: move to a library?? 
- if 'minions' in raw.get('data', {}): - minions.update(raw['data']['minions']) + if u'minions' in raw.get(u'data', {}): + minions.update(raw[u'data'][u'minions']) continue - if 'syndic' in raw.get('data', {}): - minions.update(raw['syndic']) + if u'syndic' in raw.get(u'data', {}): + minions.update(raw[u'syndic']) continue - if 'return' not in raw.get('data', {}): + if u'return' not in raw.get(u'data', {}): continue # if the job isn't running there anymore... don't count - if raw['data']['return'] == {}: + if raw[u'data'][u'return'] == {}: continue - if 'return' in raw['data']['return'] and \ - raw['data']['return']['return'] == {}: + if u'return' in raw[u'data'][u'return'] and \ + raw[u'data'][u'return'][u'return'] == {}: continue # if we didn't originally target the minion, lets add it to the list - if raw['data']['id'] not in minions: - minions.add(raw['data']['id']) + if raw[u'data'][u'id'] not in minions: + minions.add(raw[u'data'][u'id']) # update this minion's timeout, as long as the job is still running - minion_timeouts[raw['data']['id']] = time.time() + timeout + minion_timeouts[raw[u'data'][u'id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True @@ -1319,7 +1319,7 @@ class LocalClient(object): if expect_minions: for minion in list((minions - found)): - yield {minion: {'failed': True}} + yield {minion: {u'failed': True}} def get_returns( self, @@ -1331,49 +1331,47 @@ class LocalClient(object): ''' minions = set(minions) if timeout is None: - timeout = self.opts['timeout'] + timeout = self.opts[u'timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( - 'get_returns for jid {0} sent to {1} will timeout at {2}'.format( - jid, minions, datetime.fromtimestamp(timeout_at).time() - ) + u'get_returns for jid %s sent to %s will timeout at %s', + jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: - if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: - log.warning('jid does not exist') + if self.returners[u'{0}.get_load'.format(self.opts[u'master_job_cache'])](jid) == {}: + log.warning(u'jid does not exist') return ret except Exception as exc: - raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' - 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) + raise SaltClientError(u'Master job cache returner [{0}] failed to verify jid. 
' + u'Exception details: {1}'.format(self.opts[u'master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) - if raw is not None and 'return' in raw: - found.add(raw['id']) - ret[raw['id']] = raw['return'] + if raw is not None and u'return' in raw: + found.add(raw[u'id']) + ret[raw[u'id']] = raw[u'return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop - log.debug('jid {0} found all minions'.format(jid)) + log.debug(u'jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop - log.debug('jid {0} found all minions'.format(jid)) + log.debug(u'jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( - 'jid {0} minions {1} did not return in time'.format( - jid, (minions - found) - ) + u'jid %s minions %s did not return in time', + jid, (minions - found) ) break time.sleep(0.01) @@ -1392,20 +1390,20 @@ class LocalClient(object): event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: - data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) + data = self.returners[u'{0}.get_jid'.format(self.opts[u'master_job_cache'])](jid) except Exception as exc: - raise SaltClientError('Returner {0} could not fetch jid data. ' - 'Exception details: {1}'.format( - self.opts['master_job_cache'], + raise SaltClientError(u'Returner {0} could not fetch jid data. ' + u'Exception details: {1}'.format( + self.opts[u'master_job_cache'], exc)) for minion in data: m_data = {} if u'return' in data[minion]: - m_data['ret'] = data[minion].get(u'return') + m_data[u'ret'] = data[minion].get(u'return') else: - m_data['ret'] = data[minion].get('return') - if 'out' in data[minion]: - m_data['out'] = data[minion]['out'] + m_data[u'ret'] = data[minion].get(u'return') + if u'out' in data[minion]: + m_data[u'out'] = data[minion][u'out'] if minion in ret: ret[minion].update(m_data) else: @@ -1441,20 +1439,20 @@ class LocalClient(object): ret = {} try: - data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) + data = self.returners[u'{0}.get_jid'.format(self.opts[u'master_job_cache'])](jid) except Exception as exc: - raise SaltClientError('Could not examine master job cache. ' - 'Error occurred in {0} returner. ' - 'Exception details: {1}'.format(self.opts['master_job_cache'], + raise SaltClientError(u'Could not examine master job cache. ' + u'Error occurred in {0} returner. 
' + u'Exception details: {1}'.format(self.opts[u'master_job_cache'], exc)) for minion in data: m_data = {} if u'return' in data[minion]: - m_data['ret'] = data[minion].get(u'return') + m_data[u'ret'] = data[minion].get(u'return') else: - m_data['ret'] = data[minion].get('return') - if 'out' in data[minion]: - m_data['out'] = data[minion]['out'] + m_data[u'ret'] = data[minion].get(u'return') + if u'out' in data[minion]: + m_data[u'out'] = data[minion][u'out'] if minion in ret: ret[minion].update(m_data) else: @@ -1467,25 +1465,25 @@ class LocalClient(object): jid, minions, timeout=None, - tgt='*', - tgt_type='glob', + tgt=u'*', + tgt_type=u'glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' - log.trace('entered - function get_cli_static_event_returns()') + log.trace(u'entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: - msg = 'Executing job with jid {0}'.format(jid) + msg = u'Executing job with jid {0}'.format(jid) print(msg) - print('-' * len(msg) + '\n') + print(u'-' * len(msg) + u'\n') elif show_jid: - print('jid: {0}'.format(jid)) + print(u'jid: {0}'.format(jid)) if timeout is None: - timeout = self.opts['timeout'] + timeout = self.opts[u'timeout'] start = int(time.time()) timeout_at = start + timeout @@ -1493,13 +1491,13 @@ class LocalClient(object): ret = {} # Check to see if the jid is real, if not return the empty dict try: - if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: - log.warning('jid does not exist') + if self.returners[u'{0}.get_load'.format(self.opts[u'master_job_cache'])](jid) == {}: + log.warning(u'jid does not exist') return ret except Exception as exc: - raise SaltClientError('Load could not be retrieved from ' - 'returner {0}. Exception details: {1}'.format( - self.opts['master_job_cache'], + raise SaltClientError(u'Load could not be retrieved from ' + u'returner {0}. 
Exception details: {1}'.format( + self.opts[u'master_job_cache'], exc)) # Wait for the hosts to check in while True: @@ -1507,17 +1505,17 @@ class LocalClient(object): time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) - jid_tag = 'salt/job/{0}'.format(jid) + jid_tag = u'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) - if raw is not None and 'return' in raw: - if 'minions' in raw.get('data', {}): - minions.update(raw['data']['minions']) + if raw is not None and u'return' in raw: + if u'minions' in raw.get(u'data', {}): + minions.update(raw[u'data'][u'minions']) continue - found.add(raw['id']) - ret[raw['id']] = {'ret': raw['return']} - ret[raw['id']]['success'] = raw.get('success', False) - if 'out' in raw: - ret[raw['id']]['out'] = raw['out'] + found.add(raw[u'id']) + ret[raw[u'id']] = {u'ret': raw[u'return']} + ret[raw[u'id']][u'success'] = raw.get(u'success', False) + if u'out' in raw: + ret[raw[u'id']][u'out'] = raw[u'out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break @@ -1528,14 +1526,14 @@ class LocalClient(object): break if int(time.time()) > timeout_at: if verbose or show_timeout: - if self.opts.get('minion_data_cache', False) \ - or tgt_type in ('glob', 'pcre', 'list'): + if self.opts.get(u'minion_data_cache', False) \ + or tgt_type in (u'glob', u'pcre', u'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { - 'out': 'no_return', - 'ret': 'Minion did not return' + u'out': u'no_return', + u'ret': u'Minion did not return' } break time.sleep(0.01) @@ -1548,8 +1546,8 @@ class LocalClient(object): jid, minions, timeout=None, - tgt='*', - tgt_type='glob', + tgt=u'*', + tgt_type=u'glob', verbose=False, progress=False, show_timeout=False, @@ -1558,23 +1556,23 @@ class LocalClient(object): ''' Get the returns for the command line interface via the event system ''' - log.trace('func get_cli_event_returns()') + log.trace(u'func get_cli_event_returns()') - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' 
) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') if verbose: - msg = 'Executing job with jid {0}'.format(jid) + msg = u'Executing job with jid {0}'.format(jid) print(msg) - print('-' * len(msg) + '\n') + print(u'-' * len(msg) + u'\n') elif show_jid: - print('jid: {0}'.format(jid)) + print(u'jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None @@ -1588,29 +1586,29 @@ class LocalClient(object): expect_minions=(verbose or show_timeout), **kwargs ): - log.debug('return event: %s', ret) + log.debug(u'return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): - if not min_ret.get('failed') is True: - yield {'minion_count': len(minions), 'return_count': return_count} + if not min_ret.get(u'failed') is True: + yield {u'minion_count': len(minions), u'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): - if min_ret.get('failed') is True: + if min_ret.get(u'failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() - if self.opts['minion_data_cache'] \ - and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ + if self.opts[u'minion_data_cache'] \ + and salt.cache.factory(self.opts).contains(u'minions/{0}'.format(id_), u'data') \ and connected_minions \ and id_ not in connected_minions: - yield {id_: {'out': 'no_return', - 'ret': 'Minion did not return. [Not connected]'}} + yield {id_: {u'out': u'no_return', + u'ret': u'Minion did not return. [Not connected]'}} else: # don't report syndics as unresponsive minions - if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): - yield {id_: {'out': 'no_return', - 'ret': 'Minion did not return. [No response]'}} + if not os.path.exists(os.path.join(self.opts[u'syndic_dir'], id_)): + yield {id_: {u'out': u'no_return', + u'ret': u'Minion did not return. [No response]'}} else: yield {id_: min_ret} @@ -1621,16 +1619,16 @@ class LocalClient(object): Gather the return data from the event system, break hard when timeout is reached. 
''' - log.trace('entered - function get_event_iter_returns()') + log.trace(u'entered - function get_event_iter_returns()') if timeout is None: - timeout = self.opts['timeout'] + timeout = self.opts[u'timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict - if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: - log.warning('jid does not exist') + if self.returners[u'{0}.get_load'.format(self.opts[u'master_job_cache'])](jid) == {}: + log.warning(u'jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() @@ -1640,16 +1638,16 @@ class LocalClient(object): if raw is None or time.time() > timeout_at: # Timeout reached break - if 'minions' in raw.get('data', {}): + if u'minions' in raw.get(u'data', {}): continue try: - found.add(raw['id']) - ret = {raw['id']: {'ret': raw['return']}} + found.add(raw[u'id']) + ret = {raw[u'id']: {u'ret': raw[u'return']}} except KeyError: # Ignore other erroneous messages continue - if 'out' in raw: - ret[raw['id']]['out'] = raw['out'] + if u'out' in raw: + ret[raw[u'id']][u'out'] = raw[u'out'] yield ret time.sleep(0.02) @@ -1665,59 +1663,59 @@ class LocalClient(object): ''' Set up the payload_kwargs to be sent down to the master ''' - if tgt_type == 'nodegroup': - if tgt not in self.opts['nodegroups']: + if tgt_type == u'nodegroup': + if tgt not in self.opts[u'nodegroups']: conf_file = self.opts.get( - 'conf_file', 'the master config file' + u'conf_file', u'the master config file' ) raise SaltInvocationError( - 'Node group {0} unavailable in {1}'.format( + u'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, - self.opts['nodegroups']) - tgt_type = 'compound' + self.opts[u'nodegroups']) + tgt_type = u'compound' # Convert a range expression to a list of nodes and change expression # form to list - if tgt_type == 'range' and HAS_RANGE: + if tgt_type == u'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) - tgt_type = 'list' + tgt_type = u'list' # If an external job cache is specified add it to the ret list - if self.opts.get('ext_job_cache'): + if self.opts.get(u'ext_job_cache'): if ret: - ret += ',{0}'.format(self.opts['ext_job_cache']) + ret += u',{0}'.format(self.opts[u'ext_job_cache']) else: - ret = self.opts['ext_job_cache'] + ret = self.opts[u'ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload - payload_kwargs = {'cmd': 'publish', - 'tgt': tgt, - 'fun': fun, - 'arg': arg, - 'key': self.key, - 'tgt_type': tgt_type, - 'ret': ret, - 'jid': jid} + payload_kwargs = {u'cmd': u'publish', + u'tgt': tgt, + u'fun': fun, + u'arg': arg, + u'key': self.key, + u'tgt_type': tgt_type, + u'ret': ret, + u'jid': jid} # if kwargs are passed, pack them. 
if kwargs: - payload_kwargs['kwargs'] = kwargs + payload_kwargs[u'kwargs'] = kwargs # If we have a salt user, add it to the payload - if self.opts['syndic_master'] and 'user' in kwargs: - payload_kwargs['user'] = kwargs['user'] + if self.opts[u'syndic_master'] and u'user' in kwargs: + payload_kwargs[u'user'] = kwargs[u'user'] elif self.salt_user: - payload_kwargs['user'] = self.salt_user + payload_kwargs[u'user'] = self.salt_user # If we're a syndication master, pass the timeout - if self.opts['order_masters']: - payload_kwargs['to'] = timeout + if self.opts[u'order_masters']: + payload_kwargs[u'to'] = timeout return payload_kwargs @@ -1725,9 +1723,9 @@ class LocalClient(object): tgt, fun, arg=(), - tgt_type='glob', - ret='', - jid='', + tgt_type=u'glob', + ret=u'', + jid=u'', timeout=5, listen=False, **kwargs): @@ -1752,22 +1750,22 @@ class LocalClient(object): minions: A set, the targets that the tgt passed should match. ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') # Make sure the publisher is running by checking the unix socket - if (self.opts.get('ipc_mode', '') != 'tcp' and - not os.path.exists(os.path.join(self.opts['sock_dir'], - 'publish_pull.ipc'))): + if (self.opts.get(u'ipc_mode', u'') != u'tcp' and + not os.path.exists(os.path.join(self.opts[u'sock_dir'], + u'publish_pull.ipc'))): log.error( - 'Unable to connect to the salt master publisher at ' - '{0}'.format(self.opts['sock_dir']) + u'Unable to connect to the salt master publisher at %s', + self.opts[u'sock_dir'] ) raise SaltClientError @@ -1781,10 +1779,10 @@ class LocalClient(object): timeout, **kwargs) - master_uri = 'tcp://' + salt.utils.ip_bracket(self.opts['interface']) + \ - ':' + str(self.opts['ret_port']) + master_uri = u'tcp://' + salt.utils.ip_bracket(self.opts[u'interface']) + \ + u':' + str(self.opts[u'ret_port']) channel = salt.transport.Channel.factory(self.opts, - crypt='clear', + crypt=u'clear', master_uri=master_uri) try: @@ -1795,13 +1793,13 @@ class LocalClient(object): payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( - 'Salt request timed out. The master is not responding. You ' - 'may need to run your command with `--async` in order to ' - 'bypass the congested event bus. With `--async`, the CLI tool ' - 'will print the job id (jid) and exit immediately without ' - 'listening for responses. You can then use ' - '`salt-run jobs.lookup_jid` to look up the results of the job ' - 'in the job cache later.' + u'Salt request timed out. The master is not responding. You ' + u'may need to run your command with `--async` in order to ' + u'bypass the congested event bus. With `--async`, the CLI tool ' + u'will print the job id (jid) and exit immediately without ' + u'listening for responses. You can then use ' + u'`salt-run jobs.lookup_jid` to look up the results of the job ' + u'in the job cache later.' 
) if not payload: @@ -1811,10 +1809,10 @@ class LocalClient(object): if key == self.key: return payload self.key = key - payload_kwargs['key'] = self.key + payload_kwargs[u'key'] = self.key payload = channel.send(payload_kwargs) - error = payload.pop('error', None) + error = payload.pop(u'error', None) if error is not None: raise PublishError(error) @@ -1824,17 +1822,17 @@ class LocalClient(object): # We have the payload, let's get rid of the channel fast(GC'ed faster) del channel - return {'jid': payload['load']['jid'], - 'minions': payload['load']['minions']} + return {u'jid': payload[u'load'][u'jid'], + u'minions': payload[u'load'][u'minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), - tgt_type='glob', - ret='', - jid='', + tgt_type=u'glob', + ret=u'', + jid=u'', timeout=5, io_loop=None, listen=True, @@ -1860,22 +1858,22 @@ class LocalClient(object): minions: A set, the targets that the tgt passed should match. ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') # Make sure the publisher is running by checking the unix socket - if (self.opts.get('ipc_mode', '') != 'tcp' and - not os.path.exists(os.path.join(self.opts['sock_dir'], - 'publish_pull.ipc'))): + if (self.opts.get(u'ipc_mode', u'') != u'tcp' and + not os.path.exists(os.path.join(self.opts[u'sock_dir'], + u'publish_pull.ipc'))): log.error( - 'Unable to connect to the salt master publisher at ' - '{0}'.format(self.opts['sock_dir']) + u'Unable to connect to the salt master publisher at %s', + self.opts[u'sock_dir'] ) raise SaltClientError @@ -1889,11 +1887,11 @@ class LocalClient(object): timeout, **kwargs) - master_uri = 'tcp://' + salt.utils.ip_bracket(self.opts['interface']) + \ - ':' + str(self.opts['ret_port']) + master_uri = u'tcp://' + salt.utils.ip_bracket(self.opts[u'interface']) + \ + u':' + str(self.opts[u'ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, - crypt='clear', + crypt=u'clear', master_uri=master_uri) try: @@ -1904,13 +1902,13 @@ class LocalClient(object): payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( - 'Salt request timed out. The master is not responding. You ' - 'may need to run your command with `--async` in order to ' - 'bypass the congested event bus. With `--async`, the CLI tool ' - 'will print the job id (jid) and exit immediately without ' - 'listening for responses. You can then use ' - '`salt-run jobs.lookup_jid` to look up the results of the job ' - 'in the job cache later.' + u'Salt request timed out. The master is not responding. You ' + u'may need to run your command with `--async` in order to ' + u'bypass the congested event bus. With `--async`, the CLI tool ' + u'will print the job id (jid) and exit immediately without ' + u'listening for responses. You can then use ' + u'`salt-run jobs.lookup_jid` to look up the results of the job ' + u'in the job cache later.' 
) if not payload: @@ -1920,10 +1918,10 @@ class LocalClient(object): if key == self.key: raise tornado.gen.Return(payload) self.key = key - payload_kwargs['key'] = self.key + payload_kwargs[u'key'] = self.key payload = yield channel.send(payload_kwargs) - error = payload.pop('error', None) + error = payload.pop(u'error', None) if error is not None: raise PublishError(error) @@ -1933,21 +1931,21 @@ class LocalClient(object): # We have the payload, let's get rid of the channel fast(GC'ed faster) del channel - raise tornado.gen.Return({'jid': payload['load']['jid'], - 'minions': payload['load']['minions']}) + raise tornado.gen.Return({u'jid': payload[u'load'][u'jid'], + u'minions': payload[u'load'][u'minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client - if hasattr(self, 'event'): + if hasattr(self, u'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): - if self.opts.get('order_masters'): - self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') - self.event.unsubscribe('salt/job/{0}'.format(job_id)) + if self.opts.get(u'order_masters'): + self.event.unsubscribe(u'syndic/.*/{0}'.format(job_id), u'regex') + self.event.unsubscribe(u'salt/job/{0}'.format(job_id)) class FunctionWrapper(dict): @@ -1962,7 +1960,7 @@ class FunctionWrapper(dict): super(FunctionWrapper, self).__init__() self.opts = opts self.minion = minion - self.local = LocalClient(self.opts['conf_file']) + self.local = LocalClient(self.opts[u'conf_file']) self.functions = self.__load_functions() def __missing__(self, key): @@ -1979,7 +1977,7 @@ class FunctionWrapper(dict): Find out what functions are available on the minion ''' return set(self.local.cmd(self.minion, - 'sys.list_functions').get(self.minion, [])) + u'sys.list_functions').get(self.minion, [])) def run_key(self, key): ''' @@ -1992,7 +1990,7 @@ class FunctionWrapper(dict): ''' args = list(args) for _key, _val in kwargs: - args.append('{0}={1}'.format(_key, _val)) + args.append(u'{0}={1}'.format(_key, _val)) return self.local.cmd(self.minion, key, args) return func @@ -2035,7 +2033,7 @@ class Caller(object): __opts__['file_client'] = 'local' caller = salt.client.Caller(mopts=__opts__) ''' - def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'minion'), + def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, u'minion'), mopts=None): # Late-import of the minion module to keep the CLI as light as possible import salt.minion diff --git a/salt/client/api.py b/salt/client/api.py index 5e18b50b32..55a6e32728 100644 --- a/salt/client/api.py +++ b/salt/client/api.py @@ -37,7 +37,7 @@ def tokenify(cmd, token=None): Otherwise return cmd ''' if token is not None: - cmd['token'] = token + cmd[u'token'] = token return cmd @@ -50,19 +50,19 @@ class APIClient(object): if not opts: opts = salt.config.client_config( os.environ.get( - 'SALT_MASTER_CONFIG', - os.path.join(syspaths.CONFIG_DIR, 'master') + u'SALT_MASTER_CONFIG', + os.path.join(syspaths.CONFIG_DIR, u'master') ) ) self.opts = opts - self.localClient = salt.client.get_local_client(self.opts['conf_file']) + self.localClient = salt.client.get_local_client(self.opts[u'conf_file']) self.runnerClient = salt.runner.RunnerClient(self.opts) self.wheelClient = salt.wheel.Wheel(self.opts) self.resolver = salt.auth.Resolver(self.opts) self.event = salt.utils.event.get_event( - 'master', - self.opts['sock_dir'], - 
self.opts['transport'], + u'master', + self.opts[u'sock_dir'], + self.opts[u'transport'], opts=self.opts, listen=listen) @@ -118,20 +118,20 @@ class APIClient(object): ''' cmd = dict(cmd) # make copy - client = 'minion' # default to local minion client - mode = cmd.get('mode', 'async') # default to 'async' + client = u'minion' # default to local minion client + mode = cmd.get(u'mode', u'async') # default to 'async' # check for wheel or runner prefix to fun name to use wheel or runner client - funparts = cmd.get('fun', '').split('.') - if len(funparts) > 2 and funparts[0] in ['wheel', 'runner']: # master + funparts = cmd.get(u'fun', u'').split(u'.') + if len(funparts) > 2 and funparts[0] in [u'wheel', u'runner']: # master client = funparts[0] - cmd['fun'] = '.'.join(funparts[1:]) # strip prefix + cmd[u'fun'] = u'.'.join(funparts[1:]) # strip prefix - if not ('token' in cmd or - ('eauth' in cmd and 'password' in cmd and 'username' in cmd)): - raise EauthAuthenticationError('No authentication credentials given') + if not (u'token' in cmd or + (u'eauth' in cmd and u'password' in cmd and u'username' in cmd)): + raise EauthAuthenticationError(u'No authentication credentials given') - executor = getattr(self, '{0}_{1}'.format(client, mode)) + executor = getattr(self, u'{0}_{1}'.format(client, mode)) result = executor(**cmd) return result @@ -204,9 +204,9 @@ class APIClient(object): Adds client per the command. ''' - cmd['client'] = 'minion' - if len(cmd['module'].split('.')) > 2 and cmd['module'].split('.')[0] in ['runner', 'wheel']: - cmd['client'] = 'master' + cmd[u'client'] = u'minion' + if len(cmd[u'module'].split(u'.')) > 2 and cmd[u'module'].split(u'.')[0] in [u'runner', u'wheel']: + cmd[u'client'] = u'master' return self._signature(cmd) def _signature(self, cmd): @@ -216,20 +216,20 @@ class APIClient(object): ''' result = {} - client = cmd.get('client', 'minion') - if client == 'minion': - cmd['fun'] = 'sys.argspec' - cmd['kwarg'] = dict(module=cmd['module']) + client = cmd.get(u'client', u'minion') + if client == u'minion': + cmd[u'fun'] = u'sys.argspec' + cmd[u'kwarg'] = dict(module=cmd[u'module']) result = self.run(cmd) - elif client == 'master': - parts = cmd['module'].split('.') + elif client == u'master': + parts = cmd[u'module'].split(u'.') client = parts[0] - module = '.'.join(parts[1:]) # strip prefix - if client == 'wheel': + module = u'.'.join(parts[1:]) # strip prefix + if client == u'wheel': functions = self.wheelClient.functions - elif client == 'runner': + elif client == u'runner': functions = self.runnerClient.functions - result = {'master': salt.utils.argspec_report(functions, module)} + result = {u'master': salt.utils.argspec_report(functions, module)} return result def create_token(self, creds): @@ -274,20 +274,20 @@ class APIClient(object): tokenage = self.resolver.mk_token(creds) except Exception as ex: raise EauthAuthenticationError( - "Authentication failed with {0}.".format(repr(ex))) + u"Authentication failed with {0}.".format(repr(ex))) - if 'token' not in tokenage: - raise EauthAuthenticationError("Authentication failed with provided credentials.") + if u'token' not in tokenage: + raise EauthAuthenticationError(u"Authentication failed with provided credentials.") # Grab eauth config for the current backend for the current user - tokenage_eauth = self.opts['external_auth'][tokenage['eauth']] - if tokenage['name'] in tokenage_eauth: - tokenage['perms'] = tokenage_eauth[tokenage['name']] + tokenage_eauth = self.opts[u'external_auth'][tokenage[u'eauth']] + if 
tokenage[u'name'] in tokenage_eauth: + tokenage[u'perms'] = tokenage_eauth[tokenage[u'name']] else: - tokenage['perms'] = tokenage_eauth['*'] + tokenage[u'perms'] = tokenage_eauth[u'*'] - tokenage['user'] = tokenage['name'] - tokenage['username'] = tokenage['name'] + tokenage[u'user'] = tokenage[u'name'] + tokenage[u'username'] = tokenage[u'name'] return tokenage @@ -300,11 +300,11 @@ class APIClient(object): result = self.resolver.get_token(token) except Exception as ex: raise EauthAuthenticationError( - "Token validation failed with {0}.".format(repr(ex))) + u"Token validation failed with {0}.".format(repr(ex))) return result - def get_event(self, wait=0.25, tag='', full=False): + def get_event(self, wait=0.25, tag=u'', full=False): ''' Get a single salt event. If no events are available, then block for up to ``wait`` seconds. @@ -322,4 +322,4 @@ class APIClient(object): Need to convert this to a master call with appropriate authentication ''' - return self.event.fire_event(data, tagify(tag, 'wui')) + return self.event.fire_event(data, tagify(tag, u'wui')) diff --git a/salt/client/mixins.py b/salt/client/mixins.py index 09680a0561..2e4214e52b 100644 --- a/salt/client/mixins.py +++ b/salt/client/mixins.py @@ -5,6 +5,7 @@ A collection of mixins useful for the various *Client interfaces # Import Python libs from __future__ import absolute_import, print_function, with_statement +import fnmatch import signal import logging import weakref @@ -22,10 +23,12 @@ import salt.utils.event import salt.utils.jid import salt.utils.job import salt.utils.lazy +import salt.utils.platform import salt.utils.process +import salt.utils.versions import salt.transport import salt.log.setup -import salt.ext.six as six +from salt.ext import six # Import 3rd-party libs import tornado.stack_context @@ -33,18 +36,18 @@ import tornado.stack_context log = logging.getLogger(__name__) CLIENT_INTERNAL_KEYWORDS = frozenset([ - 'client', - 'cmd', - 'eauth', - 'fun', - 'kwarg', - 'match', - 'token', - '__jid__', - '__tag__', - '__user__', - 'username', - 'password' + u'client', + u'cmd', + u'eauth', + u'fun', + u'kwarg', + u'match', + u'token', + u'__jid__', + u'__tag__', + u'__user__', + u'username', + u'password' ]) @@ -76,9 +79,9 @@ class ClientFuncsDict(collections.MutableMapping): raise KeyError def wrapper(*args, **kwargs): - low = {'fun': key, - 'args': args, - 'kwargs': kwargs, + low = {u'fun': key, + u'args': args, + u'kwargs': kwargs, } pub_data = {} # Copy kwargs keys so we can iterate over and pop the pub data @@ -86,18 +89,18 @@ class ClientFuncsDict(collections.MutableMapping): # pull out pub_data if you have it for kwargs_key in kwargs_keys: - if kwargs_key.startswith('__pub_'): + if kwargs_key.startswith(u'__pub_'): pub_data[kwargs_key] = kwargs.pop(kwargs_key) - async_pub = self.client._gen_async_pub(pub_data.get('__pub_jid')) + async_pub = self.client._gen_async_pub(pub_data.get(u'__pub_jid')) user = salt.utils.get_specific_user() return self.client._proc_function( key, low, user, - async_pub['tag'], # TODO: fix - async_pub['jid'], # TODO: fix + async_pub[u'tag'], # TODO: fix + async_pub[u'jid'], # TODO: fix False, # Don't daemonize ) return wrapper @@ -128,14 +131,14 @@ class SyncClientMixin(object): Execute a function through the master network interface. 
''' load = kwargs - load['cmd'] = self.client + load[u'cmd'] = self.client channel = salt.transport.Channel.factory(self.opts, - crypt='clear', - usage='master_call') + crypt=u'clear', + usage=u'master_call') ret = channel.send(load) if isinstance(ret, collections.Mapping): - if 'error' in ret: - salt.utils.error.raise_error(**ret['error']) + if u'error' in ret: + salt.utils.error.raise_error(**ret[u'error']) return ret def cmd_sync(self, low, timeout=None, full_return=False): @@ -154,19 +157,19 @@ class SyncClientMixin(object): 'eauth': 'pam', }) ''' - event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=True) + event = salt.utils.event.get_master_event(self.opts, self.opts[u'sock_dir'], listen=True) job = self.master_call(**low) - ret_tag = salt.utils.event.tagify('ret', base=job['tag']) + ret_tag = salt.utils.event.tagify(u'ret', base=job[u'tag']) if timeout is None: - timeout = self.opts.get('rest_timeout', 300) + timeout = self.opts.get(u'rest_timeout', 300) ret = event.get_event(tag=ret_tag, full=True, wait=timeout, auto_reconnect=True) if ret is None: raise salt.exceptions.SaltClientTimeout( - "RunnerClient job '{0}' timed out".format(job['jid']), - jid=job['jid']) + u"RunnerClient job '{0}' timed out".format(job[u'jid']), + jid=job[u'jid']) - return ret if full_return else ret['data']['return'] + return ret if full_return else ret[u'data'][u'return'] def cmd(self, fun, arg=None, pub_data=None, kwarg=None, print_event=True, full_return=False): ''' @@ -201,40 +204,40 @@ class SyncClientMixin(object): arg = tuple() if not isinstance(arg, list) and not isinstance(arg, tuple): raise salt.exceptions.SaltInvocationError( - 'arg must be formatted as a list/tuple' + u'arg must be formatted as a list/tuple' ) if pub_data is None: pub_data = {} if not isinstance(pub_data, dict): raise salt.exceptions.SaltInvocationError( - 'pub_data must be formatted as a dictionary' + u'pub_data must be formatted as a dictionary' ) if kwarg is None: kwarg = {} if not isinstance(kwarg, dict): raise salt.exceptions.SaltInvocationError( - 'kwarg must be formatted as a dictionary' + u'kwarg must be formatted as a dictionary' ) arglist = salt.utils.args.parse_input( arg, - no_parse=self.opts.get('no_parse', [])) + no_parse=self.opts.get(u'no_parse', [])) # if you were passed kwarg, add it to arglist if kwarg: - kwarg['__kwarg__'] = True + kwarg[u'__kwarg__'] = True arglist.append(kwarg) args, kwargs = salt.minion.load_args_and_kwargs( self.functions[fun], arglist, pub_data ) - low = {'fun': fun, - 'arg': args, - 'kwarg': kwargs} + low = {u'fun': fun, + u'arg': args, + u'kwarg': kwargs} return self.low(fun, low, print_event=print_event, full_return=full_return) @property def mminion(self): - if not hasattr(self, '_mminion'): + if not hasattr(self, u'_mminion'): self._mminion = salt.minion.MasterMinion(self.opts, states=False, rend=False) return self._mminion @@ -243,15 +246,15 @@ class SyncClientMixin(object): Check for deprecated usage and allow until Salt Oxygen. 
''' msg = [] - if 'args' in low: - msg.append('call with arg instead') - low['arg'] = low.pop('args') - if 'kwargs' in low: - msg.append('call with kwarg instead') - low['kwarg'] = low.pop('kwargs') + if u'args' in low: + msg.append(u'call with arg instead') + low[u'arg'] = low.pop(u'args') + if u'kwargs' in low: + msg.append(u'call with kwarg instead') + low[u'kwarg'] = low.pop(u'kwargs') if msg: - salt.utils.warn_until('Oxygen', ' '.join(msg)) + salt.utils.versions.warn_until(u'Oxygen', u' '.join(msg)) return self._low(fun, low, print_event=print_event, full_return=full_return) @@ -265,13 +268,13 @@ class SyncClientMixin(object): class_name = self.__class__.__name__.lower() except AttributeError: log.warning( - 'Unable to determine class name', + u'Unable to determine class name', exc_info_on_loglevel=logging.DEBUG ) return True try: - return self.opts['{0}_returns'.format(class_name)] + return self.opts[u'{0}_returns'.format(class_name)] except KeyError: # No such option, assume this isn't one we care about gating and # just return True. @@ -294,24 +297,24 @@ class SyncClientMixin(object): # this is not to clutter the output with the module loading # if we have a high debug level. self.mminion # pylint: disable=W0104 - jid = low.get('__jid__', salt.utils.jid.gen_jid()) - tag = low.get('__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix)) + jid = low.get(u'__jid__', salt.utils.jid.gen_jid()) + tag = low.get(u'__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix)) - data = {'fun': '{0}.{1}'.format(self.client, fun), - 'jid': jid, - 'user': low.get('__user__', 'UNKNOWN'), + data = {u'fun': u'{0}.{1}'.format(self.client, fun), + u'jid': jid, + u'user': low.get(u'__user__', u'UNKNOWN'), } event = salt.utils.event.get_event( - 'master', - self.opts['sock_dir'], - self.opts['transport'], + u'master', + self.opts[u'sock_dir'], + self.opts[u'transport'], opts=self.opts, listen=False) if print_event: print_func = self.print_async_event \ - if hasattr(self, 'print_async_event') \ + if hasattr(self, u'print_async_event') \ else None else: # Suppress printing of return event (this keeps us from printing @@ -326,12 +329,12 @@ class SyncClientMixin(object): # TODO: document these, and test that they exist # TODO: Other things to inject?? - func_globals = {'__jid__': jid, - '__user__': data['user'], - '__tag__': tag, + func_globals = {u'__jid__': jid, + u'__user__': data[u'user'], + u'__tag__': tag, # weak ref to avoid the Exception in interpreter # teardown of event - '__jid_event__': weakref.proxy(namespaced_event), + u'__jid_event__': weakref.proxy(namespaced_event), } try: @@ -343,9 +346,9 @@ class SyncClientMixin(object): completed_funcs = [] for mod_name in six.iterkeys(self_functions): - if '.' not in mod_name: + if u'.' not in mod_name: continue - mod, _ = mod_name.split('.', 1) + mod, _ = mod_name.split(u'.', 1) if mod in completed_funcs: continue completed_funcs.append(mod) @@ -361,85 +364,95 @@ class SyncClientMixin(object): # we make the transition we will load "kwargs" using format_call if # there are no kwargs in the low object passed in f_call = None - if 'arg' not in low: + if u'arg' not in low: f_call = salt.utils.format_call( self.functions[fun], low, expected_extra_kws=CLIENT_INTERNAL_KEYWORDS ) - args = f_call.get('args', ()) + args = f_call.get(u'args', ()) else: - args = low['arg'] + args = low[u'arg'] - if 'kwarg' not in low: + if u'kwarg' not in low: log.critical( - 'kwargs must be passed inside the low data within the ' - '\'kwarg\' key. 
See usage of ' - 'salt.utils.args.parse_input() and ' - 'salt.minion.load_args_and_kwargs() elsewhere in the ' - 'codebase.' + u'kwargs must be passed inside the low data within the ' + u'\'kwarg\' key. See usage of ' + u'salt.utils.args.parse_input() and ' + u'salt.minion.load_args_and_kwargs() elsewhere in the ' + u'codebase.' ) kwargs = {} else: - kwargs = low['kwarg'] + kwargs = low[u'kwarg'] # Update the event data with loaded args and kwargs - data['fun_args'] = list(args) + ([kwargs] if kwargs else []) - func_globals['__jid_event__'].fire_event(data, 'new') + data[u'fun_args'] = list(args) + ([kwargs] if kwargs else []) + func_globals[u'__jid_event__'].fire_event(data, u'new') # Initialize a context for executing the method. with tornado.stack_context.StackContext(self.functions.context_dict.clone): - data['return'] = self.functions[fun](*args, **kwargs) - data['success'] = True - if isinstance(data['return'], dict) and 'data' in data['return']: + data[u'return'] = self.functions[fun](*args, **kwargs) + data[u'success'] = True + if isinstance(data[u'return'], dict) and u'data' in data[u'return']: # some functions can return boolean values - data['success'] = salt.utils.check_state_result(data['return']['data']) + data[u'success'] = salt.utils.check_state_result(data[u'return'][u'data']) except (Exception, SystemExit) as ex: if isinstance(ex, salt.exceptions.NotImplemented): - data['return'] = str(ex) + data[u'return'] = str(ex) else: - data['return'] = 'Exception occurred in {0} {1}: {2}'.format( + data[u'return'] = u'Exception occurred in {0} {1}: {2}'.format( self.client, fun, traceback.format_exc(), ) - data['success'] = False - - namespaced_event.fire_event(data, 'ret') + data[u'success'] = False if self.store_job: try: salt.utils.job.store_job( self.opts, { - 'id': self.opts['id'], - 'tgt': self.opts['id'], - 'jid': data['jid'], - 'return': data, + u'id': self.opts[u'id'], + u'tgt': self.opts[u'id'], + u'jid': data[u'jid'], + u'return': data, }, event=None, mminion=self.mminion, ) except salt.exceptions.SaltCacheError: - log.error('Could not store job cache info. ' - 'Job details for this run may be unavailable.') + log.error(u'Could not store job cache info. ' + u'Job details for this run may be unavailable.') + + # Outputters _can_ mutate data so write to the job cache first! + namespaced_event.fire_event(data, u'ret') # if we fired an event, make sure to delete the event object. # This will ensure that we call destroy, which will do the 0MQ linger - log.info('Runner completed: {0}'.format(data['jid'])) + log.info(u'Runner completed: %s', data[u'jid']) del event del namespaced_event - return data if full_return else data['return'] + return data if full_return else data[u'return'] def get_docs(self, arg=None): ''' Return a dictionary of functions and the inline documentation for each ''' if arg: - target_mod = arg + '.' if not arg.endswith('.') else arg - docs = [(fun, self.functions[fun].__doc__) - for fun in sorted(self.functions) - if fun == arg or fun.startswith(target_mod)] + if u'*' in arg: + target_mod = arg + _use_fnmatch = True + else: + target_mod = arg + u'.' 
if not arg.endswith(u'.') else arg + _use_fnmatch = False + if _use_fnmatch: + docs = [(fun, self.functions[fun].__doc__) + for fun in fnmatch.filter(self.functions, target_mod)] + else: + docs = [(fun, self.functions[fun].__doc__) + for fun in sorted(self.functions) + if fun == arg or fun.startswith(target_mod)] else: docs = [(fun, self.functions[fun].__doc__) for fun in sorted(self.functions)] @@ -459,7 +472,7 @@ class AsyncClientMixin(object): Run this method in a multiprocess target to execute the function in a multiprocess and fire the return data on the event bus ''' - if daemonize and not salt.utils.is_windows(): + if daemonize and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() @@ -469,9 +482,9 @@ class AsyncClientMixin(object): salt.log.setup.setup_multiprocessing_logging() # pack a few things into low - low['__jid__'] = jid - low['__user__'] = user - low['__tag__'] = tag + low[u'__jid__'] = jid + low[u'__user__'] = user + low[u'__tag__'] = tag return self.low(fun, low, full_return=False) @@ -499,9 +512,9 @@ class AsyncClientMixin(object): if jid is None: jid = salt.utils.jid.gen_jid() tag = salt.utils.event.tagify(jid, prefix=self.tag_prefix) - return {'tag': tag, 'jid': jid} + return {u'tag': tag, u'jid': jid} - def async(self, fun, low, user='UNKNOWN', pub=None): + def async(self, fun, low, user=u'UNKNOWN', pub=None): ''' Execute the function in a multiprocess and return the event tag to use to watch for the return @@ -510,7 +523,7 @@ class AsyncClientMixin(object): proc = salt.utils.process.SignalHandlingMultiprocessingProcess( target=self._proc_function, - args=(fun, low, user, async_pub['tag'], async_pub['jid'])) + args=(fun, low, user, async_pub[u'tag'], async_pub[u'jid'])) with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers @@ -526,29 +539,29 @@ class AsyncClientMixin(object): return # if we are "quiet", don't print - if self.opts.get('quiet', False): + if self.opts.get(u'quiet', False): return # some suffixes we don't want to print - if suffix in ('new',): + if suffix in (u'new',): return try: - outputter = self.opts.get('output', event.get('outputter', None) or event.get('return').get('outputter')) + outputter = self.opts.get(u'output', event.get(u'outputter', None) or event.get(u'return').get(u'outputter')) except AttributeError: outputter = None # if this is a ret, we have our own set of rules - if suffix == 'ret': + if suffix == u'ret': # Check if outputter was passed in the return data. 
If this is the case, # then the return data will be a dict two keys: 'data' and 'outputter' - if isinstance(event.get('return'), dict) \ - and set(event['return']) == set(('data', 'outputter')): - event_data = event['return']['data'] - outputter = event['return']['outputter'] + if isinstance(event.get(u'return'), dict) \ + and set(event[u'return']) == set((u'data', u'outputter')): + event_data = event[u'return'][u'data'] + outputter = event[u'return'][u'outputter'] else: - event_data = event['return'] + event_data = event[u'return'] else: - event_data = {'suffix': suffix, 'event': event} + event_data = {u'suffix': suffix, u'event': event} salt.output.display_output(event_data, outputter, self.opts) diff --git a/salt/client/netapi.py b/salt/client/netapi.py index 4c4967770c..a5e0719318 100644 --- a/salt/client/netapi.py +++ b/salt/client/netapi.py @@ -20,7 +20,7 @@ class NetapiClient(object): ''' def __init__(self, opts): self.opts = opts - self.process_manager = salt.utils.process.ProcessManager(name='NetAPIProcessManager') + self.process_manager = salt.utils.process.ProcessManager(name=u'NetAPIProcessManager') self.netapi = salt.loader.netapi(self.opts) def run(self): @@ -28,11 +28,11 @@ class NetapiClient(object): Load and start all available api modules ''' if not len(self.netapi): - log.error("Did not find any netapi configurations, nothing to start") + log.error(u"Did not find any netapi configurations, nothing to start") for fun in self.netapi: - if fun.endswith('.start'): - log.info('Starting {0} netapi module'.format(fun)) + if fun.endswith(u'.start'): + log.info(u'Starting %s netapi module', fun) self.process_manager.add_process(self.netapi[fun]) # Install the SIGINT/SIGTERM handlers if not done so far diff --git a/salt/client/raet/__init__.py b/salt/client/raet/__init__.py index efce87088d..36546efaf0 100644 --- a/salt/client/raet/__init__.py +++ b/salt/client/raet/__init__.py @@ -12,9 +12,9 @@ import logging # Import Salt libs import salt.config import salt.client -import salt.utils +import salt.utils.kinds as kinds +import salt.utils.versions import salt.syspaths as syspaths -from salt.utils import kinds try: from raet import raeting, nacling @@ -32,7 +32,7 @@ class LocalClient(salt.client.LocalClient): The RAET LocalClient ''' def __init__(self, - c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), + c_path=os.path.join(syspaths.CONFIG_DIR, u'master'), mopts=None): salt.client.LocalClient.__init__(self, c_path, mopts) @@ -41,22 +41,22 @@ class LocalClient(salt.client.LocalClient): tgt, fun, arg=(), - tgt_type='glob', - ret='', - jid='', + tgt_type=u'glob', + ret=u'', + jid=u'', timeout=5, **kwargs): ''' Publish the command! ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' 
) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') payload_kwargs = self._prep_pub( tgt, @@ -68,21 +68,21 @@ class LocalClient(salt.client.LocalClient): timeout=timeout, **kwargs) - kind = self.opts['__role'] + kind = self.opts[u'__role'] if kind not in kinds.APPL_KINDS: - emsg = ("Invalid application kind = '{0}' for Raet LocalClient.".format(kind)) - log.error(emsg + "\n") + emsg = (u"Invalid application kind = '{0}' for Raet LocalClient.".format(kind)) + log.error(emsg + u"\n") raise ValueError(emsg) if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master], kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]: - lanename = 'master' + lanename = u'master' else: - emsg = ("Unsupported application kind '{0}' for Raet LocalClient.".format(kind)) - log.error(emsg + '\n') + emsg = (u"Unsupported application kind '{0}' for Raet LocalClient.".format(kind)) + log.error(emsg + u'\n') raise ValueError(emsg) - sockdirpath = self.opts['sock_dir'] - name = 'client' + nacling.uuid(size=18) + sockdirpath = self.opts[u'sock_dir'] + name = u'client' + nacling.uuid(size=18) stack = LaneStack( name=name, lanename=lanename, @@ -91,12 +91,12 @@ class LocalClient(salt.client.LocalClient): manor_yard = RemoteYard( stack=stack, lanename=lanename, - name='manor', + name=u'manor', dirpath=sockdirpath) stack.addRemote(manor_yard) - route = {'dst': (None, manor_yard.name, 'local_cmd'), - 'src': (None, stack.local.name, None)} - msg = {'route': route, 'load': payload_kwargs} + route = {u'dst': (None, manor_yard.name, u'local_cmd'), + u'src': (None, stack.local.name, None)} + msg = {u'route': route, u'load': payload_kwargs} stack.transmit(msg) stack.serviceAll() while True: @@ -104,9 +104,9 @@ class LocalClient(salt.client.LocalClient): stack.serviceAll() while stack.rxMsgs: msg, sender = stack.rxMsgs.popleft() - ret = msg.get('return', {}) - if 'ret' in ret: + ret = msg.get(u'return', {}) + if u'ret' in ret: stack.server.close() - return ret['ret'] + return ret[u'ret'] stack.server.close() return ret diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py index df5795bbc2..de5f6a8ded 100644 --- a/salt/client/ssh/__init__.py +++ b/salt/client/ssh/__init__.py @@ -17,12 +17,11 @@ import os import re import sys import time -import yaml import uuid import tempfile import binascii import sys -import ntpath +import datetime # Import salt libs import salt.output @@ -43,14 +42,19 @@ import salt.utils.atomicfile import salt.utils.event import salt.utils.files import salt.utils.network +import salt.utils.path +import salt.utils.stringutils import salt.utils.thin import salt.utils.url import salt.utils.verify -from salt.utils import is_windows +from salt.utils.locales import sdecode +from salt.utils.platform import is_windows from salt.utils.process import MultiprocessingProcess +import salt.roster +from salt.template import compile_template # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import input # pylint: disable=import-error,redefined-builtin try: import saltwinshell @@ -64,7 +68,7 @@ except ImportError: HAS_ZMQ = False # The directory where salt thin is deployed -DEFAULT_THIN_DIR = '/var/tmp/.%%USER%%_%%FQDNUUID%%_salt' +DEFAULT_THIN_DIR = u'/var/tmp/.%%USER%%_%%FQDNUUID%%_salt' # RSTR is just a delimiter to distinguish the beginning of salt STDOUT # and STDERR. There is no special meaning. 
Messages prior to RSTR in @@ -79,11 +83,11 @@ DEFAULT_THIN_DIR = '/var/tmp/.%%USER%%_%%FQDNUUID%%_salt' # Failure in SHIM # RSTR in stderr, No RSTR in stdout: # Undefined behavior -RSTR = '_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878' +RSTR = u'_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878' # The regex to find RSTR in output - Must be on an output line by itself # NOTE - must use non-grouping match groups or output splitting will fail. -RSTR_RE = r'(?:^|\r?\n)' + RSTR + '(?:\r?\n|$)' +RSTR_RE = sdecode(r'(?:^|\r?\n)' + RSTR + r'(?:\r?\n|$)') # future lint: disable=non-unicode-string # METHODOLOGY: # @@ -128,8 +132,9 @@ RSTR_RE = r'(?:^|\r?\n)' + RSTR + '(?:\r?\n|$)' # to be able to define the string with indentation for readability but # still strip the white space for compactness and to avoid issues with # some multi-line embedded python code having indentation errors +# future lint: disable=non-unicode-string SSH_SH_SHIM = \ - '\n'.join( + u'\n'.join( [s.strip() for s in r'''/bin/sh << 'EOF' set -e set -u @@ -167,7 +172,7 @@ do "from __future__ import print_function; import sys; import os; - map(sys.stdout.write, ['{{{{0}}}}={{{{1}}}} ' \ + map(sys.stdout.write, [u'{{{{0}}}}={{{{1}}}} ' \ .format(x, os.environ[x]) for x in [$ex_vars]])") exec $SUDO PATH=$PATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH \ MANPATH=$MANPATH XDG_DATA_DIRS=$XDG_DATA_DIRS \ @@ -189,13 +194,14 @@ echo "ERROR: Unable to locate appropriate python command" >&2 exit $EX_PYTHON_INVALID EOF'''.format( EX_THIN_PYTHON_INVALID=salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID, - ).split('\n')]) + ).split(u'\n')]) +# future lint: enable=non-unicode-string if not is_windows(): - shim_file = os.path.join(os.path.dirname(__file__), 'ssh_py_shim.py') + shim_file = os.path.join(os.path.dirname(__file__), u'ssh_py_shim.py') if not os.path.exists(shim_file): # On esky builds we only have the .pyc file - shim_file += "c" + shim_file += u'c' with salt.utils.files.fopen(shim_file) as ssh_py_shim: SSH_PY_SHIM = ssh_py_shim.read() @@ -206,13 +212,16 @@ class SSH(object): ''' Create an SSH execution system ''' + ROSTER_UPDATE_FLAG = '#__needs_update' + def __init__(self, opts): + self.__parsed_rosters = {SSH.ROSTER_UPDATE_FLAG: True} pull_sock = os.path.join(opts['sock_dir'], 'master_event_pull.ipc') if os.path.isfile(pull_sock) and HAS_ZMQ: self.event = salt.utils.event.get_event( - 'master', - opts['sock_dir'], - opts['transport'], + u'master', + opts[u'sock_dir'], + opts[u'transport'], opts=opts, listen=False) else: @@ -220,146 +229,234 @@ class SSH(object): self.opts = opts if self.opts['regen_thin']: self.opts['ssh_wipe'] = True - if not salt.utils.which('ssh'): - raise salt.exceptions.SaltSystemExit('No ssh binary found in path -- ssh must be installed for salt-ssh to run. Exiting.') + if not salt.utils.path.which('ssh'): + raise salt.exceptions.SaltSystemExit('No ssh binary found in path -- ssh must be ' + 'installed for salt-ssh to run. 
Exiting.') self.opts['_ssh_version'] = ssh_version() self.tgt_type = self.opts['selected_target_option'] \ if self.opts['selected_target_option'] else 'glob' - self.roster = salt.roster.Roster(opts, opts.get('roster', 'flat')) + self._expand_target() + self.roster = salt.roster.Roster(self.opts, self.opts.get('roster', 'flat')) self.targets = self.roster.targets( - self.opts['tgt'], + self.opts[u'tgt'], self.tgt_type) + if not self.targets: + self._update_targets() # If we're in a wfunc, we need to get the ssh key location from the # top level opts, stored in __master_opts__ - if '__master_opts__' in self.opts: - if self.opts['__master_opts__'].get('ssh_use_home_key') and \ - os.path.isfile(os.path.expanduser('~/.ssh/id_rsa')): - priv = os.path.expanduser('~/.ssh/id_rsa') + if u'__master_opts__' in self.opts: + if self.opts[u'__master_opts__'].get(u'ssh_use_home_key') and \ + os.path.isfile(os.path.expanduser(u'~/.ssh/id_rsa')): + priv = os.path.expanduser(u'~/.ssh/id_rsa') else: - priv = self.opts['__master_opts__'].get( - 'ssh_priv', + priv = self.opts[u'__master_opts__'].get( + u'ssh_priv', os.path.join( - self.opts['__master_opts__']['pki_dir'], - 'ssh', - 'salt-ssh.rsa' + self.opts[u'__master_opts__'][u'pki_dir'], + u'ssh', + u'salt-ssh.rsa' ) ) else: priv = self.opts.get( - 'ssh_priv', + u'ssh_priv', os.path.join( - self.opts['pki_dir'], - 'ssh', - 'salt-ssh.rsa' + self.opts[u'pki_dir'], + u'ssh', + u'salt-ssh.rsa' ) ) - if priv != 'agent-forwarding': + if priv != u'agent-forwarding': if not os.path.isfile(priv): try: salt.client.ssh.shell.gen_key(priv) except OSError: raise salt.exceptions.SaltClientError( - 'salt-ssh could not be run because it could not generate keys.\n\n' - 'You can probably resolve this by executing this script with ' - 'increased permissions via sudo or by running as root.\n' - 'You could also use the \'-c\' option to supply a configuration ' - 'directory that you have permissions to read and write to.' + u'salt-ssh could not be run because it could not generate keys.\n\n' + u'You can probably resolve this by executing this script with ' + u'increased permissions via sudo or by running as root.\n' + u'You could also use the \'-c\' option to supply a configuration ' + u'directory that you have permissions to read and write to.' 
) self.defaults = { - 'user': self.opts.get( - 'ssh_user', - salt.config.DEFAULT_MASTER_OPTS['ssh_user'] + u'user': self.opts.get( + u'ssh_user', + salt.config.DEFAULT_MASTER_OPTS[u'ssh_user'] ), - 'port': self.opts.get( - 'ssh_port', - salt.config.DEFAULT_MASTER_OPTS['ssh_port'] + u'port': self.opts.get( + u'ssh_port', + salt.config.DEFAULT_MASTER_OPTS[u'ssh_port'] ), - 'passwd': self.opts.get( - 'ssh_passwd', - salt.config.DEFAULT_MASTER_OPTS['ssh_passwd'] + u'passwd': self.opts.get( + u'ssh_passwd', + salt.config.DEFAULT_MASTER_OPTS[u'ssh_passwd'] ), - 'priv': priv, - 'timeout': self.opts.get( - 'ssh_timeout', - salt.config.DEFAULT_MASTER_OPTS['ssh_timeout'] + u'priv': priv, + u'timeout': self.opts.get( + u'ssh_timeout', + salt.config.DEFAULT_MASTER_OPTS[u'ssh_timeout'] ) + self.opts.get( - 'timeout', - salt.config.DEFAULT_MASTER_OPTS['timeout'] + u'timeout', + salt.config.DEFAULT_MASTER_OPTS[u'timeout'] ), - 'sudo': self.opts.get( - 'ssh_sudo', - salt.config.DEFAULT_MASTER_OPTS['ssh_sudo'] + u'sudo': self.opts.get( + u'ssh_sudo', + salt.config.DEFAULT_MASTER_OPTS[u'ssh_sudo'] ), - 'sudo_user': self.opts.get( - 'ssh_sudo_user', - salt.config.DEFAULT_MASTER_OPTS['ssh_sudo_user'] + u'sudo_user': self.opts.get( + u'ssh_sudo_user', + salt.config.DEFAULT_MASTER_OPTS[u'ssh_sudo_user'] ), - 'identities_only': self.opts.get( - 'ssh_identities_only', - salt.config.DEFAULT_MASTER_OPTS['ssh_identities_only'] + u'identities_only': self.opts.get( + u'ssh_identities_only', + salt.config.DEFAULT_MASTER_OPTS[u'ssh_identities_only'] ), - 'remote_port_forwards': self.opts.get( - 'ssh_remote_port_forwards' + u'remote_port_forwards': self.opts.get( + u'ssh_remote_port_forwards' ), - 'ssh_options': self.opts.get( - 'ssh_options' + u'ssh_options': self.opts.get( + u'ssh_options' ) } - if self.opts.get('rand_thin_dir'): - self.defaults['thin_dir'] = os.path.join( - '/var/tmp', - '.{0}'.format(uuid.uuid4().hex[:6])) - self.opts['ssh_wipe'] = 'True' + if self.opts.get(u'rand_thin_dir'): + self.defaults[u'thin_dir'] = os.path.join( + u'/var/tmp', + u'.{0}'.format(uuid.uuid4().hex[:6])) + self.opts[u'ssh_wipe'] = u'True' self.serial = salt.payload.Serial(opts) self.returners = salt.loader.returners(self.opts, {}) self.fsclient = salt.fileclient.FSClient(self.opts) - self.thin = salt.utils.thin.gen_thin(self.opts['cachedir'], - extra_mods=self.opts.get('thin_extra_mods'), - overwrite=self.opts['regen_thin'], - python2_bin=self.opts['python2_bin'], - python3_bin=self.opts['python3_bin']) + self.thin = salt.utils.thin.gen_thin(self.opts[u'cachedir'], + extra_mods=self.opts.get(u'thin_extra_mods'), + overwrite=self.opts[u'regen_thin'], + python2_bin=self.opts[u'python2_bin'], + python3_bin=self.opts[u'python3_bin']) self.mods = mod_data(self.fsclient) + def _get_roster(self): + ''' + Read roster filename as a key to the data. + :return: + ''' + roster_file = salt.roster.get_roster_file(self.opts) + if roster_file not in self.__parsed_rosters: + roster_data = compile_template(roster_file, salt.loader.render(self.opts, {}), + self.opts['renderer'], self.opts['renderer_blacklist'], + self.opts['renderer_whitelist']) + self.__parsed_rosters[roster_file] = roster_data + return roster_file + + def _expand_target(self): + ''' + Figures out if the target is a reachable host without wildcards, expands if any. 
+        :return:
+        '''
+        # TODO: Support -L
+        target = self.opts['tgt']
+        if isinstance(target, list):
+            return
+
+        hostname = self.opts['tgt'].split('@')[-1]
+        needs_expansion = '*' not in hostname and salt.utils.network.is_reachable_host(hostname)
+        if needs_expansion:
+            hostname = salt.utils.network.ip_to_host(hostname)
+            self._get_roster()
+            for roster_filename in self.__parsed_rosters:
+                roster_data = self.__parsed_rosters[roster_filename]
+                if not isinstance(roster_data, bool):
+                    for host_id in roster_data:
+                        if hostname in [host_id, roster_data.get('host')]:
+                            if hostname != self.opts['tgt']:
+                                self.opts['tgt'] = hostname
+                            self.__parsed_rosters[self.ROSTER_UPDATE_FLAG] = False
+                            return
+
+    def _update_roster(self):
+        '''
+        Update default flat roster with the passed in information.
+        :return:
+        '''
+        roster_file = self._get_roster()
+        if os.access(roster_file, os.W_OK):
+            if self.__parsed_rosters[self.ROSTER_UPDATE_FLAG]:
+                with salt.utils.files.fopen(roster_file, 'a') as roster_fp:
+                    roster_fp.write('# Automatically added by "{s_user}" at {s_time}\n{hostname}:\n    host: '
+                                    '{hostname}\n    user: {user}'
+                                    '\n    passwd: {passwd}\n'.format(s_user=getpass.getuser(),
+                                                                      s_time=datetime.datetime.utcnow().isoformat(),
+                                                                      hostname=self.opts.get('tgt', ''),
+                                                                      user=self.opts.get('ssh_user', ''),
+                                                                      passwd=self.opts.get('ssh_passwd', '')))
+                log.info('The host {0} has been added to the roster {1}'.format(self.opts.get('tgt', ''),
+                                                                                roster_file))
+        else:
+            log.error('Unable to update roster {0}: access denied'.format(roster_file))
+
+    def _update_targets(self):
+        '''
+        Update targets in case hostname was directly passed without the roster.
+        :return:
+        '''
+
+        hostname = self.opts.get('tgt', '')
+        if '@' in hostname:
+            user, hostname = hostname.split('@', 1)
+        else:
+            user = self.opts.get('ssh_user')
+        if hostname == '*':
+            hostname = ''
+
+        if salt.utils.network.is_reachable_host(hostname):
+            hostname = salt.utils.network.ip_to_host(hostname)
+            self.opts['tgt'] = hostname
+            self.targets[hostname] = {
+                'passwd': self.opts.get('ssh_passwd', ''),
+                'host': hostname,
+                'user': user,
+            }
+            if not self.opts.get('ssh_skip_roster'):
+                self._update_roster()
+
     def get_pubkey(self):
         '''
         Return the key string for the SSH public key
         '''
-        if '__master_opts__' in self.opts and \
-                self.opts['__master_opts__'].get('ssh_use_home_key') and \
-                os.path.isfile(os.path.expanduser('~/.ssh/id_rsa')):
-            priv = os.path.expanduser('~/.ssh/id_rsa')
+        if u'__master_opts__' in self.opts and \
+                self.opts[u'__master_opts__'].get(u'ssh_use_home_key') and \
+                os.path.isfile(os.path.expanduser(u'~/.ssh/id_rsa')):
+            priv = os.path.expanduser(u'~/.ssh/id_rsa')
         else:
             priv = self.opts.get(
-                'ssh_priv',
+                u'ssh_priv',
                 os.path.join(
-                    self.opts['pki_dir'],
-                    'ssh',
-                    'salt-ssh.rsa'
+                    self.opts[u'pki_dir'],
+                    u'ssh',
+                    u'salt-ssh.rsa'
                 )
             )
-        pub = '{0}.pub'.format(priv)
-        with salt.utils.files.fopen(pub, 'r') as fp_:
-            return '{0} rsa root@master'.format(fp_.read().split()[1])
+        pub = u'{0}.pub'.format(priv)
+        with salt.utils.files.fopen(pub, u'r') as fp_:
+            return u'{0} rsa root@master'.format(fp_.read().split()[1])
 
     def key_deploy(self, host, ret):
         '''
         Deploy the SSH key if the minions don't auth
         '''
-        if not isinstance(ret[host], dict) or self.opts.get('ssh_key_deploy'):
+        if not isinstance(ret[host], dict) or self.opts.get(u'ssh_key_deploy'):
             target = self.targets[host]
-            if target.get('passwd', False) or self.opts['ssh_passwd']:
+            if target.get(u'passwd', False) or self.opts[u'ssh_passwd']:
                 self._key_deploy_run(host, target, False)
            return
ret - if ret[host].get('stderr', '').count('Permission denied'): + if ret[host].get(u'stderr', u'').count(u'Permission denied'): target = self.targets[host] # permission denied, attempt to auto deploy ssh key - print(('Permission denied for host {0}, do you want to deploy ' - 'the salt-ssh key? (password required):').format(host)) - deploy = input('[Y/n] ') - if deploy.startswith(('n', 'N')): + print((u'Permission denied for host {0}, do you want to deploy ' + u'the salt-ssh key? (password required):').format(host)) + deploy = input(u'[Y/n] ') + if deploy.startswith((u'n', u'N')): return ret - target['passwd'] = getpass.getpass( - 'Password for {0}@{1}: '.format(target['user'], host) + target[u'passwd'] = getpass.getpass( + u'Password for {0}@{1}: '.format(target[u'user'], host) ) return self._key_deploy_run(host, target, True) return ret @@ -369,8 +466,8 @@ class SSH(object): The ssh-copy-id routine ''' argv = [ - 'ssh.set_auth_key', - target.get('user', 'root'), + u'ssh.set_auth_key', + target.get(u'user', u'root'), self.get_pubkey(), ] @@ -382,16 +479,16 @@ class SSH(object): fsclient=self.fsclient, thin=self.thin, **target) - if salt.utils.which('ssh-copy-id'): + if salt.utils.path.which(u'ssh-copy-id'): # we have ssh-copy-id, use it! stdout, stderr, retcode = single.shell.copy_id() else: stdout, stderr, retcode = single.run() if re_run: - target.pop('passwd') + target.pop(u'passwd') single = Single( self.opts, - self.opts['argv'], + self.opts[u'argv'], host, mods=self.mods, fsclient=self.fsclient, @@ -400,11 +497,11 @@ class SSH(object): stdout, stderr, retcode = single.cmd_block() try: data = salt.utils.find_json(stdout) - return {host: data.get('local', data)} + return {host: data.get(u'local', data)} except Exception: if stderr: return {host: stderr} - return {host: 'Bad Return'} + return {host: u'Bad Return'} if salt.defaults.exitcodes.EX_OK != retcode: return {host: stderr} return {host: stdout} @@ -416,31 +513,31 @@ class SSH(object): opts = copy.deepcopy(opts) single = Single( opts, - opts['argv'], + opts[u'argv'], host, mods=self.mods, fsclient=self.fsclient, thin=self.thin, mine=mine, **target) - ret = {'id': single.id} + ret = {u'id': single.id} stdout, stderr, retcode = single.run() # This job is done, yield try: data = salt.utils.find_json(stdout) - if len(data) < 2 and 'local' in data: - ret['ret'] = data['local'] + if len(data) < 2 and u'local' in data: + ret[u'ret'] = data[u'local'] else: - ret['ret'] = { - 'stdout': stdout, - 'stderr': stderr, - 'retcode': retcode, + ret[u'ret'] = { + u'stdout': stdout, + u'stderr': stderr, + u'retcode': retcode, } except Exception: - ret['ret'] = { - 'stdout': stdout, - 'stderr': stderr, - 'retcode': retcode, + ret[u'ret'] = { + u'stdout': stdout, + u'stderr': stderr, + u'retcode': retcode, } que.put(ret) @@ -457,9 +554,9 @@ class SSH(object): init = False while True: if not self.targets: - log.error('No matching targets found in roster.') + log.error(u'No matching targets found in roster.') break - if len(running) < self.opts.get('ssh_max_procs', 25) and not init: + if len(running) < self.opts.get(u'ssh_max_procs', 25) and not init: try: host = next(target_iter) except StopIteration: @@ -468,6 +565,8 @@ class SSH(object): for default in self.defaults: if default not in self.targets[host]: self.targets[host][default] = self.defaults[default] + if 'host' not in self.targets[host]: + self.targets[host]['host'] = host args = ( que, self.opts, @@ -479,14 +578,14 @@ class SSH(object): target=self.handle_routine, args=args) routine.start() 
- running[host] = {'thread': routine} + running[host] = {u'thread': routine} continue ret = {} try: ret = que.get(False) - if 'id' in ret: - returned.add(ret['id']) - yield {ret['id']: ret['ret']} + if u'id' in ret: + returned.add(ret[u'id']) + yield {ret[u'id']: ret[u'ret']} except Exception: # This bare exception is here to catch spurious exceptions # thrown by que.get during healthy operation. Please do not @@ -494,27 +593,27 @@ class SSH(object): # control program flow. pass for host in running: - if not running[host]['thread'].is_alive(): + if not running[host][u'thread'].is_alive(): if host not in returned: # Try to get any returns that came through since we # last checked try: while True: ret = que.get(False) - if 'id' in ret: - returned.add(ret['id']) - yield {ret['id']: ret['ret']} + if u'id' in ret: + returned.add(ret[u'id']) + yield {ret[u'id']: ret[u'ret']} except Exception: pass if host not in returned: - error = ('Target \'{0}\' did not return any data, ' - 'probably due to an error.').format(host) - ret = {'id': host, - 'ret': error} + error = (u'Target \'{0}\' did not return any data, ' + u'probably due to an error.').format(host) + ret = {u'id': host, + u'ret': error} log.error(error) - yield {ret['id']: ret['ret']} - running[host]['thread'].join() + yield {ret[u'id']: ret[u'ret']} + running[host][u'thread'].join() rets.add(host) for host in rets: if host in running: @@ -522,7 +621,7 @@ class SSH(object): if len(rets) >= len(self.targets): break # Sleep when limit or all threads started - if len(running) >= self.opts.get('ssh_max_procs', 25) or len(self.targets) >= len(running): + if len(running) >= self.opts.get(u'ssh_max_procs', 25) or len(self.targets) >= len(running): time.sleep(0.1) def run_iter(self, mine=False, jid=None): @@ -534,33 +633,33 @@ class SSH(object): pillar, or master config (they will be checked in that order) and will modify the argv with the arguments from mine_functions ''' - fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) - jid = self.returners[fstr](passed_jid=jid or self.opts.get('jid', None)) + fstr = u'{0}.prep_jid'.format(self.opts[u'master_job_cache']) + jid = self.returners[fstr](passed_jid=jid or self.opts.get(u'jid', None)) # Save the invocation information - argv = self.opts['argv'] + argv = self.opts[u'argv'] - if self.opts.get('raw_shell', False): - fun = 'ssh._raw' + if self.opts.get(u'raw_shell', False): + fun = u'ssh._raw' args = argv else: - fun = argv[0] if argv else '' + fun = argv[0] if argv else u'' args = argv[1:] job_load = { - 'jid': jid, - 'tgt_type': self.tgt_type, - 'tgt': self.opts['tgt'], - 'user': self.opts['user'], - 'fun': fun, - 'arg': args, + u'jid': jid, + u'tgt_type': self.tgt_type, + u'tgt': self.opts[u'tgt'], + u'user': self.opts[u'user'], + u'fun': fun, + u'arg': args, } # save load to the master job cache - if self.opts['master_job_cache'] == 'local_cache': - self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load, minions=self.targets.keys()) + if self.opts[u'master_job_cache'] == u'local_cache': + self.returners[u'{0}.save_load'.format(self.opts[u'master_job_cache'])](jid, job_load, minions=self.targets.keys()) else: - self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load) + self.returners[u'{0}.save_load'.format(self.opts[u'master_job_cache'])](jid, job_load) for ret in self.handle_ssh(mine=mine): host = next(six.iterkeys(ret)) @@ -569,69 +668,85 @@ class SSH(object): self.event.fire_event( ret, salt.utils.event.tagify( - [jid, 'ret', 
host], - 'job')) + [jid, u'ret', host], + u'job')) yield ret def cache_job(self, jid, id_, ret, fun): ''' Cache the job information ''' - self.returners['{0}.returner'.format(self.opts['master_job_cache'])]({'jid': jid, - 'id': id_, - 'return': ret, - 'fun': fun}) + self.returners[u'{0}.returner'.format(self.opts[u'master_job_cache'])]({u'jid': jid, + u'id': id_, + u'return': ret, + u'fun': fun}) def run(self, jid=None): ''' Execute the overall routine, print results via outputters ''' + if self.opts['list_hosts']: + self._get_roster() + ret = {} + for roster_file in self.__parsed_rosters: + if roster_file.startswith('#'): + continue + ret[roster_file] = {} + for host_id in self.__parsed_rosters[roster_file]: + hostname = self.__parsed_rosters[roster_file][host_id]['host'] + ret[roster_file][host_id] = hostname + salt.output.display_output(ret, 'nested', self.opts) + sys.exit() + fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) jid = self.returners[fstr](passed_jid=jid or self.opts.get('jid', None)) # Save the invocation information - argv = self.opts['argv'] + argv = self.opts[u'argv'] - if self.opts.get('raw_shell', False): - fun = 'ssh._raw' + if self.opts.get(u'raw_shell', False): + fun = u'ssh._raw' args = argv else: - fun = argv[0] if argv else '' + fun = argv[0] if argv else u'' args = argv[1:] job_load = { - 'jid': jid, - 'tgt_type': self.tgt_type, - 'tgt': self.opts['tgt'], - 'user': self.opts['user'], - 'fun': fun, - 'arg': args, + u'jid': jid, + u'tgt_type': self.tgt_type, + u'tgt': self.opts[u'tgt'], + u'user': self.opts[u'user'], + u'fun': fun, + u'arg': args, } # save load to the master job cache try: if isinstance(jid, bytes): - jid = jid.decode('utf-8') - if self.opts['master_job_cache'] == 'local_cache': - self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load, minions=self.targets.keys()) + jid = jid.decode(u'utf-8') + if self.opts[u'master_job_cache'] == u'local_cache': + self.returners[u'{0}.save_load'.format(self.opts[u'master_job_cache'])](jid, job_load, minions=self.targets.keys()) else: - self.returners['{0}.save_load'.format(self.opts['master_job_cache'])](jid, job_load) + self.returners[u'{0}.save_load'.format(self.opts[u'master_job_cache'])](jid, job_load) except Exception as exc: log.exception(exc) - log.error('Could not save load with returner {0}: {1}'.format(self.opts['master_job_cache'], exc)) + log.error( + u'Could not save load with returner %s: %s', + self.opts[u'master_job_cache'], exc + ) - if self.opts.get('verbose'): - msg = 'Executing job with jid {0}'.format(jid) + if self.opts.get(u'verbose'): + msg = u'Executing job with jid {0}'.format(jid) print(msg) - print('-' * len(msg) + '\n') - print('') + print(u'-' * len(msg) + u'\n') + print(u'') sret = {} - outputter = self.opts.get('output', 'nested') + outputter = self.opts.get(u'output', u'nested') final_exit = 0 for ret in self.handle_ssh(): host = next(six.iterkeys(ret)) if isinstance(ret[host], dict): - host_ret = ret[host].get('retcode', 0) + host_ret = ret[host].get(u'retcode', 0) if host_ret != 0: final_exit = 1 else: @@ -641,17 +756,17 @@ class SSH(object): self.cache_job(jid, host, ret[host], fun) ret = self.key_deploy(host, ret) - if isinstance(ret[host], dict) and ret[host].get('stderr', '').startswith('ssh:'): - ret[host] = ret[host]['stderr'] + if isinstance(ret[host], dict) and ret[host].get(u'stderr', u'').startswith(u'ssh:'): + ret[host] = ret[host][u'stderr'] if not isinstance(ret[host], dict): p_data = {host: ret[host]} - elif 'return' not in 
ret[host]: + elif u'return' not in ret[host]: p_data = ret else: - outputter = ret[host].get('out', self.opts.get('output', 'nested')) - p_data = {host: ret[host].get('return', {})} - if self.opts.get('static'): + outputter = ret[host].get(u'out', self.opts.get(u'output', u'nested')) + p_data = {host: ret[host].get(u'return', {})} + if self.opts.get(u'static'): sret.update(p_data) else: salt.output.display_output( @@ -662,9 +777,9 @@ class SSH(object): self.event.fire_event( ret, salt.utils.event.tagify( - [jid, 'ret', host], - 'job')) - if self.opts.get('static'): + [jid, u'ret', host], + u'job')) + if self.opts.get(u'static'): salt.output.display_output( sret, outputter, @@ -707,35 +822,35 @@ class Single(object): **kwargs): # Get mine setting and mine_functions if defined in kwargs (from roster) self.mine = mine - self.mine_functions = kwargs.get('mine_functions') - self.cmd_umask = kwargs.get('cmd_umask', None) + self.mine_functions = kwargs.get(u'mine_functions') + self.cmd_umask = kwargs.get(u'cmd_umask', None) self.winrm = winrm self.opts = opts self.tty = tty - if kwargs.get('wipe'): - self.wipe = 'False' + if kwargs.get(u'wipe'): + self.wipe = u'False' else: - self.wipe = 'True' if self.opts.get('ssh_wipe') else 'False' - if kwargs.get('thin_dir'): - self.thin_dir = kwargs['thin_dir'] + self.wipe = u'True' if self.opts.get(u'ssh_wipe') else u'False' + if kwargs.get(u'thin_dir'): + self.thin_dir = kwargs[u'thin_dir'] elif self.winrm: saltwinshell.set_winvars(self) else: if user: - thin_dir = DEFAULT_THIN_DIR.replace('%%USER%%', user) + thin_dir = DEFAULT_THIN_DIR.replace(u'%%USER%%', user) else: - thin_dir = DEFAULT_THIN_DIR.replace('%%USER%%', 'root') + thin_dir = DEFAULT_THIN_DIR.replace(u'%%USER%%', u'root') self.thin_dir = thin_dir.replace( - '%%FQDNUUID%%', + u'%%FQDNUUID%%', uuid.uuid3(uuid.NAMESPACE_DNS, salt.utils.network.get_fqhostname()).hex[:6] ) - self.opts['thin_dir'] = self.thin_dir + self.opts[u'thin_dir'] = self.thin_dir self.fsclient = fsclient - self.context = {'master_opts': self.opts, - 'fileclient': self.fsclient} + self.context = {u'master_opts': self.opts, + u'fileclient': self.fsclient} if isinstance(argv, six.string_types): self.argv = [argv] @@ -746,34 +861,34 @@ class Single(object): self.id = id_ self.mods = mods if isinstance(mods, dict) else {} - args = {'host': host, - 'user': user, - 'port': port, - 'passwd': passwd, - 'priv': priv, - 'timeout': timeout, - 'sudo': sudo, - 'tty': tty, - 'mods': self.mods, - 'identities_only': identities_only, - 'sudo_user': sudo_user, - 'remote_port_forwards': remote_port_forwards, - 'winrm': winrm, - 'ssh_options': ssh_options} + args = {u'host': host, + u'user': user, + u'port': port, + u'passwd': passwd, + u'priv': priv, + u'timeout': timeout, + u'sudo': sudo, + u'tty': tty, + u'mods': self.mods, + u'identities_only': identities_only, + u'sudo_user': sudo_user, + u'remote_port_forwards': remote_port_forwards, + u'winrm': winrm, + u'ssh_options': ssh_options} # Pre apply changeable defaults self.minion_opts = { - 'grains_cache': True, + u'grains_cache': True, } - self.minion_opts.update(opts.get('ssh_minion_opts', {})) + self.minion_opts.update(opts.get(u'ssh_minion_opts', {})) if minion_opts is not None: self.minion_opts.update(minion_opts) # Post apply system needed defaults self.minion_opts.update({ - 'root_dir': os.path.join(self.thin_dir, 'running_data'), - 'id': self.id, - 'sock_dir': '/', - 'log_file': 'salt-call.log', - 'fileserver_list_cache_time': 3, + u'root_dir': os.path.join(self.thin_dir, 
u'running_data'), + u'id': self.id, + u'sock_dir': u'/', + u'log_file': u'salt-call.log', + u'fileserver_list_cache_time': 3, }) self.minion_config = salt.serializers.yaml.serialize(self.minion_opts) self.target = kwargs @@ -781,17 +896,17 @@ class Single(object): self.serial = salt.payload.Serial(opts) self.wfuncs = salt.loader.ssh_wrapper(opts, None, self.context) self.shell = salt.client.ssh.shell.gen_shell(opts, **args) - self.thin = thin if thin else salt.utils.thin.thin_path(opts['cachedir']) + self.thin = thin if thin else salt.utils.thin.thin_path(opts[u'cachedir']) def __arg_comps(self): ''' Return the function name and the arg list ''' - fun = self.argv[0] if self.argv else '' + fun = self.argv[0] if self.argv else u'' parsed = salt.utils.args.parse_input( self.argv[1:], condition=False, - no_parse=self.opts.get('no_parse', [])) + no_parse=self.opts.get(u'no_parse', [])) args = parsed[0] kws = parsed[1] return fun, args, kws @@ -806,7 +921,7 @@ class Single(object): ''' if self.winrm: return arg - return ''.join(['\\' + char if re.match(r'\W', char) else char for char in arg]) + return u''.join([u'\\' + char if re.match(sdecode(r'\W'), char) else char for char in arg]) # future lint: disable=non-unicode-string def deploy(self): ''' @@ -814,7 +929,7 @@ class Single(object): ''' self.shell.send( self.thin, - os.path.join(self.thin_dir, 'salt-thin.tgz'), + os.path.join(self.thin_dir, u'salt-thin.tgz'), ) self.deploy_ext() return True @@ -823,10 +938,10 @@ class Single(object): ''' Deploy the ext_mods tarball ''' - if self.mods.get('file'): + if self.mods.get(u'file'): self.shell.send( - self.mods['file'], - os.path.join(self.thin_dir, 'salt-ext_mods.tgz'), + self.mods[u'file'], + os.path.join(self.thin_dir, u'salt-ext_mods.tgz'), ) return True @@ -844,8 +959,8 @@ class Single(object): ''' stdout = stderr = retcode = None - if self.opts.get('raw_shell', False): - cmd_str = ' '.join([self._escape_arg(arg) for arg in self.argv]) + if self.opts.get(u'raw_shell', False): + cmd_str = u' '.join([self._escape_arg(arg) for arg in self.argv]) stdout, stderr, retcode = self.shell.exec_cmd(cmd_str) elif self.fun in self.wfuncs or self.mine: @@ -866,24 +981,24 @@ class Single(object): # Execute routine data_cache = False data = None - cdir = os.path.join(self.opts['cachedir'], 'minions', self.id) + cdir = os.path.join(self.opts[u'cachedir'], u'minions', self.id) if not os.path.isdir(cdir): os.makedirs(cdir) - datap = os.path.join(cdir, 'ssh_data.p') + datap = os.path.join(cdir, u'ssh_data.p') refresh = False if not os.path.isfile(datap): refresh = True else: passed_time = (time.time() - os.stat(datap).st_mtime) / 60 - if passed_time > self.opts.get('cache_life', 60): + if passed_time > self.opts.get(u'cache_life', 60): refresh = True - if self.opts.get('refresh_cache'): + if self.opts.get(u'refresh_cache'): refresh = True conf_grains = {} # Save conf file grains before they get clobbered - if 'ssh_grains' in self.opts: - conf_grains = self.opts['ssh_grains'] + if u'ssh_grains' in self.opts: + conf_grains = self.opts[u'ssh_grains'] if not data_cache: refresh = True if refresh: @@ -895,72 +1010,72 @@ class Single(object): fsclient=self.fsclient, minion_opts=self.minion_opts, **self.target) - opts_pkg = pre_wrapper['test.opts_pkg']() # pylint: disable=E1102 - if '_error' in opts_pkg: + opts_pkg = pre_wrapper[u'test.opts_pkg']() # pylint: disable=E1102 + if u'_error' in opts_pkg: #Refresh failed - retcode = opts_pkg['retcode'] - ret = json.dumps({'local': opts_pkg}) + retcode = opts_pkg[u'retcode'] 
+ ret = json.dumps({u'local': opts_pkg}) return ret, retcode - opts_pkg['file_roots'] = self.opts['file_roots'] - opts_pkg['pillar_roots'] = self.opts['pillar_roots'] - opts_pkg['ext_pillar'] = self.opts['ext_pillar'] - opts_pkg['extension_modules'] = self.opts['extension_modules'] - opts_pkg['_ssh_version'] = self.opts['_ssh_version'] - opts_pkg['__master_opts__'] = self.context['master_opts'] - if '_caller_cachedir' in self.opts: - opts_pkg['_caller_cachedir'] = self.opts['_caller_cachedir'] + opts_pkg[u'file_roots'] = self.opts[u'file_roots'] + opts_pkg[u'pillar_roots'] = self.opts[u'pillar_roots'] + opts_pkg[u'ext_pillar'] = self.opts[u'ext_pillar'] + opts_pkg[u'extension_modules'] = self.opts[u'extension_modules'] + opts_pkg[u'_ssh_version'] = self.opts[u'_ssh_version'] + opts_pkg[u'__master_opts__'] = self.context[u'master_opts'] + if u'_caller_cachedir' in self.opts: + opts_pkg[u'_caller_cachedir'] = self.opts[u'_caller_cachedir'] else: - opts_pkg['_caller_cachedir'] = self.opts['cachedir'] + opts_pkg[u'_caller_cachedir'] = self.opts[u'cachedir'] # Use the ID defined in the roster file - opts_pkg['id'] = self.id + opts_pkg[u'id'] = self.id retcode = 0 # Restore master grains for grain in conf_grains: - opts_pkg['grains'][grain] = conf_grains[grain] + opts_pkg[u'grains'][grain] = conf_grains[grain] # Enable roster grains support - if 'grains' in self.target: - for grain in self.target['grains']: - opts_pkg['grains'][grain] = self.target['grains'][grain] + if u'grains' in self.target: + for grain in self.target[u'grains']: + opts_pkg[u'grains'][grain] = self.target[u'grains'][grain] popts = {} - popts.update(opts_pkg['__master_opts__']) + popts.update(opts_pkg[u'__master_opts__']) popts.update(opts_pkg) pillar = salt.pillar.Pillar( popts, - opts_pkg['grains'], - opts_pkg['id'], - opts_pkg.get('environment', 'base') + opts_pkg[u'grains'], + opts_pkg[u'id'], + opts_pkg.get(u'environment', u'base') ) pillar_dirs = {} pillar_data = pillar.compile_pillar(pillar_dirs=pillar_dirs) # TODO: cache minion opts in datap in master.py - data = {'opts': opts_pkg, - 'grains': opts_pkg['grains'], - 'pillar': pillar_data} + data = {u'opts': opts_pkg, + u'grains': opts_pkg[u'grains'], + u'pillar': pillar_data} if data_cache: - with salt.utils.files.fopen(datap, 'w+b') as fp_: + with salt.utils.files.fopen(datap, u'w+b') as fp_: fp_.write( self.serial.dumps(data) ) if not data and data_cache: - with salt.utils.files.fopen(datap, 'rb') as fp_: + with salt.utils.files.fopen(datap, u'rb') as fp_: data = self.serial.load(fp_) - opts = data.get('opts', {}) - opts['grains'] = data.get('grains') + opts = data.get(u'opts', {}) + opts[u'grains'] = data.get(u'grains') # Restore master grains for grain in conf_grains: - opts['grains'][grain] = conf_grains[grain] + opts[u'grains'][grain] = conf_grains[grain] # Enable roster grains support - if 'grains' in self.target: - for grain in self.target['grains']: - opts['grains'][grain] = self.target['grains'][grain] + if u'grains' in self.target: + for grain in self.target[u'grains']: + opts[u'grains'][grain] = self.target[u'grains'][grain] - opts['pillar'] = data.get('pillar') + opts[u'pillar'] = data.get(u'pillar') wrapper = salt.client.ssh.wrapper.FunctionWrapper( opts, self.id, @@ -979,18 +1094,18 @@ class Single(object): if self.mine_functions and self.fun in self.mine_functions: mine_fun_data = self.mine_functions[self.fun] - elif opts['pillar'] and self.fun in opts['pillar'].get('mine_functions', {}): - mine_fun_data = opts['pillar']['mine_functions'][self.fun] - 
elif self.fun in self.context['master_opts'].get('mine_functions', {}): - mine_fun_data = self.context['master_opts']['mine_functions'][self.fun] + elif opts[u'pillar'] and self.fun in opts[u'pillar'].get(u'mine_functions', {}): + mine_fun_data = opts[u'pillar'][u'mine_functions'][self.fun] + elif self.fun in self.context[u'master_opts'].get(u'mine_functions', {}): + mine_fun_data = self.context[u'master_opts'][u'mine_functions'][self.fun] if isinstance(mine_fun_data, dict): - mine_fun = mine_fun_data.pop('mine_function', mine_fun) + mine_fun = mine_fun_data.pop(u'mine_function', mine_fun) mine_args = mine_fun_data elif isinstance(mine_fun_data, list): for item in mine_fun_data[:]: - if isinstance(item, dict) and 'mine_function' in item: - mine_fun = item['mine_function'] + if isinstance(item, dict) and u'mine_function' in item: + mine_fun = item[u'mine_function'] mine_fun_data.pop(mine_fun_data.index(item)) mine_args = mine_fun_data else: @@ -1010,49 +1125,49 @@ class Single(object): else: result = self.wfuncs[self.fun](*self.args, **self.kwargs) except TypeError as exc: - result = 'TypeError encountered executing {0}: {1}'.format(self.fun, exc) + result = u'TypeError encountered executing {0}: {1}'.format(self.fun, exc) log.error(result, exc_info_on_loglevel=logging.DEBUG) retcode = 1 except Exception as exc: - result = 'An Exception occurred while executing {0}: {1}'.format(self.fun, exc) + result = u'An Exception occurred while executing {0}: {1}'.format(self.fun, exc) log.error(result, exc_info_on_loglevel=logging.DEBUG) retcode = 1 # Mimic the json data-structure that "salt-call --local" will # emit (as seen in ssh_py_shim.py) - if isinstance(result, dict) and 'local' in result: - ret = json.dumps({'local': result['local']}) + if isinstance(result, dict) and u'local' in result: + ret = json.dumps({u'local': result[u'local']}) else: - ret = json.dumps({'local': {'return': result}}) + ret = json.dumps({u'local': {u'return': result}}) return ret, retcode def _cmd_str(self): ''' Prepare the command string ''' - sudo = 'sudo' if self.target['sudo'] else '' - sudo_user = self.target['sudo_user'] - if '_caller_cachedir' in self.opts: - cachedir = self.opts['_caller_cachedir'] + sudo = u'sudo' if self.target[u'sudo'] else u'' + sudo_user = self.target[u'sudo_user'] + if u'_caller_cachedir' in self.opts: + cachedir = self.opts[u'_caller_cachedir'] else: - cachedir = self.opts['cachedir'] - thin_sum = salt.utils.thin.thin_sum(cachedir, 'sha1') - debug = '' - if not self.opts.get('log_level'): - self.opts['log_level'] = 'info' - if salt.log.LOG_LEVELS['debug'] >= salt.log.LOG_LEVELS[self.opts.get('log_level', 'info')]: - debug = '1' - arg_str = ''' + cachedir = self.opts[u'cachedir'] + thin_sum = salt.utils.thin.thin_sum(cachedir, u'sha1') + debug = u'' + if not self.opts.get(u'log_level'): + self.opts[u'log_level'] = u'info' + if salt.log.LOG_LEVELS[u'debug'] >= salt.log.LOG_LEVELS[self.opts.get(u'log_level', u'info')]: + debug = u'1' + arg_str = u''' OPTIONS = OBJ() OPTIONS.config = \ """ {0} """ -OPTIONS.delimiter = '{1}' -OPTIONS.saltdir = '{2}' -OPTIONS.checksum = '{3}' -OPTIONS.hashfunc = '{4}' -OPTIONS.version = '{5}' -OPTIONS.ext_mods = '{6}' +OPTIONS.delimiter = u'{1}' +OPTIONS.saltdir = u'{2}' +OPTIONS.checksum = u'{3}' +OPTIONS.hashfunc = u'{4}' +OPTIONS.version = u'{5}' +OPTIONS.ext_mods = u'{6}' OPTIONS.wipe = {7} OPTIONS.tty = {8} OPTIONS.cmd_umask = {9} @@ -1060,18 +1175,18 @@ ARGS = {10}\n'''.format(self.minion_config, RSTR, self.thin_dir, thin_sum, - 'sha1', + u'sha1', 
salt.version.__version__, - self.mods.get('version', ''), + self.mods.get(u'version', u''), self.wipe, self.tty, self.cmd_umask, self.argv) - py_code = SSH_PY_SHIM.replace('#%%OPTS', arg_str) + py_code = SSH_PY_SHIM.replace(u'#%%OPTS', arg_str) if six.PY2: - py_code_enc = py_code.encode('base64') + py_code_enc = py_code.encode(u'base64') else: - py_code_enc = base64.encodebytes(py_code.encode('utf-8')).decode('utf-8') + py_code_enc = base64.encodebytes(py_code.encode(u'utf-8')).decode(u'utf-8') if not self.winrm: cmd = SSH_SH_SHIM.format( DEBUG=debug, @@ -1085,7 +1200,7 @@ ARGS = {10}\n'''.format(self.minion_config, return cmd - def shim_cmd(self, cmd_str, extension='py'): + def shim_cmd(self, cmd_str, extension=u'py'): ''' Run a shim command. @@ -1096,13 +1211,13 @@ ARGS = {10}\n'''.format(self.minion_config, return self.shell.exec_cmd(cmd_str) # Write the shim to a temporary file in the default temp directory - with tempfile.NamedTemporaryFile(mode='w+b', - prefix='shim_', + with tempfile.NamedTemporaryFile(mode=u'w+b', + prefix=u'shim_', delete=False) as shim_tmp_file: - shim_tmp_file.write(salt.utils.to_bytes(cmd_str)) + shim_tmp_file.write(salt.utils.stringutils.to_bytes(cmd_str)) # Copy shim to target system, under $HOME/. - target_shim_file = '.{0}.{1}'.format(binascii.hexlify(os.urandom(6)), extension) + target_shim_file = u'.{0}.{1}'.format(binascii.hexlify(os.urandom(6)), extension) if self.winrm: target_shim_file = saltwinshell.get_target_shim_file(self, target_shim_file) self.shell.send(shim_tmp_file.name, target_shim_file, makedirs=True) @@ -1114,19 +1229,19 @@ ARGS = {10}\n'''.format(self.minion_config, pass # Execute shim - if extension == 'ps1': - ret = self.shell.exec_cmd('"powershell {0}"'.format(target_shim_file)) + if extension == u'ps1': + ret = self.shell.exec_cmd(u'"powershell {0}"'.format(target_shim_file)) else: if not self.winrm: - ret = self.shell.exec_cmd('/bin/sh \'$HOME/{0}\''.format(target_shim_file)) + ret = self.shell.exec_cmd(u'/bin/sh \'$HOME/{0}\''.format(target_shim_file)) else: ret = saltwinshell.call_python(self, target_shim_file) # Remove shim from target system if not self.winrm: - self.shell.exec_cmd('rm \'$HOME/{0}\''.format(target_shim_file)) + self.shell.exec_cmd(u'rm \'$HOME/{0}\''.format(target_shim_file)) else: - self.shell.exec_cmd('del {0}'.format(target_shim_file)) + self.shell.exec_cmd(u'del {0}'.format(target_shim_file)) return ret @@ -1142,36 +1257,39 @@ ARGS = {10}\n'''.format(self.minion_config, 6. 
return command results ''' self.argv = _convert_args(self.argv) - log.debug('Performing shimmed, blocking command as follows:\n{0}'.format(' '.join(self.argv))) + log.debug( + u'Performing shimmed, blocking command as follows:\n%s', + u' '.join(self.argv) + ) cmd_str = self._cmd_str() stdout, stderr, retcode = self.shim_cmd(cmd_str) - log.trace('STDOUT {1}\n{0}'.format(stdout, self.target['host'])) - log.trace('STDERR {1}\n{0}'.format(stderr, self.target['host'])) - log.debug('RETCODE {1}: {0}'.format(retcode, self.target['host'])) + log.trace(u'STDOUT %s\n%s', self.target[u'host'], stdout) + log.trace(u'STDERR %s\n%s', self.target[u'host'], stderr) + log.debug(u'RETCODE %s: %s', self.target[u'host'], retcode) error = self.categorize_shim_errors(stdout, stderr, retcode) if error: - if error == 'Python environment not found on Windows system': + if error == u'Python environment not found on Windows system': saltwinshell.deploy_python(self) stdout, stderr, retcode = self.shim_cmd(cmd_str) while re.search(RSTR_RE, stdout): stdout = re.split(RSTR_RE, stdout, 1)[1].strip() while re.search(RSTR_RE, stderr): stderr = re.split(RSTR_RE, stderr, 1)[1].strip() - elif error == 'Undefined SHIM state': + elif error == u'Undefined SHIM state': self.deploy() stdout, stderr, retcode = self.shim_cmd(cmd_str) if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr): # If RSTR is not seen in both stdout and stderr then there # was a thin deployment problem. - return 'ERROR: Failure deploying thin, undefined state: {0}'.format(stdout), stderr, retcode + return u'ERROR: Failure deploying thin, undefined state: {0}'.format(stdout), stderr, retcode while re.search(RSTR_RE, stdout): stdout = re.split(RSTR_RE, stdout, 1)[1].strip() while re.search(RSTR_RE, stderr): stderr = re.split(RSTR_RE, stderr, 1)[1].strip() else: - return 'ERROR: {0}'.format(error), stderr, retcode + return u'ERROR: {0}'.format(error), stderr, retcode # FIXME: this discards output from ssh_shim if the shim succeeds. It should # always save the shim output regardless of shim success or failure. @@ -1187,35 +1305,35 @@ ARGS = {10}\n'''.format(self.minion_config, else: # RSTR was found in stdout but not stderr - which means there # is a SHIM command for the master. - shim_command = re.split(r'\r?\n', stdout, 1)[0].strip() - log.debug('SHIM retcode({0}) and command: {1}'.format(retcode, shim_command)) - if 'deploy' == shim_command and retcode == salt.defaults.exitcodes.EX_THIN_DEPLOY: + shim_command = re.split(sdecode(r'\r?\n'), stdout, 1)[0].strip() # future lint: disable=non-unicode-string + log.debug(u'SHIM retcode(%s) and command: %s', retcode, shim_command) + if u'deploy' == shim_command and retcode == salt.defaults.exitcodes.EX_THIN_DEPLOY: self.deploy() stdout, stderr, retcode = self.shim_cmd(cmd_str) if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr): if not self.tty: # If RSTR is not seen in both stdout and stderr then there # was a thin deployment problem. - log.error('ERROR: Failure deploying thin, retrying: {0}\n{1}'.format(stdout, stderr)) + log.error(u'ERROR: Failure deploying thin, retrying: %s\n%s', stdout, stderr) return self.cmd_block() elif not re.search(RSTR_RE, stdout): # If RSTR is not seen in stdout with tty, then there # was a thin deployment problem. 
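Editor's note (outside the patch): cmd_block keys off the RSTR sentinel that the shim echoes in order to separate shim control traffic from real command output. A small standalone sketch of that stripping step, reusing the marker defined later in salt/client/ssh/shell.py; strip_shim_markers is a hypothetical helper name:

import re

RSTR = '_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878'
RSTR_RE = re.compile(r'(?:^|\r?\n)' + RSTR + r'(?:\r?\n|$)')

def strip_shim_markers(stream):
    # Discard everything up to and including each marker, mirroring the
    # while-loops in cmd_block above.
    while re.search(RSTR_RE, stream):
        stream = re.split(RSTR_RE, stream, 1)[1].strip()
    return stream

print(strip_shim_markers('deploy\n' + RSTR + '\nreal output'))  # -> 'real output'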
- log.error('ERROR: Failure deploying thin, retrying: {0}\n{1}'.format(stdout, stderr)) + log.error(u'ERROR: Failure deploying thin, retrying: %s\n%s', stdout, stderr) while re.search(RSTR_RE, stdout): stdout = re.split(RSTR_RE, stdout, 1)[1].strip() if self.tty: - stderr = '' + stderr = u'' else: while re.search(RSTR_RE, stderr): stderr = re.split(RSTR_RE, stderr, 1)[1].strip() - elif 'ext_mods' == shim_command: + elif u'ext_mods' == shim_command: self.deploy_ext() stdout, stderr, retcode = self.shim_cmd(cmd_str) if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr): # If RSTR is not seen in both stdout and stderr then there # was a thin deployment problem. - return 'ERROR: Failure deploying ext_mods: {0}'.format(stdout), stderr, retcode + return u'ERROR: Failure deploying ext_mods: {0}'.format(stdout), stderr, retcode while re.search(RSTR_RE, stdout): stdout = re.split(RSTR_RE, stdout, 1)[1].strip() while re.search(RSTR_RE, stderr): @@ -1224,7 +1342,7 @@ ARGS = {10}\n'''.format(self.minion_config, return stdout, stderr, retcode def categorize_shim_errors(self, stdout, stderr, retcode): - if re.search(RSTR_RE, stdout) and stdout != RSTR+'\n': + if re.search(RSTR_RE, stdout) and stdout != RSTR+u'\n': # RSTR was found in stdout which means that the shim # functioned without *errors* . . . but there may be shim # commands, unless the only thing we found is RSTR @@ -1232,75 +1350,75 @@ ARGS = {10}\n'''.format(self.minion_config, if re.search(RSTR_RE, stderr): # Undefined state - return 'Undefined SHIM state' + return u'Undefined SHIM state' - if stderr.startswith('Permission denied'): + if stderr.startswith(u'Permission denied'): # SHIM was not even reached return None - perm_error_fmt = 'Permissions problem, target user may need '\ - 'to be root or use sudo:\n {0}' + perm_error_fmt = u'Permissions problem, target user may need '\ + u'to be root or use sudo:\n {0}' errors = [ ( (), - 'sudo: no tty present and no askpass program specified', - 'sudo expected a password, NOPASSWD required' + u'sudo: no tty present and no askpass program specified', + u'sudo expected a password, NOPASSWD required' ), ( (salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,), - 'Python interpreter is too old', - 'salt requires python 2.6 or newer on target hosts, must have same major version as origin host' + u'Python interpreter is too old', + u'salt requires python 2.6 or newer on target hosts, must have same major version as origin host' ), ( (salt.defaults.exitcodes.EX_THIN_CHECKSUM,), - 'checksum mismatched', - 'The salt thin transfer was corrupted' + u'checksum mismatched', + u'The salt thin transfer was corrupted' ), ( (salt.defaults.exitcodes.EX_SCP_NOT_FOUND,), - 'scp not found', - 'No scp binary. openssh-clients package required' + u'scp not found', + u'No scp binary. 
openssh-clients package required' ), ( (salt.defaults.exitcodes.EX_CANTCREAT,), - 'salt path .* exists but is not a directory', - 'A necessary path for salt thin unexpectedly exists:\n ' + stderr, + u'salt path .* exists but is not a directory', + u'A necessary path for salt thin unexpectedly exists:\n ' + stderr, ), ( (), - 'sudo: sorry, you must have a tty to run sudo', - 'sudo is configured with requiretty' + u'sudo: sorry, you must have a tty to run sudo', + u'sudo is configured with requiretty' ), ( (), - 'Failed to open log file', + u'Failed to open log file', perm_error_fmt.format(stderr) ), ( (), - 'Permission denied:.*/salt', + u'Permission denied:.*/salt', perm_error_fmt.format(stderr) ), ( (), - 'Failed to create directory path.*/salt', + u'Failed to create directory path.*/salt', perm_error_fmt.format(stderr) ), ( (salt.defaults.exitcodes.EX_SOFTWARE,), - 'exists but is not', - 'An internal error occurred with the shim, please investigate:\n ' + stderr, + u'exists but is not', + u'An internal error occurred with the shim, please investigate:\n ' + stderr, ), ( (), - 'The system cannot find the path specified', - 'Python environment not found on Windows system', + u'The system cannot find the path specified', + u'Python environment not found on Windows system', ), ( (), - 'is not recognized', - 'Python environment not found on Windows system', + u'is not recognized', + u'Python environment not found on Windows system', ), ] @@ -1328,14 +1446,14 @@ def lowstate_file_refs(chunks): ''' refs = {} for chunk in chunks: - saltenv = 'base' + saltenv = u'base' crefs = [] for state in chunk: - if state == '__env__': + if state == u'__env__': saltenv = chunk[state] - elif state == 'saltenv': + elif state == u'saltenv': saltenv = chunk[state] - elif state.startswith('__'): + elif state.startswith(u'__'): continue crefs.extend(salt_refs(chunk[state])) if crefs: @@ -1349,14 +1467,14 @@ def salt_refs(data): ''' Pull salt file references out of the states ''' - proto = 'salt://' + proto = u'salt://' ret = [] - if isinstance(data, str): + if isinstance(data, six.string_types): if data.startswith(proto): return [data] if isinstance(data, list): for comp in data: - if isinstance(comp, str): + if isinstance(comp, six.string_types): if comp.startswith(proto): ret.append(comp) return ret @@ -1368,23 +1486,23 @@ def mod_data(fsclient): ''' # TODO, change out for a fileserver backend sync_refs = [ - 'modules', - 'states', - 'grains', - 'renderers', - 'returners', + u'modules', + u'states', + u'grains', + u'renderers', + u'returners', ] ret = {} envs = fsclient.envs() - ver_base = '' + ver_base = u'' for env in envs: files = fsclient.file_list(env) for ref in sync_refs: mods_data = {} - pref = '_{0}'.format(ref) + pref = u'_{0}'.format(ref) for fn_ in sorted(files): if fn_.startswith(pref): - if fn_.endswith(('.py', '.so', '.pyx')): + if fn_.endswith((u'.py', u'.so', u'.pyx')): full = salt.utils.url.create(fn_) mod_path = fsclient.cache_file(full, env) if not os.path.isfile(mod_path): @@ -1401,21 +1519,21 @@ def mod_data(fsclient): return {} if six.PY3: - ver_base = salt.utils.to_bytes(ver_base) + ver_base = salt.utils.stringutils.to_bytes(ver_base) ver = hashlib.sha1(ver_base).hexdigest() ext_tar_path = os.path.join( - fsclient.opts['cachedir'], - 'ext_mods.{0}.tgz'.format(ver)) - mods = {'version': ver, - 'file': ext_tar_path} + fsclient.opts[u'cachedir'], + u'ext_mods.{0}.tgz'.format(ver)) + mods = {u'version': ver, + u'file': ext_tar_path} if os.path.isfile(ext_tar_path): return mods - tfp = 
tarfile.open(ext_tar_path, 'w:gz') - verfile = os.path.join(fsclient.opts['cachedir'], 'ext_mods.ver') - with salt.utils.files.fopen(verfile, 'w+') as fp_: + tfp = tarfile.open(ext_tar_path, u'w:gz') + verfile = os.path.join(fsclient.opts[u'cachedir'], u'ext_mods.ver') + with salt.utils.files.fopen(verfile, u'w+') as fp_: fp_.write(ver) - tfp.add(verfile, 'ext_version') + tfp.add(verfile, u'ext_version') for ref in ret: for fn_ in ret[ref]: tfp.add(ret[ref][fn_], os.path.join(ref, fn_)) @@ -1430,11 +1548,11 @@ def ssh_version(): # This function needs more granular checks and to be validated against # older versions of ssh ret = subprocess.Popen( - ['ssh', '-V'], + [u'ssh', u'-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() try: - version_parts = ret[1].split(b',')[0].split(b'_')[1] + version_parts = ret[1].split(b',')[0].split(b'_')[1] # future lint: disable=non-unicode-string parts = [] for part in version_parts: try: @@ -1455,9 +1573,9 @@ def _convert_args(args): for arg in args: if isinstance(arg, dict): for key in list(arg.keys()): - if key == '__kwarg__': + if key == u'__kwarg__': continue - converted.append('{0}={1}'.format(key, arg[key])) + converted.append(u'{0}={1}'.format(key, arg[key])) else: converted.append(arg) return converted diff --git a/salt/client/ssh/client.py b/salt/client/ssh/client.py index 46ad336e44..e41a5245ed 100644 --- a/salt/client/ssh/client.py +++ b/salt/client/ssh/client.py @@ -9,6 +9,7 @@ import random # Import Salt libs import salt.config +import salt.utils.versions import salt.syspaths as syspaths from salt.exceptions import SaltClientError # Temporary @@ -22,7 +23,7 @@ class SSHClient(object): .. versionadded:: 2015.5.0 ''' def __init__(self, - c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), + c_path=os.path.join(syspaths.CONFIG_DIR, u'master'), mopts=None, disable_custom_roster=False): if mopts: @@ -30,15 +31,14 @@ class SSHClient(object): else: if os.path.isdir(c_path): log.warning( - '{0} expects a file path not a directory path({1}) to ' - 'it\'s \'c_path\' keyword argument'.format( - self.__class__.__name__, c_path - ) + u'%s expects a file path not a directory path(%s) to ' + u'its \'c_path\' keyword argument', + self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) # Salt API should never offer a custom roster! - self.opts['__disable_custom_roster'] = disable_custom_roster + self.opts[u'__disable_custom_roster'] = disable_custom_roster def _prep_ssh( self, @@ -46,30 +46,30 @@ class SSHClient(object): fun, arg=(), timeout=None, - tgt_type='glob', + tgt_type=u'glob', kwarg=None, **kwargs): ''' Prepare the arguments ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' 
) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') opts = copy.deepcopy(self.opts) opts.update(kwargs) if timeout: - opts['timeout'] = timeout + opts[u'timeout'] = timeout arg = salt.utils.args.condition_input(arg, kwarg) - opts['argv'] = [fun] + arg - opts['selected_target_option'] = tgt_type - opts['tgt'] = tgt - opts['arg'] = arg + opts[u'argv'] = [fun] + arg + opts[u'selected_target_option'] = tgt_type + opts[u'tgt'] = tgt + opts[u'arg'] = arg return salt.client.ssh.SSH(opts) def cmd_iter( @@ -78,8 +78,8 @@ class SSHClient(object): fun, arg=(), timeout=None, - tgt_type='glob', - ret='', + tgt_type=u'glob', + ret=u'', kwarg=None, **kwargs): ''' @@ -88,14 +88,14 @@ class SSHClient(object): .. versionadded:: 2015.5.0 ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') ssh = self._prep_ssh( tgt, @@ -105,7 +105,7 @@ class SSHClient(object): tgt_type, kwarg, **kwargs) - for ret in ssh.run_iter(jid=kwargs.get('jid', None)): + for ret in ssh.run_iter(jid=kwargs.get(u'jid', None)): yield ret def cmd(self, @@ -113,7 +113,7 @@ class SSHClient(object): fun, arg=(), timeout=None, - tgt_type='glob', + tgt_type=u'glob', kwarg=None, **kwargs): ''' @@ -122,14 +122,14 @@ class SSHClient(object): .. versionadded:: 2015.5.0 ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') ssh = self._prep_ssh( tgt, @@ -140,7 +140,7 @@ class SSHClient(object): kwarg, **kwargs) final = {} - for ret in ssh.run_iter(jid=kwargs.get('jid', None)): + for ret in ssh.run_iter(jid=kwargs.get(u'jid', None)): final.update(ret) return final @@ -166,16 +166,16 @@ class SSHClient(object): kwargs = copy.deepcopy(low) - for ignore in ['tgt', 'fun', 'arg', 'timeout', 'tgt_type', 'kwarg']: + for ignore in [u'tgt', u'fun', u'arg', u'timeout', u'tgt_type', u'kwarg']: if ignore in kwargs: del kwargs[ignore] - return self.cmd(low['tgt'], - low['fun'], - low.get('arg', []), - low.get('timeout'), - low.get('tgt_type'), - low.get('kwarg'), + return self.cmd(low[u'tgt'], + low[u'fun'], + low.get(u'arg', []), + low.get(u'timeout'), + low.get(u'tgt_type'), + low.get(u'kwarg'), **kwargs) def cmd_async(self, low, timeout=None): @@ -204,8 +204,8 @@ class SSHClient(object): fun, arg=(), timeout=None, - tgt_type='glob', - ret='', + tgt_type=u'glob', + ret=u'', kwarg=None, sub=3, **kwargs): @@ -226,24 +226,24 @@ class SSHClient(object): .. 
versionadded:: 2017.7.0 ''' - if 'expr_form' in kwargs: - salt.utils.warn_until( - 'Fluorine', - 'The target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + if u'expr_form' in kwargs: + salt.utils.versions.warn_until( + u'Fluorine', + u'The target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) - tgt_type = kwargs.pop('expr_form') + tgt_type = kwargs.pop(u'expr_form') minion_ret = self.cmd(tgt, - 'sys.list_functions', + u'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: - if fun in minion_ret[minion]['return']: + if fun in minion_ret[minion][u'return']: f_tgt.append(minion) if len(f_tgt) >= sub: break - return self.cmd_iter(f_tgt, fun, arg, timeout, tgt_type='list', ret=ret, kwarg=kwarg, **kwargs) + return self.cmd_iter(f_tgt, fun, arg, timeout, tgt_type=u'list', ret=ret, kwarg=kwarg, **kwargs) diff --git a/salt/client/ssh/shell.py b/salt/client/ssh/shell.py index 73c0b4aead..a3b59abe95 100644 --- a/salt/client/ssh/shell.py +++ b/salt/client/ssh/shell.py @@ -21,12 +21,12 @@ import salt.utils.vt log = logging.getLogger(__name__) -SSH_PASSWORD_PROMPT_RE = re.compile(r'(?:.*)[Pp]assword(?: for .*)?:', re.M) -KEY_VALID_RE = re.compile(r'.*\(yes\/no\).*') +SSH_PASSWORD_PROMPT_RE = re.compile(r'(?:.*)[Pp]assword(?: for .*)?:', re.M) # future lint: disable=non-unicode-string +KEY_VALID_RE = re.compile(r'.*\(yes\/no\).*') # future lint: disable=non-unicode-string # Keep these in sync with ./__init__.py -RSTR = '_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878' -RSTR_RE = re.compile(r'(?:^|\r?\n)' + RSTR + r'(?:\r?\n|$)') +RSTR = u'_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878' +RSTR_RE = re.compile(r'(?:^|\r?\n)' + RSTR + r'(?:\r?\n|$)') # future lint: disable=non-unicode-string class NoPasswdError(Exception): @@ -41,7 +41,7 @@ def gen_key(path): ''' Generate a key for use with salt-ssh ''' - cmd = 'ssh-keygen -P "" -f {0} -t rsa -q'.format(path) + cmd = u'ssh-keygen -P "" -f {0} -t rsa -q'.format(path) if not os.path.isdir(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) subprocess.call(cmd, shell=True) @@ -51,12 +51,12 @@ def gen_shell(opts, **kwargs): ''' Return the correct shell interface for the target system ''' - if kwargs['winrm']: + if kwargs[u'winrm']: try: import saltwinshell shell = saltwinshell.Shell(opts, **kwargs) except ImportError: - log.error('The saltwinshell library is not available') + log.error(u'The saltwinshell library is not available') sys.exit(salt.defaults.exitcodes.EX_GENERIC) else: shell = Shell(opts, **kwargs) @@ -86,7 +86,7 @@ class Shell(object): ssh_options=None): self.opts = opts # ssh , but scp [ (4, 9): - options.append('GSSAPIAuthentication=no') - options.append('ConnectTimeout={0}'.format(self.timeout)) - if self.opts.get('ignore_host_keys'): - options.append('StrictHostKeyChecking=no') - if self.opts.get('no_host_keys'): - options.extend(['StrictHostKeyChecking=no', - 'UserKnownHostsFile=/dev/null']) - known_hosts = self.opts.get('known_hosts_file') + options.append(u'PasswordAuthentication=no') + if self.opts.get(u'_ssh_version', (0,)) > (4, 9): + options.append(u'GSSAPIAuthentication=no') + options.append(u'ConnectTimeout={0}'.format(self.timeout)) + if self.opts.get(u'ignore_host_keys'): + 
options.append(u'StrictHostKeyChecking=no') + if self.opts.get(u'no_host_keys'): + options.extend([u'StrictHostKeyChecking=no', + u'UserKnownHostsFile=/dev/null']) + known_hosts = self.opts.get(u'known_hosts_file') if known_hosts and os.path.isfile(known_hosts): - options.append('UserKnownHostsFile={0}'.format(known_hosts)) + options.append(u'UserKnownHostsFile={0}'.format(known_hosts)) if self.port: - options.append('Port={0}'.format(self.port)) + options.append(u'Port={0}'.format(self.port)) if self.priv: - options.append('IdentityFile={0}'.format(self.priv)) + options.append(u'IdentityFile={0}'.format(self.priv)) if self.user: - options.append('User={0}'.format(self.user)) + options.append(u'User={0}'.format(self.user)) if self.identities_only: - options.append('IdentitiesOnly=yes') + options.append(u'IdentitiesOnly=yes') ret = [] for option in options: - ret.append('-o {0} '.format(option)) - return ''.join(ret) + ret.append(u'-o {0} '.format(option)) + return u''.join(ret) def _passwd_opts(self): ''' @@ -156,42 +156,42 @@ class Shell(object): # TODO ControlMaster does not work without ControlPath # user could take advantage of it if they set ControlPath in their # ssh config. Also, ControlPersist not widely available. - options = ['ControlMaster=auto', - 'StrictHostKeyChecking=no', + options = [u'ControlMaster=auto', + u'StrictHostKeyChecking=no', ] - if self.opts['_ssh_version'] > (4, 9): - options.append('GSSAPIAuthentication=no') - options.append('ConnectTimeout={0}'.format(self.timeout)) - if self.opts.get('ignore_host_keys'): - options.append('StrictHostKeyChecking=no') - if self.opts.get('no_host_keys'): - options.extend(['StrictHostKeyChecking=no', - 'UserKnownHostsFile=/dev/null']) + if self.opts[u'_ssh_version'] > (4, 9): + options.append(u'GSSAPIAuthentication=no') + options.append(u'ConnectTimeout={0}'.format(self.timeout)) + if self.opts.get(u'ignore_host_keys'): + options.append(u'StrictHostKeyChecking=no') + if self.opts.get(u'no_host_keys'): + options.extend([u'StrictHostKeyChecking=no', + u'UserKnownHostsFile=/dev/null']) if self.passwd: - options.extend(['PasswordAuthentication=yes', - 'PubkeyAuthentication=yes']) + options.extend([u'PasswordAuthentication=yes', + u'PubkeyAuthentication=yes']) else: - options.extend(['PasswordAuthentication=no', - 'PubkeyAuthentication=yes', - 'KbdInteractiveAuthentication=no', - 'ChallengeResponseAuthentication=no', - 'BatchMode=yes']) + options.extend([u'PasswordAuthentication=no', + u'PubkeyAuthentication=yes', + u'KbdInteractiveAuthentication=no', + u'ChallengeResponseAuthentication=no', + u'BatchMode=yes']) if self.port: - options.append('Port={0}'.format(self.port)) + options.append(u'Port={0}'.format(self.port)) if self.user: - options.append('User={0}'.format(self.user)) + options.append(u'User={0}'.format(self.user)) if self.identities_only: - options.append('IdentitiesOnly=yes') + options.append(u'IdentitiesOnly=yes') ret = [] for option in options: - ret.append('-o {0} '.format(option)) - return ''.join(ret) + ret.append(u'-o {0} '.format(option)) + return u''.join(ret) def _ssh_opts(self): - return ' '.join(['-o {0}'.format(opt) - for opt in self.ssh_options]) + return u' '.join([u'-o {0}'.format(opt) + for opt in self.ssh_options]) def _copy_id_str_old(self): ''' @@ -200,9 +200,9 @@ class Shell(object): if self.passwd: # Using single quotes prevents shell expansion and # passwords containing '$' - return "{0} {1} '{2} -p {3} {4} {5}@{6}'".format( - 'ssh-copy-id', - '-i {0}.pub'.format(self.priv), + return u"{0} {1} '{2} 
-p {3} {4} {5}@{6}'".format( + u'ssh-copy-id', + u'-i {0}.pub'.format(self.priv), self._passwd_opts(), self.port, self._ssh_opts(), @@ -218,9 +218,9 @@ class Shell(object): if self.passwd: # Using single quotes prevents shell expansion and # passwords containing '$' - return "{0} {1} {2} -p {3} {4} {5}@{6}".format( - 'ssh-copy-id', - '-i {0}.pub'.format(self.priv), + return u"{0} {1} {2} -p {3} {4} {5}@{6}".format( + u'ssh-copy-id', + u'-i {0}.pub'.format(self.priv), self._passwd_opts(), self.port, self._ssh_opts(), @@ -233,11 +233,11 @@ class Shell(object): Execute ssh-copy-id to plant the id file on the target ''' stdout, stderr, retcode = self._run_cmd(self._copy_id_str_old()) - if salt.defaults.exitcodes.EX_OK != retcode and 'Usage' in stderr: + if salt.defaults.exitcodes.EX_OK != retcode and u'Usage' in stderr: stdout, stderr, retcode = self._run_cmd(self._copy_id_str_new()) return stdout, stderr, retcode - def _cmd_str(self, cmd, ssh='ssh'): + def _cmd_str(self, cmd, ssh=u'ssh'): ''' Return the cmd string to execute ''' @@ -246,21 +246,21 @@ class Shell(object): # need to deliver the SHIM to the remote host and execute it there command = [ssh] - if ssh != 'scp': + if ssh != u'scp': command.append(self.host) - if self.tty and ssh == 'ssh': - command.append('-t -t') + if self.tty and ssh == u'ssh': + command.append(u'-t -t') if self.passwd or self.priv: command.append(self.priv and self._key_opts() or self._passwd_opts()) - if ssh != 'scp' and self.remote_port_forwards: - command.append(' '.join(['-R {0}'.format(item) - for item in self.remote_port_forwards.split(',')])) + if ssh != u'scp' and self.remote_port_forwards: + command.append(u' '.join([u'-R {0}'.format(item) + for item in self.remote_port_forwards.split(u',')])) if self.ssh_options: command.append(self._ssh_opts()) command.append(cmd) - return ' '.join(command) + return u' '.join(command) def _old_run_cmd(self, cmd): ''' @@ -277,7 +277,7 @@ class Shell(object): data = proc.communicate() return data[0], data[1], proc.returncode except Exception: - return ('local', 'Unknown Error', None) + return (u'local', u'Unknown Error', None) def _run_nb_cmd(self, cmd): ''' @@ -301,7 +301,7 @@ class Shell(object): err = self.get_error(err) yield out, err, rcode except Exception: - yield ('', 'Unknown Error', None) + yield (u'', u'Unknown Error', None) def exec_nb_cmd(self, cmd): ''' @@ -312,9 +312,9 @@ class Shell(object): rcode = None cmd = self._cmd_str(cmd) - logmsg = 'Executing non-blocking command: {0}'.format(cmd) + logmsg = u'Executing non-blocking command: {0}'.format(cmd) if self.passwd: - logmsg = logmsg.replace(self.passwd, ('*' * 6)) + logmsg = logmsg.replace(self.passwd, (u'*' * 6)) log.debug(logmsg) for out, err, rcode in self._run_nb_cmd(cmd): @@ -323,7 +323,7 @@ class Shell(object): if err is not None: r_err.append(err) yield None, None, None - yield ''.join(r_out), ''.join(r_err), rcode + yield u''.join(r_out), u''.join(r_err), rcode def exec_cmd(self, cmd): ''' @@ -331,11 +331,11 @@ class Shell(object): ''' cmd = self._cmd_str(cmd) - logmsg = 'Executing command: {0}'.format(cmd) + logmsg = u'Executing command: {0}'.format(cmd) if self.passwd: - logmsg = logmsg.replace(self.passwd, ('*' * 6)) - if 'decode("base64")' in logmsg or 'base64.b64decode(' in logmsg: - log.debug('Executed SHIM command. Command logged to TRACE') + logmsg = logmsg.replace(self.passwd, (u'*' * 6)) + if u'decode("base64")' in logmsg or u'base64.b64decode(' in logmsg: + log.debug(u'Executed SHIM command. 
Command logged to TRACE') log.trace(logmsg) else: log.debug(logmsg) @@ -348,19 +348,19 @@ class Shell(object): scp a file or files to a remote system ''' if makedirs: - self.exec_cmd('mkdir -p {0}'.format(os.path.dirname(remote))) + self.exec_cmd(u'mkdir -p {0}'.format(os.path.dirname(remote))) # scp needs [ function mappings for sanitizing grain output. This # is used when the 'sanitize' flag is given. _SANITIZERS = { - 'serialnumber': _serial_sanitizer, - 'domain': _DOMAINNAME_SANITIZER, - 'fqdn': _FQDN_SANITIZER, - 'id': _FQDN_SANITIZER, - 'host': _HOSTNAME_SANITIZER, - 'localhost': _HOSTNAME_SANITIZER, - 'nodename': _HOSTNAME_SANITIZER, + u'serialnumber': _serial_sanitizer, + u'domain': _DOMAINNAME_SANITIZER, + u'fqdn': _FQDN_SANITIZER, + u'id': _FQDN_SANITIZER, + u'host': _HOSTNAME_SANITIZER, + u'localhost': _HOSTNAME_SANITIZER, + u'nodename': _HOSTNAME_SANITIZER, } -def get(key, default='', delimiter=DEFAULT_TARGET_DELIM, ordered=True): +def get(key, default=u'', delimiter=DEFAULT_TARGET_DELIM, ordered=True): ''' Attempt to retrieve the named value from grains, if the named value is not available return the passed default. The default return is an empty string. @@ -149,7 +151,7 @@ def item(*args, **kwargs): ret[arg] = __grains__[arg] except KeyError: pass - if salt.utils.is_true(kwargs.get('sanitize')): + if salt.utils.is_true(kwargs.get(u'sanitize')): for arg, func in six.iteritems(_SANITIZERS): if arg in ret: ret[arg] = func(ret[arg]) @@ -170,9 +172,9 @@ def ls(): # pylint: disable=C0103 def filter_by(lookup_dict, - grain='os_family', + grain=u'os_family', merge=None, - default='default', + default=u'default', base=None): ''' .. versionadded:: 0.17.0 @@ -266,12 +268,12 @@ def filter_by(lookup_dict, elif isinstance(base_values, collections.Mapping): if not isinstance(ret, collections.Mapping): - raise SaltException('filter_by default and look-up values must both be dictionaries.') + raise SaltException(u'filter_by default and look-up values must both be dictionaries.') ret = salt.utils.dictupdate.update(copy.deepcopy(base_values), ret) if merge: if not isinstance(merge, collections.Mapping): - raise SaltException('filter_by merge argument must be a dictionary.') + raise SaltException(u'filter_by merge argument must be a dictionary.') else: if ret is None: ret = merge diff --git a/salt/client/ssh/wrapper/mine.py b/salt/client/ssh/wrapper/mine.py index 32d7a2e3fd..71f6f05a3c 100644 --- a/salt/client/ssh/wrapper/mine.py +++ b/salt/client/ssh/wrapper/mine.py @@ -14,7 +14,7 @@ import copy import salt.client.ssh -def get(tgt, fun, tgt_type='glob', roster='flat'): +def get(tgt, fun, tgt_type=u'glob', roster=u'flat'): ''' Get data from the mine based on the target, function and tgt_type @@ -36,15 +36,15 @@ def get(tgt, fun, tgt_type='glob', roster='flat'): salt-ssh '*' mine.get '192.168.5.0' network.ipaddrs roster=scan ''' # Set up opts for the SSH object - opts = copy.deepcopy(__context__['master_opts']) + opts = copy.deepcopy(__context__[u'master_opts']) minopts = copy.deepcopy(__opts__) opts.update(minopts) if roster: - opts['roster'] = roster - opts['argv'] = [fun] - opts['selected_target_option'] = tgt_type - opts['tgt'] = tgt - opts['arg'] = [] + opts[u'roster'] = roster + opts[u'argv'] = [fun] + opts[u'selected_target_option'] = tgt_type + opts[u'tgt'] = tgt + opts[u'arg'] = [] # Create the SSH object to handle the actual call ssh = salt.client.ssh.SSH(opts) @@ -56,8 +56,8 @@ def get(tgt, fun, tgt_type='glob', roster='flat'): cret = {} for host in rets: - if 'return' in rets[host]: - 
cret[host] = rets[host]['return'] + if u'return' in rets[host]: + cret[host] = rets[host][u'return'] else: cret[host] = rets[host] return cret diff --git a/salt/client/ssh/wrapper/pillar.py b/salt/client/ssh/wrapper/pillar.py index 68c671aec8..8b71558e4a 100644 --- a/salt/client/ssh/wrapper/pillar.py +++ b/salt/client/ssh/wrapper/pillar.py @@ -13,7 +13,7 @@ import salt.utils from salt.defaults import DEFAULT_TARGET_DELIM -def get(key, default='', merge=False, delimiter=DEFAULT_TARGET_DELIM): +def get(key, default=u'', merge=False, delimiter=DEFAULT_TARGET_DELIM): ''' .. versionadded:: 0.14 @@ -130,10 +130,10 @@ def keys(key, delimiter=DEFAULT_TARGET_DELIM): __pillar__, key, KeyError, delimiter) if ret is KeyError: - raise KeyError("Pillar key not found: {0}".format(key)) + raise KeyError(u"Pillar key not found: {0}".format(key)) if not isinstance(ret, dict): - raise ValueError("Pillar value in key {0} is not a dict".format(key)) + raise ValueError(u"Pillar value in key {0} is not a dict".format(key)) return ret.keys() diff --git a/salt/client/ssh/wrapper/publish.py b/salt/client/ssh/wrapper/publish.py index 62a803e9ea..478275256b 100644 --- a/salt/client/ssh/wrapper/publish.py +++ b/salt/client/ssh/wrapper/publish.py @@ -17,6 +17,8 @@ import logging # Import salt libs import salt.client.ssh import salt.runner +import salt.utils.args +import salt.utils.versions log = logging.getLogger(__name__) @@ -24,10 +26,10 @@ log = logging.getLogger(__name__) def _publish(tgt, fun, arg=None, - tgt_type='glob', - returner='', + tgt_type=u'glob', + returner=u'', timeout=None, - form='clean', + form=u'clean', roster=None): ''' Publish a command "from the minion out to other minions". In reality, the @@ -53,13 +55,13 @@ def _publish(tgt, salt-ssh system.example.com publish.publish '*' cmd.run 'ls -la /tmp' ''' - if fun.startswith('publish.'): - log.info('Cannot publish publish calls. Returning {}') + if fun.startswith(u'publish.'): + log.info(u'Cannot publish publish calls. Returning {}') return {} # TODO: implement returners? Do they make sense for salt-ssh calls? 
if returner: - log.warning('Returners currently not supported in salt-ssh publish') + log.warning(u'Returners currently not supported in salt-ssh publish') # Make sure args have been processed if arg is None: @@ -72,17 +74,17 @@ def _publish(tgt, arg = [] # Set up opts for the SSH object - opts = copy.deepcopy(__context__['master_opts']) + opts = copy.deepcopy(__context__[u'master_opts']) minopts = copy.deepcopy(__opts__) opts.update(minopts) if roster: - opts['roster'] = roster + opts[u'roster'] = roster if timeout: - opts['timeout'] = timeout - opts['argv'] = [fun] + arg - opts['selected_target_option'] = tgt_type - opts['tgt'] = tgt - opts['arg'] = arg + opts[u'timeout'] = timeout + opts[u'argv'] = [fun] + arg + opts[u'selected_target_option'] = tgt_type + opts[u'tgt'] = tgt + opts[u'arg'] = arg # Create the SSH object to handle the actual call ssh = salt.client.ssh.SSH(opts) @@ -92,11 +94,11 @@ def _publish(tgt, for ret in ssh.run_iter(): rets.update(ret) - if form == 'clean': + if form == u'clean': cret = {} for host in rets: - if 'return' in rets[host]: - cret[host] = rets[host]['return'] + if u'return' in rets[host]: + cret[host] = rets[host][u'return'] else: cret[host] = rets[host] return cret @@ -107,8 +109,8 @@ def _publish(tgt, def publish(tgt, fun, arg=None, - tgt_type='glob', - returner='', + tgt_type=u'glob', + returner=u'', timeout=5, roster=None, expr_form=None): @@ -173,11 +175,11 @@ def publish(tgt, # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( - 'Fluorine', - 'the target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + salt.utils.versions.warn_until( + u'Fluorine', + u'the target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' ) tgt_type = expr_form @@ -187,15 +189,15 @@ def publish(tgt, tgt_type=tgt_type, returner=returner, timeout=timeout, - form='clean', + form=u'clean', roster=roster) def full_data(tgt, fun, arg=None, - tgt_type='glob', - returner='', + tgt_type=u'glob', + returner=u'', timeout=5, roster=None, expr_form=None): @@ -223,11 +225,11 @@ def full_data(tgt, # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( - 'Fluorine', - 'the target type should be passed using the \'tgt_type\' ' - 'argument instead of \'expr_form\'. Support for using ' - '\'expr_form\' will be removed in Salt Fluorine.' + salt.utils.versions.warn_until( + u'Fluorine', + u'the target type should be passed using the \'tgt_type\' ' + u'argument instead of \'expr_form\'. Support for using ' + u'\'expr_form\' will be removed in Salt Fluorine.' 
) tgt_type = expr_form @@ -237,7 +239,7 @@ def full_data(tgt, tgt_type=tgt_type, returner=returner, timeout=timeout, - form='full', + form=u'full', roster=roster) @@ -260,5 +262,5 @@ def runner(fun, arg=None, timeout=5): arg = [] # Create and run the runner - runner = salt.runner.RunnerClient(__opts__['__master_opts__']) + runner = salt.runner.RunnerClient(__opts__[u'__master_opts__']) return runner.cmd(fun, arg) diff --git a/salt/client/ssh/wrapper/state.py b/salt/client/ssh/wrapper/state.py index 2710d12ff8..d927ce2e55 100644 --- a/salt/client/ssh/wrapper/state.py +++ b/salt/client/ssh/wrapper/state.py @@ -20,10 +20,12 @@ import salt.state import salt.loader import salt.minion import salt.log -from salt.ext.six import string_types + +# Import 3rd-party libs +from salt.ext import six __func_alias__ = { - 'apply_': 'apply' + u'apply_': u'apply' } log = logging.getLogger(__name__) @@ -34,37 +36,37 @@ def _merge_extra_filerefs(*args): ''' ret = [] for arg in args: - if isinstance(arg, string_types): + if isinstance(arg, six.string_types): if arg: - ret.extend(arg.split(',')) + ret.extend(arg.split(u',')) elif isinstance(arg, list): if arg: ret.extend(arg) - return ','.join(ret) + return u','.join(ret) -def sls(mods, saltenv='base', test=None, exclude=None, **kwargs): +def sls(mods, saltenv=u'base', test=None, exclude=None, **kwargs): ''' Create the seed file for a state.sls run ''' st_kwargs = __salt__.kwargs - __opts__['grains'] = __grains__ - __pillar__.update(kwargs.get('pillar', {})) + __opts__[u'grains'] = __grains__ + __pillar__.update(kwargs.get(u'pillar', {})) st_ = salt.client.ssh.state.SSHHighState( __opts__, __pillar__, __salt__, - __context__['fileclient']) - if isinstance(mods, str): - mods = mods.split(',') + __context__[u'fileclient']) + if isinstance(mods, six.string_types): + mods = mods.split(u',') high_data, errors = st_.render_highstate({saltenv: mods}) if exclude: - if isinstance(exclude, str): - exclude = exclude.split(',') - if '__exclude__' in high_data: - high_data['__exclude__'].extend(exclude) + if isinstance(exclude, six.string_types): + exclude = exclude.split(u',') + if u'__exclude__' in high_data: + high_data[u'__exclude__'].extend(exclude) else: - high_data['__exclude__'] = exclude + high_data[u'__exclude__'] = exclude high_data, ext_errors = st_.state.reconcile_extend(high_data) errors += ext_errors errors += st_.state.verify_high(high_data) @@ -81,38 +83,38 @@ def sls(mods, saltenv='base', test=None, exclude=None, **kwargs): file_refs = salt.client.ssh.state.lowstate_file_refs( chunks, _merge_extra_filerefs( - kwargs.get('extra_filerefs', ''), - __opts__.get('extra_filerefs', '') + kwargs.get(u'extra_filerefs', u''), + __opts__.get(u'extra_filerefs', u'') ) ) - roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat')) - roster_grains = roster.opts['grains'] + roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat')) + roster_grains = roster.opts[u'grains'] # Create the tar containing the state pkg and relevant files. 
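The expr_form handling above follows Salt's usual deprecation pattern: accept the old keyword, warn via salt.utils.versions.warn_until, then forward the value to the new tgt_type argument. A rough standalone analogue using only the stdlib warnings module (warn_until itself is Salt-specific and not reproduced here):

import warnings

warnings.simplefilter('always', DeprecationWarning)

def publish(tgt, fun, tgt_type='glob', expr_form=None):
    '''Toy wrapper showing the old-keyword-to-new-keyword hand-off.'''
    if expr_form is not None:
        warnings.warn(
            "the target type should be passed using the 'tgt_type' "
            "argument instead of 'expr_form'; 'expr_form' will be "
            "removed in a future release.",
            DeprecationWarning,
        )
        tgt_type = expr_form
    return {'tgt': tgt, 'fun': fun, 'tgt_type': tgt_type}

# Old-style call still works, but emits a deprecation warning
print(publish('*', 'test.ping', expr_form='list'))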
trans_tar = salt.client.ssh.state.prep_trans_tar( __opts__, - __context__['fileclient'], + __context__[u'fileclient'], chunks, file_refs, __pillar__, - st_kwargs['id_'], + st_kwargs[u'id_'], roster_grains) - trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type']) - cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format( - __opts__['thin_dir'], + trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type']) + cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format( + __opts__[u'thin_dir'], test, trans_tar_sum, - __opts__['hash_type']) + __opts__[u'hash_type']) single = salt.client.ssh.Single( __opts__, cmd, - fsclient=__context__['fileclient'], + fsclient=__context__[u'fileclient'], minion_opts=__salt__.minion_opts, **st_kwargs) single.shell.send( trans_tar, - '{0}/salt_state.tgz'.format(__opts__['thin_dir'])) + u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir'])) stdout, stderr, _ = single.cmd_block() # Clean up our tar @@ -125,7 +127,7 @@ def sls(mods, saltenv='base', test=None, exclude=None, **kwargs): try: return json.loads(stdout, object_hook=salt.utils.decode_dict) except Exception as e: - log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr)) + log.error(u"JSON Render failed for: %s\n%s", stdout, stderr) log.error(str(e)) # If for some reason the json load fails, return the stdout @@ -144,51 +146,51 @@ def low(data, **kwargs): salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}' ''' st_kwargs = __salt__.kwargs - __opts__['grains'] = __grains__ + __opts__[u'grains'] = __grains__ chunks = [data] st_ = salt.client.ssh.state.SSHHighState( __opts__, __pillar__, __salt__, - __context__['fileclient']) + __context__[u'fileclient']) for chunk in chunks: - chunk['__id__'] = chunk['name'] if not chunk.get('__id__') else chunk['__id__'] + chunk[u'__id__'] = chunk[u'name'] if not chunk.get(u'__id__') else chunk[u'__id__'] err = st_.state.verify_data(data) if err: return err file_refs = salt.client.ssh.state.lowstate_file_refs( chunks, _merge_extra_filerefs( - kwargs.get('extra_filerefs', ''), - __opts__.get('extra_filerefs', '') + kwargs.get(u'extra_filerefs', u''), + __opts__.get(u'extra_filerefs', u'') ) ) - roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat')) - roster_grains = roster.opts['grains'] + roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat')) + roster_grains = roster.opts[u'grains'] # Create the tar containing the state pkg and relevant files. 
trans_tar = salt.client.ssh.state.prep_trans_tar( __opts__, - __context__['fileclient'], + __context__[u'fileclient'], chunks, file_refs, __pillar__, - st_kwargs['id_'], + st_kwargs[u'id_'], roster_grains) - trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type']) - cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format( - __opts__['thin_dir'], + trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type']) + cmd = u'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format( + __opts__[u'thin_dir'], trans_tar_sum, - __opts__['hash_type']) + __opts__[u'hash_type']) single = salt.client.ssh.Single( __opts__, cmd, - fsclient=__context__['fileclient'], + fsclient=__context__[u'fileclient'], minion_opts=__salt__.minion_opts, **st_kwargs) single.shell.send( trans_tar, - '{0}/salt_state.tgz'.format(__opts__['thin_dir'])) + u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir'])) stdout, stderr, _ = single.cmd_block() # Clean up our tar @@ -201,7 +203,7 @@ def low(data, **kwargs): try: return json.loads(stdout, object_hook=salt.utils.decode_dict) except Exception as e: - log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr)) + log.error(u"JSON Render failed for: %s\n%s", stdout, stderr) log.error(str(e)) # If for some reason the json load fails, return the stdout @@ -219,49 +221,49 @@ def high(data, **kwargs): salt '*' state.high '{"vim": {"pkg": ["installed"]}}' ''' - __pillar__.update(kwargs.get('pillar', {})) + __pillar__.update(kwargs.get(u'pillar', {})) st_kwargs = __salt__.kwargs - __opts__['grains'] = __grains__ + __opts__[u'grains'] = __grains__ st_ = salt.client.ssh.state.SSHHighState( __opts__, __pillar__, __salt__, - __context__['fileclient']) + __context__[u'fileclient']) chunks = st_.state.compile_high_data(data) file_refs = salt.client.ssh.state.lowstate_file_refs( chunks, _merge_extra_filerefs( - kwargs.get('extra_filerefs', ''), - __opts__.get('extra_filerefs', '') + kwargs.get(u'extra_filerefs', u''), + __opts__.get(u'extra_filerefs', u'') ) ) - roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat')) - roster_grains = roster.opts['grains'] + roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat')) + roster_grains = roster.opts[u'grains'] # Create the tar containing the state pkg and relevant files. 
trans_tar = salt.client.ssh.state.prep_trans_tar( __opts__, - __context__['fileclient'], + __context__[u'fileclient'], chunks, file_refs, __pillar__, - st_kwargs['id_'], + st_kwargs[u'id_'], roster_grains) - trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type']) - cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format( - __opts__['thin_dir'], + trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type']) + cmd = u'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format( + __opts__[u'thin_dir'], trans_tar_sum, - __opts__['hash_type']) + __opts__[u'hash_type']) single = salt.client.ssh.Single( __opts__, cmd, - fsclient=__context__['fileclient'], + fsclient=__context__[u'fileclient'], minion_opts=__salt__.minion_opts, **st_kwargs) single.shell.send( trans_tar, - '{0}/salt_state.tgz'.format(__opts__['thin_dir'])) + u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir'])) stdout, stderr, _ = single.cmd_block() # Clean up our tar @@ -274,7 +276,7 @@ def high(data, **kwargs): try: return json.loads(stdout, object_hook=salt.utils.decode_dict) except Exception as e: - log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr)) + log.error(u"JSON Render failed for: %s\n%s", stdout, stderr) log.error(str(e)) # If for some reason the json load fails, return the stdout @@ -316,56 +318,56 @@ def highstate(test=None, **kwargs): salt '*' state.highstate exclude=sls_to_exclude salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]" ''' - __pillar__.update(kwargs.get('pillar', {})) + __pillar__.update(kwargs.get(u'pillar', {})) st_kwargs = __salt__.kwargs - __opts__['grains'] = __grains__ + __opts__[u'grains'] = __grains__ st_ = salt.client.ssh.state.SSHHighState( __opts__, __pillar__, __salt__, - __context__['fileclient']) + __context__[u'fileclient']) chunks = st_.compile_low_chunks() file_refs = salt.client.ssh.state.lowstate_file_refs( chunks, _merge_extra_filerefs( - kwargs.get('extra_filerefs', ''), - __opts__.get('extra_filerefs', '') + kwargs.get(u'extra_filerefs', u''), + __opts__.get(u'extra_filerefs', u'') ) ) # Check for errors for chunk in chunks: if not isinstance(chunk, dict): - __context__['retcode'] = 1 + __context__[u'retcode'] = 1 return chunks - roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat')) - roster_grains = roster.opts['grains'] + roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat')) + roster_grains = roster.opts[u'grains'] # Create the tar containing the state pkg and relevant files. 
trans_tar = salt.client.ssh.state.prep_trans_tar( __opts__, - __context__['fileclient'], + __context__[u'fileclient'], chunks, file_refs, __pillar__, - st_kwargs['id_'], + st_kwargs[u'id_'], roster_grains) - trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type']) - cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format( - __opts__['thin_dir'], + trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type']) + cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format( + __opts__[u'thin_dir'], test, trans_tar_sum, - __opts__['hash_type']) + __opts__[u'hash_type']) single = salt.client.ssh.Single( __opts__, cmd, - fsclient=__context__['fileclient'], + fsclient=__context__[u'fileclient'], minion_opts=__salt__.minion_opts, **st_kwargs) single.shell.send( trans_tar, - '{0}/salt_state.tgz'.format(__opts__['thin_dir'])) + u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir'])) stdout, stderr, _ = single.cmd_block() # Clean up our tar @@ -378,7 +380,7 @@ def highstate(test=None, **kwargs): try: return json.loads(stdout, object_hook=salt.utils.decode_dict) except Exception as e: - log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr)) + log.error(u"JSON Render failed for: %s\n%s", stdout, stderr) log.error(str(e)) # If for some reason the json load fails, return the stdout @@ -397,55 +399,55 @@ def top(topfn, test=None, **kwargs): salt '*' state.top reverse_top.sls exclude=sls_to_exclude salt '*' state.top reverse_top.sls exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]" ''' - __pillar__.update(kwargs.get('pillar', {})) + __pillar__.update(kwargs.get(u'pillar', {})) st_kwargs = __salt__.kwargs - __opts__['grains'] = __grains__ + __opts__[u'grains'] = __grains__ if salt.utils.test_mode(test=test, **kwargs): - __opts__['test'] = True + __opts__[u'test'] = True else: - __opts__['test'] = __opts__.get('test', None) + __opts__[u'test'] = __opts__.get(u'test', None) st_ = salt.client.ssh.state.SSHHighState( __opts__, __pillar__, __salt__, - __context__['fileclient']) - st_.opts['state_top'] = os.path.join('salt://', topfn) + __context__[u'fileclient']) + st_.opts[u'state_top'] = os.path.join(u'salt://', topfn) chunks = st_.compile_low_chunks() file_refs = salt.client.ssh.state.lowstate_file_refs( chunks, _merge_extra_filerefs( - kwargs.get('extra_filerefs', ''), - __opts__.get('extra_filerefs', '') + kwargs.get(u'extra_filerefs', u''), + __opts__.get(u'extra_filerefs', u'') ) ) - roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat')) - roster_grains = roster.opts['grains'] + roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat')) + roster_grains = roster.opts[u'grains'] # Create the tar containing the state pkg and relevant files. 
trans_tar = salt.client.ssh.state.prep_trans_tar( __opts__, - __context__['fileclient'], + __context__[u'fileclient'], chunks, file_refs, __pillar__, - st_kwargs['id_'], + st_kwargs[u'id_'], roster_grains) - trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type']) - cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format( - __opts__['thin_dir'], + trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type']) + cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format( + __opts__[u'thin_dir'], test, trans_tar_sum, - __opts__['hash_type']) + __opts__[u'hash_type']) single = salt.client.ssh.Single( __opts__, cmd, - fsclient=__context__['fileclient'], + fsclient=__context__[u'fileclient'], minion_opts=__salt__.minion_opts, **st_kwargs) single.shell.send( trans_tar, - '{0}/salt_state.tgz'.format(__opts__['thin_dir'])) + u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir'])) stdout, stderr, _ = single.cmd_block() # Clean up our tar @@ -458,7 +460,7 @@ def top(topfn, test=None, **kwargs): try: return json.loads(stdout, object_hook=salt.utils.decode_dict) except Exception as e: - log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr)) + log.error(u"JSON Render failed for: %s\n%s", stdout, stderr) log.error(str(e)) # If for some reason the json load fails, return the stdout @@ -475,12 +477,12 @@ def show_highstate(): salt '*' state.show_highstate ''' - __opts__['grains'] = __grains__ + __opts__[u'grains'] = __grains__ st_ = salt.client.ssh.state.SSHHighState( __opts__, __pillar__, __salt__, - __context__['fileclient']) + __context__[u'fileclient']) return st_.compile_highstate() @@ -494,16 +496,16 @@ def show_lowstate(): salt '*' state.show_lowstate ''' - __opts__['grains'] = __grains__ + __opts__[u'grains'] = __grains__ st_ = salt.client.ssh.state.SSHHighState( __opts__, __pillar__, __salt__, - __context__['fileclient']) + __context__[u'fileclient']) return st_.compile_low_chunks() -def show_sls(mods, saltenv='base', test=None, **kwargs): +def show_sls(mods, saltenv=u'base', test=None, **kwargs): ''' Display the state data from a specific sls or list of sls files on the master @@ -514,20 +516,20 @@ def show_sls(mods, saltenv='base', test=None, **kwargs): salt '*' state.show_sls core,edit.vim dev ''' - __pillar__.update(kwargs.get('pillar', {})) - __opts__['grains'] = __grains__ + __pillar__.update(kwargs.get(u'pillar', {})) + __opts__[u'grains'] = __grains__ opts = copy.copy(__opts__) if salt.utils.test_mode(test=test, **kwargs): - opts['test'] = True + opts[u'test'] = True else: - opts['test'] = __opts__.get('test', None) + opts[u'test'] = __opts__.get(u'test', None) st_ = salt.client.ssh.state.SSHHighState( __opts__, __pillar__, __salt__, - __context__['fileclient']) - if isinstance(mods, string_types): - mods = mods.split(',') + __context__[u'fileclient']) + if isinstance(mods, six.string_types): + mods = mods.split(u',') high_data, errors = st_.render_highstate({saltenv: mods}) high_data, ext_errors = st_.state.reconcile_extend(high_data) errors += ext_errors @@ -543,7 +545,7 @@ def show_sls(mods, saltenv='base', test=None, **kwargs): return high_data -def show_low_sls(mods, saltenv='base', test=None, **kwargs): +def show_low_sls(mods, saltenv=u'base', test=None, **kwargs): ''' Display the low state data from a specific sls or list of sls files on the master. 
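Each wrapper above (sls, low, high, highstate, top) repeats the same mechanical sequence: compile low chunks, gather file references, pack everything into a gzipped tar, checksum it, push it over SSH, and invoke state.pkg remotely with the checksum and hash type. A stripped-down, hypothetical sketch of just the packaging and command-building steps, using the stdlib rather than Salt's helpers:

import hashlib
import tarfile
import tempfile

def pack_state_tar(paths):
    '''Bundle the given files into a gzipped tar and return its path.'''
    tmp = tempfile.NamedTemporaryFile(suffix='.tgz', delete=False)
    with tarfile.open(tmp.name, 'w:gz') as tar:
        for path in paths:
            tar.add(path)
    return tmp.name

def checksum(path, hash_type='sha256'):
    '''Hash the tarball so the target can verify it before unpacking.'''
    digest = hashlib.new(hash_type)
    with open(path, 'rb') as fh:
        digest.update(fh.read())
    return digest.hexdigest()

def build_state_pkg_cmd(thin_dir, tar_sum, hash_type='sha256', test=False):
    '''Mirror the shape of the remote command assembled by the wrappers above.'''
    return 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
        thin_dir, test, tar_sum, hash_type)

# Hypothetical usage: package a throwaway file and build the remote command
with tempfile.NamedTemporaryFile('w', delete=False) as fh:
    fh.write('top.sls placeholder')
    sample = fh.name
tar_path = pack_state_tar([sample])
print(build_state_pkg_cmd('/var/tmp/.salt_thin', checksum(tar_path)))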
@@ -556,21 +558,21 @@ def show_low_sls(mods, saltenv='base', test=None, **kwargs): salt '*' state.show_sls core,edit.vim dev ''' - __pillar__.update(kwargs.get('pillar', {})) - __opts__['grains'] = __grains__ + __pillar__.update(kwargs.get(u'pillar', {})) + __opts__[u'grains'] = __grains__ opts = copy.copy(__opts__) if salt.utils.test_mode(test=test, **kwargs): - opts['test'] = True + opts[u'test'] = True else: - opts['test'] = __opts__.get('test', None) + opts[u'test'] = __opts__.get(u'test', None) st_ = salt.client.ssh.state.SSHHighState( __opts__, __pillar__, __salt__, - __context__['fileclient']) - if isinstance(mods, string_types): - mods = mods.split(',') + __context__[u'fileclient']) + if isinstance(mods, six.string_types): + mods = mods.split(u',') high_data, errors = st_.render_highstate({saltenv: mods}) high_data, ext_errors = st_.state.reconcile_extend(high_data) errors += ext_errors @@ -597,12 +599,12 @@ def show_top(): salt '*' state.show_top ''' - __opts__['grains'] = __grains__ + __opts__[u'grains'] = __grains__ st_ = salt.client.ssh.state.SSHHighState( __opts__, __pillar__, __salt__, - __context__['fileclient']) + __context__[u'fileclient']) top_data = st_.get_top() errors = [] errors += st_.verify_tops(top_data) @@ -632,30 +634,30 @@ def single(fun, name, test=None, **kwargs): ''' st_kwargs = __salt__.kwargs - __opts__['grains'] = __grains__ + __opts__[u'grains'] = __grains__ # state.fun -> [state, fun] - comps = fun.split('.') + comps = fun.split(u'.') if len(comps) < 2: - __context__['retcode'] = 1 - return 'Invalid function passed' + __context__[u'retcode'] = 1 + return u'Invalid function passed' # Create the low chunk, using kwargs as a base - kwargs.update({'state': comps[0], - 'fun': comps[1], - '__id__': name, - 'name': name}) + kwargs.update({u'state': comps[0], + u'fun': comps[1], + u'__id__': name, + u'name': name}) opts = copy.deepcopy(__opts__) # Set test mode if salt.utils.test_mode(test=test, **kwargs): - opts['test'] = True + opts[u'test'] = True else: - opts['test'] = __opts__.get('test', None) + opts[u'test'] = __opts__.get(u'test', None) # Get the override pillar data - __pillar__.update(kwargs.get('pillar', {})) + __pillar__.update(kwargs.get(u'pillar', {})) # Create the State environment st_ = salt.client.ssh.state.SSHState(__opts__, __pillar__) @@ -663,7 +665,7 @@ def single(fun, name, test=None, **kwargs): # Verify the low chunk err = st_.verify_data(kwargs) if err: - __context__['retcode'] = 1 + __context__[u'retcode'] = 1 return err # Must be a list of low-chunks @@ -674,46 +676,46 @@ def single(fun, name, test=None, **kwargs): file_refs = salt.client.ssh.state.lowstate_file_refs( chunks, _merge_extra_filerefs( - kwargs.get('extra_filerefs', ''), - __opts__.get('extra_filerefs', '') + kwargs.get(u'extra_filerefs', u''), + __opts__.get(u'extra_filerefs', u'') ) ) - roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat')) - roster_grains = roster.opts['grains'] + roster = salt.roster.Roster(__opts__, __opts__.get(u'roster', u'flat')) + roster_grains = roster.opts[u'grains'] # Create the tar containing the state pkg and relevant files. 
trans_tar = salt.client.ssh.state.prep_trans_tar( __opts__, - __context__['fileclient'], + __context__[u'fileclient'], chunks, file_refs, __pillar__, - st_kwargs['id_'], + st_kwargs[u'id_'], roster_grains) # Create a hash so we can verify the tar on the target system - trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__['hash_type']) + trans_tar_sum = salt.utils.get_hash(trans_tar, __opts__[u'hash_type']) # We use state.pkg to execute the "state package" - cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format( - __opts__['thin_dir'], + cmd = u'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format( + __opts__[u'thin_dir'], test, trans_tar_sum, - __opts__['hash_type']) + __opts__[u'hash_type']) # Create a salt-ssh Single object to actually do the ssh work single = salt.client.ssh.Single( __opts__, cmd, - fsclient=__context__['fileclient'], + fsclient=__context__[u'fileclient'], minion_opts=__salt__.minion_opts, **st_kwargs) # Copy the tar down single.shell.send( trans_tar, - '{0}/salt_state.tgz'.format(__opts__['thin_dir'])) + u'{0}/salt_state.tgz'.format(__opts__[u'thin_dir'])) # Run the state.pkg command on the target stdout, stderr, _ = single.cmd_block() @@ -728,7 +730,7 @@ def single(fun, name, test=None, **kwargs): try: return json.loads(stdout, object_hook=salt.utils.decode_dict) except Exception as e: - log.error("JSON Render failed for: {0}\n{1}".format(stdout, stderr)) + log.error(u"JSON Render failed for: %s\n%s", stdout, stderr) log.error(str(e)) # If for some reason the json load fails, return the stdout diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py index ab30422ac8..5eaf1a4f7a 100644 --- a/salt/cloud/__init__.py +++ b/salt/cloud/__init__.py @@ -30,13 +30,12 @@ import salt.config import salt.client import salt.loader import salt.utils +import salt.utils.args import salt.utils.cloud +import salt.utils.context import salt.utils.dictupdate import salt.utils.files import salt.syspaths -from salt.utils import reinit_crypto -from salt.utils import context -from salt.ext.six import string_types from salt.template import compile_template # Import third party libs @@ -48,7 +47,7 @@ except ImportError: except ImportError: pass # pycrypto < 2.1 import yaml -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import input # pylint: disable=import-error,redefined-builtin # Get logging started @@ -342,7 +341,7 @@ class CloudClient(object): vm_overrides = {} kwargs['profile'] = profile mapper = salt.cloud.Map(self._opts_defaults(**kwargs)) - if isinstance(names, str): + if isinstance(names, six.string_types): names = names.split(',') return salt.utils.simple_types_filter( mapper.run_profile(profile, names, vm_overrides=vm_overrides) @@ -367,7 +366,7 @@ class CloudClient(object): Destroy the named VMs ''' mapper = salt.cloud.Map(self._opts_defaults(destroy=True)) - if isinstance(names, str): + if isinstance(names, six.string_types): names = names.split(',') return salt.utils.simple_types_filter( mapper.destroy(names) @@ -391,7 +390,7 @@ class CloudClient(object): provider += ':{0}'.format(next(six.iterkeys(providers[provider]))) else: return False - if isinstance(names, str): + if isinstance(names, six.string_types): names = names.split(',') ret = {} for name in names: @@ -433,7 +432,7 @@ class CloudClient(object): provider += ':{0}'.format(next(six.iterkeys(providers[provider]))) else: return False - if isinstance(names, str): + if isinstance(names, six.string_types): names = names.split(',') ret = {} @@ 
-625,7 +624,7 @@ class Cloud(object): pmap[alias] = {} try: - with context.func_globals_inject( + with salt.utils.context.func_globals_inject( self.clouds[fun], __active_provider_name__=':'.join([alias, driver]) ): @@ -708,7 +707,7 @@ class Cloud(object): def get_running_by_names(self, names, query='list_nodes', cached=False, profile=None): - if isinstance(names, string_types): + if isinstance(names, six.string_types): names = [names] matches = {} @@ -730,18 +729,9 @@ class Cloud(object): continue for vm_name, details in six.iteritems(vms): - # If VM was created with use_fqdn with either of the softlayer drivers, - # we need to strip the VM name and only search for the short hostname. - if driver == 'softlayer' or driver == 'softlayer_hw': - ret = [] - for name in names: - name = name.split('.')[0] - ret.append(name) - if vm_name not in ret: - continue # XXX: The logic below can be removed once the aws driver # is removed - elif vm_name not in names: + if vm_name not in names: continue elif driver == 'ec2' and 'aws' in handled_drivers and \ @@ -827,7 +817,7 @@ class Cloud(object): try: - with context.func_globals_inject( + with salt.utils.context.func_globals_inject( self.clouds[fun], __active_provider_name__=':'.join([alias, driver]) ): @@ -870,7 +860,7 @@ class Cloud(object): data[alias] = {} try: - with context.func_globals_inject( + with salt.utils.context.func_globals_inject( self.clouds[fun], __active_provider_name__=':'.join([alias, driver]) ): @@ -913,7 +903,7 @@ class Cloud(object): data[alias] = {} try: - with context.func_globals_inject( + with salt.utils.context.func_globals_inject( self.clouds[fun], __active_provider_name__=':'.join([alias, driver]) ): @@ -1035,7 +1025,7 @@ class Cloud(object): log.info('Destroying in non-parallel mode.') for alias, driver, name in vms_to_destroy: fun = '{0}.destroy'.format(driver) - with context.func_globals_inject( + with salt.utils.context.func_globals_inject( self.clouds[fun], __active_provider_name__=':'.join([alias, driver]) ): @@ -1280,7 +1270,7 @@ class Cloud(object): try: alias, driver = vm_['provider'].split(':') func = '{0}.create'.format(driver) - with context.func_globals_inject( + with salt.utils.context.func_globals_inject( self.clouds[fun], __active_provider_name__=':'.join([alias, driver]) ): @@ -1366,7 +1356,7 @@ class Cloud(object): return try: - with context.func_globals_inject( + with salt.utils.context.func_globals_inject( self.clouds[fun], __active_provider_name__=extra_['provider'] ): @@ -1514,7 +1504,7 @@ class Cloud(object): invalid_functions[fun].append(vm_name) continue - with context.func_globals_inject( + with salt.utils.context.func_globals_inject( self.clouds[fun], __active_provider_name__=':'.join([alias, driver]) ): @@ -1526,7 +1516,7 @@ class Cloud(object): # Clean kwargs of "__pub_*" data before running the cloud action call. # Prevents calling positional "kwarg" arg before "call" when no kwarg # argument is present in the cloud driver function's arg spec. 
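The repeated "with salt.utils.context.func_globals_inject(...)" blocks in this file temporarily plant __active_provider_name__ into the target cloud function's globals for the duration of the call. A simplified, stdlib-only analogue of that context manager (not Salt's actual implementation):

import contextlib

@contextlib.contextmanager
def func_globals_inject(func, **injections):
    '''Temporarily add names to func.__globals__, restoring the originals on exit.'''
    saved = {}
    missing = object()
    for name, value in injections.items():
        saved[name] = func.__globals__.get(name, missing)
        func.__globals__[name] = value
    try:
        yield
    finally:
        for name, original in saved.items():
            if original is missing:
                func.__globals__.pop(name, None)
            else:
                func.__globals__[name] = original

def list_nodes():
    # Reads a name that only exists while it is injected
    return __active_provider_name__  # noqa: F821

with func_globals_inject(list_nodes, __active_provider_name__='my-ec2:ec2'):
    print(list_nodes())  # my-ec2:ec2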
- kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: ret[alias][driver][vm_name] = self.clouds[fun]( @@ -1598,7 +1588,7 @@ class Cloud(object): ) ) - with context.func_globals_inject( + with salt.utils.context.func_globals_inject( self.clouds[fun], __active_provider_name__=':'.join([alias, driver]) ): @@ -1646,7 +1636,7 @@ class Cloud(object): self.opts['providers'].pop(alias) continue - with context.func_globals_inject( + with salt.utils.context.func_globals_inject( self.clouds[fun], __active_provider_name__=':'.join([alias, driver]) ): @@ -1815,7 +1805,7 @@ class Map(Cloud): if isinstance(mapped, (list, tuple)): entries = {} for mapping in mapped: - if isinstance(mapping, string_types): + if isinstance(mapping, six.string_types): # Foo: # - bar1 # - bar2 @@ -1855,7 +1845,7 @@ class Map(Cloud): map_[profile] = entries continue - if isinstance(mapped, string_types): + if isinstance(mapped, six.string_types): # If it's a single string entry, let's make iterable because of # the next step mapped = [mapped] @@ -2310,7 +2300,7 @@ def create_multiprocessing(parallel_data, queue=None): This function will be called from another process when running a map in parallel mode. The result from the create is always a json object. ''' - reinit_crypto() + salt.utils.reinit_crypto() parallel_data['opts']['output'] = 'json' cloud = Cloud(parallel_data['opts']) @@ -2342,14 +2332,14 @@ def destroy_multiprocessing(parallel_data, queue=None): This function will be called from another process when running a map in parallel mode. The result from the destroy is always a json object. ''' - reinit_crypto() + salt.utils.reinit_crypto() parallel_data['opts']['output'] = 'json' clouds = salt.loader.clouds(parallel_data['opts']) try: fun = clouds['{0}.destroy'.format(parallel_data['driver'])] - with context.func_globals_inject( + with salt.utils.context.func_globals_inject( fun, __active_provider_name__=':'.join([ parallel_data['alias'], @@ -2378,11 +2368,11 @@ def run_parallel_map_providers_query(data, queue=None): This function will be called from another process when building the providers map. 
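Another recurring change in this file is swapping isinstance(x, str) for isinstance(x, six.string_types): on Python 2 a unicode value is not a str, so the old check silently mishandled unicode names. A small illustration, with a local fallback so it runs even without six installed:

import sys

try:
    from six import string_types
except ImportError:
    # Fallback matching what six provides on each major version
    string_types = (str,) if sys.version_info[0] >= 3 else (basestring,)  # noqa: F821

def split_names(names):
    '''Accept either a comma-separated string or a list of names.'''
    if isinstance(names, string_types):
        names = names.split(',')
    return list(names)

print(split_names('web01,web02'))   # ['web01', 'web02']
print(split_names(['db01']))        # ['db01']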
''' - reinit_crypto() + salt.utils.reinit_crypto() cloud = Cloud(data['opts']) try: - with context.func_globals_inject( + with salt.utils.context.func_globals_inject( cloud.clouds[data['fun']], __active_provider_name__=':'.join([ data['alias'], diff --git a/salt/cloud/cli.py b/salt/cloud/cli.py index fb8cdd25e1..7c85edce41 100644 --- a/salt/cloud/cli.py +++ b/salt/cloud/cli.py @@ -20,23 +20,24 @@ import logging from salt.ext.six.moves import input # Import salt libs +import salt.cloud +import salt.utils.cloud import salt.config import salt.defaults.exitcodes import salt.output +import salt.syspaths as syspaths import salt.utils -from salt.utils import parsers +import salt.utils.parsers +from salt.exceptions import SaltCloudException, SaltCloudSystemExit from salt.utils.verify import check_user, verify_env, verify_files, verify_log -# Import salt.cloud libs -import salt.cloud -import salt.utils.cloud -from salt.exceptions import SaltCloudException, SaltCloudSystemExit -import salt.ext.six as six -import salt.syspaths as syspaths +# Import 3rd-party libs +from salt.ext import six + log = logging.getLogger(__name__) -class SaltCloud(parsers.SaltCloudParser): +class SaltCloud(salt.utils.parsers.SaltCloudParser): def run(self): ''' diff --git a/salt/cloud/clouds/aliyun.py b/salt/cloud/clouds/aliyun.py index acdcde6124..1980e8f175 100644 --- a/salt/cloud/clouds/aliyun.py +++ b/salt/cloud/clouds/aliyun.py @@ -50,7 +50,8 @@ from salt.exceptions import ( SaltCloudExecutionTimeout ) -# Import Third Party Libs +# Import 3rd-party libs +from salt.ext import six try: import requests HAS_REQUESTS = True @@ -745,7 +746,7 @@ def _compute_signature(parameters, access_key_secret): ''' def percent_encode(line): - if not isinstance(line, str): + if not isinstance(line, six.string_types): return line s = line diff --git a/salt/cloud/clouds/azurearm.py b/salt/cloud/clouds/azurearm.py index f7b5f8c7e3..68d1d91847 100644 --- a/salt/cloud/clouds/azurearm.py +++ b/salt/cloud/clouds/azurearm.py @@ -65,7 +65,7 @@ import salt.config as config import salt.utils import salt.utils.cloud import salt.utils.files -import salt.ext.six as six +from salt.ext import six import salt.version from salt.exceptions import ( SaltCloudSystemExit, @@ -424,13 +424,14 @@ def list_nodes_full(conn=None, call=None): # pylint: disable=unused-argument for group in list_resource_groups(): nodes = compconn.virtual_machines.list(group) for node in nodes: + private_ips, public_ips = __get_ips_from_node(group, node) ret[node.name] = object_to_dict(node) ret[node.name]['id'] = node.id ret[node.name]['name'] = node.name ret[node.name]['size'] = node.hardware_profile.vm_size ret[node.name]['state'] = node.provisioning_state - ret[node.name]['private_ips'] = node.network_profile.network_interfaces - ret[node.name]['public_ips'] = node.network_profile.network_interfaces + ret[node.name]['private_ips'] = private_ips + ret[node.name]['public_ips'] = public_ips ret[node.name]['storage_profile']['data_disks'] = [] ret[node.name]['resource_group'] = group for disk in node.storage_profile.data_disks: @@ -450,6 +451,30 @@ def list_nodes_full(conn=None, call=None): # pylint: disable=unused-argument return ret +def __get_ips_from_node(resource_group, node): + ''' + List private and public IPs from a VM interface + ''' + global netconn # pylint: disable=global-statement,invalid-name + if not netconn: + netconn = get_conn(NetworkManagementClient) + + private_ips = [] + public_ips = [] + for node_iface in node.network_profile.network_interfaces: + node_iface_name 
= node_iface.id.split('/')[-1] + network_interface = netconn.network_interfaces.get(resource_group, node_iface_name) + for ip_configuration in network_interface.ip_configurations: + if ip_configuration.private_ip_address: + private_ips.append(ip_configuration.private_ip_address) + if ip_configuration.public_ip_address and ip_configuration.public_ip_address.id: + public_iface_name = ip_configuration.public_ip_address.id.split('/')[-1] + public_iface = netconn.public_ip_addresses.get(resource_group, public_iface_name) + public_ips.append(public_iface.ip_address) + + return private_ips, public_ips + + def list_resource_groups(conn=None, call=None): # pylint: disable=unused-argument ''' List resource groups associated with the account diff --git a/salt/cloud/clouds/cloudstack.py b/salt/cloud/clouds/cloudstack.py index 1f36c91a9b..8732f05f47 100644 --- a/salt/cloud/clouds/cloudstack.py +++ b/salt/cloud/clouds/cloudstack.py @@ -30,6 +30,8 @@ import logging # Import salt cloud libs import salt.config as config +import salt.utils.cloud +import salt.utils.event from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import from salt.utils import namespaced_function from salt.exceptions import SaltCloudSystemExit diff --git a/salt/cloud/clouds/digital_ocean.py b/salt/cloud/clouds/digital_ocean.py index df13b4c929..daf5b8f75a 100644 --- a/salt/cloud/clouds/digital_ocean.py +++ b/salt/cloud/clouds/digital_ocean.py @@ -46,9 +46,8 @@ from salt.exceptions import ( SaltCloudExecutionFailure, SaltCloudExecutionTimeout ) -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import zip -from salt.ext.six import string_types # Import Third Party Libs try: @@ -209,7 +208,7 @@ def get_image(vm_): vm_image = config.get_cloud_config_value( 'image', vm_, __opts__, search_global=False ) - if not isinstance(vm_image, string_types): + if not isinstance(vm_image, six.string_types): vm_image = str(vm_image) for image in images: diff --git a/salt/cloud/clouds/ec2.py b/salt/cloud/clouds/ec2.py index 8231734840..96fc7f24bd 100644 --- a/salt/cloud/clouds/ec2.py +++ b/salt/cloud/clouds/ec2.py @@ -105,7 +105,7 @@ from salt.exceptions import ( ) # pylint: disable=import-error,no-name-in-module,redefined-builtin -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import map, range, zip from salt.ext.six.moves.urllib.parse import urlparse as _urlparse, urlencode as _urlencode @@ -1028,10 +1028,18 @@ def ssh_interface(vm_): Return the ssh_interface type to connect to. Either 'public_ips' (default) or 'private_ips'. ''' - return config.get_cloud_config_value( + ret = config.get_cloud_config_value( 'ssh_interface', vm_, __opts__, default='public_ips', search_global=False ) + if ret not in ('public_ips', 'private_ips'): + log.warning(( + 'Invalid ssh_interface: {0}. ' + 'Allowed options are ("public_ips", "private_ips"). ' + 'Defaulting to "public_ips".' + ).format(ret)) + ret = 'public_ips' + return ret def get_ssh_gateway_config(vm_): @@ -1044,7 +1052,7 @@ def get_ssh_gateway_config(vm_): ) # Check to see if a SSH Gateway will be used. 
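The ssh_interface change above adds input validation: anything other than 'public_ips' or 'private_ips' is logged and replaced with the default. A standalone sketch of the same guard, with generic names rather than the EC2 driver's config plumbing:

import logging

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger(__name__)

ALLOWED_INTERFACES = ('public_ips', 'private_ips')

def validate_ssh_interface(value, default='public_ips'):
    '''Return value if it is an allowed interface, otherwise warn and fall back.'''
    if value not in ALLOWED_INTERFACES:
        log.warning(
            'Invalid ssh_interface: %s. Allowed options are %s. Defaulting to "%s".',
            value, ALLOWED_INTERFACES, default)
        return default
    return value

print(validate_ssh_interface('private_ips'))   # private_ips
print(validate_ssh_interface('bogus'))         # public_ips (with a warning)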
- if not isinstance(ssh_gateway, str): + if not isinstance(ssh_gateway, six.string_types): return None # Create dictionary of configuration items @@ -1431,7 +1439,7 @@ def _create_eni_if_necessary(interface, vm_): ) associate_public_ip = interface.get('AssociatePublicIpAddress', False) - if isinstance(associate_public_ip, str): + if isinstance(associate_public_ip, six.string_types): # Assume id of EIP as value _associate_eip_with_interface(eni_id, associate_public_ip, vm_=vm_) @@ -2611,7 +2619,7 @@ def create(vm_=None, call=None): data, vm_ = request_instance(vm_, location) # If data is a str, it's an error - if isinstance(data, str): + if isinstance(data, six.string_types): log.error('Error requesting instance: {0}'.format(data)) return {} @@ -2644,7 +2652,7 @@ def create(vm_=None, call=None): ) for value in six.itervalues(tags): - if not isinstance(value, str): + if not isinstance(value, six.string_types): raise SaltCloudConfigError( '\'tag\' values must be strings. Try quoting the values. ' 'e.g. "2013-09-19T20:09:46Z".' @@ -2831,7 +2839,7 @@ def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True): if 'instance_id' not in kwargs: kwargs['instance_id'] = _get_node(name)['instanceId'] - if isinstance(kwargs['volumes'], str): + if isinstance(kwargs['volumes'], six.string_types): volumes = yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] diff --git a/salt/cloud/clouds/gce.py b/salt/cloud/clouds/gce.py index 425907aa9b..8d2f891c12 100644 --- a/salt/cloud/clouds/gce.py +++ b/salt/cloud/clouds/gce.py @@ -83,11 +83,11 @@ except ImportError: # Import salt libs from salt.utils import namespaced_function -import salt.ext.six as six +from salt.ext import six import salt.utils.cloud import salt.utils.files +import salt.utils.http import salt.config as config -from salt.utils import http from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import from salt.exceptions import ( SaltCloudSystemExit, @@ -2615,7 +2615,7 @@ def update_pricing(kwargs=None, call=None): .. 
versionadded:: 2015.8.0 ''' url = 'https://cloudpricingcalculator.appspot.com/static/data/pricelist.json' - price_json = http.query(url, decode=True, decode_type='json') + price_json = salt.utils.http.query(url, decode=True, decode_type='json') outfile = os.path.join( __opts__['cachedir'], 'gce-pricing.p' diff --git a/salt/cloud/clouds/joyent.py b/salt/cloud/clouds/joyent.py index c5bb0c4bf3..8f40c28657 100644 --- a/salt/cloud/clouds/joyent.py +++ b/salt/cloud/clouds/joyent.py @@ -65,7 +65,7 @@ from Crypto.PublicKey import RSA from Crypto.Signature import PKCS1_v1_5 # Import salt libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import http_client # pylint: disable=import-error,no-name-in-module import salt.utils.cloud import salt.utils.files @@ -1071,10 +1071,10 @@ def query(action=None, timenow = datetime.datetime.utcnow() timestamp = timenow.strftime('%a, %d %b %Y %H:%M:%S %Z').strip() with salt.utils.files.fopen(ssh_keyfile, 'r') as kh_: - rsa_key = RSA.importKey(kh_) + rsa_key = RSA.importKey(kh_.read()) rsa_ = PKCS1_v1_5.new(rsa_key) hash_ = SHA256.new() - hash_.update(timestamp) + hash_.update(timestamp.encode(__salt_system_encoding__)) signed = base64.b64encode(rsa_.sign(hash_)) keyid = '/{0}/keys/{1}'.format(user.split('/')[0], ssh_keyname) @@ -1085,7 +1085,7 @@ def query(action=None, 'Date': timestamp, 'Authorization': 'Signature keyId="{0}",algorithm="rsa-sha256" {1}'.format( keyid, - signed + signed.decode(__salt_system_encoding__) ), } diff --git a/salt/cloud/clouds/libvirt.py b/salt/cloud/clouds/libvirt.py index 2bc0b36f52..53a8c4b659 100644 --- a/salt/cloud/clouds/libvirt.py +++ b/salt/cloud/clouds/libvirt.py @@ -63,7 +63,7 @@ import os from xml.etree import ElementTree # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six try: import libvirt # pylint: disable=import-error from libvirt import libvirtError diff --git a/salt/cloud/clouds/linode.py b/salt/cloud/clouds/linode.py index c67355b328..6b761cb985 100644 --- a/salt/cloud/clouds/linode.py +++ b/salt/cloud/clouds/linode.py @@ -35,7 +35,7 @@ import datetime # Import Salt Libs import salt.config as config -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range from salt.exceptions import ( SaltCloudConfigError, diff --git a/salt/cloud/clouds/lxc.py b/salt/cloud/clouds/lxc.py index 48aa6cc1fd..3043874e3f 100644 --- a/salt/cloud/clouds/lxc.py +++ b/salt/cloud/clouds/lxc.py @@ -30,7 +30,7 @@ import salt.runner # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Get logging started log = logging.getLogger(__name__) diff --git a/salt/cloud/clouds/msazure.py b/salt/cloud/clouds/msazure.py index 4a41f4a440..31f7186177 100644 --- a/salt/cloud/clouds/msazure.py +++ b/salt/cloud/clouds/msazure.py @@ -52,6 +52,7 @@ from salt.exceptions import SaltCloudSystemExit import salt.utils.cloud # Import 3rd-party libs +from salt.ext import six HAS_LIBS = False try: import azure @@ -709,7 +710,7 @@ def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True): if kwargs is None: kwargs = {} - if isinstance(kwargs['volumes'], str): + if isinstance(kwargs['volumes'], six.string_types): volumes = yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] @@ -803,7 +804,7 @@ def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True): if kwargs is None: kwargs = {} - if isinstance(kwargs['volumes'], str): + if isinstance(kwargs['volumes'], six.string_types): volumes = 
yaml.safe_load(kwargs['volumes']) else: volumes = kwargs['volumes'] diff --git a/salt/cloud/clouds/nova.py b/salt/cloud/clouds/nova.py index 386dcee75f..d59600f04a 100644 --- a/salt/cloud/clouds/nova.py +++ b/salt/cloud/clouds/nova.py @@ -209,7 +209,7 @@ import pprint import yaml # Import Salt Libs -import salt.ext.six as six +from salt.ext import six import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto @@ -728,12 +728,18 @@ def request_instance(vm_=None, call=None): else: pool = floating_ip_conf.get('pool', 'public') - for fl_ip, opts in six.iteritems(conn.floating_ip_list()): - if opts['fixed_ip'] is None and opts['pool'] == pool: - floating_ip = fl_ip - break - if floating_ip is None: + try: floating_ip = conn.floating_ip_create(pool)['ip'] + except Exception: + log.info('A new IP address was unable to be allocated. ' + 'An IP address will be pulled from the already allocated list, ' + 'This will cause a race condition when building in parallel.') + for fl_ip, opts in six.iteritems(conn.floating_ip_list()): + if opts['fixed_ip'] is None and opts['pool'] == pool: + floating_ip = fl_ip + break + if floating_ip is None: + log.error('No IP addresses available to allocate for this server: {0}'.format(vm_['name'])) def __query_node_data(vm_): try: diff --git a/salt/cloud/clouds/oneandone.py b/salt/cloud/clouds/oneandone.py new file mode 100644 index 0000000000..73526b8b4b --- /dev/null +++ b/salt/cloud/clouds/oneandone.py @@ -0,0 +1,849 @@ +# -*- coding: utf-8 -*- +''' +1&1 Cloud Server Module +======================= + +======= +The 1&1 SaltStack cloud module allows a 1&1 server to +be automatically deployed and bootstrapped with Salt. + +:depends: 1and1 >= 1.2.0 + +The module requires the 1&1 api_token to be provided. +The server should also be assigned a public LAN, a private LAN, +or both along with SSH key pairs. +... + +Set up the cloud configuration at ``/etc/salt/cloud.providers`` or +``/etc/salt/cloud.providers.d/oneandone.conf``: + +.. code-block:: yaml + + my-oneandone-config: + driver: oneandone + # The 1&1 api token + api_token: + # SSH private key filename + ssh_private_key: /path/to/private_key + # SSH public key filename + ssh_public_key: /path/to/public_key + +.. code-block:: yaml + + my-oneandone-profile: + provider: my-oneandone-config + # Either provide fixed_instance_size_id or vcore, cores_per_processor, ram, and hdds. + # Size of the ID desired for the server + fixed_instance_size: S + # Total amount of processors + vcore: 2 + # Number of cores per processor + cores_per_processor: 2 + # RAM memory size in GB + ram: 4 + # Hard disks + hdds: + - + is_main: true + size: 20 + - + is_main: false + size: 20 + # ID of the appliance image that will be installed on server + appliance_id: + # ID of the datacenter where the server will be created + datacenter_id: + # Description of the server + description: My server description + # Password of the server. Password must contain more than 8 characters + # using uppercase letters, numbers and other special symbols. + password: P4$$w0rD + # Power on server after creation - default True + power_on: true + # Firewall policy ID. If it is not provided, the server will assign + # the best firewall policy, creating a new one if necessary. + # If the parameter is sent with a 0 value, the server will be created with all ports blocked. 
+ firewall_policy_id: + # IP address ID + ip_id: + # Load balancer ID + load_balancer_id: + # Monitoring policy ID + monitoring_policy_id: + +Set ``deploy`` to False if Salt should not be installed on the node. + +.. code-block:: yaml + + my-oneandone-profile: + deploy: False +''' + +# Import python libs +from __future__ import absolute_import +import logging +import os +import pprint +import time + +# Import salt libs +import salt.utils +import salt.config as config +from salt.exceptions import ( + SaltCloudConfigError, + SaltCloudNotFound, + SaltCloudExecutionFailure, + SaltCloudExecutionTimeout, + SaltCloudSystemExit +) + +# Import salt.cloud libs +import salt.utils.cloud +from salt.ext import six + +try: + from oneandone.client import ( + OneAndOneService, Server, Hdd + ) + HAS_ONEANDONE = True +except ImportError: + HAS_ONEANDONE = False + +# Get logging started +log = logging.getLogger(__name__) + +__virtualname__ = 'oneandone' + + +# Only load in this module if the 1&1 configurations are in place +def __virtual__(): + ''' + Check for 1&1 configurations. + ''' + if get_configured_provider() is False: + return False + + if get_dependencies() is False: + return False + + return __virtualname__ + + +def get_configured_provider(): + ''' + Return the first configured instance. + ''' + return config.is_provider_configured( + __opts__, + __active_provider_name__ or __virtualname__, + ('api_token',) + ) + + +def get_dependencies(): + ''' + Warn if dependencies are not met. + ''' + return config.check_driver_dependencies( + __virtualname__, + {'oneandone': HAS_ONEANDONE} + ) + + +def get_conn(): + ''' + Return a conn object for the passed VM data + ''' + return OneAndOneService( + api_token=config.get_cloud_config_value( + 'api_token', + get_configured_provider(), + __opts__, + search_global=False + ) + ) + + +def get_size(vm_): + ''' + Return the VM's size object + ''' + vm_size = config.get_cloud_config_value( + 'fixed_instance_size', vm_, __opts__, default=None, + search_global=False + ) + sizes = avail_sizes() + + if not vm_size: + size = next((item for item in sizes if item['name'] == 'S'), None) + return size + + size = next((item for item in sizes if item['name'] == vm_size or item['id'] == vm_size), None) + if size: + return size + + raise SaltCloudNotFound( + 'The specified size, \'{0}\', could not be found.'.format(vm_size) + ) + + +def get_image(vm_): + ''' + Return the image object to use + ''' + vm_image = config.get_cloud_config_value('image', vm_, __opts__).encode( + 'ascii', 'salt-cloud-force-ascii' + ) + + images = avail_images() + for key, value in six.iteritems(images): + if vm_image and vm_image in (images[key]['id'], images[key]['name']): + return images[key] + + raise SaltCloudNotFound( + 'The specified image, \'{0}\', could not be found.'.format(vm_image) + ) + + +def avail_locations(conn=None, call=None): + ''' + List available locations/datacenters for 1&1 + ''' + if call == 'action': + raise SaltCloudSystemExit( + 'The avail_locations function must be called with ' + '-f or --function, or with the --list-locations option' + ) + + datacenters = [] + + if not conn: + conn = get_conn() + + for datacenter in conn.list_datacenters(): + datacenters.append({datacenter['country_code']: datacenter}) + + return {'Locations': datacenters} + + +def avail_images(conn=None, call=None): + ''' + Return a list of the server appliances that are on the provider + ''' + if call == 'action': + raise SaltCloudSystemExit( + 'The avail_images function must be called with ' + '-f or 
--function, or with the --list-images option' + ) + + if not conn: + conn = get_conn() + + ret = {} + + for appliance in conn.list_appliances(): + ret[appliance['name']] = appliance + + return ret + + +def avail_sizes(call=None): + ''' + Return a dict of all available VM sizes on the cloud provider with + relevant data. + ''' + if call == 'action': + raise SaltCloudSystemExit( + 'The avail_sizes function must be called with ' + '-f or --function, or with the --list-sizes option' + ) + + conn = get_conn() + + sizes = conn.fixed_server_flavors() + + return sizes + + +def script(vm_): + ''' + Return the script deployment object + ''' + return salt.utils.cloud.os_script( + config.get_cloud_config_value('script', vm_, __opts__), + vm_, + __opts__, + salt.utils.cloud.salt_config_to_yaml( + salt.utils.cloud.minion_config(__opts__, vm_) + ) + ) + + +def list_nodes(conn=None, call=None): + ''' + Return a list of VMs that are on the provider + ''' + if call == 'action': + raise SaltCloudSystemExit( + 'The list_nodes function must be called with -f or --function.' + ) + + if not conn: + conn = get_conn() + + ret = {} + nodes = conn.list_servers() + + for node in nodes: + public_ips = [] + private_ips = [] + ret = {} + + size = node.get('hardware').get('fixed_instance_size_id', 'Custom size') + + if node.get('private_networks') and len(node['private_networks']) > 0: + for private_ip in node['private_networks']: + private_ips.append(private_ip) + + if node.get('ips') and len(node['ips']) > 0: + for public_ip in node['ips']: + public_ips.append(public_ip['ip']) + + server = { + 'id': node['id'], + 'image': node['image']['id'], + 'size': size, + 'state': node['status']['state'], + 'private_ips': private_ips, + 'public_ips': public_ips + } + ret[node['name']] = server + + return ret + + +def list_nodes_full(conn=None, call=None): + ''' + Return a list of the VMs that are on the provider, with all fields + ''' + if call == 'action': + raise SaltCloudSystemExit( + 'The list_nodes_full function must be called with -f or ' + '--function.' + ) + + if not conn: + conn = get_conn() + + ret = {} + nodes = conn.list_servers() + + for node in nodes: + ret[node['name']] = node + + return ret + + +def list_nodes_select(conn=None, call=None): + ''' + Return a list of the VMs that are on the provider, with select fields + ''' + if not conn: + conn = get_conn() + + return salt.utils.cloud.list_nodes_select( + list_nodes_full(conn, 'function'), + __opts__['query.selection'], + call, + ) + + +def show_instance(name, call=None): + ''' + Show the details from the provider concerning an instance + ''' + if call != 'action': + raise SaltCloudSystemExit( + 'The show_instance action must be called with -a or --action.' 
+ ) + + nodes = list_nodes_full() + __utils__['cloud.cache_node']( + nodes[name], + __active_provider_name__, + __opts__ + ) + return nodes[name] + + +def _get_server(vm_): + ''' + Construct server instance from cloud profile config + ''' + description = config.get_cloud_config_value( + 'description', vm_, __opts__, default=None, + search_global=False + ) + + ssh_key = load_public_key(vm_) + + vcore = None + cores_per_processor = None + ram = None + fixed_instance_size_id = None + + if 'fixed_instance_size' in vm_: + fixed_instance_size = get_size(vm_) + fixed_instance_size_id = fixed_instance_size['id'] + elif (vm_['vcore'] and vm_['cores_per_processor'] and + vm_['ram'] and vm_['hdds']): + vcore = config.get_cloud_config_value( + 'vcore', vm_, __opts__, default=None, + search_global=False + ) + cores_per_processor = config.get_cloud_config_value( + 'cores_per_processor', vm_, __opts__, default=None, + search_global=False + ) + ram = config.get_cloud_config_value( + 'ram', vm_, __opts__, default=None, + search_global=False + ) + else: + raise SaltCloudConfigError("'fixed_instance_size' or 'vcore'," + "'cores_per_processor', 'ram', and 'hdds'" + "must be provided.") + + appliance_id = config.get_cloud_config_value( + 'appliance_id', vm_, __opts__, default=None, + search_global=False + ) + + password = config.get_cloud_config_value( + 'password', vm_, __opts__, default=None, + search_global=False + ) + + firewall_policy_id = config.get_cloud_config_value( + 'firewall_policy_id', vm_, __opts__, default=None, + search_global=False + ) + + ip_id = config.get_cloud_config_value( + 'ip_id', vm_, __opts__, default=None, + search_global=False + ) + + load_balancer_id = config.get_cloud_config_value( + 'load_balancer_id', vm_, __opts__, default=None, + search_global=False + ) + + monitoring_policy_id = config.get_cloud_config_value( + 'monitoring_policy_id', vm_, __opts__, default=None, + search_global=False + ) + + datacenter_id = config.get_cloud_config_value( + 'datacenter_id', vm_, __opts__, default=None, + search_global=False + ) + + private_network_id = config.get_cloud_config_value( + 'private_network_id', vm_, __opts__, default=None, + search_global=False + ) + + power_on = config.get_cloud_config_value( + 'power_on', vm_, __opts__, default=True, + search_global=False + ) + + # Contruct server object + return Server( + name=vm_['name'], + description=description, + fixed_instance_size_id=fixed_instance_size_id, + vcore=vcore, + cores_per_processor=cores_per_processor, + ram=ram, + appliance_id=appliance_id, + password=password, + power_on=power_on, + firewall_policy_id=firewall_policy_id, + ip_id=ip_id, + load_balancer_id=load_balancer_id, + monitoring_policy_id=monitoring_policy_id, + datacenter_id=datacenter_id, + rsa_key=ssh_key, + private_network_id=private_network_id + ) + + +def _get_hdds(vm_): + ''' + Construct VM hdds from cloud profile config + ''' + _hdds = config.get_cloud_config_value( + 'hdds', vm_, __opts__, default=None, + search_global=False + ) + + hdds = [] + + for hdd in _hdds: + hdds.append( + Hdd( + size=hdd['size'], + is_main=hdd['is_main'] + ) + ) + + return hdds + + +def create(vm_): + ''' + Create a single VM from a data dict + ''' + try: + # Check for required profile parameters before sending any API calls. 
+ if (vm_['profile'] and + config.is_profile_configured(__opts__, + (__active_provider_name__ or + 'oneandone'), + vm_['profile']) is False): + return False + except AttributeError: + pass + + data = None + conn = get_conn() + hdds = [] + + # Assemble the composite server object. + server = _get_server(vm_) + + if not bool(server.specs['hardware']['fixed_instance_size_id']): + # Assemble the hdds object. + hdds = _get_hdds(vm_) + + __utils__['cloud.fire_event']( + 'event', + 'requesting instance', + 'salt/cloud/{0}/requesting'.format(vm_['name']), + args={'name': vm_['name']}, + sock_dir=__opts__['sock_dir'], + transport=__opts__['transport'] + ) + + try: + data = conn.create_server(server=server, hdds=hdds) + + _wait_for_completion(conn, + get_wait_timeout(vm_), + data['id']) + except Exception as exc: # pylint: disable=W0703 + log.error( + 'Error creating {0} on 1and1\n\n' + 'The following exception was thrown by the 1and1 library ' + 'when trying to run the initial deployment: \n{1}'.format( + vm_['name'], exc + ), + exc_info_on_loglevel=logging.DEBUG + ) + return False + + vm_['server_id'] = data['id'] + password = data['first_password'] + + def __query_node_data(vm_, data): + ''' + Query node data until node becomes available. + ''' + running = False + try: + data = show_instance(vm_['name'], 'action') + if not data: + return False + log.debug( + 'Loaded node data for {0}:\nname: {1}\nstate: {2}'.format( + vm_['name'], + pprint.pformat(data['name']), + data['status']['state'] + ) + ) + except Exception as err: + log.error( + 'Failed to get nodes list: {0}'.format( + err + ), + # Show the trackback if the debug logging level is enabled + exc_info_on_loglevel=logging.DEBUG + ) + # Trigger a failure in the wait for IP function + return False + + running = data['status']['state'].lower() == 'powered_on' + if not running: + # Still not running, trigger another iteration + return + + vm_['ssh_host'] = data['ips'][0]['ip'] + + return data + + try: + data = salt.utils.cloud.wait_for_ip( + __query_node_data, + update_args=(vm_, data), + timeout=config.get_cloud_config_value( + 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), + interval=config.get_cloud_config_value( + 'wait_for_ip_interval', vm_, __opts__, default=10), + ) + except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: + try: + # It might be already up, let's destroy it! + destroy(vm_['name']) + except SaltCloudSystemExit: + pass + finally: + raise SaltCloudSystemExit(str(exc.message)) + + log.debug('VM is now running') + log.info('Created Cloud VM {0}'.format(vm_)) + log.debug( + '{0} VM creation details:\n{1}'.format( + vm_, pprint.pformat(data) + ) + ) + + __utils__['cloud.fire_event']( + 'event', + 'created instance', + 'salt/cloud/{0}/created'.format(vm_['name']), + args={ + 'name': vm_['name'], + 'profile': vm_['profile'], + 'provider': vm_['driver'], + }, + sock_dir=__opts__['sock_dir'], + transport=__opts__['transport'] + ) + + if 'ssh_host' in vm_: + vm_['password'] = password + vm_['key_filename'] = get_key_filename(vm_) + ret = __utils__['cloud.bootstrap'](vm_, __opts__) + ret.update(data) + return ret + else: + raise SaltCloudSystemExit('A valid IP address was not found.') + + +def destroy(name, call=None): + ''' + destroy a server by name + + :param name: name given to the server + :param call: call value in this case is 'action' + :return: array of booleans , true if successfully stopped and true if + successfully removed + + CLI Example: + + .. 
code-block:: bash + + salt-cloud -d vm_name + + ''' + if call == 'function': + raise SaltCloudSystemExit( + 'The destroy action must be called with -d, --destroy, ' + '-a or --action.' + ) + + __utils__['cloud.fire_event']( + 'event', + 'destroying instance', + 'salt/cloud/{0}/destroying'.format(name), + args={'name': name}, + sock_dir=__opts__['sock_dir'], + transport=__opts__['transport'] + ) + + conn = get_conn() + node = get_node(conn, name) + + conn.delete_server(server_id=node['id']) + + __utils__['cloud.fire_event']( + 'event', + 'destroyed instance', + 'salt/cloud/{0}/destroyed'.format(name), + args={'name': name}, + sock_dir=__opts__['sock_dir'], + transport=__opts__['transport'] + ) + + if __opts__.get('update_cachedir', False) is True: + __utils__['cloud.delete_minion_cachedir']( + name, + __active_provider_name__.split(':')[0], + __opts__ + ) + + return True + + +def reboot(name, call=None): + ''' + reboot a server by name + :param name: name given to the machine + :param call: call value in this case is 'action' + :return: true if successful + + CLI Example: + + .. code-block:: bash + + salt-cloud -a reboot vm_name + ''' + conn = get_conn() + node = get_node(conn, name) + + conn.modify_server_status(server_id=node['id'], action='REBOOT') + + return True + + +def stop(name, call=None): + ''' + stop a server by name + :param name: name given to the machine + :param call: call value in this case is 'action' + :return: true if successful + + CLI Example: + + .. code-block:: bash + + salt-cloud -a stop vm_name + ''' + conn = get_conn() + node = get_node(conn, name) + + conn.stop_server(server_id=node['id']) + + return True + + +def start(name, call=None): + ''' + start a server by name + :param name: name given to the machine + :param call: call value in this case is 'action' + :return: true if successful + + + CLI Example: + + .. code-block:: bash + + salt-cloud -a start vm_name + ''' + conn = get_conn() + node = get_node(conn, name) + + conn.start_server(server_id=node['id']) + + return True + + +def get_node(conn, name): + ''' + Return a node for the named VM + ''' + for node in conn.list_servers(per_page=1000): + if node['name'] == name: + return node + + +def get_key_filename(vm_): + ''' + Check SSH private key file and return absolute path if exists. + ''' + key_filename = config.get_cloud_config_value( + 'ssh_private_key', vm_, __opts__, search_global=False, default=None + ) + if key_filename is not None: + key_filename = os.path.expanduser(key_filename) + if not os.path.isfile(key_filename): + raise SaltCloudConfigError( + 'The defined ssh_private_key \'{0}\' does not exist'.format( + key_filename + ) + ) + + return key_filename + + +def load_public_key(vm_): + ''' + Load the public key file if exists. + ''' + public_key_filename = config.get_cloud_config_value( + 'ssh_public_key', vm_, __opts__, search_global=False, default=None + ) + if public_key_filename is not None: + public_key_filename = os.path.expanduser(public_key_filename) + if not os.path.isfile(public_key_filename): + raise SaltCloudConfigError( + 'The defined ssh_public_key \'{0}\' does not exist'.format( + public_key_filename + ) + ) + + with salt.utils.fopen(public_key_filename, 'r') as public_key: + key = public_key.read().replace('\n', '') + + return key + + +def get_wait_timeout(vm_): + ''' + Return the wait_for_timeout for resource provisioning. 
+ ''' + return config.get_cloud_config_value( + 'wait_for_timeout', vm_, __opts__, default=15 * 60, + search_global=False + ) + + +def _wait_for_completion(conn, wait_timeout, server_id): + ''' + Poll request status until resource is provisioned. + ''' + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time(): + time.sleep(5) + + server = conn.get_server(server_id) + server_state = server['status']['state'].lower() + + if server_state == "powered_on": + return + elif server_state == 'failed': + raise Exception('Server creation failed for {0}'.format(server_id)) + elif server_state in ('active', + 'enabled', + 'deploying', + 'configuring'): + continue + else: + raise Exception( + 'Unknown server state {0}'.format(server_state)) + raise Exception( + 'Timed out waiting for server create completion for {0}'.format(server_id) + ) diff --git a/salt/cloud/clouds/openstack.py b/salt/cloud/clouds/openstack.py index 53bbb5b0b6..b26a36a47c 100644 --- a/salt/cloud/clouds/openstack.py +++ b/salt/cloud/clouds/openstack.py @@ -135,6 +135,14 @@ Alternatively, one could use the private IP to connect by specifying: ssh_interface: private_ips +.. note:: + + When using floating ips from networks, if the OpenStack driver is unable to + allocate a new ip address for the server, it will check that for + unassociated ip addresses in the floating ip pool. If SaltCloud is running + in parallel mode, it is possible that more than one server will attempt to + use the same ip address. + ''' # Import python libs @@ -143,7 +151,9 @@ import os import logging import socket import pprint -from salt.utils.versions import LooseVersion as _LooseVersion + +# This import needs to be here so the version check can be done below +import salt.utils.versions # Import libcloud try: @@ -162,7 +172,8 @@ try: # However, older versions of libcloud must still be supported with this work-around. # This work-around can be removed when the required minimum version of libcloud is # 2.0.0 (See PR #40837 - which is implemented in Salt Oxygen). 
- if _LooseVersion(libcloud.__version__) < _LooseVersion('1.4.0'): + if salt.utils.versions.LooseVersion(libcloud.__version__) < \ + salt.utils.versions.LooseVersion('1.4.0'): # See https://github.com/saltstack/salt/issues/32743 import libcloud.security libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem') @@ -174,12 +185,11 @@ except Exception: from salt.cloud.libcloudfuncs import * # pylint: disable=W0614,W0401 # Import salt libs -import salt.utils +import salt.utils # Can be removed once namespaced_function has been moved import salt.utils.cloud import salt.utils.files import salt.utils.pycrypto import salt.config as config -from salt.utils import namespaced_function from salt.exceptions import ( SaltCloudConfigError, SaltCloudNotFound, @@ -204,19 +214,19 @@ __virtualname__ = 'openstack' # Some of the libcloud functions need to be in the same namespace as the # functions defined in the module, so we create new function objects inside # this module namespace -get_size = namespaced_function(get_size, globals()) -get_image = namespaced_function(get_image, globals()) -avail_locations = namespaced_function(avail_locations, globals()) -avail_images = namespaced_function(avail_images, globals()) -avail_sizes = namespaced_function(avail_sizes, globals()) -script = namespaced_function(script, globals()) -destroy = namespaced_function(destroy, globals()) -reboot = namespaced_function(reboot, globals()) -list_nodes = namespaced_function(list_nodes, globals()) -list_nodes_full = namespaced_function(list_nodes_full, globals()) -list_nodes_select = namespaced_function(list_nodes_select, globals()) -show_instance = namespaced_function(show_instance, globals()) -get_node = namespaced_function(get_node, globals()) +get_size = salt.utils.namespaced_function(get_size, globals()) +get_image = salt.utils.namespaced_function(get_image, globals()) +avail_locations = salt.utils.namespaced_function(avail_locations, globals()) +avail_images = salt.utils.namespaced_function(avail_images, globals()) +avail_sizes = salt.utils.namespaced_function(avail_sizes, globals()) +script = salt.utils.namespaced_function(script, globals()) +destroy = salt.utils.namespaced_function(destroy, globals()) +reboot = salt.utils.namespaced_function(reboot, globals()) +list_nodes = salt.utils.namespaced_function(list_nodes, globals()) +list_nodes_full = salt.utils.namespaced_function(list_nodes_full, globals()) +list_nodes_select = salt.utils.namespaced_function(list_nodes_select, globals()) +show_instance = salt.utils.namespaced_function(show_instance, globals()) +get_node = salt.utils.namespaced_function(get_node, globals()) # Only load in this module is the OPENSTACK configurations are in place @@ -230,7 +240,7 @@ def __virtual__(): if get_dependencies() is False: return False - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'This driver has been deprecated and will be removed in the ' '{version} release of Salt. Please use the nova driver instead.' @@ -854,40 +864,43 @@ def _assign_floating_ips(vm_, conn, kwargs): pool = OpenStack_1_1_FloatingIpPool( net['floating'], conn.connection ) - for idx in pool.list_floating_ips(): - if idx.node_id is None: - floating.append(idx) + try: + floating.append(pool.create_floating_ip()) + except Exception as e: + log.debug('Cannot allocate IP from floating pool \'%s\'. 
Checking for unassociated ips.', + net['floating']) + for idx in pool.list_floating_ips(): + if idx.node_id is None: + floating.append(idx) + break if not floating: - try: - floating.append(pool.create_floating_ip()) - except Exception as e: - raise SaltCloudSystemExit( - 'Floating pool \'{0}\' does not have any more ' - 'please create some more or use a different ' - 'pool.'.format(net['floating']) - ) + raise SaltCloudSystemExit( + 'There are no more floating IP addresses ' + 'available, please create some more' + ) # otherwise, attempt to obtain list without specifying pool # this is the same as 'nova floating-ip-list' elif ssh_interface(vm_) != 'private_ips': try: # This try/except is here because it appears some - # *cough* Rackspace *cough* # OpenStack providers return a 404 Not Found for the # floating ip pool URL if there are no pools setup pool = OpenStack_1_1_FloatingIpPool( '', conn.connection ) - for idx in pool.list_floating_ips(): - if idx.node_id is None: - floating.append(idx) + try: + floating.append(pool.create_floating_ip()) + except Exception as e: + log.debug('Cannot allocate IP from the default floating pool. Checking for unassociated ips.') + for idx in pool.list_floating_ips(): + if idx.node_id is None: + floating.append(idx) + break if not floating: - try: - floating.append(pool.create_floating_ip()) - except Exception as e: - raise SaltCloudSystemExit( - 'There are no more floating IP addresses ' - 'available, please create some more' - ) + log.warning( + 'There are no more floating IP addresses ' + 'available, please create some more if necessary' + ) except Exception as e: if str(e).startswith('404'): pass diff --git a/salt/cloud/clouds/parallels.py b/salt/cloud/clouds/parallels.py index a789351d35..a8db20eaae 100644 --- a/salt/cloud/clouds/parallels.py +++ b/salt/cloud/clouds/parallels.py @@ -51,6 +51,9 @@ from salt.exceptions import ( SaltCloudExecutionTimeout ) +# Import 3rd-party libs +from salt.ext import six + # Get logging started log = logging.getLogger(__name__) @@ -397,7 +400,7 @@ def query(action=None, command=None, args=None, method='GET', data=None): args = {} kwargs = {'data': data} - if isinstance(data, str) and ' # to target=host.domain.net @@ -707,7 +711,7 @@ def run_command_async(msg): log.debug("Command {} will run via runner_functions".format(cmd)) # pylint is tripping # pylint: disable=missing-whitespace-after-comma - job_id_dict = runner.async(cmd, {"args":args, "kwargs":kwargs}) + job_id_dict = runner.async(cmd, {"args": args, "kwargs": kwargs}) job_id = job_id_dict['jid'] # Default to trying to run as a client module. 
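The hunk below swaps ``expr_form`` for ``tgt_type`` when the command is published through ``LocalClient``; ``tgt_type`` is the name the 2017.7 client expects in place of the older ``expr_form``. A minimal sketch of the same call outside the engine, assuming a running master, accepted minions, and a hypothetical ``web*`` target:

.. code-block:: python

    import salt.client

    local = salt.client.LocalClient()
    # Publish test.ping asynchronously to every minion whose id matches the
    # (hypothetical) glob 'web*'; the return value is the job id (jid).
    job_id = local.cmd_async('web*', 'test.ping', tgt_type='glob')
    print(job_id)
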
@@ -716,7 +720,7 @@ def run_command_async(msg): log.debug("Command {} will run via local.cmd_async, targeting {}".format(cmd, target)) log.debug("Running {}, {}, {}, {}, {}".format(str(target), cmd, args, kwargs, str(tgt_type))) # according to https://github.com/saltstack/salt-api/issues/164, tgt_type has changed to expr_form - job_id = local.cmd_async(str(target), cmd, arg=args, kwargs=kwargs, expr_form=str(tgt_type)) + job_id = local.cmd_async(str(target), cmd, arg=args, kwargs=kwargs, tgt_type=str(tgt_type)) log.info("ret from local.cmd_async is {}".format(job_id)) return job_id diff --git a/salt/engines/stalekey.py b/salt/engines/stalekey.py index a44c2fd9aa..1887be9fd0 100644 --- a/salt/engines/stalekey.py +++ b/salt/engines/stalekey.py @@ -31,7 +31,7 @@ import salt.utils.minions import salt.wheel # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six import msgpack log = logging.getLogger(__name__) diff --git a/salt/exceptions.py b/salt/exceptions.py index 82df8ba6e4..7df94e17bb 100644 --- a/salt/exceptions.py +++ b/salt/exceptions.py @@ -11,7 +11,7 @@ import time # Import Salt libs import salt.defaults.exitcodes -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -31,14 +31,14 @@ def get_error_message(error): ''' Get human readable message from Python Exception ''' - return error.args[0] if error.args else '' + return error.args[0] if error.args else u'' class SaltException(Exception): ''' Base exception class; all Salt-specific exceptions should subclass this ''' - def __init__(self, message=''): + def __init__(self, message=u''): super(SaltException, self).__init__(message) self.strerror = message @@ -48,7 +48,8 @@ class SaltException(Exception): transport via msgpack ''' if six.PY3: - return {'message': str(self), 'args': self.args} + # The message should be a str type, not a unicode + return {u'message': str(self), u'args': self.args} return dict(message=self.__unicode__(), args=self.args) @@ -99,16 +100,16 @@ class CommandExecutionError(SaltException): Used when a module runs a command which returns an error and wants to show the user the output gracefully instead of dying ''' - def __init__(self, message='', info=None): + def __init__(self, message=u'', info=None): self.error = exc_str_prefix = message self.info = info if self.info: if exc_str_prefix: - if exc_str_prefix[-1] not in '.?!': - exc_str_prefix += '.' - exc_str_prefix += ' ' + if exc_str_prefix[-1] not in u'.?!': + exc_str_prefix += u'.' + exc_str_prefix += u' ' - exc_str_prefix += 'Additional info follows:\n\n' + exc_str_prefix += u'Additional info follows:\n\n' # NOTE: exc_str will be passed to the parent class' constructor and # become self.strerror. exc_str = exc_str_prefix + _nested_output(self.info) @@ -119,7 +120,7 @@ class CommandExecutionError(SaltException): # this information would be redundant). if isinstance(self.info, dict): info_without_changes = copy.deepcopy(self.info) - info_without_changes.pop('changes', None) + info_without_changes.pop(u'changes', None) if info_without_changes: self.strerror_without_changes = \ exc_str_prefix + _nested_output(info_without_changes) @@ -168,9 +169,9 @@ class FileLockError(SaltException): super(FileLockError, self).__init__(msg, *args, **kwargs) if time_start is None: log.warning( - 'time_start should be provided when raising a FileLockError. ' - 'Defaulting to current time as a fallback, but this may ' - 'result in an inaccurate timeout.' + u'time_start should be provided when raising a FileLockError. 
' + u'Defaulting to current time as a fallback, but this may ' + u'result in an inaccurate timeout.' ) self.time_start = time.time() else: @@ -223,29 +224,29 @@ class SaltRenderError(SaltException): def __init__(self, message, line_num=None, - buf='', - marker=' <======================', + buf=u'', + marker=u' <======================', trace=None): self.error = message exc_str = copy.deepcopy(message) self.line_num = line_num self.buffer = buf - self.context = '' + self.context = u'' if trace: - exc_str += '\n{0}\n'.format(trace) + exc_str += u'\n{0}\n'.format(trace) if self.line_num and self.buffer: - import salt.utils + import salt.utils.stringutils self.context = salt.utils.get_context( self.buffer, self.line_num, marker=marker ) - exc_str += '; line {0}\n\n{1}'.format( + exc_str += '; line {0}\n\n{1}'.format( # future lint: disable=non-unicode-string self.line_num, - self.context + salt.utils.stringutils.to_str(self.context), ) - SaltException.__init__(self, exc_str) + super(SaltRenderError, self).__init__(exc_str) class SaltClientTimeout(SaltException): @@ -389,6 +390,13 @@ class TemplateError(SaltException): ''' +# Validation related exceptions +class InvalidConfigError(CommandExecutionError): + ''' + Used when the input is invalid + ''' + + # VMware related exceptions class VMwareSaltError(CommandExecutionError): ''' diff --git a/salt/executors/sudo.py b/salt/executors/sudo.py index 106ca918a6..61ada99665 100644 --- a/salt/executors/sudo.py +++ b/salt/executors/sudo.py @@ -11,14 +11,14 @@ except ImportError: from pipes import quote as _cmd_quote # Import salt libs -import salt.utils +import salt.utils.path import salt.syspaths __virtualname__ = 'sudo' def __virtual__(): - if salt.utils.which('sudo') and __opts__.get('sudo_user'): + if salt.utils.path.which('sudo') and __opts__.get('sudo_user'): return __virtualname__ return False diff --git a/salt/ext/win_inet_pton.py b/salt/ext/win_inet_pton.py index 68d5b1b375..bd9cfcb0b4 100644 --- a/salt/ext/win_inet_pton.py +++ b/salt/ext/win_inet_pton.py @@ -9,6 +9,7 @@ from __future__ import absolute_import import socket import ctypes import os +import ipaddress class sockaddr(ctypes.Structure): @@ -31,6 +32,24 @@ else: def inet_pton(address_family, ip_string): + # Verify IP Address + # This will catch IP Addresses such as 10.1.2 + if address_family == socket.AF_INET: + try: + ipaddress.ip_address(ip_string.decode()) + except ValueError: + raise socket.error('illegal IP address string passed to inet_pton') + return socket.inet_aton(ip_string) + + # Verify IP Address + # The `WSAStringToAddressA` function handles notations used by Berkeley + # software which includes 3 part IP Addresses such as `10.1.2`. That's why + # the above check is needed to enforce more strict IP Address validation as + # used by the `inet_pton` function in Unix. 
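The comment above explains why the extra check is needed on Windows: the underlying Berkeley-style parsers accept shorthand such as ``10.1.2``, which the Unix ``inet_pton`` rejects. A quick illustration, assuming Python 3 where ``ipaddress`` is in the standard library (Python 2 needs the backport package):

.. code-block:: python

    import ipaddress
    import socket

    # The Berkeley-style parser accepts the 3-part shorthand and silently
    # expands it to 10.1.0.2 ...
    print(socket.inet_aton('10.1.2'))
    # ... while ipaddress performs the stricter inet_pton-style validation
    # and rejects it.
    try:
        ipaddress.ip_address(u'10.1.2')
    except ValueError as exc:
        print(exc)
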
+ # See the following: + # https://stackoverflow.com/a/29286098 + # Docs for the `inet_addr` function on MSDN + # https://msdn.microsoft.com/en-us/library/windows/desktop/ms738563.aspx addr = sockaddr() addr.sa_family = address_family addr_size = ctypes.c_int(ctypes.sizeof(addr)) diff --git a/salt/fileclient.py b/salt/fileclient.py index 8537c1f423..0c0050ebc0 100644 --- a/salt/fileclient.py +++ b/salt/fileclient.py @@ -26,15 +26,19 @@ import salt.transport import salt.fileserver import salt.utils import salt.utils.files -import salt.utils.templates -import salt.utils.url import salt.utils.gzip_util import salt.utils.http -import salt.ext.six as six +import salt.utils.path +import salt.utils.platform +import salt.utils.stringutils +import salt.utils.templates +import salt.utils.url +import salt.utils.versions from salt.utils.locales import sdecode from salt.utils.openstack.swift import SaltSwift # pylint: disable=no-name-in-module,import-error +from salt.ext import six import salt.ext.six.moves.BaseHTTPServer as BaseHTTPServer from salt.ext.six.moves.urllib.error import HTTPError, URLError from salt.ext.six.moves.urllib.parse import urlparse, urlunparse @@ -48,13 +52,13 @@ def get_file_client(opts, pillar=False): Read in the ``file_client`` option and return the correct type of file server ''' - client = opts.get('file_client', 'remote') - if pillar and client == 'local': - client = 'pillar' + client = opts.get(u'file_client', u'remote') + if pillar and client == u'local': + client = u'pillar' return { - 'remote': RemoteClient, - 'local': FSClient, - 'pillar': LocalClient, + u'remote': RemoteClient, + u'local': FSClient, + u'pillar': LocalClient, }.get(client, RemoteClient)(opts) @@ -95,16 +99,16 @@ class Client(object): def __setstate__(self, state): # This will polymorphically call __init__ # in the derived class. 
- self.__init__(state['opts']) + self.__init__(state[u'opts']) def __getstate__(self): - return {'opts': self.opts} + return {u'opts': self.opts} def _check_proto(self, path): ''' Make sure that this path is intended for the salt master and trim it ''' - if not path.startswith('salt://'): + if not path.startswith(u'salt://'): raise MinionError(u'Unsupported path: {0}'.format(path)) file_path, saltenv = salt.utils.url.parse(path) return file_path @@ -128,13 +132,13 @@ class Client(object): return filelist @contextlib.contextmanager - def _cache_loc(self, path, saltenv='base', cachedir=None): + def _cache_loc(self, path, saltenv=u'base', cachedir=None): ''' Return the local location to cache the file, cache dirs will be made ''' cachedir = self.get_cachedir(cachedir) - dest = salt.utils.path_join(cachedir, - 'files', + dest = salt.utils.path.join(cachedir, + u'files', saltenv, path) destdir = os.path.dirname(dest) @@ -157,16 +161,16 @@ class Client(object): def get_cachedir(self, cachedir=None): if cachedir is None: - cachedir = self.opts['cachedir'] + cachedir = self.opts[u'cachedir'] elif not os.path.isabs(cachedir): - cachedir = os.path.join(self.opts['cachedir'], cachedir) + cachedir = os.path.join(self.opts[u'cachedir'], cachedir) return cachedir def get_file(self, path, - dest='', + dest=u'', makedirs=False, - saltenv='base', + saltenv=u'base', gzip=None, cachedir=None): ''' @@ -175,32 +179,32 @@ class Client(object): ''' raise NotImplementedError - def file_list_emptydirs(self, saltenv='base', prefix=''): + def file_list_emptydirs(self, saltenv=u'base', prefix=u''): ''' List the empty dirs ''' raise NotImplementedError - def cache_file(self, path, saltenv='base', cachedir=None): + def cache_file(self, path, saltenv=u'base', cachedir=None): ''' Pull a file down from the file server and store it in the minion file cache ''' - return self.get_url(path, '', True, saltenv, cachedir=cachedir) + return self.get_url(path, u'', True, saltenv, cachedir=cachedir) - def cache_files(self, paths, saltenv='base', cachedir=None): + def cache_files(self, paths, saltenv=u'base', cachedir=None): ''' Download a list of files stored on the master and put them in the minion file cache ''' ret = [] - if isinstance(paths, str): - paths = paths.split(',') + if isinstance(paths, six.string_types): + paths = paths.split(u',') for path in paths: ret.append(self.cache_file(path, saltenv, cachedir=cachedir)) return ret - def cache_master(self, saltenv='base', cachedir=None): + def cache_master(self, saltenv=u'base', cachedir=None): ''' Download and cache all files on a master in a specified environment ''' @@ -212,7 +216,7 @@ class Client(object): ) return ret - def cache_dir(self, path, saltenv='base', include_empty=False, + def cache_dir(self, path, saltenv=u'base', include_empty=False, include_pat=None, exclude_pat=None, cachedir=None): ''' Download all of the files in a subdir of the master @@ -223,13 +227,11 @@ class Client(object): # We want to make sure files start with this *directory*, use # '/' explicitly because the master (that's generating the # list of files) only runs on POSIX - if not path.endswith('/'): - path = path + '/' + if not path.endswith(u'/'): + path = path + u'/' log.info( - 'Caching directory \'{0}\' for environment \'{1}\''.format( - path, saltenv - ) + u'Caching directory \'%s\' for environment \'%s\'', path, saltenv ) # go through the list of all files finding ones that are in # the target directory and caching them @@ -255,11 +257,11 @@ class Client(object): # prefix = separated[0] 
cachedir = self.get_cachedir(cachedir) - dest = salt.utils.path_join(cachedir, 'files', saltenv) + dest = salt.utils.path.join(cachedir, u'files', saltenv) for fn_ in self.file_list_emptydirs(saltenv): fn_ = sdecode(fn_) if fn_.startswith(path): - minion_dir = '{0}/{1}'.format(dest, fn_) + minion_dir = u'{0}/{1}'.format(dest, fn_) if not os.path.isdir(minion_dir): os.makedirs(minion_dir) ret.append(minion_dir) @@ -269,8 +271,8 @@ class Client(object): ''' Cache a local file on the minion in the localfiles cache ''' - dest = os.path.join(self.opts['cachedir'], 'localfiles', - path.lstrip('/')) + dest = os.path.join(self.opts[u'cachedir'], u'localfiles', + path.lstrip(u'/')) destdir = os.path.dirname(dest) if not os.path.isdir(destdir): @@ -279,41 +281,41 @@ class Client(object): shutil.copyfile(path, dest) return dest - def file_local_list(self, saltenv='base'): + def file_local_list(self, saltenv=u'base'): ''' List files in the local minion files and localfiles caches ''' - filesdest = os.path.join(self.opts['cachedir'], 'files', saltenv) - localfilesdest = os.path.join(self.opts['cachedir'], 'localfiles') + filesdest = os.path.join(self.opts[u'cachedir'], u'files', saltenv) + localfilesdest = os.path.join(self.opts[u'cachedir'], u'localfiles') fdest = self._file_local_list(filesdest) ldest = self._file_local_list(localfilesdest) return sorted(fdest.union(ldest)) - def file_list(self, saltenv='base', prefix=''): + def file_list(self, saltenv=u'base', prefix=u''): ''' This function must be overwritten ''' return [] - def dir_list(self, saltenv='base', prefix=''): + def dir_list(self, saltenv=u'base', prefix=u''): ''' This function must be overwritten ''' return [] - def symlink_list(self, saltenv='base', prefix=''): + def symlink_list(self, saltenv=u'base', prefix=u''): ''' This function must be overwritten ''' return {} - def is_cached(self, path, saltenv='base', cachedir=None): + def is_cached(self, path, saltenv=u'base', cachedir=None): ''' Returns the full path to a file if it is cached locally on the minion otherwise returns a blank string ''' - if path.startswith('salt://'): + if path.startswith(u'salt://'): path, senv = salt.utils.url.parse(path) if senv: saltenv = senv @@ -322,9 +324,9 @@ class Client(object): # also strip escape character '|' localsfilesdest = os.path.join( - self.opts['cachedir'], 'localfiles', path.lstrip('|/')) + self.opts[u'cachedir'], u'localfiles', path.lstrip(u'|/')) filesdest = os.path.join( - self.opts['cachedir'], 'files', saltenv, path.lstrip('|/')) + self.opts[u'cachedir'], u'files', saltenv, path.lstrip(u'|/')) extrndest = self._extrn_path(path, saltenv, cachedir=cachedir) if os.path.exists(filesdest): @@ -336,7 +338,7 @@ class Client(object): elif os.path.exists(extrndest): return extrndest - return '' + return u'' def list_states(self, saltenv): ''' @@ -344,51 +346,53 @@ class Client(object): environment ''' - limit_traversal = self.opts.get('fileserver_limit_traversal', False) + limit_traversal = self.opts.get(u'fileserver_limit_traversal', False) states = [] if limit_traversal: - if saltenv not in self.opts['file_roots']: + if saltenv not in self.opts[u'file_roots']: log.warning( - 'During an attempt to list states for saltenv \'{0}\', ' - 'the environment could not be found in the configured ' - 'file roots'.format(saltenv) + u'During an attempt to list states for saltenv \'%s\', ' + u'the environment could not be found in the configured ' + u'file roots', saltenv ) return states - for path in self.opts['file_roots'][saltenv]: + for path in 
self.opts[u'file_roots'][saltenv]: for root, dirs, files in os.walk(path, topdown=True): - log.debug('Searching for states in dirs {0} and files ' - '{1}'.format(dirs, files)) - if not [filename.endswith('.sls') for filename in files]: + log.debug( + u'Searching for states in dirs %s and files %s', + dirs, files + ) + if not [filename.endswith(u'.sls') for filename in files]: # Use shallow copy so we don't disturb the memory used by os.walk. Otherwise this breaks! del dirs[:] else: for found_file in files: stripped_root = os.path.relpath(root, path) - if salt.utils.is_windows(): - stripped_root = stripped_root.replace('\\', '/') - stripped_root = stripped_root.replace('/', '.') - if found_file.endswith(('.sls')): - if found_file.endswith('init.sls'): - if stripped_root.endswith('.'): - stripped_root = stripped_root.rstrip('.') + if salt.utils.platform.is_windows(): + stripped_root = stripped_root.replace(u'\\', u'/') + stripped_root = stripped_root.replace(u'/', u'.') + if found_file.endswith((u'.sls')): + if found_file.endswith(u'init.sls'): + if stripped_root.endswith(u'.'): + stripped_root = stripped_root.rstrip(u'.') states.append(stripped_root) else: - if not stripped_root.endswith('.'): - stripped_root += '.' - if stripped_root.startswith('.'): - stripped_root = stripped_root.lstrip('.') + if not stripped_root.endswith(u'.'): + stripped_root += u'.' + if stripped_root.startswith(u'.'): + stripped_root = stripped_root.lstrip(u'.') states.append(stripped_root + found_file[:-4]) else: for path in self.file_list(saltenv): - if salt.utils.is_windows(): - path = path.replace('\\', '/') - if path.endswith('.sls'): + if salt.utils.platform.is_windows(): + path = path.replace(u'\\', u'/') + if path.endswith(u'.sls'): # is an sls module! - if path.endswith('{0}init.sls'.format('/')): - states.append(path.replace('/', '.')[:-9]) + if path.endswith(u'/init.sls'): + states.append(path.replace(u'/', u'.')[:-9]) else: - states.append(path.replace('/', '.')[:-4]) + states.append(path.replace(u'/', u'.')[:-4]) return states def get_state(self, sls, saltenv, cachedir=None): @@ -396,31 +400,31 @@ class Client(object): Get a state file from the master and store it in the local minion cache; return the location of the file ''' - if '.' in sls: - sls = sls.replace('.', '/') - sls_url = salt.utils.url.create(sls + '.sls') - init_url = salt.utils.url.create(sls + '/init.sls') + if u'.' in sls: + sls = sls.replace(u'.', u'/') + sls_url = salt.utils.url.create(sls + u'.sls') + init_url = salt.utils.url.create(sls + u'/init.sls') for path in [sls_url, init_url]: dest = self.cache_file(path, saltenv, cachedir=cachedir) if dest: - return {'source': path, 'dest': dest} + return {u'source': path, u'dest': dest} return {} - def get_dir(self, path, dest='', saltenv='base', gzip=None, + def get_dir(self, path, dest=u'', saltenv=u'base', gzip=None, cachedir=None): ''' Get a directory recursively from the salt-master ''' ret = [] # Strip trailing slash - path = self._check_proto(path).rstrip('/') + path = self._check_proto(path).rstrip(u'/') # Break up the path into a list containing the bottom-level directory # (the one being recursively copied) and the directories preceding it - separated = path.rsplit('/', 1) + separated = path.rsplit(u'/', 1) if len(separated) != 2: # No slashes in path. 
(This means all files in saltenv will be # copied) - prefix = '' + prefix = u'' else: prefix = separated[0] @@ -429,17 +433,17 @@ class Client(object): # Prevent files in "salt://foobar/" (or salt://foo.sh) from # matching a path of "salt://foo" try: - if fn_[len(path)] != '/': + if fn_[len(path)] != u'/': continue except IndexError: continue # Remove the leading directories from path to derive # the relative path on the minion. - minion_relpath = fn_[len(prefix):].lstrip('/') + minion_relpath = fn_[len(prefix):].lstrip(u'/') ret.append( self.get_file( salt.utils.url.create(fn_), - '{0}/{1}'.format(dest, minion_relpath), + u'{0}/{1}'.format(dest, minion_relpath), True, saltenv, gzip ) ) @@ -449,14 +453,14 @@ class Client(object): # Prevent an empty dir "salt://foobar/" from matching a path of # "salt://foo" try: - if fn_[len(path)] != '/': + if fn_[len(path)] != u'/': continue except IndexError: continue # Remove the leading directories from path to derive # the relative path on the minion. - minion_relpath = fn_[len(prefix):].lstrip('/') - minion_mkdir = '{0}/{1}'.format(dest, minion_relpath) + minion_relpath = fn_[len(prefix):].lstrip(u'/') + minion_mkdir = u'{0}/{1}'.format(dest, minion_relpath) if not os.path.isdir(minion_mkdir): os.makedirs(minion_mkdir) ret.append(minion_mkdir) @@ -465,7 +469,7 @@ class Client(object): ret.sort() return ret - def get_url(self, url, dest, makedirs=False, saltenv='base', + def get_url(self, url, dest, makedirs=False, saltenv=u'base', no_cache=False, cachedir=None): ''' Get a single file from a URL. @@ -477,37 +481,37 @@ class Client(object): # If dest is a directory, rewrite dest with filename if dest is not None \ - and (os.path.isdir(dest) or dest.endswith(('/', '\\'))): - if url_data.query or len(url_data.path) > 1 and not url_data.path.endswith('/'): - strpath = url.split('/')[-1] + and (os.path.isdir(dest) or dest.endswith((u'/', u'\\'))): + if url_data.query or len(url_data.path) > 1 and not url_data.path.endswith(u'/'): + strpath = url.split(u'/')[-1] else: - strpath = 'index.html' + strpath = u'index.html' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): strpath = salt.utils.sanitize_win_path_string(strpath) dest = os.path.join(dest, strpath) if url_scheme and url_scheme.lower() in string.ascii_lowercase: - url_path = ':'.join((url_scheme, url_path)) - url_scheme = 'file' + url_path = u':'.join((url_scheme, url_path)) + url_scheme = u'file' - if url_scheme in ('file', ''): + if url_scheme in (u'file', u''): # Local filesystem if not os.path.isabs(url_path): raise CommandExecutionError( - 'Path \'{0}\' is not absolute'.format(url_path) + u'Path \'{0}\' is not absolute'.format(url_path) ) if dest is None: - with salt.utils.files.fopen(url_path, 'r') as fp_: + with salt.utils.files.fopen(url_path, u'r') as fp_: data = fp_.read() return data return url_path - if url_scheme == 'salt': + if url_scheme == u'salt': result = self.get_file(url, dest, makedirs, saltenv, cachedir=cachedir) if result and dest is None: - with salt.utils.files.fopen(result, 'r') as fp_: + with salt.utils.files.fopen(result, u'r') as fp_: data = fp_.read() return data return result @@ -518,87 +522,89 @@ class Client(object): if makedirs: os.makedirs(destdir) else: - return '' + return u'' elif not no_cache: dest = self._extrn_path(url, saltenv, cachedir=cachedir) destdir = os.path.dirname(dest) if not os.path.isdir(destdir): os.makedirs(destdir) - if url_data.scheme == 's3': + if url_data.scheme == u's3': try: def s3_opt(key, default=None): - '''Get value 
of s3. from Minion config or from Pillar''' - if 's3.' + key in self.opts: - return self.opts['s3.' + key] + u'''Get value of s3. from Minion config or from Pillar''' + if u's3.' + key in self.opts: + return self.opts[u's3.' + key] try: - return self.opts['pillar']['s3'][key] + return self.opts[u'pillar'][u's3'][key] except (KeyError, TypeError): return default - self.utils['s3.query'](method='GET', + self.utils[u's3.query'](method=u'GET', bucket=url_data.netloc, path=url_data.path[1:], return_bin=False, local_file=dest, action=None, - key=s3_opt('key'), - keyid=s3_opt('keyid'), - service_url=s3_opt('service_url'), - verify_ssl=s3_opt('verify_ssl', True), - location=s3_opt('location'), - path_style=s3_opt('path_style', False), - https_enable=s3_opt('https_enable', True)) + key=s3_opt(u'key'), + keyid=s3_opt(u'keyid'), + service_url=s3_opt(u'service_url'), + verify_ssl=s3_opt(u'verify_ssl', True), + location=s3_opt(u'location'), + path_style=s3_opt(u'path_style', False), + https_enable=s3_opt(u'https_enable', True)) return dest except Exception as exc: raise MinionError( - 'Could not fetch from {0}. Exception: {1}'.format(url, exc) + u'Could not fetch from {0}. Exception: {1}'.format(url, exc) ) - if url_data.scheme == 'ftp': + if url_data.scheme == u'ftp': try: ftp = ftplib.FTP() ftp.connect(url_data.hostname, url_data.port) ftp.login(url_data.username, url_data.password) - with salt.utils.files.fopen(dest, 'wb') as fp_: - ftp.retrbinary('RETR {0}'.format(url_data.path), fp_.write) + with salt.utils.files.fopen(dest, u'wb') as fp_: + ftp.retrbinary(u'RETR {0}'.format(url_data.path), fp_.write) ftp.quit() return dest except Exception as exc: - raise MinionError('Could not retrieve {0} from FTP server. Exception: {1}'.format(url, exc)) + raise MinionError(u'Could not retrieve {0} from FTP server. 
Exception: {1}'.format(url, exc)) - if url_data.scheme == 'swift': + if url_data.scheme == u'swift': try: def swift_opt(key, default): - '''Get value of from Minion config or from Pillar''' + ''' + Get value of from Minion config or from Pillar + ''' if key in self.opts: return self.opts[key] try: - return self.opts['pillar'][key] + return self.opts[u'pillar'][key] except (KeyError, TypeError): return default - swift_conn = SaltSwift(swift_opt('keystone.user', None), - swift_opt('keystone.tenant', None), - swift_opt('keystone.auth_url', None), - swift_opt('keystone.password', None)) + swift_conn = SaltSwift(swift_opt(u'keystone.user', None), + swift_opt(u'keystone.tenant', None), + swift_opt(u'keystone.auth_url', None), + swift_opt(u'keystone.password', None)) swift_conn.get_object(url_data.netloc, url_data.path[1:], dest) return dest except Exception: - raise MinionError('Could not fetch from {0}'.format(url)) + raise MinionError(u'Could not fetch from {0}'.format(url)) get_kwargs = {} if url_data.username is not None \ - and url_data.scheme in ('http', 'https'): + and url_data.scheme in (u'http', u'https'): netloc = url_data.netloc - at_sign_pos = netloc.rfind('@') + at_sign_pos = netloc.rfind(u'@') if at_sign_pos != -1: netloc = netloc[at_sign_pos + 1:] fixed_url = urlunparse( (url_data.scheme, netloc, url_data.path, url_data.params, url_data.query, url_data.fragment)) - get_kwargs['auth'] = (url_data.username, url_data.password) + get_kwargs[u'auth'] = (url_data.username, url_data.password) else: fixed_url = url @@ -634,19 +640,26 @@ class Client(object): def on_header(hdr): if write_body[1] is not False and write_body[2] is None: + if not hdr.strip() and 'Content-Type' not in write_body[1]: + # We've reached the end of the headers and not yet + # found the Content-Type. Reset the values we're + # tracking so that we properly follow the redirect. + write_body[0] = None + write_body[1] = False + return # Try to find out what content type encoding is used if # this is a text file write_body[1].parse_line(hdr) # pylint: disable=no-member - if 'Content-Type' in write_body[1]: - content_type = write_body[1].get('Content-Type') # pylint: disable=no-member - if not content_type.startswith('text'): + if u'Content-Type' in write_body[1]: + content_type = write_body[1].get(u'Content-Type') # pylint: disable=no-member + if not content_type.startswith(u'text'): write_body[1] = write_body[2] = False else: - encoding = 'utf-8' - fields = content_type.split(';') + encoding = u'utf-8' + fields = content_type.split(u';') for field in fields: - if 'encoding' in field: - encoding = field.split('encoding=')[-1] + if u'encoding' in field: + encoding = field.split(u'encoding=')[-1] write_body[2] = encoding # We have found our encoding. Stop processing headers. write_body[1] = False @@ -677,10 +690,10 @@ class Client(object): chunk = chunk.decode(write_body[2]) result.append(chunk) else: - dest_tmp = "{0}.part".format(dest) + dest_tmp = u"{0}.part".format(dest) # We need an open filehandle to use in the on_chunk callback, # that's why we're not using a with clause here. 
- destfp = salt.utils.files.fopen(dest_tmp, 'wb') # pylint: disable=resource-leakage + destfp = salt.utils.files.fopen(dest_tmp, u'wb') # pylint: disable=resource-leakage def on_chunk(chunk): if write_body[0]: @@ -696,24 +709,24 @@ class Client(object): opts=self.opts, **get_kwargs ) - if 'handle' not in query: - raise MinionError('Error: {0} reading {1}'.format(query['error'], url)) + if u'handle' not in query: + raise MinionError(u'Error: {0} reading {1}'.format(query[u'error'], url)) if no_cache: if write_body[2]: - return six.u('').join(result) - return six.b('').join(result) + return u''.join(result) + return six.b(u'').join(result) else: destfp.close() destfp = None salt.utils.files.rename(dest_tmp, dest) return dest except HTTPError as exc: - raise MinionError('HTTP error {0} reading {1}: {3}'.format( + raise MinionError(u'HTTP error {0} reading {1}: {3}'.format( exc.code, url, *BaseHTTPServer.BaseHTTPRequestHandler.responses[exc.code])) except URLError as exc: - raise MinionError('Error reading {0}: {1}'.format(url, exc.reason)) + raise MinionError(u'Error reading {0}: {1}'.format(url, exc.reason)) finally: if destfp is not None: destfp.close() @@ -722,45 +735,43 @@ class Client(object): self, url, dest, - template='jinja', + template=u'jinja', makedirs=False, - saltenv='base', + saltenv=u'base', cachedir=None, **kwargs): ''' Cache a file then process it as a template ''' - if 'env' in kwargs: - salt.utils.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' + if u'env' in kwargs: + salt.utils.versions.warn_until( + u'Oxygen', + u'Parameter \'env\' has been detected in the argument list. This ' + u'parameter is no longer used and has been replaced by \'saltenv\' ' + u'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' 
) - kwargs.pop('env') + kwargs.pop(u'env') - kwargs['saltenv'] = saltenv + kwargs[u'saltenv'] = saltenv url_data = urlparse(url) sfn = self.cache_file(url, saltenv, cachedir=cachedir) if not os.path.exists(sfn): - return '' + return u'' if template in salt.utils.templates.TEMPLATE_REGISTRY: data = salt.utils.templates.TEMPLATE_REGISTRY[template]( sfn, **kwargs ) else: - log.error('Attempted to render template with unavailable engine ' - '{0}'.format(template)) - return '' - if not data['result']: - # Failed to render the template log.error( - 'Failed to render template with error: {0}'.format( - data['data'] - ) + u'Attempted to render template with unavailable engine %s', + template ) - return '' + return u'' + if not data[u'result']: + # Failed to render the template + log.error(u'Failed to render template with error: %s', data[u'data']) + return u'' if not dest: # No destination passed, set the dest as an extrn_files cache dest = self._extrn_path(url, saltenv, cachedir=cachedir) @@ -772,9 +783,9 @@ class Client(object): if makedirs: os.makedirs(destdir) else: - salt.utils.files.safe_rm(data['data']) - return '' - shutil.move(data['data'], dest) + salt.utils.files.safe_rm(data[u'data']) + return u'' + shutil.move(data[u'data'], dest) return dest def _extrn_path(self, url, saltenv, cachedir=None): @@ -782,27 +793,27 @@ class Client(object): Return the extn_filepath for a given url ''' url_data = urlparse(url) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): netloc = salt.utils.sanitize_win_path_string(url_data.netloc) else: netloc = url_data.netloc # Strip user:pass from URLs - netloc = netloc.split('@')[-1] + netloc = netloc.split(u'@')[-1] if cachedir is None: - cachedir = self.opts['cachedir'] + cachedir = self.opts[u'cachedir'] elif not os.path.isabs(cachedir): - cachedir = os.path.join(self.opts['cachedir'], cachedir) + cachedir = os.path.join(self.opts[u'cachedir'], cachedir) if url_data.query: - file_name = '-'.join([url_data.path, url_data.query]) + file_name = u'-'.join([url_data.path, url_data.query]) else: file_name = url_data.path - return salt.utils.path_join( + return salt.utils.path.join( cachedir, - 'extrn_files', + u'extrn_files', saltenv, netloc, file_name @@ -816,31 +827,31 @@ class LocalClient(Client): def __init__(self, opts): Client.__init__(self, opts) - def _find_file(self, path, saltenv='base'): + def _find_file(self, path, saltenv=u'base'): ''' Locate the file path ''' - fnd = {'path': '', - 'rel': ''} + fnd = {u'path': u'', + u'rel': u''} - if saltenv not in self.opts['file_roots']: + if saltenv not in self.opts[u'file_roots']: return fnd if salt.utils.url.is_escaped(path): # The path arguments are escaped path = salt.utils.url.unescape(path) - for root in self.opts['file_roots'][saltenv]: + for root in self.opts[u'file_roots'][saltenv]: full = os.path.join(root, path) if os.path.isfile(full): - fnd['path'] = full - fnd['rel'] = path + fnd[u'path'] = full + fnd[u'rel'] = path return fnd return fnd def get_file(self, path, - dest='', + dest=u'', makedirs=False, - saltenv='base', + saltenv=u'base', gzip=None, cachedir=None): ''' @@ -849,22 +860,22 @@ class LocalClient(Client): ''' path = self._check_proto(path) fnd = self._find_file(path, saltenv) - fnd_path = fnd.get('path') + fnd_path = fnd.get(u'path') if not fnd_path: - return '' + return u'' return fnd_path - def file_list(self, saltenv='base', prefix=''): + def file_list(self, saltenv=u'base', prefix=u''): ''' Return a list of files in the given environment with optional relative prefix 
path to limit directory traversal ''' ret = [] - if saltenv not in self.opts['file_roots']: + if saltenv not in self.opts[u'file_roots']: return ret - prefix = prefix.strip('/') - for path in self.opts['file_roots'][saltenv]: + prefix = prefix.strip(u'/') + for path in self.opts[u'file_roots'][saltenv]: for root, dirs, files in os.walk( os.path.join(path, prefix), followlinks=True ): @@ -875,16 +886,16 @@ class LocalClient(Client): ret.append(sdecode(relpath)) return ret - def file_list_emptydirs(self, saltenv='base', prefix=''): + def file_list_emptydirs(self, saltenv=u'base', prefix=u''): ''' List the empty dirs in the file_roots with optional relative prefix path to limit directory traversal ''' ret = [] - prefix = prefix.strip('/') - if saltenv not in self.opts['file_roots']: + prefix = prefix.strip(u'/') + if saltenv not in self.opts[u'file_roots']: return ret - for path in self.opts['file_roots'][saltenv]: + for path in self.opts[u'file_roots'][saltenv]: for root, dirs, files in os.walk( os.path.join(path, prefix), followlinks=True ): @@ -894,23 +905,23 @@ class LocalClient(Client): ret.append(sdecode(os.path.relpath(root, path))) return ret - def dir_list(self, saltenv='base', prefix=''): + def dir_list(self, saltenv=u'base', prefix=u''): ''' List the dirs in the file_roots with optional relative prefix path to limit directory traversal ''' ret = [] - if saltenv not in self.opts['file_roots']: + if saltenv not in self.opts[u'file_roots']: return ret - prefix = prefix.strip('/') - for path in self.opts['file_roots'][saltenv]: + prefix = prefix.strip(u'/') + for path in self.opts[u'file_roots'][saltenv]: for root, dirs, files in os.walk( os.path.join(path, prefix), followlinks=True ): ret.append(sdecode(os.path.relpath(root, path))) return ret - def __get_file_path(self, path, saltenv='base'): + def __get_file_path(self, path, saltenv=u'base'): ''' Return either a file path or the result of a remote find_file call. 
''' @@ -919,14 +930,16 @@ class LocalClient(Client): except MinionError as err: # Local file path if not os.path.isfile(path): - msg = 'specified file {0} is not present to generate hash: {1}' - log.warning(msg.format(path, err)) + log.warning( + u'specified file %s is not present to generate hash: %s', + path, err + ) return None else: return path return self._find_file(path, saltenv) - def hash_file(self, path, saltenv='base'): + def hash_file(self, path, saltenv=u'base'): ''' Return the hash of a file, to get the hash of a file in the file_roots prepend the path with salt:// otherwise, prepend the @@ -939,17 +952,17 @@ class LocalClient(Client): try: # Remote file path (self._find_file() invoked) - fnd_path = fnd['path'] + fnd_path = fnd[u'path'] except TypeError: # Local file path fnd_path = fnd - hash_type = self.opts.get('hash_type', 'md5') - ret['hsum'] = salt.utils.get_hash(fnd_path, form=hash_type) - ret['hash_type'] = hash_type + hash_type = self.opts.get(u'hash_type', u'md5') + ret[u'hsum'] = salt.utils.get_hash(fnd_path, form=hash_type) + ret[u'hash_type'] = hash_type return ret - def hash_and_stat_file(self, path, saltenv='base'): + def hash_and_stat_file(self, path, saltenv=u'base'): ''' Return the hash of a file, to get the hash of a file in the file_roots prepend the path with salt:// otherwise, prepend the @@ -965,8 +978,8 @@ class LocalClient(Client): try: # Remote file path (self._find_file() invoked) - fnd_path = fnd['path'] - fnd_stat = fnd.get('stat') + fnd_path = fnd[u'path'] + fnd_stat = fnd.get(u'stat') except TypeError: # Local file path fnd_path = fnd @@ -975,12 +988,12 @@ class LocalClient(Client): except Exception: fnd_stat = None - hash_type = self.opts.get('hash_type', 'md5') - ret['hsum'] = salt.utils.get_hash(fnd_path, form=hash_type) - ret['hash_type'] = hash_type + hash_type = self.opts.get(u'hash_type', u'md5') + ret[u'hsum'] = salt.utils.get_hash(fnd_path, form=hash_type) + ret[u'hash_type'] = hash_type return ret, fnd_stat - def list_env(self, saltenv='base'): + def list_env(self, saltenv=u'base'): ''' Return a list of the files in the file server's specified environment ''' @@ -997,7 +1010,7 @@ class LocalClient(Client): Return the available environments ''' ret = [] - for saltenv in self.opts['file_roots']: + for saltenv in self.opts[u'file_roots']: ret.append(saltenv) return ret @@ -1024,10 +1037,10 @@ class RemoteClient(Client): def __init__(self, opts): Client.__init__(self, opts) self.channel = salt.transport.Channel.factory(self.opts) - if hasattr(self.channel, 'auth'): + if hasattr(self.channel, u'auth'): self.auth = self.channel.auth else: - self.auth = '' + self.auth = u'' def _refresh_channel(self): ''' @@ -1038,9 +1051,9 @@ class RemoteClient(Client): def get_file(self, path, - dest='', + dest=u'', makedirs=False, - saltenv='base', + saltenv=u'base', gzip=None, cachedir=None): ''' @@ -1053,7 +1066,7 @@ class RemoteClient(Client): if senv: saltenv = senv - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): hash_server, stat_server = self.hash_and_stat_file(path, saltenv) try: mode_server = stat_server[0] @@ -1065,20 +1078,21 @@ class RemoteClient(Client): # Check if file exists on server, before creating files and # directories - if hash_server == '': + if hash_server == u'': log.debug( - 'Could not find file \'%s\' in saltenv \'%s\'', + u'Could not find file \'%s\' in saltenv \'%s\'', path, saltenv ) return False # If dest is a directory, rewrite dest with filename if dest is not None \ - and (os.path.isdir(dest) or 
dest.endswith(('/', '\\'))): + and (os.path.isdir(dest) or dest.endswith((u'/', u'\\'))): dest = os.path.join(dest, os.path.basename(path)) log.debug( - 'In saltenv \'%s\', \'%s\' is a directory. Changing dest to ' - '\'%s\'', saltenv, os.path.dirname(dest), dest) + u'In saltenv \'%s\', \'%s\' is a directory. Changing dest to ' + u'\'%s\'', saltenv, os.path.dirname(dest), dest + ) # Hash compare local copy with master and skip download # if no difference found. @@ -1087,20 +1101,20 @@ class RemoteClient(Client): rel_path = self._check_proto(path) log.debug( - 'In saltenv \'%s\', looking at rel_path \'%s\' to resolve ' - '\'%s\'', saltenv, rel_path, path + u'In saltenv \'%s\', looking at rel_path \'%s\' to resolve ' + u'\'%s\'', saltenv, rel_path, path ) with self._cache_loc( rel_path, saltenv, cachedir=cachedir) as cache_dest: dest2check = cache_dest log.debug( - 'In saltenv \'%s\', ** considering ** path \'%s\' to resolve ' - '\'%s\'', saltenv, dest2check, path + u'In saltenv \'%s\', ** considering ** path \'%s\' to resolve ' + u'\'%s\'', saltenv, dest2check, path ) if dest2check and os.path.isfile(dest2check): - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): hash_local, stat_local = \ self.hash_and_stat_file(dest2check, saltenv) try: @@ -1115,18 +1129,18 @@ class RemoteClient(Client): return dest2check log.debug( - 'Fetching file from saltenv \'%s\', ** attempting ** \'%s\'', + u'Fetching file from saltenv \'%s\', ** attempting ** \'%s\'', saltenv, path ) d_tries = 0 transport_tries = 0 path = self._check_proto(path) - load = {'path': path, - 'saltenv': saltenv, - 'cmd': '_serve_file'} + load = {u'path': path, + u'saltenv': saltenv, + u'cmd': u'_serve_file'} if gzip: gzip = int(gzip) - load['gzip'] = gzip + load[u'gzip'] = gzip fn_ = None if dest: @@ -1138,15 +1152,15 @@ class RemoteClient(Client): return False # We need an open filehandle here, that's why we're not using a # with clause: - fn_ = salt.utils.files.fopen(dest, 'wb+') # pylint: disable=resource-leakage + fn_ = salt.utils.files.fopen(dest, u'wb+') # pylint: disable=resource-leakage else: - log.debug('No dest file found') + log.debug(u'No dest file found') while True: if not fn_: - load['loc'] = 0 + load[u'loc'] = 0 else: - load['loc'] = fn_.tell() + load[u'loc'] = fn_.tell() data = self.channel.send(load, raw=True) if six.PY3: # Sometimes the source is local (eg when using @@ -1156,31 +1170,31 @@ class RemoteClient(Client): # strings for the top-level keys to simplify things. data = decode_dict_keys_to_str(data) try: - if not data['data']: - if not fn_ and data['dest']: + if not data[u'data']: + if not fn_ and data[u'dest']: # This is a 0 byte file on the master with self._cache_loc( - data['dest'], + data[u'dest'], saltenv, cachedir=cachedir) as cache_dest: dest = cache_dest - with salt.utils.files.fopen(cache_dest, 'wb+') as ofile: - ofile.write(data['data']) - if 'hsum' in data and d_tries < 3: + with salt.utils.files.fopen(cache_dest, u'wb+') as ofile: + ofile.write(data[u'data']) + if u'hsum' in data and d_tries < 3: # Master has prompted a file verification, if the # verification fails, re-download the file. 
Try 3 times d_tries += 1 - hsum = salt.utils.get_hash(dest, salt.utils.to_str(data.get('hash_type', b'md5'))) - if hsum != data['hsum']: + hsum = salt.utils.get_hash(dest, salt.utils.stringutils.to_str(data.get(u'hash_type', b'md5'))) # future lint: disable=non-unicode-string + if hsum != data[u'hsum']: log.warning( - 'Bad download of file %s, attempt %d of 3', + u'Bad download of file %s, attempt %d of 3', path, d_tries ) continue break if not fn_: with self._cache_loc( - data['dest'], + data[u'dest'], saltenv, cachedir=cachedir) as cache_dest: dest = cache_dest @@ -1188,11 +1202,11 @@ class RemoteClient(Client): # remove it to avoid a traceback trying to write the file if os.path.isdir(dest): salt.utils.files.rm_rf(dest) - fn_ = salt.utils.files.fopen(dest, 'wb+') - if data.get('gzip', None): - data = salt.utils.gzip_util.uncompress(data['data']) + fn_ = salt.utils.files.fopen(dest, u'wb+') + if data.get(u'gzip', None): + data = salt.utils.gzip_util.uncompress(data[u'data']) else: - data = data['data'] + data = data[u'data'] if six.PY3 and isinstance(data, str): data = data.encode() fn_.write(data) @@ -1204,15 +1218,15 @@ class RemoteClient(Client): data_type = str(type(data)) transport_tries += 1 log.warning( - 'Data transport is broken, got: %s, type: %s, ' - 'exception: %s, attempt %d of 3', + u'Data transport is broken, got: %s, type: %s, ' + u'exception: %s, attempt %d of 3', data, data_type, exc, transport_tries ) self._refresh_channel() if transport_tries > 3: log.error( - 'Data transport is broken, got: %s, type: %s, ' - 'exception: %s, retry attempts exhausted', + u'Data transport is broken, got: %s, type: %s, ' + u'exception: %s, retry attempts exhausted', data, data_type, exc ) break @@ -1220,55 +1234,55 @@ class RemoteClient(Client): if fn_: fn_.close() log.info( - 'Fetching file from saltenv \'%s\', ** done ** \'%s\'', + u'Fetching file from saltenv \'%s\', ** done ** \'%s\'', saltenv, path ) else: log.debug( - 'In saltenv \'%s\', we are ** missing ** the file \'%s\'', + u'In saltenv \'%s\', we are ** missing ** the file \'%s\'', saltenv, path ) return dest - def file_list(self, saltenv='base', prefix=''): + def file_list(self, saltenv=u'base', prefix=u''): ''' List the files on the master ''' - load = {'saltenv': saltenv, - 'prefix': prefix, - 'cmd': '_file_list'} + load = {u'saltenv': saltenv, + u'prefix': prefix, + u'cmd': u'_file_list'} return [sdecode(fn_) for fn_ in self.channel.send(load)] - def file_list_emptydirs(self, saltenv='base', prefix=''): + def file_list_emptydirs(self, saltenv=u'base', prefix=u''): ''' List the empty dirs on the master ''' - load = {'saltenv': saltenv, - 'prefix': prefix, - 'cmd': '_file_list_emptydirs'} + load = {u'saltenv': saltenv, + u'prefix': prefix, + u'cmd': u'_file_list_emptydirs'} self.channel.send(load) - def dir_list(self, saltenv='base', prefix=''): + def dir_list(self, saltenv=u'base', prefix=u''): ''' List the dirs on the master ''' - load = {'saltenv': saltenv, - 'prefix': prefix, - 'cmd': '_dir_list'} + load = {u'saltenv': saltenv, + u'prefix': prefix, + u'cmd': u'_dir_list'} return self.channel.send(load) - def symlink_list(self, saltenv='base', prefix=''): + def symlink_list(self, saltenv=u'base', prefix=u''): ''' List symlinked files and dirs on the master ''' - load = {'saltenv': saltenv, - 'prefix': prefix, - 'cmd': '_symlink_list'} + load = {u'saltenv': saltenv, + u'prefix': prefix, + u'cmd': u'_symlink_list'} return self.channel.send(load) - def __hash_and_stat_file(self, path, saltenv='base'): + def 
__hash_and_stat_file(self, path, saltenv=u'base'): ''' Common code for hashing and stating files ''' @@ -1276,21 +1290,23 @@ class RemoteClient(Client): path = self._check_proto(path) except MinionError as err: if not os.path.isfile(path): - msg = 'specified file {0} is not present to generate hash: {1}' - log.warning(msg.format(path, err)) - return {} + log.warning( + u'specified file %s is not present to generate hash: %s', + path, err + ) + return {}, None else: ret = {} - hash_type = self.opts.get('hash_type', 'md5') - ret['hsum'] = salt.utils.get_hash(path, form=hash_type) - ret['hash_type'] = hash_type + hash_type = self.opts.get(u'hash_type', u'md5') + ret[u'hsum'] = salt.utils.get_hash(path, form=hash_type) + ret[u'hash_type'] = hash_type return ret, list(os.stat(path)) - load = {'path': path, - 'saltenv': saltenv, - 'cmd': '_file_hash_and_stat'} + load = {u'path': path, + u'saltenv': saltenv, + u'cmd': u'_file_hash_and_stat'} return self.channel.send(load) - def hash_file(self, path, saltenv='base'): + def hash_file(self, path, saltenv=u'base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt:// @@ -1298,44 +1314,44 @@ class RemoteClient(Client): ''' return self.__hash_and_stat_file(path, saltenv)[0] - def hash_and_stat_file(self, path, saltenv='base'): + def hash_and_stat_file(self, path, saltenv=u'base'): ''' The same as hash_file, but also return the file's mode, or None if no mode data is present. ''' return self.__hash_and_stat_file(path, saltenv) - def list_env(self, saltenv='base'): + def list_env(self, saltenv=u'base'): ''' Return a list of the files in the file server's specified environment ''' - load = {'saltenv': saltenv, - 'cmd': '_file_list'} + load = {u'saltenv': saltenv, + u'cmd': u'_file_list'} return self.channel.send(load) def envs(self): ''' Return a list of available environments ''' - load = {'cmd': '_file_envs'} + load = {u'cmd': u'_file_envs'} return self.channel.send(load) def master_opts(self): ''' Return the master opts data ''' - load = {'cmd': '_master_opts'} + load = {u'cmd': u'_master_opts'} return self.channel.send(load) def master_tops(self): ''' Return the metadata derived from the master_tops system ''' - load = {'cmd': '_master_tops', - 'id': self.opts['id'], - 'opts': self.opts} + load = {u'cmd': u'_master_tops', + u'id': self.opts[u'id'], + u'opts': self.opts} if self.auth: - load['tok'] = self.auth.gen_token('salt') + load[u'tok'] = self.auth.gen_token(u'salt') return self.channel.send(load) diff --git a/salt/fileserver/__init__.py b/salt/fileserver/__init__.py index 8ca59108c4..c3f046fc98 100644 --- a/salt/fileserver/__init__.py +++ b/salt/fileserver/__init__.py @@ -15,14 +15,14 @@ import time # Import salt libs import salt.loader -import salt.utils import salt.utils.files import salt.utils.locales import salt.utils.url +import salt.utils.versions from salt.utils.args import get_function_argspec as _argspec # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -553,7 +553,7 @@ class Fileserver(object): kwargs[args[0]] = args[1] if 'env' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. 
This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -583,7 +583,7 @@ class Fileserver(object): 'dest': ''} if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -609,7 +609,7 @@ class Fileserver(object): Common code for hashing and stating files ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. ' 'This parameter is no longer used and has been replaced by ' @@ -656,7 +656,7 @@ class Fileserver(object): Deletes the file_lists cache files ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -738,7 +738,7 @@ class Fileserver(object): Return a list of files from the dominant environment ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -769,7 +769,7 @@ class Fileserver(object): List all emptydirs in the given environment ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -800,7 +800,7 @@ class Fileserver(object): List all directories in the given environment ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -831,7 +831,7 @@ class Fileserver(object): Return a list of symlinked files and dirs ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. 
This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' diff --git a/salt/fileserver/azurefs.py b/salt/fileserver/azurefs.py index 11b08b5a52..d1cdf1c33b 100644 --- a/salt/fileserver/azurefs.py +++ b/salt/fileserver/azurefs.py @@ -58,6 +58,7 @@ import salt.fileserver import salt.utils import salt.utils.files import salt.utils.gzip_util +import salt.utils.path from salt.utils.versions import LooseVersion try: @@ -69,7 +70,7 @@ except ImportError: HAS_AZURE = False # Import third party libs -import salt.ext.six as six +from salt.ext import six __virtualname__ = 'azurefs' @@ -276,7 +277,7 @@ def file_hash(load, fnd): relpath = fnd['rel'] path = fnd['path'] hash_cachedir = os.path.join(__opts__['cachedir'], 'azurefs', 'hashes') - hashdest = salt.utils.path_join(hash_cachedir, + hashdest = salt.utils.path.join(hash_cachedir, load['saltenv'], '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) diff --git a/salt/fileserver/hgfs.py b/salt/fileserver/hgfs.py index ffcee6a01d..bf7c82dcd8 100644 --- a/salt/fileserver/hgfs.py +++ b/salt/fileserver/hgfs.py @@ -49,7 +49,7 @@ VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed') PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root') # Import third party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error try: import hglib @@ -63,6 +63,7 @@ import salt.utils import salt.utils.files import salt.utils.gzip_util import salt.utils.url +import salt.utils.versions import salt.fileserver from salt.utils.event import tagify @@ -569,7 +570,7 @@ def _env_is_exposed(env): blacklist. ''' if __opts__['hgfs_env_whitelist']: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_whitelist config option has been renamed to ' 'hgfs_saltenv_whitelist. Please update your configuration.' @@ -579,7 +580,7 @@ def _env_is_exposed(env): whitelist = __opts__['hgfs_saltenv_whitelist'] if __opts__['hgfs_env_blacklist']: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_blacklist config option has been renamed to ' 'hgfs_saltenv_blacklist. Please update your configuration.' @@ -735,7 +736,7 @@ def serve_file(load, fnd): Return a chunk from a file based on the data received ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -769,7 +770,7 @@ def file_hash(load, fnd): Return a file hash, the hash type is set in the master config file ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -803,7 +804,7 @@ def _file_lists(load, form): Return a dict containing the file lists for files and dirs ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -851,7 +852,7 @@ def _get_file_list(load): Get a list of all files on the file server in a specified environment ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. 
This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -896,7 +897,7 @@ def _get_dir_list(load): Get a list of all directories on the master ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' diff --git a/salt/fileserver/minionfs.py b/salt/fileserver/minionfs.py index e94e5bc85c..52a8fd0616 100644 --- a/salt/fileserver/minionfs.py +++ b/salt/fileserver/minionfs.py @@ -35,9 +35,10 @@ import salt.utils import salt.utils.files import salt.utils.gzip_util import salt.utils.url +import salt.utils.versions # Import third party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -164,7 +165,7 @@ def file_hash(load, fnd): ret = {} if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -234,7 +235,7 @@ def file_list(load): Return a list of all files on the file server in a specified environment ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -318,7 +319,7 @@ def dir_list(load): - source-minion/absolute/path ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' diff --git a/salt/fileserver/roots.py b/salt/fileserver/roots.py index 11fb663d10..9aefb46e25 100644 --- a/salt/fileserver/roots.py +++ b/salt/fileserver/roots.py @@ -24,12 +24,13 @@ import logging # Import salt libs import salt.fileserver -import salt.utils +import salt.utils # Can be removed once is_bin_file and get_hash are moved import salt.utils.event import salt.utils.files import salt.utils.gzip_util import salt.utils.path -import salt.ext.six as six +import salt.utils.versions +from salt.ext import six log = logging.getLogger(__name__) @@ -39,7 +40,7 @@ def find_file(path, saltenv='base', **kwargs): Search the environment for the relative path. ''' if 'env' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -116,7 +117,7 @@ def serve_file(load, fnd): Return a chunk from a file based on the data received ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -217,7 +218,7 @@ def file_hash(load, fnd): Return a file hash, the hash type is set in the master config file ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. 
This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -297,7 +298,7 @@ def _file_lists(load, form): Return a dict containing the file lists for files, dirs, emtydirs and symlinks ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -443,7 +444,7 @@ def symlink_list(load): Return a dict of all symlinks based on a given path on the Master ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' diff --git a/salt/fileserver/s3fs.py b/salt/fileserver/s3fs.py index 0ce9bcb851..7f262aa3bb 100644 --- a/salt/fileserver/s3fs.py +++ b/salt/fileserver/s3fs.py @@ -71,10 +71,11 @@ import salt.modules import salt.utils import salt.utils.files import salt.utils.gzip_util +import salt.utils.versions # Import 3rd-party libs # pylint: disable=import-error,no-name-in-module,redefined-builtin -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import filter from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: enable=import-error,no-name-in-module,redefined-builtin @@ -125,7 +126,7 @@ def find_file(path, saltenv='base', **kwargs): is missing, or if the MD5 does not match. ''' if 'env' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -167,7 +168,7 @@ def file_hash(load, fnd): Return an MD5 file hash ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -200,7 +201,7 @@ def serve_file(load, fnd): Return a chunk from a file based on the data received ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -244,7 +245,7 @@ def file_list(load): Return a list of all files on the file server in a specified environment ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -285,7 +286,7 @@ def dir_list(load): Return a list of all directories on the master ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' diff --git a/salt/fileserver/svnfs.py b/salt/fileserver/svnfs.py index 5b9d7375b8..ac7681ec23 100644 --- a/salt/fileserver/svnfs.py +++ b/salt/fileserver/svnfs.py @@ -2,7 +2,7 @@ ''' Subversion Fileserver Backend -After enabling this backend, branches, and tags in a remote subversion +After enabling this backend, branches and tags in a remote subversion repository are exposed to salt as different environments. To enable this backend, add ``svn`` to the :conf_master:`fileserver_backend` option in the Master config file. 
@@ -42,7 +42,7 @@ from salt.exceptions import FileserverConfigError PER_REMOTE_OVERRIDES = ('mountpoint', 'root', 'trunk', 'branches', 'tags') # Import third party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error HAS_SVN = False try: @@ -58,6 +58,7 @@ import salt.utils import salt.utils.files import salt.utils.gzip_util import salt.utils.url +import salt.utils.versions import salt.fileserver from salt.utils.event import tagify @@ -484,7 +485,7 @@ def _env_is_exposed(env): blacklist. ''' if __opts__['svnfs_env_whitelist']: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Neon', 'The svnfs_env_whitelist config option has been renamed to ' 'svnfs_saltenv_whitelist. Please update your configuration.' @@ -494,7 +495,7 @@ def _env_is_exposed(env): whitelist = __opts__['svnfs_saltenv_whitelist'] if __opts__['svnfs_env_blacklist']: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Neon', 'The svnfs_env_blacklist config option has been renamed to ' 'svnfs_saltenv_blacklist. Please update your configuration.' @@ -630,7 +631,7 @@ def serve_file(load, fnd): Return a chunk from a file based on the data received ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -664,7 +665,7 @@ def file_hash(load, fnd): Return a file hash, the hash type is set in the master config file ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -719,10 +720,10 @@ def file_hash(load, fnd): def _file_lists(load, form): ''' - Return a dict containing the file lists for files, dirs, emtydirs and symlinks + Return a dict containing the file lists for files, dirs, emptydirs and symlinks ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' diff --git a/salt/grains/chronos.py b/salt/grains/chronos.py index 24a11eec7d..e5a46e2ada 100644 --- a/salt/grains/chronos.py +++ b/salt/grains/chronos.py @@ -8,14 +8,14 @@ Generate chronos proxy minion grains. 
from __future__ import absolute_import -import salt.utils import salt.utils.http +import salt.utils.platform __proxyenabled__ = ['chronos'] __virtualname__ = 'chronos' def __virtual__(): - if not salt.utils.is_proxy() or 'proxy' not in __opts__: + if not salt.utils.platform.is_proxy() or 'proxy' not in __opts__: return False else: return __virtualname__ diff --git a/salt/grains/core.py b/salt/grains/core.py index c558cc5202..f635143fb6 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -46,10 +46,12 @@ import salt.utils import salt.utils.dns import salt.utils.files import salt.utils.network -import salt.ext.six as six +import salt.utils.path +import salt.utils.platform +from salt.ext import six from salt.ext.six.moves import range -if salt.utils.is_windows(): +if salt.utils.platform.is_windows(): import salt.utils.win_osinfo # Solve the Chicken and egg problem where grains need to run before any @@ -67,7 +69,7 @@ __salt__ = { log = logging.getLogger(__name__) HAS_WMI = False -if salt.utils.is_windows(): +if salt.utils.platform.is_windows(): # attempt to import the python wmi module # the Windows minion uses WMI for some of its grains try: @@ -174,7 +176,7 @@ def _linux_gpu_data(): if __opts__.get('enable_gpu_grains', True) is False: return {} - lspci = salt.utils.which('lspci') + lspci = salt.utils.path.which('lspci') if not lspci: log.debug( 'The `lspci` binary is not available on the system. GPU grains ' @@ -305,8 +307,8 @@ def _bsd_cpudata(osdata): # num_cpus # cpu_model # cpu_flags - sysctl = salt.utils.which('sysctl') - arch = salt.utils.which('arch') + sysctl = salt.utils.path.which('sysctl') + arch = salt.utils.path.which('arch') cmds = {} if sysctl: @@ -412,7 +414,7 @@ def _memdata(osdata): # Use floor division to force output to be an integer grains['mem_total'] = int(comps[1].split()[0]) // 1024 elif osdata['kernel'] in ('FreeBSD', 'OpenBSD', 'NetBSD', 'Darwin'): - sysctl = salt.utils.which('sysctl') + sysctl = salt.utils.path.which('sysctl') if sysctl: if osdata['kernel'] == 'Darwin': mem = __salt__['cmd.run']('{0} -n hw.memsize'.format(sysctl)) @@ -510,8 +512,8 @@ def _virtual(osdata): _cmds = ['systemd-detect-virt', 'virt-what', 'dmidecode'] # test first for virt-what, which covers most of the desired functionality # on most platforms - if not salt.utils.is_windows() and osdata['kernel'] not in skip_cmds: - if salt.utils.which('virt-what'): + if not salt.utils.platform.is_windows() and osdata['kernel'] not in skip_cmds: + if salt.utils.path.which('virt-what'): _cmds = ['virt-what'] else: log.debug( @@ -556,7 +558,7 @@ def _virtual(osdata): else: command = 'prtdiag' - cmd = salt.utils.which(command) + cmd = salt.utils.path.which(command) if not cmd: continue @@ -571,13 +573,13 @@ def _virtual(osdata): # systemd-detect-virt always returns > 0 on non-virtualized # systems # prtdiag only works in the global zone, skip if it fails - if salt.utils.is_windows() or 'systemd-detect-virt' in cmd or 'prtdiag' in cmd: + if salt.utils.platform.is_windows() or 'systemd-detect-virt' in cmd or 'prtdiag' in cmd: continue failed_commands.add(command) continue except salt.exceptions.CommandExecutionError: if salt.log.is_logging_configured(): - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): continue failed_commands.add(command) continue @@ -715,7 +717,7 @@ def _virtual(osdata): choices = ('Linux', 'HP-UX') isdir = os.path.isdir - sysctl = salt.utils.which('sysctl') + sysctl = salt.utils.path.which('sysctl') if osdata['kernel'] in choices: if os.path.isdir('/proc'): 
try: @@ -806,7 +808,7 @@ def _virtual(osdata): except IOError: pass elif osdata['kernel'] == 'FreeBSD': - kenv = salt.utils.which('kenv') + kenv = salt.utils.path.which('kenv') if kenv: product = __salt__['cmd.run']( '{0} smbios.system.product'.format(kenv) @@ -854,12 +856,12 @@ def _virtual(osdata): grains['virtual_subtype'] = roles else: # Check if it's a "regular" zone. (i.e. Solaris 10/11 zone) - zonename = salt.utils.which('zonename') + zonename = salt.utils.path.which('zonename') if zonename: zone = __salt__['cmd.run']('{0}'.format(zonename)) if zone != 'global': grains['virtual'] = 'zone' - if salt.utils.is_smartos_zone(): + if salt.utils.platform.is_smartos_zone(): grains.update(_smartos_zone_data()) # Check if it's a branded zone (i.e. Solaris 8/9 zone) if isdir('/.SUNWnative'): @@ -1010,28 +1012,20 @@ def _windows_platform_data(): os_release = platform.release() kernel_version = platform.version() info = salt.utils.win_osinfo.get_os_version_info() + server = {'Vista': '2008Server', + '7': '2008ServerR2', + '8': '2012Server', + '8.1': '2012ServerR2', + '10': '2016Server'} # Starting with Python 2.7.12 and 3.5.2 the `platform.uname()` function # started reporting the Desktop version instead of the Server version on # Server versions of Windows, so we need to look those up - # Check for Python >=2.7.12 or >=3.5.2 - ver = pythonversion()['pythonversion'] - if ((six.PY2 and - salt.utils.compare_versions(ver, '>=', [2, 7, 12, 'final', 0])) - or - (six.PY3 and - salt.utils.compare_versions(ver, '>=', [3, 5, 2, 'final', 0]))): - # (Product Type 1 is Desktop, Everything else is Server) - if info['ProductType'] > 1: - server = {'Vista': '2008Server', - '7': '2008ServerR2', - '8': '2012Server', - '8.1': '2012ServerR2', - '10': '2016Server'} - os_release = server.get(os_release, - 'Grain not found. Update lookup table ' - 'in the `_windows_platform_data` ' - 'function in `grains\\core.py`') + # So, if you find a Server Platform that's a key in the server + # dictionary, then lookup the actual Server Release.
+ # (Product Type 1 is Desktop, Everything else is Server) + if info['ProductType'] > 1 and os_release in server: + os_release = server[os_release] service_pack = None if info['ServicePackMajor'] > 0: @@ -1294,7 +1288,7 @@ def os_data(): grains['kernelrelease'], grains['kernelversion'], grains['cpuarch'], _) = platform.uname() # pylint: enable=unpacking-non-sequence - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): grains['kernel'] = 'proxy' grains['kernelrelease'] = 'proxy' grains['kernelversion'] = 'proxy' @@ -1302,7 +1296,7 @@ def os_data(): grains['os'] = 'proxy' grains['os_family'] = 'proxy' grains['osfullname'] = 'proxy' - elif salt.utils.is_windows(): + elif salt.utils.platform.is_windows(): grains['os'] = 'Windows' grains['os_family'] = 'Windows' grains.update(_memdata(grains)) @@ -1330,7 +1324,7 @@ def os_data(): grains['init'] = 'Windows' return grains - elif salt.utils.is_linux(): + elif salt.utils.platform.is_linux(): # Add SELinux grain, if you have it if _linux_bin_exists('selinuxenabled'): grains['selinux'] = {} @@ -1361,7 +1355,7 @@ def os_data(): with salt.utils.files.fopen('/proc/1/cmdline') as fhr: init_cmdline = fhr.read().replace('\x00', ' ').split() try: - init_bin = salt.utils.which(init_cmdline[0]) + init_bin = salt.utils.path.which(init_cmdline[0]) except IndexError: # Emtpy init_cmdline init_bin = None @@ -1397,7 +1391,7 @@ def os_data(): 'Unable to read from init_bin ({0}): {1}' .format(init_bin, exc) ) - elif salt.utils.which('supervisord') in init_cmdline: + elif salt.utils.path.which('supervisord') in init_cmdline: grains['init'] = 'supervisord' elif init_cmdline == ['runit']: grains['init'] = 'runit' @@ -1576,7 +1570,7 @@ def os_data(): grains.update(_linux_cpudata()) grains.update(_linux_gpu_data()) elif grains['kernel'] == 'SunOS': - if salt.utils.is_smartos(): + if salt.utils.platform.is_smartos(): # See https://github.com/joyent/smartos-live/issues/224 uname_v = os.uname()[3] # format: joyent_20161101T004406Z uname_v = uname_v[uname_v.index('_')+1:] @@ -1589,7 +1583,7 @@ def os_data(): ]) # store a untouched copy of the timestamp in osrelease_stamp grains['osrelease_stamp'] = uname_v - if salt.utils.is_smartos_globalzone(): + if salt.utils.platform.is_smartos_globalzone(): grains.update(_smartos_computenode_data()) elif os.path.isfile('/etc/release'): with salt.utils.files.fopen('/etc/release', 'r') as fp_: @@ -1734,7 +1728,7 @@ def locale_info(): grains = {} grains['locale_info'] = {} - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return grains try: @@ -1764,7 +1758,7 @@ def hostname(): global __FQDN__ grains = {} - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return grains grains['localhost'] = socket.gethostname() @@ -1792,7 +1786,7 @@ def append_domain(): grain = {} - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return grain if 'append_domain' in __opts__: @@ -1804,7 +1798,7 @@ def ip_fqdn(): ''' Return ip address and FQDN grains ''' - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return {} ret = {} @@ -1839,7 +1833,7 @@ def ip_interfaces(): # Provides: # ip_interfaces - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return {} ret = {} @@ -1867,7 +1861,7 @@ def ip4_interfaces(): # Provides: # ip_interfaces - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return {} ret = {} @@ -1892,7 +1886,7 @@ def ip6_interfaces(): # Provides: # ip_interfaces - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return {} ret = {} @@ 
-1932,7 +1926,7 @@ def dns(): ''' # Provides: # dns - if salt.utils.is_windows() or 'proxyminion' in __opts__: + if salt.utils.platform.is_windows() or 'proxyminion' in __opts__: return {} resolv = salt.utils.dns.parse_resolv() @@ -2055,7 +2049,7 @@ def _hw_data(osdata): .. versionadded:: 0.9.5 ''' - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return {} grains = {} @@ -2084,8 +2078,8 @@ def _hw_data(osdata): if err.errno == EACCES or err.errno == EPERM: # Skip the grain if non-root user has no access to the file. pass - elif salt.utils.which_bin(['dmidecode', 'smbios']) is not None and not ( - salt.utils.is_smartos() or + elif salt.utils.path.which_bin(['dmidecode', 'smbios']) is not None and not ( + salt.utils.platform.is_smartos() or ( # SunOS on SPARC - 'smbios: failed to load SMBIOS: System does not export an SMBIOS table' osdata['kernel'] == 'SunOS' and osdata['cpuarch'].startswith('sparc') @@ -2108,7 +2102,7 @@ def _hw_data(osdata): if serial is not None: grains['serialnumber'] = serial break - elif salt.utils.which_bin(['fw_printenv']) is not None: + elif salt.utils.path.which_bin(['fw_printenv']) is not None: # ARM Linux devices expose UBOOT env variables via fw_printenv hwdata = { 'manufacturer': 'manufacturer', @@ -2122,7 +2116,7 @@ def _hw_data(osdata): elif osdata['kernel'] == 'FreeBSD': # On FreeBSD /bin/kenv (already in base system) # can be used instead of dmidecode - kenv = salt.utils.which('kenv') + kenv = salt.utils.path.which('kenv') if kenv: # In theory, it will be easier to add new fields to this later fbsd_hwdata = { @@ -2137,7 +2131,7 @@ def _hw_data(osdata): value = __salt__['cmd.run']('{0} {1}'.format(kenv, val)) grains[key] = _clean_value(key, value) elif osdata['kernel'] == 'OpenBSD': - sysctl = salt.utils.which('sysctl') + sysctl = salt.utils.path.which('sysctl') hwdata = {'biosversion': 'hw.version', 'manufacturer': 'hw.vendor', 'productname': 'hw.product', @@ -2148,7 +2142,7 @@ def _hw_data(osdata): if not value.endswith(' value is not available'): grains[key] = _clean_value(key, value) elif osdata['kernel'] == 'NetBSD': - sysctl = salt.utils.which('sysctl') + sysctl = salt.utils.path.which('sysctl') nbsd_hwdata = { 'biosversion': 'machdep.dmi.board-version', 'manufacturer': 'machdep.dmi.system-vendor', @@ -2163,7 +2157,7 @@ def _hw_data(osdata): grains[key] = _clean_value(key, result['stdout']) elif osdata['kernel'] == 'Darwin': grains['manufacturer'] = 'Apple Inc.' - sysctl = salt.utils.which('sysctl') + sysctl = salt.utils.path.which('sysctl') hwdata = {'productname': 'hw.model'} for key, oid in hwdata.items(): value = __salt__['cmd.run']('{0} -b {1}'.format(sysctl, oid)) @@ -2175,7 +2169,7 @@ def _hw_data(osdata): # commands and attempt various lookups. 
data = "" for (cmd, args) in (('/usr/sbin/prtdiag', '-v'), ('/usr/sbin/prtconf', '-vp'), ('/usr/sbin/virtinfo', '-a')): - if salt.utils.which(cmd): # Also verifies that cmd is executable + if salt.utils.path.which(cmd): # Also verifies that cmd is executable data += __salt__['cmd.run']('{0} {1}'.format(cmd, args)) data += '\n' @@ -2301,7 +2295,7 @@ def _smartos_computenode_data(): # vm_capable # vm_hw_virt - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return {} grains = {} @@ -2339,7 +2333,7 @@ def _smartos_zone_data(): # hypervisor_uuid # datacenter - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return {} grains = {} @@ -2383,11 +2377,15 @@ def _zpool_data(grains): Provide grains about zpools ''' # quickly return if windows or proxy - if salt.utils.is_windows() or 'proxyminion' in __opts__: + if salt.utils.platform.is_windows() or 'proxyminion' in __opts__: + return {} + + # quickly return if NetBSD (ZFS still under development) + if salt.utils.platform.is_netbsd(): return {} # quickly return if no zpool and zfs command - if not salt.utils.which('zpool'): + if not salt.utils.path.which('zpool'): return {} # collect zpool data @@ -2411,7 +2409,7 @@ def get_server_id(): # Provides: # server_id - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return {} return {'server_id': abs(hash(__opts__.get('id', '')) % (2 ** 31))} diff --git a/salt/grains/disks.py b/salt/grains/disks.py index 64c5174393..bd2069226b 100644 --- a/salt/grains/disks.py +++ b/salt/grains/disks.py @@ -10,15 +10,17 @@ import logging import re # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path +import salt.utils.platform # Solve the Chicken and egg problem where grains need to run before any # of the modules are loaded and are generally available for any usage. import salt.modules.cmdmod __salt__ = { - 'cmd.run': salt.modules.cmdmod._run_quiet + 'cmd.run': salt.modules.cmdmod._run_quiet, + 'cmd.run_all': salt.modules.cmdmod._run_all_quiet } log = logging.getLogger(__name__) @@ -28,10 +30,12 @@ def disks(): ''' Return list of disk devices ''' - if salt.utils.is_freebsd(): + if salt.utils.platform.is_freebsd(): return _freebsd_geom() - elif salt.utils.is_linux(): + elif salt.utils.platform.is_linux(): return _linux_disks() + elif salt.utils.platform.is_windows(): + return _windows_disks() else: log.trace('Disk grain does not support OS') @@ -88,7 +92,7 @@ _geom_attribs = [_geomconsts.__dict__[key] for key in def _freebsd_geom(): - geom = salt.utils.which('geom') + geom = salt.utils.path.which('geom') ret = {'disks': {}, 'SSDs': []} devices = __salt__['cmd.run']('{0} disk list'.format(geom)) @@ -141,3 +145,39 @@ def _linux_disks(): log.trace('Unable to identify device {0} as an SSD or HDD.' 
' It does not report 0 or 1'.format(device)) return ret + + +def _windows_disks(): + wmic = salt.utils.path.which('wmic') + + namespace = r'\\root\microsoft\windows\storage' + path = 'MSFT_PhysicalDisk' + where = '(MediaType=3 or MediaType=4)' + get = 'DeviceID,MediaType' + + ret = {'disks': [], 'SSDs': []} + + cmdret = __salt__['cmd.run_all']( + '{0} /namespace:{1} path {2} where {3} get {4} /format:table'.format( + wmic, namespace, path, where, get)) + + if cmdret['retcode'] != 0: + log.trace('Disk grain does not support this version of Windows') + else: + for line in cmdret['stdout'].splitlines(): + info = line.split() + if len(info) != 2 or not info[0].isdigit() or not info[1].isdigit(): + continue + device = r'\\.\PhysicalDrive{0}'.format(info[0]) + mediatype = info[1] + if mediatype == '3': + log.trace('Device {0} reports itself as an HDD'.format(device)) + ret['disks'].append(device) + elif mediatype == '4': + log.trace('Device {0} reports itself as an SSD'.format(device)) + ret['SSDs'].append(device) + else: + log.trace('Unable to identify device {0} as an SSD or HDD.' + ' It does not report 3 or 4'.format(device)) + + return ret diff --git a/salt/grains/esxi.py b/salt/grains/esxi.py index f4cf1b79cd..c548b24a55 100644 --- a/salt/grains/esxi.py +++ b/salt/grains/esxi.py @@ -12,7 +12,7 @@ import logging # Import Salt Libs from salt.exceptions import SaltSystemExit -import salt.utils +import salt.utils.platform import salt.modules.vsphere __proxyenabled__ = ['esxi'] @@ -26,7 +26,7 @@ GRAINS_CACHE = {} def __virtual__(): try: - if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'esxi': + if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'esxi': return __virtualname__ except KeyError: pass diff --git a/salt/grains/fx2.py b/salt/grains/fx2.py index fa4069775e..b4c723346d 100644 --- a/salt/grains/fx2.py +++ b/salt/grains/fx2.py @@ -7,11 +7,11 @@ in proxy/fx2.py--just enough to get data from the chassis to include in grains. ''' from __future__ import absolute_import -import salt.utils import logging import salt.proxy.fx2 import salt.modules.cmdmod import salt.modules.dracr +import salt.utils.platform __proxyenabled__ = ['fx2'] @@ -25,7 +25,7 @@ GRAINS_CACHE = {} def __virtual__(): try: - if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'fx2': + if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'fx2': return __virtualname__ except KeyError: pass diff --git a/salt/grains/junos.py b/salt/grains/junos.py index 34e33f4cbc..c2fd0ba1b0 100644 --- a/salt/grains/junos.py +++ b/salt/grains/junos.py @@ -11,7 +11,7 @@ from __future__ import absolute_import import logging # Import Salt libs -import salt.ext.six as six +from salt.ext import six __proxyenabled__ = ['junos'] __virtualname__ = 'junos' diff --git a/salt/grains/marathon.py b/salt/grains/marathon.py index 330e1fe486..82c3f75716 100644 --- a/salt/grains/marathon.py +++ b/salt/grains/marathon.py @@ -7,14 +7,14 @@ Generate marathon proxy minion grains.
''' from __future__ import absolute_import -import salt.utils import salt.utils.http +import salt.utils.platform __proxyenabled__ = ['marathon'] __virtualname__ = 'marathon' def __virtual__(): - if not salt.utils.is_proxy() or 'proxy' not in __opts__: + if not salt.utils.platform.is_proxy() or 'proxy' not in __opts__: return False else: return __virtualname__ diff --git a/salt/grains/mdata.py b/salt/grains/mdata.py index 59093db333..9e528e8cfe 100644 --- a/salt/grains/mdata.py +++ b/salt/grains/mdata.py @@ -18,8 +18,9 @@ import json import logging # Import salt libs -import salt.utils import salt.utils.dictupdate +import salt.utils.path +import salt.utils.platform # Solve the Chicken and egg problem where grains need to run before any # of the modules are loaded and are generally available for any usage. @@ -39,10 +40,10 @@ def __virtual__(): Figure out if we need to be loaded ''' ## collect mdata grains in a SmartOS zone - if salt.utils.is_smartos_zone(): + if salt.utils.platform.is_smartos_zone(): return __virtualname__ ## collect mdata grains in a LX zone - if salt.utils.is_linux() and 'BrandZ virtual linux' in os.uname(): + if salt.utils.platform.is_linux() and 'BrandZ virtual linux' in os.uname(): return __virtualname__ return False @@ -54,10 +55,10 @@ def _user_mdata(mdata_list=None, mdata_get=None): grains = {} if not mdata_list: - mdata_list = salt.utils.which('mdata-list') + mdata_list = salt.utils.path.which('mdata-list') if not mdata_get: - mdata_get = salt.utils.which('mdata-get') + mdata_get = salt.utils.path.which('mdata-get') if not mdata_list or not mdata_get: return grains @@ -97,10 +98,10 @@ def _sdc_mdata(mdata_list=None, mdata_get=None): ] if not mdata_list: - mdata_list = salt.utils.which('mdata-list') + mdata_list = salt.utils.path.which('mdata-list') if not mdata_get: - mdata_get = salt.utils.which('mdata-get') + mdata_get = salt.utils.path.which('mdata-get') if not mdata_list or not mdata_get: return grains @@ -154,8 +155,8 @@ def mdata(): Provide grains from the SmartOS metadata ''' grains = {} - mdata_list = salt.utils.which('mdata-list') - mdata_get = salt.utils.which('mdata-get') + mdata_list = salt.utils.path.which('mdata-list') + mdata_get = salt.utils.path.which('mdata-get') grains = salt.utils.dictupdate.update(grains, _user_mdata(mdata_list, mdata_get), merge_lists=True) grains = salt.utils.dictupdate.update(grains, _sdc_mdata(mdata_list, mdata_get), merge_lists=True) diff --git a/salt/grains/minion_process.py b/salt/grains/minion_process.py index 442e6758cb..e1aa762b4d 100644 --- a/salt/grains/minion_process.py +++ b/salt/grains/minion_process.py @@ -8,7 +8,7 @@ from __future__ import absolute_import import os # Import salt libs -import salt.utils +import salt.utils.platform try: import pwd @@ -23,14 +23,18 @@ except ImportError: def _uid(): - '''Grain for the minion User ID''' - if salt.utils.is_windows(): + ''' + Grain for the minion User ID + ''' + if salt.utils.platform.is_windows(): return None return os.getuid() def _username(): - '''Grain for the minion username''' + ''' + Grain for the minion username + ''' if pwd: username = pwd.getpwuid(os.getuid()).pw_name else: @@ -40,14 +44,18 @@ def _username(): def _gid(): - '''Grain for the minion Group ID''' - if salt.utils.is_windows(): + ''' + Grain for the minion Group ID + ''' + if salt.utils.platform.is_windows(): return None return os.getgid() def _groupname(): - '''Grain for the minion groupname''' + ''' + Grain for the minion groupname + ''' if grp: groupname = grp.getgrgid(os.getgid()).gr_name 
else: @@ -67,7 +75,7 @@ def grains(): 'pid': _pid(), } - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): ret['gid'] = _gid() ret['uid'] = _uid() diff --git a/salt/grains/nxos.py b/salt/grains/nxos.py index 13fbeb8930..84df657072 100644 --- a/salt/grains/nxos.py +++ b/salt/grains/nxos.py @@ -11,7 +11,7 @@ for :mod:`salt.proxy.nxos`. from __future__ import absolute_import # Import Salt Libs -import salt.utils +import salt.utils.platform import salt.modules.nxos import logging @@ -23,7 +23,7 @@ __virtualname__ = 'nxos' def __virtual__(): try: - if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'nxos': + if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'nxos': return __virtualname__ except KeyError: pass diff --git a/salt/grains/panos.py b/salt/grains/panos.py new file mode 100644 index 0000000000..d39de5f94b --- /dev/null +++ b/salt/grains/panos.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +''' +Generate baseline proxy minion grains for panos hosts. + +''' + +# Import Python Libs +from __future__ import absolute_import +import logging + +# Import Salt Libs +import salt.utils.platform +import salt.proxy.panos + +__proxyenabled__ = ['panos'] +__virtualname__ = 'panos' + +log = logging.getLogger(__file__) + +GRAINS_CACHE = {'os_family': 'panos'} + + +def __virtual__(): + try: + if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'panos': + return __virtualname__ + except KeyError: + pass + + return False + + +def panos(proxy=None): + if not proxy: + return {} + if proxy['panos.initialized']() is False: + return {} + return {'panos': proxy['panos.grains']()} diff --git a/salt/grains/rest_sample.py b/salt/grains/rest_sample.py index 98a2c0e53f..490256fb56 100644 --- a/salt/grains/rest_sample.py +++ b/salt/grains/rest_sample.py @@ -3,7 +3,7 @@ Generate baseline proxy minion grains ''' from __future__ import absolute_import -import salt.utils +import salt.utils.platform __proxyenabled__ = ['rest_sample'] @@ -12,7 +12,7 @@ __virtualname__ = 'rest_sample' def __virtual__(): try: - if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample': + if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample': return __virtualname__ except KeyError: pass diff --git a/salt/grains/ssh_sample.py b/salt/grains/ssh_sample.py index 7c1cbe90a6..be12165716 100644 --- a/salt/grains/ssh_sample.py +++ b/salt/grains/ssh_sample.py @@ -3,7 +3,7 @@ Generate baseline proxy minion grains ''' from __future__ import absolute_import -import salt.utils +import salt.utils.platform __proxyenabled__ = ['ssh_sample'] @@ -12,7 +12,7 @@ __virtualname__ = 'ssh_sample' def __virtual__(): try: - if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'ssh_sample': + if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'ssh_sample': return __virtualname__ except KeyError: pass diff --git a/salt/key.py b/salt/key.py index d463fcc13e..9fe9fb3884 100644 --- a/salt/key.py +++ b/salt/key.py @@ -26,10 +26,11 @@ import salt.utils import salt.utils.args import salt.utils.event import salt.utils.files -import salt.utils.kinds +import salt.utils.sdb +import salt.utils.kinds as kinds # pylint: disable=import-error,no-name-in-module,redefined-builtin -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import input # pylint: enable=import-error,no-name-in-module,redefined-builtin @@ -43,7 +44,7 @@ log = logging.getLogger(__name__) def get_key(opts): - if opts['transport'] in 
('zeromq', 'tcp'): + if opts[u'transport'] in (u'zeromq', u'tcp'): return Key(opts) else: return RaetKey(opts) @@ -53,99 +54,99 @@ class KeyCLI(object): ''' Manage key CLI operations ''' - CLI_KEY_MAP = {'list': 'list_status', - 'delete': 'delete_key', - 'gen_signature': 'gen_keys_signature', - 'print': 'key_str', + CLI_KEY_MAP = {u'list': u'list_status', + u'delete': u'delete_key', + u'gen_signature': u'gen_keys_signature', + u'print': u'key_str', } def __init__(self, opts): self.opts = opts self.client = salt.wheel.WheelClient(opts) - if self.opts['transport'] in ('zeromq', 'tcp'): + if self.opts[u'transport'] in (u'zeromq', u'tcp'): self.key = Key else: self.key = RaetKey # instantiate the key object for masterless mode - if not opts.get('eauth'): + if not opts.get(u'eauth'): self.key = self.key(opts) self.auth = None def _update_opts(self): # get the key command - for cmd in ('gen_keys', - 'gen_signature', - 'list', - 'list_all', - 'print', - 'print_all', - 'accept', - 'accept_all', - 'reject', - 'reject_all', - 'delete', - 'delete_all', - 'finger', - 'finger_all', - 'list_all'): # last is default + for cmd in (u'gen_keys', + u'gen_signature', + u'list', + u'list_all', + u'print', + u'print_all', + u'accept', + u'accept_all', + u'reject', + u'reject_all', + u'delete', + u'delete_all', + u'finger', + u'finger_all', + u'list_all'): # last is default if self.opts[cmd]: break # set match if needed - if not cmd.startswith('gen_'): - if cmd == 'list_all': - self.opts['match'] = 'all' - elif cmd.endswith('_all'): - self.opts['match'] = '*' + if not cmd.startswith(u'gen_'): + if cmd == u'list_all': + self.opts[u'match'] = u'all' + elif cmd.endswith(u'_all'): + self.opts[u'match'] = u'*' else: - self.opts['match'] = self.opts[cmd] - if cmd.startswith('accept'): - self.opts['include_rejected'] = self.opts['include_all'] or self.opts['include_rejected'] - self.opts['include_accepted'] = False - elif cmd.startswith('reject'): - self.opts['include_accepted'] = self.opts['include_all'] or self.opts['include_accepted'] - self.opts['include_rejected'] = False - elif cmd == 'gen_keys': - self.opts['keydir'] = self.opts['gen_keys_dir'] - self.opts['keyname'] = self.opts['gen_keys'] + self.opts[u'match'] = self.opts[cmd] + if cmd.startswith(u'accept'): + self.opts[u'include_rejected'] = self.opts[u'include_all'] or self.opts[u'include_rejected'] + self.opts[u'include_accepted'] = False + elif cmd.startswith(u'reject'): + self.opts[u'include_accepted'] = self.opts[u'include_all'] or self.opts[u'include_accepted'] + self.opts[u'include_rejected'] = False + elif cmd == u'gen_keys': + self.opts[u'keydir'] = self.opts[u'gen_keys_dir'] + self.opts[u'keyname'] = self.opts[u'gen_keys'] # match is set to opts, now we can forget about *_all commands - self.opts['fun'] = cmd.replace('_all', '') + self.opts[u'fun'] = cmd.replace(u'_all', u'') def _init_auth(self): if self.auth: return low = {} - skip_perm_errors = self.opts['eauth'] != '' + skip_perm_errors = self.opts[u'eauth'] != u'' - if self.opts['eauth']: - if 'token' in self.opts: + if self.opts[u'eauth']: + if u'token' in self.opts: try: - with salt.utils.files.fopen(os.path.join(self.opts['cachedir'], '.root_key'), 'r') as fp_: - low['key'] = fp_.readline() + with salt.utils.files.fopen(os.path.join(self.opts[u'cachedir'], u'.root_key'), u'r') as fp_: + low[u'key'] = fp_.readline() except IOError: - low['token'] = self.opts['token'] + low[u'token'] = self.opts[u'token'] # # If using eauth and a token hasn't already been loaded into # low, prompt the user to 
enter auth credentials - if 'token' not in low and 'key' not in low and self.opts['eauth']: + if u'token' not in low and u'key' not in low and self.opts[u'eauth']: # This is expensive. Don't do it unless we need to. resolver = salt.auth.Resolver(self.opts) - res = resolver.cli(self.opts['eauth']) - if self.opts['mktoken'] and res: + res = resolver.cli(self.opts[u'eauth']) + if self.opts[u'mktoken'] and res: tok = resolver.token_cli( - self.opts['eauth'], + self.opts[u'eauth'], res ) if tok: - low['token'] = tok.get('token', '') + low[u'token'] = tok.get(u'token', u'') if not res: - log.error('Authentication failed') + log.error(u'Authentication failed') return {} low.update(res) - low['eauth'] = self.opts['eauth'] + low[u'eauth'] = self.opts[u'eauth'] else: - low['user'] = salt.utils.get_specific_user() - low['key'] = salt.utils.get_master_key(low['user'], self.opts, skip_perm_errors) + low[u'user'] = salt.utils.get_specific_user() + low[u'key'] = salt.utils.get_master_key(low[u'user'], self.opts, skip_perm_errors) self.auth = low @@ -164,24 +165,24 @@ class KeyCLI(object): return args, kwargs def _run_cmd(self, cmd, args=None): - if not self.opts.get('eauth'): + if not self.opts.get(u'eauth'): cmd = self.CLI_KEY_MAP.get(cmd, cmd) fun = getattr(self.key, cmd) args, kwargs = self._get_args_kwargs(fun, args) ret = fun(*args, **kwargs) - if (isinstance(ret, dict) and 'local' in ret and - cmd not in ('finger', 'finger_all')): - ret.pop('local', None) + if (isinstance(ret, dict) and u'local' in ret and + cmd not in (u'finger', u'finger_all')): + ret.pop(u'local', None) return ret - fstr = 'key.{0}'.format(cmd) + fstr = u'key.{0}'.format(cmd) fun = self.client.functions[fstr] args, kwargs = self._get_args_kwargs(fun, args) low = { - 'fun': fstr, - 'arg': args, - 'kwarg': kwargs, + u'fun': fstr, + u'arg': args, + u'kwarg': kwargs, } self._init_auth() @@ -190,41 +191,41 @@ class KeyCLI(object): # Execute the key request! 
ret = self.client.cmd_sync(low) - ret = ret['data']['return'] - if (isinstance(ret, dict) and 'local' in ret and - cmd not in ('finger', 'finger_all')): - ret.pop('local', None) + ret = ret[u'data'][u'return'] + if (isinstance(ret, dict) and u'local' in ret and + cmd not in (u'finger', u'finger_all')): + ret.pop(u'local', None) return ret def _filter_ret(self, cmd, ret): - if cmd.startswith('delete'): + if cmd.startswith(u'delete'): return ret keys = {} if self.key.PEND in ret: keys[self.key.PEND] = ret[self.key.PEND] - if self.opts['include_accepted'] and bool(ret.get(self.key.ACC)): + if self.opts[u'include_accepted'] and bool(ret.get(self.key.ACC)): keys[self.key.ACC] = ret[self.key.ACC] - if self.opts['include_rejected'] and bool(ret.get(self.key.REJ)): + if self.opts[u'include_rejected'] and bool(ret.get(self.key.REJ)): keys[self.key.REJ] = ret[self.key.REJ] - if self.opts['include_denied'] and bool(ret.get(self.key.DEN)): + if self.opts[u'include_denied'] and bool(ret.get(self.key.DEN)): keys[self.key.DEN] = ret[self.key.DEN] return keys def _print_no_match(self, cmd, match): - statuses = ['unaccepted'] - if self.opts['include_accepted']: - statuses.append('accepted') - if self.opts['include_rejected']: - statuses.append('rejected') - if self.opts['include_denied']: - statuses.append('denied') + statuses = [u'unaccepted'] + if self.opts[u'include_accepted']: + statuses.append(u'accepted') + if self.opts[u'include_rejected']: + statuses.append(u'rejected') + if self.opts[u'include_denied']: + statuses.append(u'denied') if len(statuses) == 1: stat_str = statuses[0] else: - stat_str = '{0} or {1}'.format(', '.join(statuses[:-1]), statuses[-1]) - msg = 'The key glob \'{0}\' does not match any {1} keys.'.format(match, stat_str) + stat_str = u'{0} or {1}'.format(u', '.join(statuses[:-1]), statuses[-1]) + msg = u'The key glob \'{0}\' does not match any {1} keys.'.format(match, stat_str) print(msg) def run(self): @@ -232,57 +233,57 @@ class KeyCLI(object): Run the logic for saltkey ''' self._update_opts() - cmd = self.opts['fun'] + cmd = self.opts[u'fun'] veri = None ret = None try: - if cmd in ('accept', 'reject', 'delete'): - ret = self._run_cmd('name_match') + if cmd in (u'accept', u'reject', u'delete'): + ret = self._run_cmd(u'name_match') if not isinstance(ret, dict): - salt.output.display_output(ret, 'key', opts=self.opts) + salt.output.display_output(ret, u'key', opts=self.opts) return ret ret = self._filter_ret(cmd, ret) if not ret: - self._print_no_match(cmd, self.opts['match']) + self._print_no_match(cmd, self.opts[u'match']) return - print('The following keys are going to be {0}ed:'.format(cmd.rstrip('e'))) - salt.output.display_output(ret, 'key', opts=self.opts) + print(u'The following keys are going to be {0}ed:'.format(cmd.rstrip(u'e'))) + salt.output.display_output(ret, u'key', opts=self.opts) - if not self.opts.get('yes', False): + if not self.opts.get(u'yes', False): try: - if cmd.startswith('delete'): - veri = input('Proceed? [N/y] ') + if cmd.startswith(u'delete'): + veri = input(u'Proceed? [N/y] ') if not veri: - veri = 'n' + veri = u'n' else: - veri = input('Proceed? [n/Y] ') + veri = input(u'Proceed? 
[n/Y] ') if not veri: - veri = 'y' + veri = u'y' except KeyboardInterrupt: - raise SystemExit("\nExiting on CTRL-c") + raise SystemExit(u"\nExiting on CTRL-c") # accept/reject/delete the same keys we're printed to the user - self.opts['match_dict'] = ret - self.opts.pop('match', None) + self.opts[u'match_dict'] = ret + self.opts.pop(u'match', None) list_ret = ret - if veri is None or veri.lower().startswith('y'): + if veri is None or veri.lower().startswith(u'y'): ret = self._run_cmd(cmd) - if cmd in ('accept', 'reject', 'delete'): - if cmd == 'delete': + if cmd in (u'accept', u'reject', u'delete'): + if cmd == u'delete': ret = list_ret for minions in ret.values(): for minion in minions: - print('Key for minion {0} {1}ed.'.format(minion, - cmd.rstrip('e'))) + print(u'Key for minion {0} {1}ed.'.format(minion, + cmd.rstrip(u'e'))) elif isinstance(ret, dict): - salt.output.display_output(ret, 'key', opts=self.opts) + salt.output.display_output(ret, u'key', opts=self.opts) else: - salt.output.display_output({'return': ret}, 'key', opts=self.opts) + salt.output.display_output({u'return': ret}, u'key', opts=self.opts) except salt.exceptions.SaltException as exc: - ret = '{0}'.format(exc) - if not self.opts.get('quiet', False): - salt.output.display_output(ret, 'nested', self.opts) + ret = u'{0}'.format(exc) + if not self.opts.get(u'quiet', False): + salt.output.display_output(ret, u'nested', self.opts) return ret @@ -291,17 +292,17 @@ class MultiKeyCLI(KeyCLI): Manage multiple key backends from the CLI ''' def __init__(self, opts): - opts['__multi_key'] = True + opts[u'__multi_key'] = True super(MultiKeyCLI, self).__init__(opts) # Remove the key attribute set in KeyCLI.__init__ - delattr(self, 'key') + delattr(self, u'key') zopts = copy.copy(opts) ropts = copy.copy(opts) self.keys = {} - zopts['transport'] = 'zeromq' - self.keys['ZMQ Keys'] = KeyCLI(zopts) - ropts['transport'] = 'raet' - self.keys['RAET Keys'] = KeyCLI(ropts) + zopts[u'transport'] = u'zeromq' + self.keys[u'ZMQ Keys'] = KeyCLI(zopts) + ropts[u'transport'] = u'raet' + self.keys[u'RAET Keys'] = KeyCLI(ropts) def _call_all(self, fun, *args): ''' @@ -312,97 +313,99 @@ class MultiKeyCLI(KeyCLI): getattr(self.keys[kback], fun)(*args) def list_status(self, status): - self._call_all('list_status', status) + self._call_all(u'list_status', status) def list_all(self): - self._call_all('list_all') + self._call_all(u'list_all') def accept(self, match, include_rejected=False, include_denied=False): - self._call_all('accept', match, include_rejected, include_denied) + self._call_all(u'accept', match, include_rejected, include_denied) def accept_all(self, include_rejected=False, include_denied=False): - self._call_all('accept_all', include_rejected, include_denied) + self._call_all(u'accept_all', include_rejected, include_denied) def delete(self, match): - self._call_all('delete', match) + self._call_all(u'delete', match) def delete_all(self): - self._call_all('delete_all') + self._call_all(u'delete_all') def reject(self, match, include_accepted=False, include_denied=False): - self._call_all('reject', match, include_accepted, include_denied) + self._call_all(u'reject', match, include_accepted, include_denied) def reject_all(self, include_accepted=False, include_denied=False): - self._call_all('reject_all', include_accepted, include_denied) + self._call_all(u'reject_all', include_accepted, include_denied) def print_key(self, match): - self._call_all('print_key', match) + self._call_all(u'print_key', match) def print_all(self): - 
self._call_all('print_all') + self._call_all(u'print_all') def finger(self, match, hash_type): - self._call_all('finger', match, hash_type) + self._call_all(u'finger', match, hash_type) def finger_all(self, hash_type): - self._call_all('finger_all', hash_type) + self._call_all(u'finger_all', hash_type) def prep_signature(self): - self._call_all('prep_signature') + self._call_all(u'prep_signature') class Key(object): ''' The object that encapsulates saltkey actions ''' - ACC = 'minions' - PEND = 'minions_pre' - REJ = 'minions_rejected' - DEN = 'minions_denied' + ACC = u'minions' + PEND = u'minions_pre' + REJ = u'minions_rejected' + DEN = u'minions_denied' def __init__(self, opts, io_loop=None): self.opts = opts - kind = self.opts.get('__role', '') # application kind - if kind not in salt.utils.kinds.APPL_KINDS: - emsg = ("Invalid application kind = '{0}'.".format(kind)) - log.error(emsg + '\n') + kind = self.opts.get(u'__role', u'') # application kind + if kind not in kinds.APPL_KINDS: + emsg = (u"Invalid application kind = '{0}'.".format(kind)) + log.error(emsg + u'\n') raise ValueError(emsg) self.event = salt.utils.event.get_event( kind, - opts['sock_dir'], - opts['transport'], + opts[u'sock_dir'], + opts[u'transport'], opts=opts, listen=False, io_loop=io_loop ) + self.passphrase = salt.utils.sdb.sdb_get(self.opts['signing_key_pass'], self.opts) + def _check_minions_directories(self): ''' Return the minion keys directory paths ''' - minions_accepted = os.path.join(self.opts['pki_dir'], self.ACC) - minions_pre = os.path.join(self.opts['pki_dir'], self.PEND) - minions_rejected = os.path.join(self.opts['pki_dir'], + minions_accepted = os.path.join(self.opts[u'pki_dir'], self.ACC) + minions_pre = os.path.join(self.opts[u'pki_dir'], self.PEND) + minions_rejected = os.path.join(self.opts[u'pki_dir'], self.REJ) - minions_denied = os.path.join(self.opts['pki_dir'], + minions_denied = os.path.join(self.opts[u'pki_dir'], self.DEN) return minions_accepted, minions_pre, minions_rejected, minions_denied def _get_key_attrs(self, keydir, keyname, keysize, user): if not keydir: - if 'gen_keys_dir' in self.opts: - keydir = self.opts['gen_keys_dir'] + if u'gen_keys_dir' in self.opts: + keydir = self.opts[u'gen_keys_dir'] else: - keydir = self.opts['pki_dir'] + keydir = self.opts[u'pki_dir'] if not keyname: - if 'gen_keys' in self.opts: - keyname = self.opts['gen_keys'] + if u'gen_keys' in self.opts: + keyname = self.opts[u'gen_keys'] else: - keyname = 'minion' + keyname = u'minion' if not keysize: - keysize = self.opts['keysize'] + keysize = self.opts[u'keysize'] return keydir, keyname, keysize, user def gen_keys(self, keydir=None, keyname=None, keysize=None, user=None): @@ -411,8 +414,8 @@ class Key(object): ''' keydir, keyname, keysize, user = self._get_key_attrs(keydir, keyname, keysize, user) - salt.crypt.gen_keys(keydir, keyname, keysize, user) - return salt.utils.pem_finger(os.path.join(keydir, keyname + '.pub')) + salt.crypt.gen_keys(keydir, keyname, keysize, user, self.passphrase) + return salt.utils.pem_finger(os.path.join(keydir, keyname + u'.pub')) def gen_signature(self, privkey, pubkey, sig_path): ''' @@ -420,7 +423,8 @@ class Key(object): ''' return salt.crypt.gen_signature(privkey, pubkey, - sig_path) + sig_path, + self.passphrase) def gen_keys_signature(self, priv, pub, signature_path, auto_create=False, keysize=None): ''' @@ -429,51 +433,52 @@ class Key(object): # check given pub-key if pub: if not os.path.isfile(pub): - return 'Public-key {0} does not exist'.format(pub) + return 
u'Public-key {0} does not exist'.format(pub) # default to master.pub else: - mpub = self.opts['pki_dir'] + '/' + 'master.pub' + mpub = self.opts[u'pki_dir'] + u'/' + u'master.pub' if os.path.isfile(mpub): pub = mpub # check given priv-key if priv: if not os.path.isfile(priv): - return 'Private-key {0} does not exist'.format(priv) + return u'Private-key {0} does not exist'.format(priv) # default to master_sign.pem else: - mpriv = self.opts['pki_dir'] + '/' + 'master_sign.pem' + mpriv = self.opts[u'pki_dir'] + u'/' + u'master_sign.pem' if os.path.isfile(mpriv): priv = mpriv if not priv: if auto_create: - log.debug('Generating new signing key-pair {0}.* in {1}' - ''.format(self.opts['master_sign_key_name'], - self.opts['pki_dir'])) - salt.crypt.gen_keys(self.opts['pki_dir'], - self.opts['master_sign_key_name'], - keysize or self.opts['keysize'], - self.opts.get('user')) + log.debug( + u'Generating new signing key-pair .%s.* in %s', + self.opts[u'master_sign_key_name'], self.opts[u'pki_dir'] + ) + salt.crypt.gen_keys(self.opts[u'pki_dir'], + self.opts[u'master_sign_key_name'], + keysize or self.opts[u'keysize'], + self.opts.get(u'user'), + self.passphrase) - priv = self.opts['pki_dir'] + '/' + self.opts['master_sign_key_name'] + '.pem' + priv = self.opts[u'pki_dir'] + u'/' + self.opts[u'master_sign_key_name'] + u'.pem' else: - return 'No usable private-key found' + return u'No usable private-key found' if not pub: - return 'No usable public-key found' + return u'No usable public-key found' - log.debug('Using public-key {0}'.format(pub)) - log.debug('Using private-key {0}'.format(priv)) + log.debug(u'Using public-key %s', pub) + log.debug(u'Using private-key %s', priv) if signature_path: if not os.path.isdir(signature_path): - log.debug('target directory {0} does not exist' - ''.format(signature_path)) + log.debug(u'target directory %s does not exist', signature_path) else: - signature_path = self.opts['pki_dir'] + signature_path = self.opts[u'pki_dir'] - sign_path = signature_path + '/' + self.opts['master_pubkey_signature'] + sign_path = signature_path + u'/' + self.opts[u'master_pubkey_signature'] skey = get_key(self.opts) return skey.gen_signature(priv, pub, sign_path) @@ -491,8 +496,8 @@ class Key(object): minions = [] for key, val in six.iteritems(keys): minions.extend(val) - if not self.opts.get('preserve_minion_cache', False) or not preserve_minions: - m_cache = os.path.join(self.opts['cachedir'], self.ACC) + if not self.opts.get(u'preserve_minion_cache', False) or not preserve_minions: + m_cache = os.path.join(self.opts[u'cachedir'], self.ACC) if os.path.isdir(m_cache): for minion in os.listdir(m_cache): if minion not in minions and minion not in preserve_minions: @@ -502,7 +507,7 @@ class Key(object): if clist: for minion in clist: if minion not in minions and minion not in preserve_minions: - cache.flush('{0}/{1}'.format(self.ACC, minion)) + cache.flush(u'{0}/{1}'.format(self.ACC, minion)) def check_master(self): ''' @@ -513,8 +518,8 @@ class Key(object): ''' if not os.path.exists( os.path.join( - self.opts['sock_dir'], - 'publish_pull.ipc' + self.opts[u'sock_dir'], + u'publish_pull.ipc' ) ): return False @@ -529,8 +534,8 @@ class Key(object): else: matches = self.list_keys() ret = {} - if ',' in match and isinstance(match, str): - match = match.split(',') + if u',' in match and isinstance(match, six.string_types): + match = match.split(u',') for status, keys in six.iteritems(matches): for key in salt.utils.isorted(keys): if isinstance(match, list): @@ -564,12 +569,12 @@ class 
Key(object): ''' Return a dict of local keys ''' - ret = {'local': []} - for fn_ in salt.utils.isorted(os.listdir(self.opts['pki_dir'])): - if fn_.endswith('.pub') or fn_.endswith('.pem'): - path = os.path.join(self.opts['pki_dir'], fn_) + ret = {u'local': []} + for fn_ in salt.utils.isorted(os.listdir(self.opts[u'pki_dir'])): + if fn_.endswith(u'.pub') or fn_.endswith(u'.pem'): + path = os.path.join(self.opts[u'pki_dir'], fn_) if os.path.isfile(path): - ret['local'].append(fn_) + ret[u'local'].append(fn_) return ret def list_keys(self): @@ -592,7 +597,7 @@ class Key(object): ret[os.path.basename(dir_)] = [] try: for fn_ in salt.utils.isorted(os.listdir(dir_)): - if not fn_.startswith('.'): + if not fn_.startswith(u'.'): if os.path.isfile(os.path.join(dir_, fn_)): ret[os.path.basename(dir_)].append(fn_) except (OSError, IOError): @@ -614,31 +619,31 @@ class Key(object): ''' acc, pre, rej, den = self._check_minions_directories() ret = {} - if match.startswith('acc'): + if match.startswith(u'acc'): ret[os.path.basename(acc)] = [] for fn_ in salt.utils.isorted(os.listdir(acc)): - if not fn_.startswith('.'): + if not fn_.startswith(u'.'): if os.path.isfile(os.path.join(acc, fn_)): ret[os.path.basename(acc)].append(fn_) - elif match.startswith('pre') or match.startswith('un'): + elif match.startswith(u'pre') or match.startswith(u'un'): ret[os.path.basename(pre)] = [] for fn_ in salt.utils.isorted(os.listdir(pre)): - if not fn_.startswith('.'): + if not fn_.startswith(u'.'): if os.path.isfile(os.path.join(pre, fn_)): ret[os.path.basename(pre)].append(fn_) - elif match.startswith('rej'): + elif match.startswith(u'rej'): ret[os.path.basename(rej)] = [] for fn_ in salt.utils.isorted(os.listdir(rej)): - if not fn_.startswith('.'): + if not fn_.startswith(u'.'): if os.path.isfile(os.path.join(rej, fn_)): ret[os.path.basename(rej)].append(fn_) - elif match.startswith('den') and den is not None: + elif match.startswith(u'den') and den is not None: ret[os.path.basename(den)] = [] for fn_ in salt.utils.isorted(os.listdir(den)): - if not fn_.startswith('.'): + if not fn_.startswith(u'.'): if os.path.isfile(os.path.join(den, fn_)): ret[os.path.basename(den)].append(fn_) - elif match.startswith('all'): + elif match.startswith(u'all'): return self.all_keys() return ret @@ -650,8 +655,8 @@ class Key(object): for status, keys in six.iteritems(self.name_match(match)): ret[status] = {} for key in salt.utils.isorted(keys): - path = os.path.join(self.opts['pki_dir'], status, key) - with salt.utils.files.fopen(path, 'r') as fp_: + path = os.path.join(self.opts[u'pki_dir'], status, key) + with salt.utils.files.fopen(path, u'r') as fp_: ret[status][key] = fp_.read() return ret @@ -663,8 +668,8 @@ class Key(object): for status, keys in six.iteritems(self.list_keys()): ret[status] = {} for key in salt.utils.isorted(keys): - path = os.path.join(self.opts['pki_dir'], status, key) - with salt.utils.files.fopen(path, 'r') as fp_: + path = os.path.join(self.opts[u'pki_dir'], status, key) + with salt.utils.files.fopen(path, u'r') as fp_: ret[status][key] = fp_.read() return ret @@ -689,19 +694,19 @@ class Key(object): try: shutil.move( os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], keydir, key), os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], self.ACC, key) ) - eload = {'result': True, - 'act': 'accept', - 'id': key} + eload = {u'result': True, + u'act': u'accept', + u'id': key} self.event.fire_event(eload, - salt.utils.event.tagify(prefix='key')) + salt.utils.event.tagify(prefix=u'key')) 
except (IOError, OSError): pass return ( @@ -718,19 +723,19 @@ class Key(object): try: shutil.move( os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], self.PEND, key), os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], self.ACC, key) ) - eload = {'result': True, - 'act': 'accept', - 'id': key} + eload = {u'result': True, + u'act': u'accept', + u'id': key} self.event.fire_event(eload, - salt.utils.event.tagify(prefix='key')) + salt.utils.event.tagify(prefix=u'key')) except (IOError, OSError): pass return self.list_keys() @@ -756,33 +761,33 @@ class Key(object): for key in keys: try: if revoke_auth: - if self.opts.get('rotate_aes_key') is False: - print('Immediate auth revocation specified but AES key rotation not allowed. ' - 'Minion will not be disconnected until the master AES key is rotated.') + if self.opts.get(u'rotate_aes_key') is False: + print(u'Immediate auth revocation specified but AES key rotation not allowed. ' + u'Minion will not be disconnected until the master AES key is rotated.') else: try: client = salt.client.get_local_client(mopts=self.opts) - client.cmd_async(key, 'saltutil.revoke_auth') + client.cmd_async(key, u'saltutil.revoke_auth') except salt.exceptions.SaltClientError: - print('Cannot contact Salt master. ' - 'Connection for {0} will remain up until ' - 'master AES key is rotated or auth is revoked ' - 'with \'saltutil.revoke_auth\'.'.format(key)) - os.remove(os.path.join(self.opts['pki_dir'], status, key)) - eload = {'result': True, - 'act': 'delete', - 'id': key} + print(u'Cannot contact Salt master. ' + u'Connection for {0} will remain up until ' + u'master AES key is rotated or auth is revoked ' + u'with \'saltutil.revoke_auth\'.'.format(key)) + os.remove(os.path.join(self.opts[u'pki_dir'], status, key)) + eload = {u'result': True, + u'act': u'delete', + u'id': key} self.event.fire_event(eload, - salt.utils.event.tagify(prefix='key')) + salt.utils.event.tagify(prefix=u'key')) except (OSError, IOError): pass if preserve_minions: - preserve_minions_list = matches.get('minions', []) + preserve_minions_list = matches.get(u'minions', []) else: preserve_minions_list = [] self.check_minion_cache(preserve_minions=preserve_minions_list) - if self.opts.get('rotate_aes_key'): - salt.crypt.dropfile(self.opts['cachedir'], self.opts['user']) + if self.opts.get(u'rotate_aes_key'): + salt.crypt.dropfile(self.opts[u'cachedir'], self.opts[u'user']) return ( self.name_match(match) if match is not None else self.dict_match(matches) @@ -796,12 +801,12 @@ class Key(object): for status, keys in six.iteritems(self.list_keys()): for key in keys[self.DEN]: try: - os.remove(os.path.join(self.opts['pki_dir'], status, key)) - eload = {'result': True, - 'act': 'delete', - 'id': key} + os.remove(os.path.join(self.opts[u'pki_dir'], status, key)) + eload = {u'result': True, + u'act': u'delete', + u'id': key} self.event.fire_event(eload, - salt.utils.event.tagify(prefix='key')) + salt.utils.event.tagify(prefix=u'key')) except (OSError, IOError): pass self.check_minion_cache() @@ -814,17 +819,17 @@ class Key(object): for status, keys in six.iteritems(self.list_keys()): for key in keys: try: - os.remove(os.path.join(self.opts['pki_dir'], status, key)) - eload = {'result': True, - 'act': 'delete', - 'id': key} + os.remove(os.path.join(self.opts[u'pki_dir'], status, key)) + eload = {u'result': True, + u'act': u'delete', + u'id': key} self.event.fire_event(eload, - salt.utils.event.tagify(prefix='key')) + salt.utils.event.tagify(prefix=u'key')) except (OSError, IOError): 
pass self.check_minion_cache() - if self.opts.get('rotate_aes_key'): - salt.crypt.dropfile(self.opts['cachedir'], self.opts['user']) + if self.opts.get(u'rotate_aes_key'): + salt.crypt.dropfile(self.opts[u'cachedir'], self.opts[u'user']) return self.list_keys() def reject(self, match=None, match_dict=None, include_accepted=False, include_denied=False): @@ -848,24 +853,24 @@ class Key(object): try: shutil.move( os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], keydir, key), os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], self.REJ, key) ) - eload = {'result': True, - 'act': 'reject', - 'id': key} + eload = {u'result': True, + u'act': u'reject', + u'id': key} self.event.fire_event(eload, - salt.utils.event.tagify(prefix='key')) + salt.utils.event.tagify(prefix=u'key')) except (IOError, OSError): pass self.check_minion_cache() - if self.opts.get('rotate_aes_key'): - salt.crypt.dropfile(self.opts['cachedir'], self.opts['user']) + if self.opts.get(u'rotate_aes_key'): + salt.crypt.dropfile(self.opts[u'cachedir'], self.opts[u'user']) return ( self.name_match(match) if match is not None else self.dict_match(matches) @@ -880,24 +885,24 @@ class Key(object): try: shutil.move( os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], self.PEND, key), os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], self.REJ, key) ) - eload = {'result': True, - 'act': 'reject', - 'id': key} + eload = {u'result': True, + u'act': u'reject', + u'id': key} self.event.fire_event(eload, - salt.utils.event.tagify(prefix='key')) + salt.utils.event.tagify(prefix=u'key')) except (IOError, OSError): pass self.check_minion_cache() - if self.opts.get('rotate_aes_key'): - salt.crypt.dropfile(self.opts['cachedir'], self.opts['user']) + if self.opts.get(u'rotate_aes_key'): + salt.crypt.dropfile(self.opts[u'cachedir'], self.opts[u'user']) return self.list_keys() def finger(self, match, hash_type=None): @@ -905,17 +910,17 @@ class Key(object): Return the fingerprint for a specified key ''' if hash_type is None: - hash_type = __opts__['hash_type'] + hash_type = __opts__[u'hash_type'] matches = self.name_match(match, True) ret = {} for status, keys in six.iteritems(matches): ret[status] = {} for key in keys: - if status == 'local': - path = os.path.join(self.opts['pki_dir'], key) + if status == u'local': + path = os.path.join(self.opts[u'pki_dir'], key) else: - path = os.path.join(self.opts['pki_dir'], status, key) + path = os.path.join(self.opts[u'pki_dir'], status, key) ret[status][key] = salt.utils.pem_finger(path, sum_type=hash_type) return ret @@ -924,16 +929,16 @@ class Key(object): Return fingerprints for all keys ''' if hash_type is None: - hash_type = __opts__['hash_type'] + hash_type = __opts__[u'hash_type'] ret = {} for status, keys in six.iteritems(self.all_keys()): ret[status] = {} for key in keys: - if status == 'local': - path = os.path.join(self.opts['pki_dir'], key) + if status == u'local': + path = os.path.join(self.opts[u'pki_dir'], key) else: - path = os.path.join(self.opts['pki_dir'], status, key) + path = os.path.join(self.opts[u'pki_dir'], status, key) ret[status][key] = salt.utils.pem_finger(path, sum_type=hash_type) return ret @@ -942,9 +947,9 @@ class RaetKey(Key): ''' Manage keys from the raet backend ''' - ACC = 'accepted' - PEND = 'pending' - REJ = 'rejected' + ACC = u'accepted' + PEND = u'pending' + REJ = u'rejected' DEN = None def __init__(self, opts): @@ -956,9 +961,9 @@ class RaetKey(Key): ''' Return the minion keys directory paths ''' - accepted = 
os.path.join(self.opts['pki_dir'], self.ACC) - pre = os.path.join(self.opts['pki_dir'], self.PEND) - rejected = os.path.join(self.opts['pki_dir'], self.REJ) + accepted = os.path.join(self.opts[u'pki_dir'], self.ACC) + pre = os.path.join(self.opts[u'pki_dir'], self.PEND) + rejected = os.path.join(self.opts[u'pki_dir'], self.REJ) return accepted, pre, rejected, None def check_minion_cache(self, preserve_minions=False): @@ -970,7 +975,7 @@ class RaetKey(Key): for key, val in six.iteritems(keys): minions.extend(val) - m_cache = os.path.join(self.opts['cachedir'], 'minions') + m_cache = os.path.join(self.opts[u'cachedir'], u'minions') if os.path.isdir(m_cache): for minion in os.listdir(m_cache): if minion not in minions: @@ -980,39 +985,39 @@ class RaetKey(Key): if clist: for minion in clist: if minion not in minions and minion not in preserve_minions: - cache.flush('{0}/{1}'.format(self.ACC, minion)) + cache.flush(u'{0}/{1}'.format(self.ACC, minion)) - kind = self.opts.get('__role', '') # application kind - if kind not in salt.utils.kinds.APPL_KINDS: - emsg = ("Invalid application kind = '{0}'.".format(kind)) - log.error(emsg + '\n') + kind = self.opts.get(u'__role', u'') # application kind + if kind not in kinds.APPL_KINDS: + emsg = (u"Invalid application kind = '{0}'.".format(kind)) + log.error(emsg + u'\n') raise ValueError(emsg) - role = self.opts.get('id', '') + role = self.opts.get(u'id', u'') if not role: - emsg = ("Invalid id.") - log.error(emsg + "\n") + emsg = (u"Invalid id.") + log.error(emsg + u"\n") raise ValueError(emsg) - name = "{0}_{1}".format(role, kind) - road_cache = os.path.join(self.opts['cachedir'], - 'raet', + name = u"{0}_{1}".format(role, kind) + road_cache = os.path.join(self.opts[u'cachedir'], + u'raet', name, - 'remote') + u'remote') if os.path.isdir(road_cache): for road in os.listdir(road_cache): root, ext = os.path.splitext(road) - if ext not in ['.json', '.msgpack']: + if ext not in (u'.json', u'.msgpack'): continue - prefix, sep, name = root.partition('.') - if not name or prefix != 'estate': + prefix, sep, name = root.partition(u'.') + if not name or prefix != u'estate': continue path = os.path.join(road_cache, road) - with salt.utils.files.fopen(path, 'rb') as fp_: - if ext == '.json': + with salt.utils.files.fopen(path, u'rb') as fp_: + if ext == u'.json': data = json.load(fp_) - elif ext == '.msgpack': + elif ext == u'.msgpack': data = msgpack.load(fp_) - if data['role'] not in minions: + if data[u'role'] not in minions: os.remove(path) def gen_keys(self, keydir=None, keyname=None, keysize=None, user=None): @@ -1023,10 +1028,10 @@ class RaetKey(Key): d_key = libnacl.dual.DualSecret() keydir, keyname, _, _ = self._get_key_attrs(keydir, keyname, keysize, user) - path = '{0}.key'.format(os.path.join( + path = u'{0}.key'.format(os.path.join( keydir, keyname)) - d_key.save(path, 'msgpack') + d_key.save(path, u'msgpack') def check_master(self): ''' @@ -1039,10 +1044,10 @@ class RaetKey(Key): ''' Return a dict of local keys ''' - ret = {'local': []} - fn_ = os.path.join(self.opts['pki_dir'], 'local.key') + ret = {u'local': []} + fn_ = os.path.join(self.opts[u'pki_dir'], u'local.key') if os.path.isfile(fn_): - ret['local'].append(fn_) + ret[u'local'].append(fn_) return ret def status(self, minion_id, pub, verify): @@ -1058,47 +1063,47 @@ class RaetKey(Key): rej_path = os.path.join(rej, minion_id) # open mode is turned on, force accept the key keydata = { - 'minion_id': minion_id, - 'pub': pub, - 'verify': verify} - if self.opts['open_mode']: # always accept and 
overwrite - with salt.utils.files.fopen(acc_path, 'w+b') as fp_: + u'minion_id': minion_id, + u'pub': pub, + u'verify': verify} + if self.opts[u'open_mode']: # always accept and overwrite + with salt.utils.files.fopen(acc_path, u'w+b') as fp_: fp_.write(self.serial.dumps(keydata)) return self.ACC if os.path.isfile(rej_path): - log.debug("Rejection Reason: Keys already rejected.\n") + log.debug(u"Rejection Reason: Keys already rejected.\n") return self.REJ elif os.path.isfile(acc_path): # The minion id has been accepted, verify the key strings - with salt.utils.files.fopen(acc_path, 'rb') as fp_: + with salt.utils.files.fopen(acc_path, u'rb') as fp_: keydata = self.serial.loads(fp_.read()) - if keydata['pub'] == pub and keydata['verify'] == verify: + if keydata[u'pub'] == pub and keydata[u'verify'] == verify: return self.ACC else: - log.debug("Rejection Reason: Keys not match prior accepted.\n") + log.debug(u"Rejection Reason: Keys not match prior accepted.\n") return self.REJ elif os.path.isfile(pre_path): auto_reject = self.auto_key.check_autoreject(minion_id) auto_sign = self.auto_key.check_autosign(minion_id) - with salt.utils.files.fopen(pre_path, 'rb') as fp_: + with salt.utils.files.fopen(pre_path, u'rb') as fp_: keydata = self.serial.loads(fp_.read()) - if keydata['pub'] == pub and keydata['verify'] == verify: + if keydata[u'pub'] == pub and keydata[u'verify'] == verify: if auto_reject: self.reject(minion_id) - log.debug("Rejection Reason: Auto reject pended.\n") + log.debug(u"Rejection Reason: Auto reject pended.\n") return self.REJ elif auto_sign: self.accept(minion_id) return self.ACC return self.PEND else: - log.debug("Rejection Reason: Keys not match prior pended.\n") + log.debug(u"Rejection Reason: Keys not match prior pended.\n") return self.REJ # This is a new key, evaluate auto accept/reject files and place # accordingly auto_reject = self.auto_key.check_autoreject(minion_id) auto_sign = self.auto_key.check_autosign(minion_id) - if self.opts['auto_accept']: + if self.opts[u'auto_accept']: w_path = acc_path ret = self.ACC elif auto_sign: @@ -1106,12 +1111,12 @@ class RaetKey(Key): ret = self.ACC elif auto_reject: w_path = rej_path - log.debug("Rejection Reason: Auto reject new.\n") + log.debug(u"Rejection Reason: Auto reject new.\n") ret = self.REJ else: w_path = pre_path ret = self.PEND - with salt.utils.files.fopen(w_path, 'w+b') as fp_: + with salt.utils.files.fopen(w_path, u'w+b') as fp_: fp_.write(self.serial.dumps(keydata)) return ret @@ -1122,22 +1127,22 @@ class RaetKey(Key): pub: verify: ''' - path = os.path.join(self.opts['pki_dir'], status, minion_id) - with salt.utils.files.fopen(path, 'r') as fp_: + path = os.path.join(self.opts[u'pki_dir'], status, minion_id) + with salt.utils.files.fopen(path, u'r') as fp_: keydata = self.serial.loads(fp_.read()) - return 'pub: {0}\nverify: {1}'.format( - keydata['pub'], - keydata['verify']) + return u'pub: {0}\nverify: {1}'.format( + keydata[u'pub'], + keydata[u'verify']) def _get_key_finger(self, path): ''' Return a sha256 kingerprint for the key ''' - with salt.utils.files.fopen(path, 'r') as fp_: + with salt.utils.files.fopen(path, u'r') as fp_: keydata = self.serial.loads(fp_.read()) - key = 'pub: {0}\nverify: {1}'.format( - keydata['pub'], - keydata['verify']) + key = u'pub: {0}\nverify: {1}'.format( + keydata[u'pub'], + keydata[u'verify']) return hashlib.sha256(key).hexdigest() def key_str(self, match): @@ -1183,11 +1188,11 @@ class RaetKey(Key): try: shutil.move( os.path.join( - self.opts['pki_dir'], + 
self.opts[u'pki_dir'], keydir, key), os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], self.ACC, key) ) @@ -1207,11 +1212,11 @@ class RaetKey(Key): try: shutil.move( os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], self.PEND, key), os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], self.ACC, key) ) @@ -1237,23 +1242,23 @@ class RaetKey(Key): for status, keys in six.iteritems(matches): for key in keys: if revoke_auth: - if self.opts.get('rotate_aes_key') is False: - print('Immediate auth revocation specified but AES key rotation not allowed. ' - 'Minion will not be disconnected until the master AES key is rotated.') + if self.opts.get(u'rotate_aes_key') is False: + print(u'Immediate auth revocation specified but AES key rotation not allowed. ' + u'Minion will not be disconnected until the master AES key is rotated.') else: try: client = salt.client.get_local_client(mopts=self.opts) - client.cmd_async(key, 'saltutil.revoke_auth') + client.cmd_async(key, u'saltutil.revoke_auth') except salt.exceptions.SaltClientError: - print('Cannot contact Salt master. ' - 'Connection for {0} will remain up until ' - 'master AES key is rotated or auth is revoked ' - 'with \'saltutil.revoke_auth\'.'.format(key)) + print(u'Cannot contact Salt master. ' + u'Connection for {0} will remain up until ' + u'master AES key is rotated or auth is revoked ' + u'with \'saltutil.revoke_auth\'.'.format(key)) try: - os.remove(os.path.join(self.opts['pki_dir'], status, key)) + os.remove(os.path.join(self.opts[u'pki_dir'], status, key)) except (OSError, IOError): pass - self.check_minion_cache(preserve_minions=matches.get('minions', [])) + self.check_minion_cache(preserve_minions=matches.get(u'minions', [])) return ( self.name_match(match) if match is not None else self.dict_match(matches) @@ -1266,7 +1271,7 @@ class RaetKey(Key): for status, keys in six.iteritems(self.list_keys()): for key in keys: try: - os.remove(os.path.join(self.opts['pki_dir'], status, key)) + os.remove(os.path.join(self.opts[u'pki_dir'], status, key)) except (OSError, IOError): pass self.check_minion_cache() @@ -1293,11 +1298,11 @@ class RaetKey(Key): try: shutil.move( os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], keydir, key), os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], self.REJ, key) ) @@ -1318,11 +1323,11 @@ class RaetKey(Key): try: shutil.move( os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], self.PEND, key), os.path.join( - self.opts['pki_dir'], + self.opts[u'pki_dir'], self.REJ, key) ) @@ -1336,17 +1341,17 @@ class RaetKey(Key): Return the fingerprint for a specified key ''' if hash_type is None: - hash_type = __opts__['hash_type'] + hash_type = __opts__[u'hash_type'] matches = self.name_match(match, True) ret = {} for status, keys in six.iteritems(matches): ret[status] = {} for key in keys: - if status == 'local': - path = os.path.join(self.opts['pki_dir'], key) + if status == u'local': + path = os.path.join(self.opts[u'pki_dir'], key) else: - path = os.path.join(self.opts['pki_dir'], status, key) + path = os.path.join(self.opts[u'pki_dir'], status, key) ret[status][key] = self._get_key_finger(path) return ret @@ -1355,16 +1360,16 @@ class RaetKey(Key): Return fingerprints for all keys ''' if hash_type is None: - hash_type = __opts__['hash_type'] + hash_type = __opts__[u'hash_type'] ret = {} for status, keys in six.iteritems(self.list_keys()): ret[status] = {} for key in keys: - if status == 'local': - path = os.path.join(self.opts['pki_dir'], key) + if 
status == u'local': + path = os.path.join(self.opts[u'pki_dir'], key) else: - path = os.path.join(self.opts['pki_dir'], status, key) + path = os.path.join(self.opts[u'pki_dir'], status, key) ret[status][key] = self._get_key_finger(path) return ret @@ -1377,7 +1382,7 @@ class RaetKey(Key): for mid in mids: keydata = self.read_remote(mid, status) if keydata: - keydata['acceptance'] = status + keydata[u'acceptance'] = status data[mid] = keydata return data @@ -1386,10 +1391,10 @@ class RaetKey(Key): ''' Read in a remote key of status ''' - path = os.path.join(self.opts['pki_dir'], status, minion_id) + path = os.path.join(self.opts[u'pki_dir'], status, minion_id) if not os.path.isfile(path): return {} - with salt.utils.files.fopen(path, 'rb') as fp_: + with salt.utils.files.fopen(path, u'rb') as fp_: return self.serial.loads(fp_.read()) def read_local(self): @@ -1397,24 +1402,24 @@ class RaetKey(Key): Read in the local private keys, return an empy dict if the keys do not exist ''' - path = os.path.join(self.opts['pki_dir'], 'local.key') + path = os.path.join(self.opts[u'pki_dir'], u'local.key') if not os.path.isfile(path): return {} - with salt.utils.files.fopen(path, 'rb') as fp_: + with salt.utils.files.fopen(path, u'rb') as fp_: return self.serial.loads(fp_.read()) def write_local(self, priv, sign): ''' Write the private key and the signing key to a file on disk ''' - keydata = {'priv': priv, - 'sign': sign} - path = os.path.join(self.opts['pki_dir'], 'local.key') + keydata = {u'priv': priv, + u'sign': sign} + path = os.path.join(self.opts[u'pki_dir'], u'local.key') c_umask = os.umask(191) if os.path.exists(path): #mode = os.stat(path).st_mode os.chmod(path, stat.S_IWUSR | stat.S_IRUSR) - with salt.utils.files.fopen(path, 'w+') as fp_: + with salt.utils.files.fopen(path, u'w+') as fp_: fp_.write(self.serial.dumps(keydata)) os.chmod(path, stat.S_IRUSR) os.umask(c_umask) @@ -1423,7 +1428,7 @@ class RaetKey(Key): ''' Delete the local private key file ''' - path = os.path.join(self.opts['pki_dir'], 'local.key') + path = os.path.join(self.opts[u'pki_dir'], u'local.key') if os.path.isfile(path): os.remove(path) @@ -1431,6 +1436,6 @@ class RaetKey(Key): ''' Delete the private key directory ''' - path = self.opts['pki_dir'] + path = self.opts[u'pki_dir'] if os.path.exists(path): shutil.rmtree(path) diff --git a/salt/loader.py b/salt/loader.py index 8de5c86b77..305480de39 100644 --- a/salt/loader.py +++ b/salt/loader.py @@ -22,17 +22,19 @@ from zipimport import zipimporter import salt.config import salt.syspaths import salt.utils.context +import salt.utils.dictupdate +import salt.utils.event import salt.utils.files import salt.utils.lazy -import salt.utils.event import salt.utils.odict +import salt.utils.platform +import salt.utils.versions from salt.exceptions import LoaderError from salt.template import check_render_pipe_str from salt.utils.decorators import Depends -from salt.utils import is_proxy # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import reload_module if sys.version_info[:2] >= (3, 5): @@ -52,7 +54,7 @@ except ImportError: log = logging.getLogger(__name__) SALT_BASE_PATH = os.path.abspath(salt.syspaths.INSTALL_DIR) -LOADED_BASE_NAME = 'salt.loaded' +LOADED_BASE_NAME = u'salt.loaded' if USE_IMPORTLIB: # pylint: disable=no-member @@ -62,11 +64,11 @@ if USE_IMPORTLIB: MODULE_KIND_PKG_DIRECTORY = 5 SUFFIXES = [] for suffix in importlib.machinery.EXTENSION_SUFFIXES: - SUFFIXES.append((suffix, 'rb', MODULE_KIND_EXTENSION)) + 
SUFFIXES.append((suffix, u'rb', MODULE_KIND_EXTENSION)) for suffix in importlib.machinery.BYTECODE_SUFFIXES: - SUFFIXES.append((suffix, 'rb', MODULE_KIND_COMPILED)) + SUFFIXES.append((suffix, u'rb', MODULE_KIND_COMPILED)) for suffix in importlib.machinery.SOURCE_SUFFIXES: - SUFFIXES.append((suffix, 'rb', MODULE_KIND_SOURCE)) + SUFFIXES.append((suffix, u'rb', MODULE_KIND_SOURCE)) MODULE_KIND_MAP = { MODULE_KIND_SOURCE: importlib.machinery.SourceFileLoader, MODULE_KIND_COMPILED: importlib.machinery.SourcelessFileLoader, @@ -80,17 +82,14 @@ else: # which simplifies code readability, it adds some unsupported functions into # the driver's module scope. # We list un-supported functions here. These will be removed from the loaded. +# TODO: remove the need for this cross-module code. Maybe use NotImplemented LIBCLOUD_FUNCS_NOT_SUPPORTED = ( - 'parallels.avail_sizes', - 'parallels.avail_locations', - 'proxmox.avail_sizes', - 'saltify.destroy', - 'saltify.avail_sizes', - 'saltify.avail_images', - 'saltify.avail_locations', - 'rackspace.reboot', - 'openstack.list_locations', - 'rackspace.list_locations' + u'parallels.avail_sizes', + u'parallels.avail_locations', + u'proxmox.avail_sizes', + u'rackspace.reboot', + u'openstack.list_locations', + u'rackspace.list_locations' ) # Will be set to pyximport module at runtime if cython is enabled in config. @@ -143,30 +142,30 @@ def _module_dirs( if tag is None: tag = ext_type sys_types = os.path.join(base_path or SALT_BASE_PATH, int_type or ext_type) - ext_types = os.path.join(opts['extension_modules'], ext_type) + ext_types = os.path.join(opts[u'extension_modules'], ext_type) ext_type_types = [] if ext_dirs: if ext_type_dirs is None: - ext_type_dirs = '{0}_dirs'.format(tag) + ext_type_dirs = u'{0}_dirs'.format(tag) if ext_type_dirs in opts: ext_type_types.extend(opts[ext_type_dirs]) if HAS_PKG_RESOURCES and ext_type_dirs: - for entry_point in pkg_resources.iter_entry_points('salt.loader', ext_type_dirs): + for entry_point in pkg_resources.iter_entry_points(u'salt.loader', ext_type_dirs): loaded_entry_point = entry_point.load() for path in loaded_entry_point(): ext_type_types.append(path) cli_module_dirs = [] # The dirs can be any module dir, or a in-tree _{ext_type} dir - for _dir in opts.get('module_dirs', []): + for _dir in opts.get(u'module_dirs', []): # Prepend to the list to match cli argument ordering maybe_dir = os.path.join(_dir, ext_type) if os.path.isdir(maybe_dir): cli_module_dirs.insert(0, maybe_dir) continue - maybe_dir = os.path.join(_dir, '_{0}'.format(ext_type)) + maybe_dir = os.path.join(_dir, u'_{0}'.format(ext_type)) if os.path.isdir(maybe_dir): cli_module_dirs.insert(0, maybe_dir) @@ -195,7 +194,7 @@ def minion_mods( generated modules in __context__ :param dict utils: Utility functions which should be made available to - Salt modules in __utils__. See `utils_dir` in + Salt modules in __utils__. See `utils_dirs` in salt.config for additional information about configuration. 
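# --- Illustrative sketch, not part of the patch above: how the loader entry
# points touched in these hunks (utils() and minion_mods() with its `utils`
# pack) are typically wired together. The config path and the exact call
# pattern are assumptions based on the usual salt-call bootstrap, not code
# taken from this changeset.
import salt.config
import salt.loader

__opts__ = salt.config.minion_config('/etc/salt/minion')
__opts__['grains'] = salt.loader.grains(__opts__)          # populate grains before building __salt__
__utils__ = salt.loader.utils(__opts__)                     # utility modules exposed as __utils__
__salt__ = salt.loader.minion_mods(__opts__, utils=__utils__)

__salt__['test.ping']()   # execution functions are keyed as 'module.function'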
@@ -219,23 +218,23 @@ def minion_mods( ''' # TODO Publish documentation for module whitelisting if not whitelist: - whitelist = opts.get('whitelist_modules', None) + whitelist = opts.get(u'whitelist_modules', None) ret = LazyLoader( - _module_dirs(opts, 'modules', 'module'), + _module_dirs(opts, u'modules', u'module'), opts, - tag='module', - pack={'__context__': context, '__utils__': utils, '__proxy__': proxy}, + tag=u'module', + pack={u'__context__': context, u'__utils__': utils, u'__proxy__': proxy}, whitelist=whitelist, loaded_base_name=loaded_base_name, static_modules=static_modules, ) - ret.pack['__salt__'] = ret + ret.pack[u'__salt__'] = ret # Load any provider overrides from the configuration file providers option # Note: Providers can be pkg, service, user or group - not to be confused # with cloud providers. - providers = opts.get('providers', False) + providers = opts.get(u'providers', False) if providers and isinstance(providers, dict): for mod in providers: # sometimes providers opts is not to diverge modules but @@ -247,17 +246,17 @@ def minion_mods( else: if funcs: for func in funcs: - f_key = '{0}{1}'.format(mod, func[func.rindex('.'):]) + f_key = u'{0}{1}'.format(mod, func[func.rindex(u'.'):]) ret[f_key] = funcs[func] if notify: - evt = salt.utils.event.get_event('minion', opts=opts, listen=False) - evt.fire_event({'complete': True}, tag='/salt/minion/minion_mod_complete') + evt = salt.utils.event.get_event(u'minion', opts=opts, listen=False) + evt.fire_event({u'complete': True}, tag=u'/salt/minion/minion_mod_complete') return ret -def raw_mod(opts, name, functions, mod='modules'): +def raw_mod(opts, name, functions, mod=u'modules'): ''' Returns a single module loaded raw and bypassing the __virtual__ function @@ -271,11 +270,11 @@ def raw_mod(opts, name, functions, mod='modules'): testmod['test.ping']() ''' loader = LazyLoader( - _module_dirs(opts, mod, 'rawmodule'), + _module_dirs(opts, mod, u'rawmodule'), opts, - tag='rawmodule', + tag=u'rawmodule', virtual_enable=False, - pack={'__salt__': functions}, + pack={u'__salt__': functions}, ) # if we don't have the module, return an empty dict if name not in loader.file_mapping: @@ -289,14 +288,14 @@ def engines(opts, functions, runners, utils, proxy=None): ''' Return the master services plugins ''' - pack = {'__salt__': functions, - '__runners__': runners, - '__proxy__': proxy, - '__utils__': utils} + pack = {u'__salt__': functions, + u'__runners__': runners, + u'__proxy__': proxy, + u'__utils__': utils} return LazyLoader( - _module_dirs(opts, 'engines'), + _module_dirs(opts, u'engines'), opts, - tag='engines', + tag=u'engines', pack=pack, ) @@ -306,13 +305,13 @@ def proxy(opts, functions=None, returners=None, whitelist=None, utils=None): Returns the proxy module for this salt-proxy-minion ''' ret = LazyLoader( - _module_dirs(opts, 'proxy'), + _module_dirs(opts, u'proxy'), opts, - tag='proxy', - pack={'__salt__': functions, '__ret__': returners, '__utils__': utils}, + tag=u'proxy', + pack={u'__salt__': functions, u'__ret__': returners, u'__utils__': utils}, ) - ret.pack['__proxy__'] = ret + ret.pack[u'__proxy__'] = ret return ret @@ -322,11 +321,11 @@ def returners(opts, functions, whitelist=None, context=None, proxy=None): Returns the returner modules ''' return LazyLoader( - _module_dirs(opts, 'returners', 'returner'), + _module_dirs(opts, u'returners', u'returner'), opts, - tag='returner', + tag=u'returner', whitelist=whitelist, - pack={'__salt__': functions, '__context__': context, '__proxy__': proxy or {}}, + 
pack={u'__salt__': functions, u'__context__': context, u'__proxy__': proxy or {}}, ) @@ -335,11 +334,11 @@ def utils(opts, whitelist=None, context=None, proxy=proxy): Returns the utility modules ''' return LazyLoader( - _module_dirs(opts, 'utils', ext_type_dirs='utils_dirs'), + _module_dirs(opts, u'utils', ext_type_dirs=u'utils_dirs'), opts, - tag='utils', + tag=u'utils', whitelist=whitelist, - pack={'__context__': context, '__proxy__': proxy or {}}, + pack={u'__context__': context, u'__proxy__': proxy or {}}, ) @@ -347,30 +346,30 @@ def pillars(opts, functions, context=None): ''' Returns the pillars modules ''' - ret = LazyLoader(_module_dirs(opts, 'pillar'), + ret = LazyLoader(_module_dirs(opts, u'pillar'), opts, - tag='pillar', - pack={'__salt__': functions, - '__context__': context, - '__utils__': utils(opts)}) - ret.pack['__ext_pillar__'] = ret - return FilterDictWrapper(ret, '.ext_pillar') + tag=u'pillar', + pack={u'__salt__': functions, + u'__context__': context, + u'__utils__': utils(opts)}) + ret.pack[u'__ext_pillar__'] = ret + return FilterDictWrapper(ret, u'.ext_pillar') def tops(opts): ''' Returns the tops modules ''' - if 'master_tops' not in opts: + if u'master_tops' not in opts: return {} - whitelist = list(opts['master_tops'].keys()) + whitelist = list(opts[u'master_tops'].keys()) ret = LazyLoader( - _module_dirs(opts, 'tops', 'top'), + _module_dirs(opts, u'tops', u'top'), opts, - tag='top', + tag=u'top', whitelist=whitelist, ) - return FilterDictWrapper(ret, '.top') + return FilterDictWrapper(ret, u'.top') def wheels(opts, whitelist=None): @@ -378,9 +377,9 @@ def wheels(opts, whitelist=None): Returns the wheels modules ''' return LazyLoader( - _module_dirs(opts, 'wheel'), + _module_dirs(opts, u'wheel'), opts, - tag='wheel', + tag=u'wheel', whitelist=whitelist, ) @@ -393,13 +392,13 @@ def outputters(opts): :returns: LazyLoader instance, with only outputters present in the keyspace ''' ret = LazyLoader( - _module_dirs(opts, 'output', ext_type_dirs='outputter_dirs'), + _module_dirs(opts, u'output', ext_type_dirs=u'outputter_dirs'), opts, - tag='output', + tag=u'output', ) - wrapped_ret = FilterDictWrapper(ret, '.output') + wrapped_ret = FilterDictWrapper(ret, u'.output') # TODO: this name seems terrible... 
__salt__ should always be execution mods - ret.pack['__salt__'] = wrapped_ret + ret.pack[u'__salt__'] = wrapped_ret return wrapped_ret @@ -410,9 +409,9 @@ def serializers(opts): :returns: LazyLoader instance, with only serializers present in the keyspace ''' return LazyLoader( - _module_dirs(opts, 'serializers'), + _module_dirs(opts, u'serializers'), opts, - tag='serializers', + tag=u'serializers', ) @@ -424,11 +423,11 @@ def auth(opts, whitelist=None): :returns: LazyLoader ''' return LazyLoader( - _module_dirs(opts, 'auth'), + _module_dirs(opts, u'auth'), opts, - tag='auth', + tag=u'auth', whitelist=whitelist, - pack={'__salt__': minion_mods(opts)}, + pack={u'__salt__': minion_mods(opts)}, ) @@ -436,11 +435,11 @@ def fileserver(opts, backends): ''' Returns the file server modules ''' - return LazyLoader(_module_dirs(opts, 'fileserver'), + return LazyLoader(_module_dirs(opts, u'fileserver'), opts, - tag='fileserver', + tag=u'fileserver', whitelist=backends, - pack={'__utils__': utils(opts)}) + pack={u'__utils__': utils(opts)}) def roster(opts, runner, whitelist=None): @@ -448,11 +447,11 @@ def roster(opts, runner, whitelist=None): Returns the roster modules ''' return LazyLoader( - _module_dirs(opts, 'roster'), + _module_dirs(opts, u'roster'), opts, - tag='roster', + tag=u'roster', whitelist=whitelist, - pack={'__runner__': runner}, + pack={u'__runner__': runner}, ) @@ -460,12 +459,12 @@ def thorium(opts, functions, runners): ''' Load the thorium runtime modules ''' - pack = {'__salt__': functions, '__runner__': runners, '__context__': {}} - ret = LazyLoader(_module_dirs(opts, 'thorium'), + pack = {u'__salt__': functions, u'__runner__': runners, u'__context__': {}} + ret = LazyLoader(_module_dirs(opts, u'thorium'), opts, - tag='thorium', + tag=u'thorium', pack=pack) - ret.pack['__thorium__'] = ret + ret.pack[u'__thorium__'] = ret return ret @@ -486,15 +485,15 @@ def states(opts, functions, utils, serializers, whitelist=None, proxy=None): statemods = salt.loader.states(__opts__, None, None) ''' ret = LazyLoader( - _module_dirs(opts, 'states'), + _module_dirs(opts, u'states'), opts, - tag='states', - pack={'__salt__': functions, '__proxy__': proxy or {}}, + tag=u'states', + pack={u'__salt__': functions, u'__proxy__': proxy or {}}, whitelist=whitelist, ) - ret.pack['__states__'] = ret - ret.pack['__utils__'] = utils - ret.pack['__serializers__'] = serializers + ret.pack[u'__states__'] = ret + ret.pack[u'__utils__'] = utils + ret.pack[u'__serializers__'] = serializers return ret @@ -507,11 +506,11 @@ def beacons(opts, functions, context=None, proxy=None): keys and funcs as values. 
''' return LazyLoader( - _module_dirs(opts, 'beacons'), + _module_dirs(opts, u'beacons'), opts, - tag='beacons', - pack={'__context__': context, '__salt__': functions, '__proxy__': proxy or {}}, - virtual_funcs=['__validate__'], + tag=u'beacons', + pack={u'__context__': context, u'__salt__': functions, u'__proxy__': proxy or {}}, + virtual_funcs=[], ) @@ -524,14 +523,14 @@ def log_handlers(opts): ret = LazyLoader( _module_dirs( opts, - 'log_handlers', - int_type='handlers', - base_path=os.path.join(SALT_BASE_PATH, 'log'), + u'log_handlers', + int_type=u'handlers', + base_path=os.path.join(SALT_BASE_PATH, u'log'), ), opts, - tag='log_handlers', + tag=u'log_handlers', ) - return FilterDictWrapper(ret, '.setup_handlers') + return FilterDictWrapper(ret, u'.setup_handlers') def ssh_wrapper(opts, functions=None, context=None): @@ -541,16 +540,16 @@ def ssh_wrapper(opts, functions=None, context=None): return LazyLoader( _module_dirs( opts, - 'wrapper', - base_path=os.path.join(SALT_BASE_PATH, os.path.join('client', 'ssh')), + u'wrapper', + base_path=os.path.join(SALT_BASE_PATH, os.path.join(u'client', u'ssh')), ), opts, - tag='wrapper', + tag=u'wrapper', pack={ - '__salt__': functions, - '__grains__': opts.get('grains', {}), - '__pillar__': opts.get('pillar', {}), - '__context__': context, + u'__salt__': functions, + u'__grains__': opts.get(u'grains', {}), + u'__pillar__': opts.get(u'pillar', {}), + u'__context__': context, }, ) @@ -559,27 +558,27 @@ def render(opts, functions, states=None, proxy=None): ''' Returns the render modules ''' - pack = {'__salt__': functions, - '__grains__': opts.get('grains', {})} + pack = {u'__salt__': functions, + u'__grains__': opts.get(u'grains', {})} if states: - pack['__states__'] = states - pack['__proxy__'] = proxy or {} + pack[u'__states__'] = states + pack[u'__proxy__'] = proxy or {} ret = LazyLoader( _module_dirs( opts, - 'renderers', - 'render', - ext_type_dirs='render_dirs', + u'renderers', + u'render', + ext_type_dirs=u'render_dirs', ), opts, - tag='render', + tag=u'render', pack=pack, ) - rend = FilterDictWrapper(ret, '.render') + rend = FilterDictWrapper(ret, u'.render') - if not check_render_pipe_str(opts['renderer'], rend, opts['renderer_blacklist'], opts['renderer_whitelist']): - err = ('The renderer {0} is unavailable, this error is often because ' - 'the needed software is unavailable'.format(opts['renderer'])) + if not check_render_pipe_str(opts[u'renderer'], rend, opts[u'renderer_blacklist'], opts[u'renderer_whitelist']): + err = (u'The renderer {0} is unavailable, this error is often because ' + u'the needed software is unavailable'.format(opts[u'renderer'])) log.critical(err) raise LoaderError(err) return rend @@ -600,12 +599,12 @@ def grain_funcs(opts, proxy=None): return LazyLoader( _module_dirs( opts, - 'grains', - 'grain', - ext_type_dirs='grains_dirs', + u'grains', + u'grain', + ext_type_dirs=u'grains_dirs', ), opts, - tag='grains', + tag=u'grains', ) @@ -615,30 +614,30 @@ def _load_cached_grains(opts, cfn): corrupted. ''' if not os.path.isfile(cfn): - log.debug('Grains cache file does not exist.') + log.debug(u'Grains cache file does not exist.') return None grains_cache_age = int(time.time() - os.path.getmtime(cfn)) - if grains_cache_age > opts.get('grains_cache_expiration', 300): - log.debug('Grains cache last modified {0} seconds ago and ' - 'cache expiration is set to {1}. ' - 'Grains cache expired. 
Refreshing.'.format( - grains_cache_age, - opts.get('grains_cache_expiration', 300) - )) + if grains_cache_age > opts.get(u'grains_cache_expiration', 300): + log.debug( + u'Grains cache last modified %s seconds ago and cache ' + u'expiration is set to %s. Grains cache expired. ' + u'Refreshing.', + grains_cache_age, opts.get(u'grains_cache_expiration', 300) + ) return None - if opts.get('refresh_grains_cache', False): - log.debug('refresh_grains_cache requested, Refreshing.') + if opts.get(u'refresh_grains_cache', False): + log.debug(u'refresh_grains_cache requested, Refreshing.') return None - log.debug('Retrieving grains from cache') + log.debug(u'Retrieving grains from cache') try: serial = salt.payload.Serial(opts) - with salt.utils.files.fopen(cfn, 'rb') as fp_: + with salt.utils.files.fopen(cfn, u'rb') as fp_: cached_grains = serial.load(fp_) if not cached_grains: - log.debug('Cached grains are empty, cache might be corrupted. Refreshing.') + log.debug(u'Cached grains are empty, cache might be corrupted. Refreshing.') return None return cached_grains @@ -669,41 +668,41 @@ def grains(opts, force_refresh=False, proxy=None): import salt.config # if we have no grains, lets try loading from disk (TODO: move to decorator?) cfn = os.path.join( - opts['cachedir'], - 'grains.cache.p' + opts[u'cachedir'], + u'grains.cache.p' ) - if not force_refresh and opts.get('grains_cache', False): + if not force_refresh and opts.get(u'grains_cache', False): cached_grains = _load_cached_grains(opts, cfn) if cached_grains: return cached_grains else: - log.debug('Grains refresh requested. Refreshing grains.') + log.debug(u'Grains refresh requested. Refreshing grains.') - if opts.get('skip_grains', False): + if opts.get(u'skip_grains', False): return {} - grains_deep_merge = opts.get('grains_deep_merge', False) is True - if 'conf_file' in opts: + grains_deep_merge = opts.get(u'grains_deep_merge', False) is True + if u'conf_file' in opts: pre_opts = {} pre_opts.update(salt.config.load_config( - opts['conf_file'], 'SALT_MINION_CONFIG', - salt.config.DEFAULT_MINION_OPTS['conf_file'] + opts[u'conf_file'], u'SALT_MINION_CONFIG', + salt.config.DEFAULT_MINION_OPTS[u'conf_file'] )) default_include = pre_opts.get( - 'default_include', opts['default_include'] + u'default_include', opts[u'default_include'] ) - include = pre_opts.get('include', []) + include = pre_opts.get(u'include', []) pre_opts.update(salt.config.include_config( - default_include, opts['conf_file'], verbose=False + default_include, opts[u'conf_file'], verbose=False )) pre_opts.update(salt.config.include_config( - include, opts['conf_file'], verbose=True + include, opts[u'conf_file'], verbose=True )) - if 'grains' in pre_opts: - opts['grains'] = pre_opts['grains'] + if u'grains' in pre_opts: + opts[u'grains'] = pre_opts[u'grains'] else: - opts['grains'] = {} + opts[u'grains'] = {} else: - opts['grains'] = {} + opts[u'grains'] = {} grains_data = {} funcs = grain_funcs(opts, proxy=proxy) @@ -711,9 +710,9 @@ def grains(opts, force_refresh=False, proxy=None): funcs.clear() # Run core grains for key in funcs: - if not key.startswith('core.'): + if not key.startswith(u'core.'): continue - log.trace('Loading {0} grain'.format(key)) + log.trace(u'Loading %s grain', key) ret = funcs[key]() if not isinstance(ret, dict): continue @@ -724,7 +723,7 @@ def grains(opts, force_refresh=False, proxy=None): # Run the rest of the grains for key in funcs: - if key.startswith('core.') or key == '_errors': + if key.startswith(u'core.') or key == u'_errors': continue try: # 
Grains are loaded too early to take advantage of the injected @@ -733,19 +732,17 @@ def grains(opts, force_refresh=False, proxy=None): # one parameter. Then the grains can have access to the # proxymodule for retrieving information from the connected # device. - log.trace('Loading {0} grain'.format(key)) + log.trace(u'Loading %s grain', key) if funcs[key].__code__.co_argcount == 1: ret = funcs[key](proxy) else: ret = funcs[key]() except Exception: - if is_proxy(): - log.info('The following CRITICAL message may not be an error; the proxy may not be completely established yet.') + if salt.utils.platform.is_proxy(): + log.info(u'The following CRITICAL message may not be an error; the proxy may not be completely established yet.') log.critical( - 'Failed to load grains defined in grain file {0} in ' - 'function {1}, error:\n'.format( - key, funcs[key] - ), + u'Failed to load grains defined in grain file %s in ' + u'function %s, error:\n', key, funcs[key], exc_info=True ) continue @@ -756,45 +753,44 @@ def grains(opts, force_refresh=False, proxy=None): else: grains_data.update(ret) - if opts.get('proxy_merge_grains_in_module', True) and proxy: + if opts.get(u'proxy_merge_grains_in_module', True) and proxy: try: - proxytype = proxy.opts['proxy']['proxytype'] - if proxytype+'.grains' in proxy: - if proxytype+'.initialized' in proxy and proxy[proxytype+'.initialized'](): + proxytype = proxy.opts[u'proxy'][u'proxytype'] + if proxytype + u'.grains' in proxy: + if proxytype + u'.initialized' in proxy and proxy[proxytype + u'.initialized'](): try: - proxytype = proxy.opts['proxy']['proxytype'] - ret = proxy[proxytype+'.grains']() + proxytype = proxy.opts[u'proxy'][u'proxytype'] + ret = proxy[proxytype + u'.grains']() if grains_deep_merge: salt.utils.dictupdate.update(grains_data, ret) else: grains_data.update(ret) except Exception: - log.critical('Failed to run proxy\'s grains function!', + log.critical(u'Failed to run proxy\'s grains function!', exc_info=True ) except KeyError: pass - grains_data.update(opts['grains']) + grains_data.update(opts[u'grains']) # Write cache if enabled - if opts.get('grains_cache', False): + if opts.get(u'grains_cache', False): cumask = os.umask(0o77) try: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Late import import salt.modules.cmdmod # Make sure cache file isn't read-only - salt.modules.cmdmod._run_quiet('attrib -R "{0}"'.format(cfn)) - with salt.utils.files.fopen(cfn, 'w+b') as fp_: + salt.modules.cmdmod._run_quiet(u'attrib -R "{0}"'.format(cfn)) + with salt.utils.files.fopen(cfn, u'w+b') as fp_: try: serial = salt.payload.Serial(opts) serial.dump(grains_data, fp_) except TypeError as e: - log.error('Failed to serialize grains cache: {0}'.format(e)) + log.error(u'Failed to serialize grains cache: %s', e) raise # re-throw for cleanup except Exception as e: - msg = 'Unable to write to grains cache file {0}: {1}' - log.error(msg.format(cfn, e)) + log.error(u'Unable to write to grains cache file %s: %s', cfn, e) # Based on the original exception, the file may or may not have been # created. 
If it was, we will remove it now, as the exception means # the serialized data is not to be trusted, no matter what the @@ -804,9 +800,9 @@ def grains(opts, force_refresh=False, proxy=None): os.umask(cumask) if grains_deep_merge: - salt.utils.dictupdate.update(grains_data, opts['grains']) + salt.utils.dictupdate.update(grains_data, opts[u'grains']) else: - grains_data.update(opts['grains']) + grains_data.update(opts[u'grains']) return grains_data @@ -815,13 +811,13 @@ def call(fun, **kwargs): ''' Directly call a function inside a loader directory ''' - args = kwargs.get('args', []) - dirs = kwargs.get('dirs', []) + args = kwargs.get(u'args', []) + dirs = kwargs.get(u'dirs', []) funcs = LazyLoader( - [os.path.join(SALT_BASE_PATH, 'modules')] + dirs, + [os.path.join(SALT_BASE_PATH, u'modules')] + dirs, None, - tag='modules', + tag=u'modules', virtual_enable=False, ) return funcs[fun](*args) @@ -834,13 +830,13 @@ def runner(opts, utils=None): if utils is None: utils = {} ret = LazyLoader( - _module_dirs(opts, 'runners', 'runner', ext_type_dirs='runner_dirs'), + _module_dirs(opts, u'runners', u'runner', ext_type_dirs=u'runner_dirs'), opts, - tag='runners', - pack={'__utils__': utils}, + tag=u'runners', + pack={u'__utils__': utils}, ) # TODO: change from __salt__ to something else, we overload __salt__ too much - ret.pack['__salt__'] = ret + ret.pack[u'__salt__'] = ret return ret @@ -849,9 +845,9 @@ def queues(opts): Directly call a function inside a loader directory ''' return LazyLoader( - _module_dirs(opts, 'queues', 'queue', ext_type_dirs='queue_dirs'), + _module_dirs(opts, u'queues', u'queue', ext_type_dirs=u'queue_dirs'), opts, - tag='queues', + tag=u'queues', ) @@ -863,13 +859,13 @@ def sdb(opts, functions=None, whitelist=None, utils=None): utils = {} return LazyLoader( - _module_dirs(opts, 'sdb'), + _module_dirs(opts, u'sdb'), opts, - tag='sdb', + tag=u'sdb', pack={ - '__sdb__': functions, - '__opts__': opts, - '__utils__': utils, + u'__sdb__': functions, + u'__opts__': opts, + u'__utils__': utils, }, whitelist=whitelist, ) @@ -884,11 +880,11 @@ def pkgdb(opts): return LazyLoader( _module_dirs( opts, - 'pkgdb', - base_path=os.path.join(SALT_BASE_PATH, 'spm') + u'pkgdb', + base_path=os.path.join(SALT_BASE_PATH, u'spm') ), opts, - tag='pkgdb' + tag=u'pkgdb' ) @@ -901,11 +897,11 @@ def pkgfiles(opts): return LazyLoader( _module_dirs( opts, - 'pkgfiles', - base_path=os.path.join(SALT_BASE_PATH, 'spm') + u'pkgfiles', + base_path=os.path.join(SALT_BASE_PATH, u'spm') ), opts, - tag='pkgfiles' + tag=u'pkgfiles' ) @@ -918,21 +914,19 @@ def clouds(opts): # manager when needed. functions = LazyLoader( _module_dirs(opts, - 'clouds', - 'cloud', - base_path=os.path.join(SALT_BASE_PATH, 'cloud'), - int_type='clouds'), + u'clouds', + u'cloud', + base_path=os.path.join(SALT_BASE_PATH, u'cloud'), + int_type=u'clouds'), opts, - tag='clouds', - pack={'__utils__': salt.loader.utils(opts), - '__active_provider_name__': None}, + tag=u'clouds', + pack={u'__utils__': salt.loader.utils(opts), + u'__active_provider_name__': None}, ) for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED: log.trace( - '\'{0}\' has been marked as not supported. Removing from the list ' - 'of supported cloud functions'.format( - funcname - ) + u'\'%s\' has been marked as not supported. 
Removing from the ' + u'list of supported cloud functions', funcname ) functions.pop(funcname, None) return functions @@ -943,9 +937,9 @@ def netapi(opts): Return the network api functions ''' return LazyLoader( - _module_dirs(opts, 'netapi'), + _module_dirs(opts, u'netapi'), opts, - tag='netapi', + tag=u'netapi', ) @@ -954,10 +948,10 @@ def executors(opts, functions=None, context=None, proxy=None): Returns the executor modules ''' return LazyLoader( - _module_dirs(opts, 'executors', 'executor'), + _module_dirs(opts, u'executors', u'executor'), opts, - tag='executor', - pack={'__salt__': functions, '__context__': context or {}, '__proxy__': proxy or {}}, + tag=u'executor', + pack={u'__salt__': functions, u'__context__': context or {}, u'__proxy__': proxy or {}}, ) @@ -966,10 +960,10 @@ def cache(opts, serial): Returns the returner modules ''' return LazyLoader( - _module_dirs(opts, 'cache', 'cache'), + _module_dirs(opts, u'cache', u'cache'), opts, - tag='cache', - pack={'__opts__': opts, '__context__': {'serial': serial}}, + tag=u'cache', + pack={u'__opts__': opts, u'__context__': {u'serial': serial}}, ) @@ -977,16 +971,17 @@ def _generate_module(name): if name in sys.modules: return - code = "'''Salt loaded {0} parent module'''".format(name.split('.')[-1]) - module = types.ModuleType(name) + code = u"'''Salt loaded {0} parent module'''".format(name.split(u'.')[-1]) + # ModuleType can't accept a unicode type on PY2 + module = types.ModuleType(str(name)) exec(code, module.__dict__) sys.modules[name] = module def _mod_type(module_path): if module_path.startswith(SALT_BASE_PATH): - return 'int' - return 'ext' + return u'int' + return u'ext' # TODO: move somewhere else? @@ -1015,7 +1010,7 @@ class FilterDictWrapper(MutableMapping): def __iter__(self): for key in self._dict: if key.endswith(self.suffix): - yield key.replace(self.suffix, '') + yield key.replace(self.suffix, u'') class LazyLoader(salt.utils.lazy.LazyDict): @@ -1050,7 +1045,7 @@ class LazyLoader(salt.utils.lazy.LazyDict): def __init__(self, module_dirs, opts=None, - tag='module', + tag=u'module', loaded_base_name=None, mod_type_check=None, pack=None, @@ -1069,7 +1064,7 @@ class LazyLoader(salt.utils.lazy.LazyDict): self.pack = {} if pack is None else pack if opts is None: opts = {} - threadsafety = not opts.get('multiprocessing') + threadsafety = not opts.get(u'multiprocessing') self.context_dict = salt.utils.context.ContextDict(threadsafe=threadsafety) self.opts = self.__prep_mod_opts(opts) @@ -1078,8 +1073,8 @@ class LazyLoader(salt.utils.lazy.LazyDict): self.loaded_base_name = loaded_base_name or LOADED_BASE_NAME self.mod_type_check = mod_type_check or _mod_type - if '__context__' not in self.pack: - self.pack['__context__'] = None + if u'__context__' not in self.pack: + self.pack[u'__context__'] = None for k, v in six.iteritems(self.pack): if v is None: # if the value of a pack is None, lets make an empty dict @@ -1100,16 +1095,24 @@ class LazyLoader(salt.utils.lazy.LazyDict): virtual_funcs = [] self.virtual_funcs = virtual_funcs - self.disabled = set(self.opts.get('disable_{0}s'.format(self.tag), [])) + self.disabled = set( + self.opts.get( + u'disable_{0}{1}'.format( + self.tag, + u'' if self.tag[-1] == u's' else u's' + ), + [] + ) + ) self.refresh_file_mapping() super(LazyLoader, self).__init__() # late init the lazy loader # create all of the import namespaces - _generate_module('{0}.int'.format(self.loaded_base_name)) - _generate_module('{0}.int.{1}'.format(self.loaded_base_name, tag)) - 
_generate_module('{0}.ext'.format(self.loaded_base_name)) - _generate_module('{0}.ext.{1}'.format(self.loaded_base_name, tag)) + _generate_module(u'{0}.int'.format(self.loaded_base_name)) + _generate_module(u'{0}.int.{1}'.format(self.loaded_base_name, tag)) + _generate_module(u'{0}.ext'.format(self.loaded_base_name)) + _generate_module(u'{0}.ext.{1}'.format(self.loaded_base_name, tag)) def __getitem__(self, item): ''' @@ -1127,7 +1130,7 @@ class LazyLoader(salt.utils.lazy.LazyDict): Allow for "direct" attribute access-- this allows jinja templates to access things like `salt.test.ping()` ''' - if mod_name in ('__getstate__', '__setstate__'): + if mod_name in (u'__getstate__', u'__setstate__'): return object.__getattribute__(self, mod_name) # if we have an attribute named that, lets return it. @@ -1155,19 +1158,19 @@ class LazyLoader(salt.utils.lazy.LazyDict): This can range from "not available' to "__virtual__" returned False ''' - mod_name = function_name.split('.')[0] + mod_name = function_name.split(u'.')[0] if mod_name in self.loaded_modules: - return '\'{0}\' is not available.'.format(function_name) + return u'\'{0}\' is not available.'.format(function_name) else: try: reason = self.missing_modules[mod_name] except KeyError: - return '\'{0}\' is not available.'.format(function_name) + return u'\'{0}\' is not available.'.format(function_name) else: if reason is not None: - return '\'{0}\' __virtual__ returned False: {1}'.format(mod_name, reason) + return u'\'{0}\' __virtual__ returned False: {1}'.format(mod_name, reason) else: - return '\'{0}\' __virtual__ returned False'.format(mod_name) + return u'\'{0}\' __virtual__ returned False'.format(mod_name) def refresh_file_mapping(self): ''' @@ -1175,30 +1178,30 @@ class LazyLoader(salt.utils.lazy.LazyDict): ''' # map of suffix to description for imp self.suffix_map = {} - suffix_order = [''] # local list to determine precedence of extensions + suffix_order = [u''] # local list to determine precedence of extensions # Prefer packages (directories) over modules (single files)! for (suffix, mode, kind) in SUFFIXES: self.suffix_map[suffix] = (suffix, mode, kind) suffix_order.append(suffix) - if self.opts.get('cython_enable', True) is True: + if self.opts.get(u'cython_enable', True) is True: try: global pyximport - pyximport = __import__('pyximport') # pylint: disable=import-error + pyximport = __import__(u'pyximport') # pylint: disable=import-error pyximport.install() # add to suffix_map so file_mapping will pick it up - self.suffix_map['.pyx'] = tuple() + self.suffix_map[u'.pyx'] = tuple() except ImportError: - log.info('Cython is enabled in the options but not present ' - 'in the system path. Skipping Cython modules.') + log.info(u'Cython is enabled in the options but not present ' + u'in the system path. Skipping Cython modules.') # Allow for zipimport of modules - if self.opts.get('enable_zip_modules', True) is True: - self.suffix_map['.zip'] = tuple() + if self.opts.get(u'enable_zip_modules', True) is True: + self.suffix_map[u'.zip'] = tuple() # allow for module dirs if USE_IMPORTLIB: - self.suffix_map[''] = ('', '', MODULE_KIND_PKG_DIRECTORY) + self.suffix_map[u''] = (u'', u'', MODULE_KIND_PKG_DIRECTORY) else: - self.suffix_map[''] = ('', '', imp.PKG_DIRECTORY) + self.suffix_map[u''] = (u'', u'', imp.PKG_DIRECTORY) # create mapping of filename (without suffix) to (path, suffix) # The files are added in order of priority, so order *must* be retained. 
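# --- Illustrative sketch, not part of the patch: the disable-option lookup in
# LazyLoader.__init__ above now appends an 's' only when the tag is not
# already plural. The helper name below is hypothetical; the tag values shown
# mirror loader tags used elsewhere in this file.
def _disable_option_for(tag):
    return u'disable_{0}{1}'.format(tag, u'' if tag[-1] == u's' else u's')

# u'module' -> u'disable_modules'   (singular tag gets pluralized)
# u'grains' -> u'disable_grains'    (already plural, no double 's')
# u'states' -> u'disable_states'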
@@ -1213,7 +1216,7 @@ class LazyLoader(salt.utils.lazy.LazyDict): continue # Next mod_dir for filename in files: try: - if filename.startswith('_'): + if filename.startswith(u'_'): # skip private modules # log messages omitted for obviousness continue # Next filename @@ -1223,20 +1226,19 @@ class LazyLoader(salt.utils.lazy.LazyDict): continue # Next filename if f_noext in self.disabled: log.trace( - 'Skipping {0}, it is disabled by configuration'.format( + u'Skipping %s, it is disabled by configuration', filename - ) ) continue # Next filename fpath = os.path.join(mod_dir, filename) # if its a directory, lets allow us to load that - if ext == '': + if ext == u'': # is there something __init__? subfiles = os.listdir(fpath) for suffix in suffix_order: - if '' == suffix: + if u'' == suffix: continue # Next suffix (__init__ must have a suffix) - init_file = '__init__{0}'.format(suffix) + init_file = u'__init__{0}'.format(suffix) if init_file in subfiles: break else: @@ -1245,9 +1247,9 @@ class LazyLoader(salt.utils.lazy.LazyDict): if f_noext in self.file_mapping: curr_ext = self.file_mapping[f_noext][1] #log.debug("****** curr_ext={0} ext={1} suffix_order={2}".format(curr_ext, ext, suffix_order)) - if '' in (curr_ext, ext) and curr_ext != ext: + if u'' in (curr_ext, ext) and curr_ext != ext: log.error( - 'Module/package collision: \'%s\' and \'%s\'', + u'Module/package collision: \'%s\' and \'%s\'', fpath, self.file_mapping[f_noext][0] ) @@ -1260,8 +1262,8 @@ class LazyLoader(salt.utils.lazy.LazyDict): except OSError: continue for smod in self.static_modules: - f_noext = smod.split('.')[-1] - self.file_mapping[f_noext] = (smod, '.o') + f_noext = smod.split(u'.')[-1] + self.file_mapping[f_noext] = (smod, u'.o') def clear(self): ''' @@ -1273,7 +1275,7 @@ class LazyLoader(salt.utils.lazy.LazyDict): self.loaded_modules = {} # if we have been loaded before, lets clear the file mapping since # we obviously want a re-do - if hasattr(self, 'opts'): + if hasattr(self, u'opts'): self.refresh_file_mapping() self.initial_load = False @@ -1281,17 +1283,17 @@ class LazyLoader(salt.utils.lazy.LazyDict): ''' Strip out of the opts any logger instance ''' - if '__grains__' not in self.pack: - self.context_dict['grains'] = opts.get('grains', {}) - self.pack['__grains__'] = salt.utils.context.NamespacedDictWrapper(self.context_dict, 'grains', override_name='grains') + if u'__grains__' not in self.pack: + self.context_dict[u'grains'] = opts.get(u'grains', {}) + self.pack[u'__grains__'] = salt.utils.context.NamespacedDictWrapper(self.context_dict, u'grains', override_name=u'grains') - if '__pillar__' not in self.pack: - self.context_dict['pillar'] = opts.get('pillar', {}) - self.pack['__pillar__'] = salt.utils.context.NamespacedDictWrapper(self.context_dict, 'pillar', override_name='pillar') + if u'__pillar__' not in self.pack: + self.context_dict[u'pillar'] = opts.get(u'pillar', {}) + self.pack[u'__pillar__'] = salt.utils.context.NamespacedDictWrapper(self.context_dict, u'pillar', override_name=u'pillar') mod_opts = {} for key, val in list(opts.items()): - if key == 'logger': + if key == u'logger': continue mod_opts[key] = val return mod_opts @@ -1323,7 +1325,7 @@ class LazyLoader(salt.utils.lazy.LazyDict): # reload only custom "sub"modules for submodule in submodules: # it is a submodule if the name is in a namespace under mod - if submodule.__name__.startswith(mod.__name__ + '.'): + if submodule.__name__.startswith(mod.__name__ + u'.'): reload_module(submodule) self._reload_submodules(submodule) @@ -1334,35 
+1336,35 @@ class LazyLoader(salt.utils.lazy.LazyDict): fpath_dirname = os.path.dirname(fpath) try: sys.path.append(fpath_dirname) - if suffix == '.pyx': + if suffix == u'.pyx': mod = pyximport.load_module(name, fpath, tempfile.gettempdir()) - elif suffix == '.o': + elif suffix == u'.o': top_mod = __import__(fpath, globals(), locals(), []) - comps = fpath.split('.') + comps = fpath.split(u'.') if len(comps) < 2: mod = top_mod else: mod = top_mod for subname in comps[1:]: mod = getattr(mod, subname) - elif suffix == '.zip': + elif suffix == u'.zip': mod = zipimporter(fpath).load_module(name) else: desc = self.suffix_map[suffix] # if it is a directory, we don't open a file try: - mod_namespace = '.'.join(( + mod_namespace = u'.'.join(( self.loaded_base_name, self.mod_type_check(fpath), self.tag, name)) except TypeError: - mod_namespace = '{0}.{1}.{2}.{3}'.format( + mod_namespace = u'{0}.{1}.{2}.{3}'.format( self.loaded_base_name, self.mod_type_check(fpath), self.tag, name) - if suffix == '': + if suffix == u'': if USE_IMPORTLIB: # pylint: disable=no-member # Package directory, look for __init__ @@ -1418,41 +1420,34 @@ class LazyLoader(salt.utils.lazy.LazyDict): except IOError: raise except ImportError as exc: - if 'magic number' in str(exc): - error_msg = 'Failed to import {0} {1}. Bad magic number. If migrating from Python2 to Python3, remove all .pyc files and try again.'.format(self.tag, name) + if u'magic number' in str(exc): + error_msg = u'Failed to import {0} {1}. Bad magic number. If migrating from Python2 to Python3, remove all .pyc files and try again.'.format(self.tag, name) log.warning(error_msg) self.missing_modules[name] = error_msg log.debug( - 'Failed to import {0} {1}:\n'.format( - self.tag, name - ), - exc_info=True + u'Failed to import %s %s:\n', + self.tag, name, exc_info=True ) self.missing_modules[name] = exc return False except Exception as error: log.error( - 'Failed to import {0} {1}, this is due most likely to a ' - 'syntax error:\n'.format( - self.tag, name - ), - exc_info=True + u'Failed to import %s %s, this is due most likely to a ' + u'syntax error:\n', self.tag, name, exc_info=True ) self.missing_modules[name] = error return False except SystemExit as error: log.error( - 'Failed to import {0} {1} as the module called exit()\n'.format( - self.tag, name - ), - exc_info=True + u'Failed to import %s %s as the module called exit()\n', + self.tag, name, exc_info=True ) self.missing_modules[name] = error return False finally: sys.path.remove(fpath_dirname) - if hasattr(mod, '__opts__'): + if hasattr(mod, u'__opts__'): mod.__opts__.update(self.opts) else: mod.__opts__ = self.opts @@ -1461,19 +1456,19 @@ class LazyLoader(salt.utils.lazy.LazyDict): for p_name, p_value in six.iteritems(self.pack): setattr(mod, p_name, p_value) - module_name = mod.__name__.rsplit('.', 1)[-1] + module_name = mod.__name__.rsplit(u'.', 1)[-1] # Call a module's initialization method if it exists - module_init = getattr(mod, '__init__', None) + module_init = getattr(mod, u'__init__', None) if inspect.isfunction(module_init): try: module_init(self.opts) except TypeError as e: log.error(e) except Exception: - err_string = '__init__ failed' + err_string = u'__init__ failed' log.debug( - 'Error loading %s.%s: %s', + u'Error loading %s.%s: %s', self.tag, module_name, err_string, exc_info=True ) self.missing_modules[module_name] = err_string @@ -1483,13 +1478,13 @@ class LazyLoader(salt.utils.lazy.LazyDict): # if virtual modules are enabled, we need to look for the # __virtual__() function inside 
that module and run it. if self.virtual_enable: - virtual_funcs_to_process = ['__virtual__'] + self.virtual_funcs + virtual_funcs_to_process = [u'__virtual__'] + self.virtual_funcs for virtual_func in virtual_funcs_to_process: virtual_ret, module_name, virtual_err, virtual_aliases = \ - self.process_virtual(mod, module_name) + self.process_virtual(mod, module_name, virtual_func) if virtual_err is not None: log.trace( - 'Error loading %s.%s: %s', + u'Error loading %s.%s: %s', self.tag, module_name, virtual_err ) @@ -1508,22 +1503,20 @@ class LazyLoader(salt.utils.lazy.LazyDict): # containing the names of the proxy types that the module supports. # # Render modules and state modules are OK though - if 'proxy' in self.opts: - if self.tag in ['grains', 'proxy']: - if not hasattr(mod, '__proxyenabled__') or \ - (self.opts['proxy']['proxytype'] not in mod.__proxyenabled__ and - '*' not in mod.__proxyenabled__): - err_string = 'not a proxy_minion enabled module' + if u'proxy' in self.opts: + if self.tag in [u'grains', u'proxy']: + if not hasattr(mod, u'__proxyenabled__') or \ + (self.opts[u'proxy'][u'proxytype'] not in mod.__proxyenabled__ and + u'*' not in mod.__proxyenabled__): + err_string = u'not a proxy_minion enabled module' self.missing_modules[module_name] = err_string self.missing_modules[name] = err_string return False - if getattr(mod, '__load__', False) is not False: + if getattr(mod, u'__load__', False) is not False: log.info( - 'The functions from module \'{0}\' are being loaded from the ' - 'provided __load__ attribute'.format( - module_name - ) + u'The functions from module \'%s\' are being loaded from the ' + u'provided __load__ attribute', module_name ) # If we had another module by the same virtual name, we should put any @@ -1534,8 +1527,8 @@ class LazyLoader(salt.utils.lazy.LazyDict): for x in mod_names )) - for attr in getattr(mod, '__load__', dir(mod)): - if attr.startswith('_'): + for attr in getattr(mod, u'__load__', dir(mod)): + if attr.startswith(u'_'): # private functions are skipped continue func = getattr(mod, attr) @@ -1549,12 +1542,12 @@ class LazyLoader(salt.utils.lazy.LazyDict): # # It default's of course to the found callable attribute name # if no alias is defined. - funcname = getattr(mod, '__func_alias__', {}).get(attr, attr) + funcname = getattr(mod, u'__func_alias__', {}).get(attr, attr) for tgt_mod in mod_names: try: - full_funcname = '.'.join((tgt_mod, funcname)) + full_funcname = u'.'.join((tgt_mod, funcname)) except TypeError: - full_funcname = '{0}.{1}'.format(tgt_mod, funcname) + full_funcname = u'{0}.{1}'.format(tgt_mod, funcname) # Save many references for lookups # Careful not to overwrite existing (higher priority) functions if full_funcname not in self._dict: @@ -1568,8 +1561,10 @@ class LazyLoader(salt.utils.lazy.LazyDict): try: Depends.enforce_dependencies(self._dict, self.tag) except RuntimeError as exc: - log.info('Depends.enforce_dependencies() failed ' - 'for reasons: {0}'.format(exc)) + log.info( + u'Depends.enforce_dependencies() failed for the following ' + u'reason: %s', exc + ) for tgt_mod in mod_names: self.loaded_modules[tgt_mod] = mod_dict[tgt_mod] @@ -1580,9 +1575,9 @@ class LazyLoader(salt.utils.lazy.LazyDict): Load a single item if you have it ''' # if the key doesn't have a '.' then it isn't valid for this mod dict - if not isinstance(key, six.string_types) or '.' not in key: + if not isinstance(key, six.string_types) or u'.' 
not in key: raise KeyError - mod_name, _ = key.split('.', 1) + mod_name, _ = key.split(u'.', 1) if mod_name in self.missing_modules: return True # if the modulename isn't in the whitelist, don't bother @@ -1638,12 +1633,12 @@ class LazyLoader(salt.utils.lazy.LazyDict): ''' Apply the __outputter__ variable to the functions ''' - if hasattr(mod, '__outputter__'): + if hasattr(mod, u'__outputter__'): outp = mod.__outputter__ if func.__name__ in outp: func.__outputter__ = outp[func.__name__] - def process_virtual(self, mod, module_name, virtual_func='__virtual__'): + def process_virtual(self, mod, module_name, virtual_func=u'__virtual__'): ''' Given a loaded module and its default name determine its virtual name @@ -1669,30 +1664,30 @@ class LazyLoader(salt.utils.lazy.LazyDict): # namespace collisions. And finally it allows modules to return False # if they are not intended to run on the given platform or are missing # dependencies. - virtual_aliases = getattr(mod, '__virtual_aliases__', tuple()) + virtual_aliases = getattr(mod, u'__virtual_aliases__', tuple()) try: error_reason = None - if hasattr(mod, '__virtual__') and inspect.isfunction(mod.__virtual__): + if hasattr(mod, u'__virtual__') and inspect.isfunction(mod.__virtual__): try: start = time.time() virtual = getattr(mod, virtual_func)() if isinstance(virtual, tuple): error_reason = virtual[1] virtual = virtual[0] - if self.opts.get('virtual_timer', False): + if self.opts.get(u'virtual_timer', False): end = time.time() - start - msg = 'Virtual function took {0} seconds for {1}'.format( + msg = u'Virtual function took {0} seconds for {1}'.format( end, module_name) log.warning(msg) except Exception as exc: error_reason = ( - 'Exception raised when processing __virtual__ function' - ' for {0}. Module will not be loaded: {1}'.format( + u'Exception raised when processing __virtual__ function' + u' for {0}. Module will not be loaded: {1}'.format( mod.__name__, exc)) log.error(error_reason, exc_info_on_loglevel=logging.DEBUG) virtual = None # Get the module's virtual name - virtualname = getattr(mod, '__virtualname__', virtual) + virtualname = getattr(mod, u'__virtualname__', virtual) if not virtual: # if __virtual__() evaluates to False then the module # wasn't meant for this platform or it's not supposed to @@ -1702,13 +1697,10 @@ class LazyLoader(salt.utils.lazy.LazyDict): # improperly loaded if virtual is None: log.warning( - '{0}.__virtual__() is wrongly returning `None`. ' - 'It should either return `True`, `False` or a new ' - 'name. If you\'re the developer of the module ' - '\'{1}\', please fix this.'.format( - mod.__name__, - module_name - ) + u'%s.__virtual__() is wrongly returning `None`. ' + u'It should either return `True`, `False` or a new ' + u'name. If you\'re the developer of the module ' + u'\'%s\', please fix this.', mod.__name__, module_name ) return (False, module_name, error_reason, virtual_aliases) @@ -1719,18 +1711,16 @@ class LazyLoader(salt.utils.lazy.LazyDict): if virtual is not True and module_name != virtual: # The module is renaming itself. Updating the module name # with the new name - log.trace('Loaded {0} as virtual {1}'.format( - module_name, virtual - )) + log.trace(u'Loaded %s as virtual %s', module_name, virtual) - if not hasattr(mod, '__virtualname__'): - salt.utils.warn_until( - 'Hydrogen', - 'The \'{0}\' module is renaming itself in its ' - '__virtual__() function ({1} => {2}). Please ' - 'set it\'s virtual name as the ' - '\'__virtualname__\' module attribute. 
' - 'Example: "__virtualname__ = \'{2}\'"'.format( + if not hasattr(mod, u'__virtualname__'): + salt.utils.versions.warn_until( + u'Hydrogen', + u'The \'{0}\' module is renaming itself in its ' + u'__virtual__() function ({1} => {2}). Please ' + u'set it\'s virtual name as the ' + u'\'__virtualname__\' module attribute. ' + u'Example: "__virtualname__ = \'{2}\'"'.format( mod.__name__, module_name, virtual @@ -1742,14 +1732,11 @@ class LazyLoader(salt.utils.lazy.LazyDict): # being returned by the __virtual__() function. This # should be considered an error. log.error( - 'The module \'{0}\' is showing some bad usage. Its ' - '__virtualname__ attribute is set to \'{1}\' yet the ' - '__virtual__() function is returning \'{2}\'. These ' - 'values should match!'.format( - mod.__name__, - virtualname, - virtual - ) + u'The module \'%s\' is showing some bad usage. Its ' + u'__virtualname__ attribute is set to \'%s\' yet the ' + u'__virtual__() function is returning \'%s\'. These ' + u'values should match!', + mod.__name__, virtualname, virtual ) module_name = virtualname @@ -1765,20 +1752,14 @@ class LazyLoader(salt.utils.lazy.LazyDict): # in incomplete grains sets, these can be safely ignored # and logged to debug, still, it includes the traceback to # help debugging. - log.debug( - 'KeyError when loading {0}'.format(module_name), - exc_info=True - ) + log.debug(u'KeyError when loading %s', module_name, exc_info=True) except Exception: # If the module throws an exception during __virtual__() # then log the information and continue to the next. log.error( - 'Failed to read the virtual function for ' - '{0}: {1}'.format( - self.tag, module_name - ), - exc_info=True + u'Failed to read the virtual function for %s: %s', + self.tag, module_name, exc_info=True ) return (False, module_name, error_reason, virtual_aliases) diff --git a/salt/log/handlers/__init__.py b/salt/log/handlers/__init__.py index ceb8f50232..f0341a7c11 100644 --- a/salt/log/handlers/__init__.py +++ b/salt/log/handlers/__init__.py @@ -117,7 +117,7 @@ class RotatingFileHandler(ExcInfoOnLogLevelFormatMixIn, logging.handlers.Rotatin ''' handled = False - # Can't use "salt.utils.is_windows()" in this file + # Can't use "salt.utils.platform.is_windows()" in this file if (sys.platform.startswith('win') and logging.raiseExceptions and sys.stderr): # see Python issue 13807 diff --git a/salt/log/handlers/fluent_mod.py b/salt/log/handlers/fluent_mod.py index d049920d47..dc4103d141 100644 --- a/salt/log/handlers/fluent_mod.py +++ b/salt/log/handlers/fluent_mod.py @@ -61,7 +61,7 @@ from salt.log.mixins import NewStyleClassMixIn import salt.utils.network # Import Third party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/log/handlers/log4mongo_mod.py b/salt/log/handlers/log4mongo_mod.py index 9408ef1861..122ec3bfa6 100644 --- a/salt/log/handlers/log4mongo_mod.py +++ b/salt/log/handlers/log4mongo_mod.py @@ -40,7 +40,7 @@ import socket import logging # Import salt libs -import salt.ext.six as six +from salt.ext import six from salt.log.mixins import NewStyleClassMixIn from salt.log.setup import LOG_LEVELS diff --git a/salt/log/handlers/logstash_mod.py b/salt/log/handlers/logstash_mod.py index 7e2b3e929e..a00435b5be 100644 --- a/salt/log/handlers/logstash_mod.py +++ b/salt/log/handlers/logstash_mod.py @@ -169,7 +169,7 @@ from salt.log.mixins import NewStyleClassMixIn import salt.utils.network # Import Third party libs -import salt.ext.six as six +from salt.ext import six try: import zmq 
except ImportError: diff --git a/salt/log/setup.py b/salt/log/setup.py index c15c2bc3d5..73361391ae 100644 --- a/salt/log/setup.py +++ b/salt/log/setup.py @@ -27,7 +27,7 @@ import traceback import multiprocessing # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse # pylint: disable=import-error,no-name-in-module # Let's define these custom logging levels before importing the salt.log.mixins @@ -38,6 +38,7 @@ GARBAGE = logging.GARBAGE = 1 QUIET = logging.QUIET = 1000 # Import salt libs +import salt.utils # Can be removed once appendproctitle is moved from salt.textformat import TextFormat from salt.log.handlers import (TemporaryLoggingHandler, StreamHandler, @@ -837,7 +838,7 @@ def setup_multiprocessing_logging(queue=None): This code should be called from within a running multiprocessing process instance. ''' - from salt.utils import is_windows + from salt.utils.platform import is_windows global __MP_LOGGING_CONFIGURED global __MP_LOGGING_QUEUE_HANDLER @@ -997,7 +998,6 @@ def patch_python_logging_handlers(): def __process_multiprocessing_logging_queue(opts, queue): - import salt.utils salt.utils.appendproctitle('MultiprocessingLoggingQueue') # Assign UID/GID of user to proc if set @@ -1006,7 +1006,8 @@ def __process_multiprocessing_logging_queue(opts, queue): if user: check_user(user) - if salt.utils.is_windows(): + from salt.utils.platform import is_windows + if is_windows(): # On Windows, creating a new process doesn't fork (copy the parent # process image). Due to this, we need to setup all of our logging # inside this process. diff --git a/salt/master.py b/salt/master.py index a6b292d458..59388bc0b5 100644 --- a/salt/master.py +++ b/salt/master.py @@ -26,7 +26,7 @@ except ImportError: # Fall back to pycrypto from Crypto.PublicKey import RSA # pylint: disable=import-error,no-name-in-module,redefined-builtin -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: enable=import-error,no-name-in-module,redefined-builtin @@ -34,7 +34,7 @@ try: import zmq import zmq.eventloop.ioloop # support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x - if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'): + if not hasattr(zmq.eventloop.ioloop, u'ZMQIOLoop'): zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop HAS_ZMQ = True @@ -69,13 +69,16 @@ import salt.utils.atomicfile import salt.utils.event import salt.utils.files import salt.utils.gitfs -import salt.utils.job -import salt.utils.verify -import salt.utils.minions import salt.utils.gzip_util -import salt.utils.process -import salt.utils.zeromq import salt.utils.jid +import salt.utils.job +import salt.utils.master +import salt.utils.minions +import salt.utils.platform +import salt.utils.process +import salt.utils.schedule +import salt.utils.verify +import salt.utils.zeromq from salt.defaults import DEFAULT_TARGET_DELIM from salt.exceptions import FileserverConfigError from salt.transport import iter_transport_opts @@ -83,8 +86,6 @@ from salt.utils.debug import ( enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack ) from salt.utils.event import tagify -from salt.utils.master import ConnectedCache -from salt.utils.process import default_signals, SignalHandlingMultiprocessingProcess try: import resource @@ -126,16 +127,16 @@ class SMaster(object): # These methods are only used when pickling so will not be used on # non-Windows platforms. 
def __setstate__(self, state): - self.opts = state['opts'] - self.master_key = state['master_key'] - self.key = state['key'] - SMaster.secrets = state['secrets'] + self.opts = state[u'opts'] + self.master_key = state[u'master_key'] + self.key = state[u'key'] + SMaster.secrets = state[u'secrets'] def __getstate__(self): - return {'opts': self.opts, - 'master_key': self.master_key, - 'key': self.key, - 'secrets': SMaster.secrets} + return {u'opts': self.opts, + u'master_key': self.master_key, + u'key': self.key, + u'secrets': SMaster.secrets} def __prep_key(self): ''' @@ -145,7 +146,7 @@ class SMaster(object): return salt.daemons.masterapi.access_keys(self.opts) -class Maintenance(SignalHandlingMultiprocessingProcess): +class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' A generalized maintenance process which performs maintenance routines. ''' @@ -158,7 +159,7 @@ class Maintenance(SignalHandlingMultiprocessingProcess): super(Maintenance, self).__init__(log_queue=log_queue) self.opts = opts # How often do we perform the maintenance tasks - self.loop_interval = int(self.opts['loop_interval']) + self.loop_interval = int(self.opts[u'loop_interval']) # Track key rotation intervals self.rotate = int(time.time()) # A serializer for general maint operations @@ -169,11 +170,11 @@ class Maintenance(SignalHandlingMultiprocessingProcess): # process so that a register_after_fork() equivalent will work on Windows. def __setstate__(self, state): self._is_child = True - self.__init__(state['opts'], log_queue=state['log_queue']) + self.__init__(state[u'opts'], log_queue=state[u'log_queue']) def __getstate__(self): - return {'opts': self.opts, - 'log_queue': self.log_queue} + return {u'opts': self.opts, + u'log_queue': self.log_queue} def _post_fork_init(self): ''' @@ -186,7 +187,7 @@ class Maintenance(SignalHandlingMultiprocessingProcess): self.fileserver = salt.fileserver.Fileserver(self.opts) # Load Runners ropts = dict(self.opts) - ropts['quiet'] = True + ropts[u'quiet'] = True runner_client = salt.runner.RunnerClient(ropts) # Load Returners self.returners = salt.loader.returners(self.opts, {}) @@ -197,15 +198,15 @@ class Maintenance(SignalHandlingMultiprocessingProcess): returners=self.returners) self.ckminions = salt.utils.minions.CkMinions(self.opts) # Make Event bus for firing - self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False) + self.event = salt.utils.event.get_master_event(self.opts, self.opts[u'sock_dir'], listen=False) # Init any values needed by the git ext pillar self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts) self.presence_events = False - if self.opts.get('presence_events', False): + if self.opts.get(u'presence_events', False): tcp_only = True for transport, _ in iter_transport_opts(self.opts): - if transport != 'tcp': + if transport != u'tcp': tcp_only = False if not tcp_only: # For a TCP only transport, the presence events will be @@ -220,7 +221,7 @@ class Maintenance(SignalHandlingMultiprocessingProcess): This is where any data that needs to be cleanly maintained from the master is maintained. 
''' - salt.utils.appendproctitle('Maintenance') + salt.utils.appendproctitle(u'Maintenance') # init things that need to be done after the process is forked self._post_fork_init() @@ -252,20 +253,20 @@ class Maintenance(SignalHandlingMultiprocessingProcess): Evaluate accepted keys and create a msgpack file which contains a list ''' - if self.opts['key_cache'] == 'sched': + if self.opts[u'key_cache'] == u'sched': keys = [] #TODO DRY from CKMinions - if self.opts['transport'] in ('zeromq', 'tcp'): - acc = 'minions' + if self.opts[u'transport'] in (u'zeromq', u'tcp'): + acc = u'minions' else: - acc = 'accepted' + acc = u'accepted' - for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)): - if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)): + for fn_ in os.listdir(os.path.join(self.opts[u'pki_dir'], acc)): + if not fn_.startswith(u'.') and os.path.isfile(os.path.join(self.opts[u'pki_dir'], acc, fn_)): keys.append(fn_) - log.debug('Writing master key cache') + log.debug(u'Writing master key cache') # Write a temporary file securely - with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file: + with salt.utils.atomicfile.atomic_open(os.path.join(self.opts[u'pki_dir'], acc, u'.key_cache')) as cache_file: self.serial.dump(keys, cache_file) def handle_key_rotate(self, now): @@ -273,39 +274,39 @@ class Maintenance(SignalHandlingMultiprocessingProcess): Rotate the AES key rotation ''' to_rotate = False - dfn = os.path.join(self.opts['cachedir'], '.dfn') + dfn = os.path.join(self.opts[u'cachedir'], u'.dfn') try: stats = os.stat(dfn) # Basic Windows permissions don't distinguish between # user/group/all. Check for read-only state instead. - if salt.utils.is_windows() and not os.access(dfn, os.W_OK): + if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK): to_rotate = True # Cannot delete read-only files on Windows. 
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR) elif stats.st_mode == 0o100400: to_rotate = True else: - log.error('Found dropfile with incorrect permissions, ignoring...') + log.error(u'Found dropfile with incorrect permissions, ignoring...') os.remove(dfn) except os.error: pass - if self.opts.get('publish_session'): - if now - self.rotate >= self.opts['publish_session']: + if self.opts.get(u'publish_session'): + if now - self.rotate >= self.opts[u'publish_session']: to_rotate = True if to_rotate: - log.info('Rotating master AES key') + log.info(u'Rotating master AES key') for secret_key, secret_map in six.iteritems(SMaster.secrets): # should be unnecessary-- since no one else should be modifying - with secret_map['secret'].get_lock(): - secret_map['secret'].value = six.b(secret_map['reload']()) - self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key') + with secret_map[u'secret'].get_lock(): + secret_map[u'secret'].value = six.b(secret_map[u'reload']()) + self.event.fire_event({u'rotate_{0}_key'.format(secret_key): True}, tag=u'key') self.rotate = now - if self.opts.get('ping_on_rotate'): + if self.opts.get(u'ping_on_rotate'): # Ping all minions to get them to pick up the new key - log.debug('Pinging all connected minions ' - 'due to key rotation') + log.debug(u'Pinging all connected minions ' + u'due to key rotation') salt.utils.master.ping_all_connected_minions(self.opts) def handle_git_pillar(self): @@ -316,7 +317,7 @@ class Maintenance(SignalHandlingMultiprocessingProcess): for pillar in self.git_pillar: pillar.update() except Exception as exc: - log.error('Exception caught while updating git_pillar', + log.error(u'Exception caught while updating git_pillar', exc_info=True) def handle_schedule(self): @@ -330,9 +331,7 @@ class Maintenance(SignalHandlingMultiprocessingProcess): if self.schedule.loop_interval < self.loop_interval: self.loop_interval = self.schedule.loop_interval except Exception as exc: - log.error( - 'Exception {0} occurred in scheduled job'.format(exc) - ) + log.error(u'Exception %s occurred in scheduled job', exc) def handle_presence(self, old_present): ''' @@ -344,13 +343,13 @@ class Maintenance(SignalHandlingMultiprocessingProcess): lost = old_present.difference(present) if new or lost: # Fire new minions present event - data = {'new': list(new), - 'lost': list(lost)} - self.event.fire_event(data, tagify('change', 'presence')) - data = {'present': list(present)} + data = {u'new': list(new), + u'lost': list(lost)} + self.event.fire_event(data, tagify(u'change', u'presence')) + data = {u'present': list(present)} # On the first run it may need more time for the EventPublisher # to come up and be ready. Set the timeout to account for this. - self.event.fire_event(data, tagify('present', 'presence'), timeout=3) + self.event.fire_event(data, tagify(u'present', u'presence'), timeout=3) old_present.clear() old_present.update(present) @@ -373,14 +372,14 @@ class Master(SMaster): # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to # using zmq.zmq_version() and build a version info tuple. zmq_version_info = tuple( - [int(x) for x in zmq.zmq_version().split('.')] + [int(x) for x in zmq.zmq_version().split(u'.')] ) if zmq_version_info < (3, 2): log.warning( - 'You have a version of ZMQ less than ZMQ 3.2! There are ' - 'known connection keep-alive issues with ZMQ < 3.2 which ' - 'may result in loss of contact with minions. Please ' - 'upgrade your ZMQ!' + u'You have a version of ZMQ less than ZMQ 3.2! 
There are ' + u'known connection keep-alive issues with ZMQ < 3.2 which ' + u'may result in loss of contact with minions. Please ' + u'upgrade your ZMQ!' ) SMaster.__init__(self, opts) @@ -394,43 +393,39 @@ class Master(SMaster): # hard limit,but raising to anything above soft limit fails... mof_h = mof_s log.info( - 'Current values for max open files soft/hard setting: ' - '{0}/{1}'.format( - mof_s, mof_h - ) + u'Current values for max open files soft/hard setting: %s/%s', + mof_s, mof_h ) # Let's grab, from the configuration file, the value to raise max open # files to - mof_c = self.opts['max_open_files'] + mof_c = self.opts[u'max_open_files'] if mof_c > mof_h: # The configured value is higher than what's allowed log.info( - 'The value for the \'max_open_files\' setting, {0}, is higher ' - 'than what the user running salt is allowed to raise to, {1}. ' - 'Defaulting to {1}.'.format(mof_c, mof_h) + u'The value for the \'max_open_files\' setting, %s, is higher ' + u'than the highest value the user running salt is allowed to ' + u'set (%s). Defaulting to %s.', mof_c, mof_h, mof_h ) mof_c = mof_h if mof_s < mof_c: # There's room to raise the value. Raise it! - log.info('Raising max open files value to {0}'.format(mof_c)) + log.info(u'Raising max open files value to %s', mof_c) resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h)) try: mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE) log.info( - 'New values for max open files soft/hard values: ' - '{0}/{1}'.format(mof_s, mof_h) + u'New values for max open files soft/hard values: %s/%s', + mof_s, mof_h ) except ValueError: # https://github.com/saltstack/salt/issues/1991#issuecomment-13025595 # A user under macOS reported that our 100000 default value is # still too high. log.critical( - 'Failed to raise max open files setting to {0}. If this ' - 'value is too low. The salt-master will most likely fail ' - 'to run properly.'.format( - mof_c - ) + u'Failed to raise max open files setting to %s. 
If this ' + u'value is too low, the salt-master will most likely fail ' + u'to run properly.', mof_c ) def _pre_flight(self): @@ -442,18 +437,18 @@ class Master(SMaster): critical_errors = [] try: - os.chdir('/') + os.chdir(u'/') except OSError as err: errors.append( - 'Cannot change to root directory ({0})'.format(err) + u'Cannot change to root directory ({0})'.format(err) ) - if self.opts.get('fileserver_verify_config', True): + if self.opts.get(u'fileserver_verify_config', True): fileserver = salt.fileserver.Fileserver(self.opts) if not fileserver.servers: errors.append( - 'Failed to load fileserver backends, the configured backends ' - 'are: {0}'.format(', '.join(self.opts['fileserver_backend'])) + u'Failed to load fileserver backends, the configured backends ' + u'are: {0}'.format(u', '.join(self.opts[u'fileserver_backend'])) ) else: # Run init() for all backends which support the function, to @@ -461,25 +456,25 @@ class Master(SMaster): try: fileserver.init() except FileserverConfigError as exc: - critical_errors.append('{0}'.format(exc)) + critical_errors.append(u'{0}'.format(exc)) - if not self.opts['fileserver_backend']: - errors.append('No fileserver backends are configured') + if not self.opts[u'fileserver_backend']: + errors.append(u'No fileserver backends are configured') # Check to see if we need to create a pillar cache dir - if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')): + if self.opts[u'pillar_cache'] and not os.path.isdir(os.path.join(self.opts[u'cachedir'], u'pillar_cache')): try: prev_umask = os.umask(0o077) - os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache')) + os.mkdir(os.path.join(self.opts[u'cachedir'], u'pillar_cache')) os.umask(prev_umask) except OSError: pass - if self.opts.get('git_pillar_verify_config', True): + if self.opts.get(u'git_pillar_verify_config', True): non_legacy_git_pillars = [ - x for x in self.opts.get('ext_pillar', []) - if 'git' in x - and not isinstance(x['git'], six.string_types) + x for x in self.opts.get(u'ext_pillar', []) + if u'git' in x + and not isinstance(x[u'git'], six.string_types) ] if non_legacy_git_pillars: try: @@ -488,10 +483,10 @@ class Master(SMaster): import PER_REMOTE_OVERRIDES as per_remote_overrides, \ PER_REMOTE_ONLY as per_remote_only for repo in non_legacy_git_pillars: - new_opts['ext_pillar'] = [repo] + new_opts[u'ext_pillar'] = [repo] try: git_pillar = salt.utils.gitfs.GitPillar(new_opts) - git_pillar.init_remotes(repo['git'], + git_pillar.init_remotes(repo[u'git'], per_remote_overrides, per_remote_only) except FileserverConfigError as exc: @@ -504,7 +499,7 @@ class Master(SMaster): log.error(error) for error in critical_errors: log.critical(error) - log.critical('Master failed pre flight checks, exiting\n') + log.critical(u'Master failed pre flight checks, exiting\n') sys.exit(salt.defaults.exitcodes.EX_GENERIC) def start(self): @@ -512,11 +507,7 @@ class Master(SMaster): Turn on the master server components ''' self._pre_flight() - log.info( - 'salt-master is starting as user \'{0}\''.format( - salt.utils.get_user() - ) - ) + log.info(u'salt-master is starting as user \'%s\'', salt.utils.get_user()) enable_sigusr1_handler() enable_sigusr2_handler() @@ -526,87 +517,89 @@ class Master(SMaster): # Reset signals to default ones before adding processes to the process # manager. 
We don't want the processes being started to inherit those # signal handlers - with default_signals(signal.SIGINT, signal.SIGTERM): + with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): # Setup the secrets here because the PubServerChannel may need # them as well. - SMaster.secrets['aes'] = {'secret': multiprocessing.Array(ctypes.c_char, - six.b(salt.crypt.Crypticle.generate_key_string())), - 'reload': salt.crypt.Crypticle.generate_key_string - } - log.info('Creating master process manager') + SMaster.secrets[u'aes'] = { + u'secret': multiprocessing.Array( + ctypes.c_char, + six.b(salt.crypt.Crypticle.generate_key_string()) + ), + u'reload': salt.crypt.Crypticle.generate_key_string + } + log.info(u'Creating master process manager') # Since there are children having their own ProcessManager we should wait for kill more time. self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5) pub_channels = [] - log.info('Creating master publisher process') + log.info(u'Creating master publisher process') for transport, opts in iter_transport_opts(self.opts): chan = salt.transport.server.PubServerChannel.factory(opts) chan.pre_fork(self.process_manager) pub_channels.append(chan) - log.info('Creating master event publisher process') + log.info(u'Creating master event publisher process') self.process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,)) - if self.opts.get('reactor'): - if isinstance(self.opts['engines'], list): + if self.opts.get(u'reactor'): + if isinstance(self.opts[u'engines'], list): rine = False - for item in self.opts['engines']: - if 'reactor' in item: + for item in self.opts[u'engines']: + if u'reactor' in item: rine = True break if not rine: - self.opts['engines'].append({'reactor': {}}) + self.opts[u'engines'].append({u'reactor': {}}) else: - if 'reactor' not in self.opts['engines']: - log.info('Enabling the reactor engine') - self.opts['engines']['reactor'] = {} + if u'reactor' not in self.opts[u'engines']: + log.info(u'Enabling the reactor engine') + self.opts[u'engines'][u'reactor'] = {} salt.engines.start_engines(self.opts, self.process_manager) # must be after channels - log.info('Creating master maintenance process') + log.info(u'Creating master maintenance process') self.process_manager.add_process(Maintenance, args=(self.opts,)) - if self.opts.get('event_return'): - log.info('Creating master event return process') + if self.opts.get(u'event_return'): + log.info(u'Creating master event return process') self.process_manager.add_process(salt.utils.event.EventReturn, args=(self.opts,)) - ext_procs = self.opts.get('ext_processes', []) + ext_procs = self.opts.get(u'ext_processes', []) for proc in ext_procs: - log.info('Creating ext_processes process: {0}'.format(proc)) + log.info(u'Creating ext_processes process: %s', proc) try: - mod = '.'.join(proc.split('.')[:-1]) - cls = proc.split('.')[-1] + mod = u'.'.join(proc.split(u'.')[:-1]) + cls = proc.split(u'.')[-1] _tmp = __import__(mod, globals(), locals(), [cls], -1) cls = _tmp.__getattribute__(cls) self.process_manager.add_process(cls, args=(self.opts,)) except Exception: - log.error(('Error creating ext_processes ' - 'process: {0}').format(proc)) + log.error(u'Error creating ext_processes process: %s', proc) - if HAS_HALITE and 'halite' in self.opts: - log.info('Creating master halite process') - self.process_manager.add_process(Halite, args=(self.opts['halite'],)) + if HAS_HALITE and u'halite' in self.opts: + log.info(u'Creating master halite process') + 
self.process_manager.add_process(Halite, args=(self.opts[u'halite'],)) # TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there) - if self.opts['con_cache']: - log.info('Creating master concache process') - self.process_manager.add_process(ConnectedCache, args=(self.opts,)) + if self.opts[u'con_cache']: + log.info(u'Creating master concache process') + self.process_manager.add_process(salt.utils.master.ConnectedCache, args=(self.opts,)) # workaround for issue #16315, race condition - log.debug('Sleeping for two seconds to let concache rest') + log.debug(u'Sleeping for two seconds to let concache rest') time.sleep(2) - log.info('Creating master request server process') + log.info(u'Creating master request server process') kwargs = {} - if salt.utils.is_windows(): - kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue() - kwargs['secrets'] = SMaster.secrets + if salt.utils.platform.is_windows(): + kwargs[u'log_queue'] = salt.log.setup.get_multiprocessing_logging_queue() + kwargs[u'secrets'] = SMaster.secrets self.process_manager.add_process( ReqServer, args=(self.opts, self.key, self.master_key), kwargs=kwargs, - name='ReqServer') + name=u'ReqServer') # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: @@ -629,7 +622,7 @@ class Master(SMaster): sys.exit(0) -class Halite(SignalHandlingMultiprocessingProcess): +class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' Manage the Halite server ''' @@ -647,11 +640,11 @@ class Halite(SignalHandlingMultiprocessingProcess): # process so that a register_after_fork() equivalent will work on Windows. def __setstate__(self, state): self._is_child = True - self.__init__(state['hopts'], log_queue=state['log_queue']) + self.__init__(state[u'hopts'], log_queue=state[u'log_queue']) def __getstate__(self): - return {'hopts': self.hopts, - 'log_queue': self.log_queue} + return {u'hopts': self.hopts, + u'log_queue': self.log_queue} def run(self): ''' @@ -661,7 +654,7 @@ class Halite(SignalHandlingMultiprocessingProcess): halite.start(self.hopts) -class ReqServer(SignalHandlingMultiprocessingProcess): +class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' Starts up the master request server, minions send results to this interface. @@ -689,15 +682,15 @@ class ReqServer(SignalHandlingMultiprocessingProcess): # process so that a register_after_fork() equivalent will work on Windows. 
def __setstate__(self, state): self._is_child = True - self.__init__(state['opts'], state['key'], state['mkey'], - log_queue=state['log_queue'], secrets=state['secrets']) + self.__init__(state[u'opts'], state[u'key'], state[u'mkey'], + log_queue=state[u'log_queue'], secrets=state[u'secrets']) def __getstate__(self): - return {'opts': self.opts, - 'key': self.key, - 'mkey': self.master_key, - 'log_queue': self.log_queue, - 'secrets': self.secrets} + return {u'opts': self.opts, + u'key': self.key, + u'mkey': self.master_key, + u'log_queue': self.log_queue, + u'secrets': self.secrets} def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self.destroy(signum) @@ -713,10 +706,10 @@ class ReqServer(SignalHandlingMultiprocessingProcess): if self.secrets is not None: SMaster.secrets = self.secrets - dfn = os.path.join(self.opts['cachedir'], '.dfn') + dfn = os.path.join(self.opts[u'cachedir'], u'.dfn') if os.path.isfile(dfn): try: - if salt.utils.is_windows() and not os.access(dfn, os.W_OK): + if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK): # Cannot delete read-only files on Windows. os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR) os.remove(dfn) @@ -724,7 +717,7 @@ class ReqServer(SignalHandlingMultiprocessingProcess): pass # Wait for kill should be less then parent's ProcessManager. - self.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager', + self.process_manager = salt.utils.process.ProcessManager(name=u'ReqServer_ProcessManager', wait_for_kill=1) req_channels = [] @@ -733,26 +726,26 @@ class ReqServer(SignalHandlingMultiprocessingProcess): chan = salt.transport.server.ReqServerChannel.factory(opts) chan.pre_fork(self.process_manager) req_channels.append(chan) - if transport != 'tcp': + if transport != u'tcp': tcp_only = False kwargs = {} - if salt.utils.is_windows(): - kwargs['log_queue'] = self.log_queue + if salt.utils.platform.is_windows(): + kwargs[u'log_queue'] = self.log_queue # Use one worker thread if only the TCP transport is set up on # Windows and we are using Python 2. There is load balancer # support on Windows for the TCP transport when using Python 3. - if tcp_only and six.PY2 and int(self.opts['worker_threads']) != 1: - log.warning('TCP transport supports only 1 worker on Windows ' - 'when using Python 2.') - self.opts['worker_threads'] = 1 + if tcp_only and six.PY2 and int(self.opts[u'worker_threads']) != 1: + log.warning(u'TCP transport supports only 1 worker on Windows ' + u'when using Python 2.') + self.opts[u'worker_threads'] = 1 # Reset signals to default ones before adding processes to the process # manager. 
We don't want the processes being started to inherit those # signal handlers - with default_signals(signal.SIGINT, signal.SIGTERM): - for ind in range(int(self.opts['worker_threads'])): - name = 'MWorker-{0}'.format(ind) + with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): + for ind in range(int(self.opts[u'worker_threads'])): + name = u'MWorker-{0}'.format(ind) self.process_manager.add_process(MWorker, args=(self.opts, self.master_key, @@ -770,7 +763,7 @@ class ReqServer(SignalHandlingMultiprocessingProcess): self.__bind() def destroy(self, signum=signal.SIGTERM): - if hasattr(self, 'process_manager'): + if hasattr(self, u'process_manager'): self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) self.process_manager.kill_children() @@ -779,7 +772,7 @@ class ReqServer(SignalHandlingMultiprocessingProcess): self.destroy() -class MWorker(SignalHandlingMultiprocessingProcess): +class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' The worker multiprocess instance to manage the backend operations for the salt master. @@ -801,8 +794,8 @@ class MWorker(SignalHandlingMultiprocessingProcess): :rtype: MWorker :return: Master worker ''' - kwargs['name'] = name - SignalHandlingMultiprocessingProcess.__init__(self, **kwargs) + kwargs[u'name'] = name + super(MWorker, self).__init__(**kwargs) self.opts = opts self.req_channels = req_channels @@ -817,25 +810,25 @@ class MWorker(SignalHandlingMultiprocessingProcess): # non-Windows platforms. def __setstate__(self, state): self._is_child = True - SignalHandlingMultiprocessingProcess.__init__(self, log_queue=state['log_queue']) - self.opts = state['opts'] - self.req_channels = state['req_channels'] - self.mkey = state['mkey'] - self.key = state['key'] - self.k_mtime = state['k_mtime'] - SMaster.secrets = state['secrets'] + super(MWorker, self).__init__(log_queue=state[u'log_queue']) + self.opts = state[u'opts'] + self.req_channels = state[u'req_channels'] + self.mkey = state[u'mkey'] + self.key = state[u'key'] + self.k_mtime = state[u'k_mtime'] + SMaster.secrets = state[u'secrets'] def __getstate__(self): - return {'opts': self.opts, - 'req_channels': self.req_channels, - 'mkey': self.mkey, - 'key': self.key, - 'k_mtime': self.k_mtime, - 'log_queue': self.log_queue, - 'secrets': SMaster.secrets} + return {u'opts': self.opts, + u'req_channels': self.req_channels, + u'mkey': self.mkey, + u'key': self.key, + u'k_mtime': self.k_mtime, + u'log_queue': self.log_queue, + u'secrets': SMaster.secrets} def _handle_signals(self, signum, sigframe): - for channel in getattr(self, 'req_channels', ()): + for channel in getattr(self, u'req_channels', ()): channel.close() super(MWorker, self)._handle_signals(signum, sigframe) @@ -878,10 +871,10 @@ class MWorker(SignalHandlingMultiprocessingProcess): :param dict payload: The payload route to the appropriate handler ''' - key = payload['enc'] - load = payload['load'] - ret = {'aes': self._handle_aes, - 'clear': self._handle_clear}[key](load) + key = payload[u'enc'] + load = payload[u'load'] + ret = {u'aes': self._handle_aes, + u'clear': self._handle_clear}[key](load) raise tornado.gen.Return(ret) def _handle_clear(self, load): @@ -892,10 +885,10 @@ class MWorker(SignalHandlingMultiprocessingProcess): :return: The result of passing the load to a function in ClearFuncs corresponding to the command specified in the load's 'cmd' key. 
''' - log.trace('Clear payload received with command {cmd}'.format(**load)) - if load['cmd'].startswith('__'): + log.trace(u'Clear payload received with command %s', load[u'cmd']) + if load[u'cmd'].startswith(u'__'): return False - return getattr(self.clear_funcs, load['cmd'])(load), {'fun': 'send_clear'} + return getattr(self.clear_funcs, load[u'cmd'])(load), {u'fun': u'send_clear'} def _handle_aes(self, data): ''' @@ -905,13 +898,13 @@ class MWorker(SignalHandlingMultiprocessingProcess): :return: The result of passing the load to a function in AESFuncs corresponding to the command specified in the load's 'cmd' key. ''' - if 'cmd' not in data: - log.error('Received malformed command {0}'.format(data)) + if u'cmd' not in data: + log.error(u'Received malformed command %s', data) return {} - log.trace('AES payload received with command {0}'.format(data['cmd'])) - if data['cmd'].startswith('__'): + log.trace(u'AES payload received with command %s', data[u'cmd']) + if data[u'cmd'].startswith(u'__'): return False - return self.aes_funcs.run_func(data['cmd'], data) + return self.aes_funcs.run_func(data[u'cmd'], data) def run(self): ''' @@ -944,11 +937,11 @@ class AESFuncs(object): :returns: Instance for handling AES operations ''' self.opts = opts - self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False) + self.event = salt.utils.event.get_master_event(self.opts, self.opts[u'sock_dir'], listen=False) self.serial = salt.payload.Serial(opts) self.ckminions = salt.utils.minions.CkMinions(opts) # Make a client - self.local = salt.client.get_local_client(self.opts['conf_file']) + self.local = salt.client.get_local_client(self.opts[u'conf_file']) # Create the master minion to access the external job cache self.mminion = salt.minion.MasterMinion( self.opts, @@ -987,29 +980,31 @@ class AESFuncs(object): ''' if not salt.utils.verify.valid_id(self.opts, id_): return False - pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_) + pub_path = os.path.join(self.opts[u'pki_dir'], u'minions', id_) try: - with salt.utils.files.fopen(pub_path, 'r') as fp_: + with salt.utils.files.fopen(pub_path, u'r') as fp_: minion_pub = fp_.read() pub = RSA.importKey(minion_pub) except (IOError, OSError): - log.warning('Salt minion claiming to be {0} attempted to communicate ' - 'with master but key could not be read and verification was ' - 'denied.'.format(id_)) + log.warning( + u'Salt minion claiming to be %s attempted to communicate with ' + u'master, but key could not be read and verification was denied.', + id_ + ) return False except (ValueError, IndexError, TypeError) as err: - log.error('Unable to load public key "{0}": {1}' - .format(pub_path, err)) + log.error(u'Unable to load public key "%s": %s', pub_path, err) try: - if salt.crypt.public_decrypt(pub, token) == b'salt': + if salt.crypt.public_decrypt(pub, token) == b'salt': # future lint: disable=non-unicode-string return True except ValueError as err: - log.error('Unable to decrypt token: {0}'.format(err)) + log.error(u'Unable to decrypt token: %s', err) - log.error('Salt minion claiming to be {0} has attempted to ' - 'communicate with the master and could not be verified' - .format(id_)) + log.error( + u'Salt minion claiming to be %s has attempted to communicate with ' + u'the master and could not be verified', id_ + ) return False def verify_minion(self, id_, token): @@ -1035,48 +1030,46 @@ class AESFuncs(object): :return: A boolean indicating if the minion is allowed to publish the command in the load ''' # Verify 
that the load is valid - if 'peer' not in self.opts: + if u'peer' not in self.opts: return False - if not isinstance(self.opts['peer'], dict): + if not isinstance(self.opts[u'peer'], dict): return False - if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')): + if any(key not in clear_load for key in (u'fun', u'arg', u'tgt', u'ret', u'tok', u'id')): return False # If the command will make a recursive publish don't run - if clear_load['fun'].startswith('publish.'): + if clear_load[u'fun'].startswith(u'publish.'): return False # Check the permissions for this minion - if not self.__verify_minion(clear_load['id'], clear_load['tok']): + if not self.__verify_minion(clear_load[u'id'], clear_load[u'tok']): # The minion is not who it says it is! # We don't want to listen to it! log.warning( - ( - 'Minion id {0} is not who it says it is and is attempting ' - 'to issue a peer command' - ).format(clear_load['id']) + u'Minion id %s is not who it says it is and is attempting ' + u'to issue a peer command', clear_load[u'id'] ) return False - clear_load.pop('tok') + clear_load.pop(u'tok') perms = [] - for match in self.opts['peer']: - if re.match(match, clear_load['id']): + for match in self.opts[u'peer']: + if re.match(match, clear_load[u'id']): # This is the list of funcs/modules! - if isinstance(self.opts['peer'][match], list): - perms.extend(self.opts['peer'][match]) - if ',' in clear_load['fun']: + if isinstance(self.opts[u'peer'][match], list): + perms.extend(self.opts[u'peer'][match]) + if u',' in clear_load[u'fun']: # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']] - clear_load['fun'] = clear_load['fun'].split(',') + clear_load[u'fun'] = clear_load[u'fun'].split(u',') arg_ = [] - for arg in clear_load['arg']: + for arg in clear_load[u'arg']: arg_.append(arg.split()) - clear_load['arg'] = arg_ + clear_load[u'arg'] = arg_ # finally, check the auth of the load return self.ckminions.auth_check( perms, - clear_load['fun'], - clear_load['arg'], - clear_load['tgt'], - clear_load.get('tgt_type', 'glob'), + clear_load[u'fun'], + clear_load[u'arg'], + clear_load[u'tgt'], + clear_load.get(u'tgt_type', u'glob'), publish_validate=True) def __verify_load(self, load, verify_keys): @@ -1094,27 +1087,20 @@ class AESFuncs(object): ''' if any(key not in load for key in verify_keys): return False - if 'tok' not in load: + if u'tok' not in load: log.error( - 'Received incomplete call from {0} for \'{1}\', missing \'{2}\'' - .format( - load['id'], - inspect_stack()['co_name'], - 'tok' - )) - return False - if not self.__verify_minion(load['id'], load['tok']): - # The minion is not who it says it is! - # We don't want to listen to it! - log.warning( - 'Minion id {0} is not who it says it is!'.format( - load['id'] - ) + u'Received incomplete call from %s for \'%s\', missing \'%s\'', + load[u'id'], inspect_stack()[u'co_name'], u'tok' ) return False + if not self.__verify_minion(load[u'id'], load[u'tok']): + # The minion is not who it says it is! + # We don't want to listen to it! 
+ log.warning(u'Minion id %s is not who it says it is!', load[u'id']) + return False - if 'tok' in load: - load.pop('tok') + if u'tok' in load: + load.pop(u'tok') return load @@ -1126,7 +1112,7 @@ class AESFuncs(object): :param dict load: A payload received from a minion :return: The results from an external node classifier ''' - load = self.__verify_load(load, ('id', 'tok')) + load = self.__verify_load(load, (u'id', u'tok')) if load is False: return {} return self.masterapi._master_tops(load, skip_verify=True) @@ -1146,22 +1132,22 @@ class AESFuncs(object): for saltenv in envs: if saltenv not in file_roots: file_roots[saltenv] = [] - mopts['file_roots'] = file_roots - mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy'] - mopts['env_order'] = self.opts['env_order'] - mopts['default_top'] = self.opts['default_top'] - if load.get('env_only'): + mopts[u'file_roots'] = file_roots + mopts[u'top_file_merging_strategy'] = self.opts[u'top_file_merging_strategy'] + mopts[u'env_order'] = self.opts[u'env_order'] + mopts[u'default_top'] = self.opts[u'default_top'] + if load.get(u'env_only'): return mopts - mopts['renderer'] = self.opts['renderer'] - mopts['failhard'] = self.opts['failhard'] - mopts['state_top'] = self.opts['state_top'] - mopts['state_top_saltenv'] = self.opts['state_top_saltenv'] - mopts['nodegroups'] = self.opts['nodegroups'] - mopts['state_auto_order'] = self.opts['state_auto_order'] - mopts['state_events'] = self.opts['state_events'] - mopts['state_aggregate'] = self.opts['state_aggregate'] - mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks'] - mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks'] + mopts[u'renderer'] = self.opts[u'renderer'] + mopts[u'failhard'] = self.opts[u'failhard'] + mopts[u'state_top'] = self.opts[u'state_top'] + mopts[u'state_top_saltenv'] = self.opts[u'state_top_saltenv'] + mopts[u'nodegroups'] = self.opts[u'nodegroups'] + mopts[u'state_auto_order'] = self.opts[u'state_auto_order'] + mopts[u'state_events'] = self.opts[u'state_events'] + mopts[u'state_aggregate'] = self.opts[u'state_aggregate'] + mopts[u'jinja_lstrip_blocks'] = self.opts[u'jinja_lstrip_blocks'] + mopts[u'jinja_trim_blocks'] = self.opts[u'jinja_trim_blocks'] return mopts def _mine_get(self, load): @@ -1173,7 +1159,7 @@ class AESFuncs(object): :rtype: dict :return: Mine data from the specified minions ''' - load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok')) + load = self.__verify_load(load, (u'id', u'tgt', u'fun', u'tok')) if load is False: return {} else: @@ -1188,7 +1174,7 @@ class AESFuncs(object): :rtype: bool :return: True if the data has been stored in the mine ''' - load = self.__verify_load(load, ('id', 'data', 'tok')) + load = self.__verify_load(load, (u'id', u'data', u'tok')) if load is False: return {} return self.masterapi._mine(load, skip_verify=True) @@ -1202,7 +1188,7 @@ class AESFuncs(object): :rtype: bool :return: Boolean indicating whether or not the given function was deleted from the mine ''' - load = self.__verify_load(load, ('id', 'fun', 'tok')) + load = self.__verify_load(load, (u'id', u'fun', u'tok')) if load is False: return {} else: @@ -1214,7 +1200,7 @@ class AESFuncs(object): :param dict load: A payload received from a minion ''' - load = self.__verify_load(load, ('id', 'tok')) + load = self.__verify_load(load, (u'id', u'tok')) if load is False: return {} else: @@ -1225,51 +1211,43 @@ class AESFuncs(object): Allows minions to send files to the master, files are sent to the master file cache ''' - if any(key not 
in load for key in ('id', 'path', 'loc')): + if any(key not in load for key in (u'id', u'path', u'loc')): return False - if not isinstance(load['path'], list): + if not isinstance(load[u'path'], list): return False - if not self.opts['file_recv']: + if not self.opts[u'file_recv']: return False - if not salt.utils.verify.valid_id(self.opts, load['id']): + if not salt.utils.verify.valid_id(self.opts, load[u'id']): return False - file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size'] + file_recv_max_size = 1024*1024 * self.opts[u'file_recv_max_size'] - if 'loc' in load and load['loc'] < 0: - log.error('Invalid file pointer: load[loc] < 0') + if u'loc' in load and load[u'loc'] < 0: + log.error(u'Invalid file pointer: load[loc] < 0') return False - if len(load['data']) + load.get('loc', 0) > file_recv_max_size: + if len(load[u'data']) + load.get(u'loc', 0) > file_recv_max_size: log.error( - 'file_recv_max_size limit of %d MB exceeded! %s will be ' - 'truncated. To successfully push this file, adjust ' - 'file_recv_max_size to an integer (in MB) large enough to ' - 'accommodate it.', file_recv_max_size, load['path'] + u'file_recv_max_size limit of %d MB exceeded! %s will be ' + u'truncated. To successfully push this file, adjust ' + u'file_recv_max_size to an integer (in MB) large enough to ' + u'accommodate it.', file_recv_max_size, load[u'path'] ) return False - if 'tok' not in load: + if u'tok' not in load: log.error( - 'Received incomplete call from {0} for \'{1}\', missing ' - '\'{2}\''.format( - load['id'], - inspect_stack()['co_name'], - 'tok' - ) + u'Received incomplete call from %s for \'%s\', missing \'%s\'', + load[u'id'], inspect_stack()[u'co_name'], u'tok' ) return False - if not self.__verify_minion(load['id'], load['tok']): + if not self.__verify_minion(load[u'id'], load[u'tok']): # The minion is not who it says it is! # We don't want to listen to it! - log.warning( - 'Minion id {0} is not who it says it is!'.format( - load['id'] - ) - ) + log.warning(u'Minion id %s is not who it says it is!', load[u'id']) return {} - load.pop('tok') + load.pop(u'tok') # Join path - sep_path = os.sep.join(load['path']) + sep_path = os.sep.join(load[u'path']) # Path normalization should have been done by the sending # minion but we can't guarantee it. Re-do it here. @@ -1277,20 +1255,22 @@ class AESFuncs(object): # Ensure that this safety check is done after the path # have been normalized. - if os.path.isabs(normpath) or '../' in load['path']: + if os.path.isabs(normpath) or u'../' in load[u'path']: # Can overwrite master files!! return False cpath = os.path.join( - self.opts['cachedir'], - 'minions', - load['id'], - 'files', + self.opts[u'cachedir'], + u'minions', + load[u'id'], + u'files', normpath) # One last safety check here - if not os.path.normpath(cpath).startswith(self.opts['cachedir']): - log.warning('Attempt to write received file outside of master cache ' - 'directory! Requested file write: {0}. Access denied.'.format(cpath)) + if not os.path.normpath(cpath).startswith(self.opts[u'cachedir']): + log.warning( + u'Attempt to write received file outside of master cache ' + u'directory! Requested path: %s. 
Access denied.', cpath + ) return False cdir = os.path.dirname(cpath) if not os.path.isdir(cdir): @@ -1298,17 +1278,15 @@ class AESFuncs(object): os.makedirs(cdir) except os.error: pass - if os.path.isfile(cpath) and load['loc'] != 0: - mode = 'ab' + if os.path.isfile(cpath) and load[u'loc'] != 0: + mode = u'ab' else: - mode = 'wb' + mode = u'wb' with salt.utils.files.fopen(cpath, mode) as fp_: - if load['loc']: - fp_.seek(load['loc']) - if six.PY3: - fp_.write(load['data'].encode(__salt_system_encoding__)) - else: - fp_.write(load['data']) + if load[u'loc']: + fp_.seek(load[u'loc']) + + fp_.write(load[u'data']) return True def _pillar(self, load): @@ -1320,29 +1298,29 @@ class AESFuncs(object): :rtype: dict :return: The pillar data for the minion ''' - if any(key not in load for key in ('id', 'grains')): + if any(key not in load for key in (u'id', u'grains')): return False - if not salt.utils.verify.valid_id(self.opts, load['id']): + if not salt.utils.verify.valid_id(self.opts, load[u'id']): return False - load['grains']['id'] = load['id'] + load[u'grains'][u'id'] = load[u'id'] pillar_dirs = {} pillar = salt.pillar.get_pillar( self.opts, - load['grains'], - load['id'], - load.get('saltenv', load.get('env')), - ext=load.get('ext'), - pillar_override=load.get('pillar_override', {}), - pillarenv=load.get('pillarenv')) + load[u'grains'], + load[u'id'], + load.get(u'saltenv', load.get(u'env')), + ext=load.get(u'ext'), + pillar_override=load.get(u'pillar_override', {}), + pillarenv=load.get(u'pillarenv')) data = pillar.compile_pillar(pillar_dirs=pillar_dirs) self.fs_.update_opts() - if self.opts.get('minion_data_cache', False): - self.masterapi.cache.store('minions/{0}'.format(load['id']), - 'data', - {'grains': load['grains'], - 'pillar': data}) - self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion')) + if self.opts.get(u'minion_data_cache', False): + self.masterapi.cache.store(u'minions/{0}'.format(load[u'id']), + u'data', + {u'grains': load[u'grains'], + u'pillar': data}) + self.event.fire_event({u'Minion data cache refresh': load[u'id']}, tagify(load[u'id'], u'refresh', u'minion')) return data def _minion_event(self, load): @@ -1352,7 +1330,7 @@ class AESFuncs(object): :param dict load: The minion payload ''' - load = self.__verify_load(load, ('id', 'tok')) + load = self.__verify_load(load, (u'id', u'tok')) if load is False: return {} # Route to master event bus @@ -1364,20 +1342,20 @@ class AESFuncs(object): ''' Act on specific events from minions ''' - id_ = load['id'] - if load.get('tag', '') == '_salt_error': + id_ = load[u'id'] + if load.get(u'tag', u'') == u'_salt_error': log.error( - 'Received minion error from [{minion}]: {data}' - .format(minion=id_, data=load['data']['message']) + u'Received minion error from [%s]: %s', + id_, load[u'data'][u'message'] ) - for event in load.get('events', []): - event_data = event.get('data', {}) - if 'minions' in event_data: - jid = event_data.get('jid') + for event in load.get(u'events', []): + event_data = event.get(u'data', {}) + if u'minions' in event_data: + jid = event_data.get(u'jid') if not jid: continue - minions = event_data['minions'] + minions = event_data[u'minions'] try: salt.utils.job.store_minions( self.opts, @@ -1387,8 +1365,8 @@ class AESFuncs(object): syndic_id=id_) except (KeyError, salt.exceptions.SaltCacheError) as exc: log.error( - 'Could not add minion(s) {0} for job {1}: {2}' - .format(minions, jid, exc) + u'Could not add minion(s) %s for job %s: %s', + minions, jid, exc ) 
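The path handling in the _file_recv hunk above is the security-sensitive part of the change, so a minimal standalone sketch of the same guard may help; the helper name and return convention here are illustrative, not part of Salt's API.

import os

def safe_minion_cache_path(cachedir, minion_id, path_parts):
    '''
    Return the cache path for a file pushed by a minion, or None if the
    requested path would escape the master cache directory.
    '''
    # Re-do the normalization here; the sending minion cannot be trusted
    # to have done it.
    normpath = os.path.normpath(os.sep.join(path_parts))
    # Reject absolute paths and explicit parent-directory components.
    if os.path.isabs(normpath) or '../' in path_parts:
        return None
    cpath = os.path.join(cachedir, 'minions', minion_id, 'files', normpath)
    # One last check: the resolved path must still live under cachedir.
    if not os.path.normpath(cpath).startswith(cachedir):
        return None
    return cpath

# e.g. safe_minion_cache_path('/var/cache/salt/master', 'web01', ['etc', 'motd'])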
def _return(self, load): @@ -1401,29 +1379,36 @@ class AESFuncs(object): :param dict load: The minion payload ''' - if self.opts['require_minion_sign_messages'] and 'sig' not in load: - log.critical('_return: Master is requiring minions to sign their messages, but there is no signature in this payload from {0}.'.format(load['id'])) + if self.opts[u'require_minion_sign_messages'] and u'sig' not in load: + log.critical( + u'_return: Master is requiring minions to sign their ' + u'messages, but there is no signature in this payload from ' + u'%s.', load[u'id'] + ) return False - if 'sig' in load: - log.trace('Verifying signed event publish from minion') - sig = load.pop('sig') - this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id'])) + if u'sig' in load: + log.trace(u'Verifying signed event publish from minion') + sig = load.pop(u'sig') + this_minion_pubkey = os.path.join(self.opts[u'pki_dir'], u'minions/{0}'.format(load[u'id'])) serialized_load = salt.serializers.msgpack.serialize(load) if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig): - log.info('Failed to verify event signature from minion {0}.'.format(load['id'])) - if self.opts['drop_messages_signature_fail']: - log.critical('Drop_messages_signature_fail is enabled, dropping message from {0}'.format(load['id'])) + log.info(u'Failed to verify event signature from minion %s.', load[u'id']) + if self.opts[u'drop_messages_signature_fail']: + log.critical( + u'Drop_messages_signature_fail is enabled, dropping ' + u'message from %s', load[u'id'] + ) return False else: - log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.') - load['sig'] = sig + log.info(u'But \'drop_message_signature_fail\' is disabled, so message is still accepted.') + load[u'sig'] = sig try: salt.utils.job.store_job( self.opts, load, event=self.event, mminion=self.mminion) except salt.exceptions.SaltCacheError: - log.error('Could not store job information for load: {0}'.format(load)) + log.error(u'Could not store job information for load: %s', load) def _syndic_return(self, load): ''' @@ -1433,37 +1418,37 @@ class AESFuncs(object): :param dict load: The minion payload ''' # Verify the load - if any(key not in load for key in ('return', 'jid', 'id')): + if any(key not in load for key in (u'return', u'jid', u'id')): return None # if we have a load, save it - if load.get('load'): - fstr = '{0}.save_load'.format(self.opts['master_job_cache']) - self.mminion.returners[fstr](load['jid'], load['load']) + if load.get(u'load'): + fstr = u'{0}.save_load'.format(self.opts[u'master_job_cache']) + self.mminion.returners[fstr](load[u'jid'], load[u'load']) # Register the syndic - syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id']) + syndic_cache_path = os.path.join(self.opts[u'cachedir'], u'syndics', load[u'id']) if not os.path.exists(syndic_cache_path): path_name = os.path.split(syndic_cache_path)[0] if not os.path.exists(path_name): os.makedirs(path_name) - with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh: - wfh.write('') + with salt.utils.files.fopen(syndic_cache_path, u'w') as wfh: + wfh.write(u'') # Format individual return loads - for key, item in six.iteritems(load['return']): - ret = {'jid': load['jid'], - 'id': key} + for key, item in six.iteritems(load[u'return']): + ret = {u'jid': load[u'jid'], + u'id': key} ret.update(item) - if 'master_id' in load: - ret['master_id'] = load['master_id'] - if 'fun' in load: - ret['fun'] = load['fun'] - if 'arg' 
in load: - ret['fun_args'] = load['arg'] - if 'out' in load: - ret['out'] = load['out'] - if 'sig' in load: - ret['sig'] = load['sig'] + if u'master_id' in load: + ret[u'master_id'] = load[u'master_id'] + if u'fun' in load: + ret[u'fun'] = load[u'fun'] + if u'arg' in load: + ret[u'fun_args'] = load[u'arg'] + if u'out' in load: + ret[u'out'] = load[u'out'] + if u'sig' in load: + ret[u'sig'] = load[u'sig'] self._return(ret) @@ -1476,7 +1461,7 @@ class AESFuncs(object): :rtype: dict :return: The runner function data ''' - load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok')) + load = self.__verify_load(clear_load, (u'fun', u'arg', u'id', u'tok')) if load is False: return {} else: @@ -1492,21 +1477,21 @@ class AESFuncs(object): :rtype: dict :return: Return data corresponding to a given JID ''' - load = self.__verify_load(load, ('jid', 'id', 'tok')) + load = self.__verify_load(load, (u'jid', u'id', u'tok')) if load is False: return {} # Check that this minion can access this data auth_cache = os.path.join( - self.opts['cachedir'], - 'publish_auth') + self.opts[u'cachedir'], + u'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache) - jid_fn = os.path.join(auth_cache, str(load['jid'])) - with salt.utils.files.fopen(jid_fn, 'r') as fp_: - if not load['id'] == fp_.read(): + jid_fn = os.path.join(auth_cache, str(load[u'jid'])) + with salt.utils.files.fopen(jid_fn, u'r') as fp_: + if not load[u'id'] == fp_.read(): return {} # Grab the latest and return - return self.local.get_cache_returns(load['jid']) + return self.local.get_cache_returns(load[u'jid']) def minion_pub(self, clear_load): ''' @@ -1590,10 +1575,13 @@ class AESFuncs(object): :rtype: bool :return: True if key was revoked, False if not ''' - load = self.__verify_load(load, ('id', 'tok')) + load = self.__verify_load(load, (u'id', u'tok')) - if not self.opts.get('allow_minion_key_revoke', False): - log.warning('Minion {0} requested key revoke, but allow_minion_key_revoke is False'.format(load['id'])) + if not self.opts.get(u'allow_minion_key_revoke', False): + log.warning( + u'Minion %s requested key revoke, but allow_minion_key_revoke ' + u'is set to False', load[u'id'] + ) return load if load is False: @@ -1609,44 +1597,38 @@ class AESFuncs(object): :return: The result of the master function that was called ''' # Don't honor private functions - if func.startswith('__'): + if func.startswith(u'__'): # TODO: return some error? Seems odd to return {} - return {}, {'fun': 'send'} + return {}, {u'fun': u'send'} # Run the func if hasattr(self, func): try: start = time.time() ret = getattr(self, func)(load) log.trace( - 'Master function call {0} took {1} seconds'.format( - func, time.time() - start - ) + u'Master function call %s took %s seconds', + func, time.time() - start ) except Exception: - ret = '' - log.error( - 'Error in function {0}:\n'.format(func), - exc_info=True - ) + ret = u'' + log.error(u'Error in function %s:\n', func, exc_info=True) else: log.error( - 'Received function {0} which is unavailable on the master, ' - 'returning False'.format( - func - ) + u'Received function %s which is unavailable on the master, ' + u'returning False', func ) - return False, {'fun': 'send'} + return False, {u'fun': u'send'} # Don't encrypt the return value for the _return func # (we don't care about the return value, so why encrypt it?) 
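The run_func changes above convert every log call to lazy %-style formatting while keeping the dispatch-and-time pattern. A compressed, self-contained sketch of that pattern follows; the class and method names are made up, and Salt's trace level is replaced by the standard debug level.

import logging
import time

log = logging.getLogger(__name__)

class Funcs(object):
    '''Illustrative dispatcher; only the pattern matters, not the names.'''

    def _ping(self, load):
        return True

    def run_func(self, func, load):
        # Never dispatch to private helpers or unknown names.
        if func.startswith('__') or not hasattr(self, func):
            # Lazy %-style arguments: the message is only rendered if the
            # record is actually emitted.
            log.error('Received function %s which is unavailable, returning False', func)
            return False, {'fun': 'send'}
        start = time.time()
        try:
            ret = getattr(self, func)(load)
            log.debug('Function call %s took %s seconds', func, time.time() - start)
        except Exception:
            ret = ''
            log.error('Error in function %s:', func, exc_info=True)
        return ret, {'fun': 'send'}

# Funcs().run_func('_ping', {})  -> (True, {'fun': 'send'})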
- if func == '_return': - return ret, {'fun': 'send'} - if func == '_pillar' and 'id' in load: - if load.get('ver') != '2' and self.opts['pillar_version'] == 1: + if func == u'_return': + return ret, {u'fun': u'send'} + if func == u'_pillar' and u'id' in load: + if load.get(u'ver') != u'2' and self.opts[u'pillar_version'] == 1: # Authorized to return old pillar proto - return ret, {'fun': 'send'} - return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']} + return ret, {u'fun': u'send'} + return ret, {u'fun': u'send_private', u'key': u'pillar', u'tgt': load[u'id']} # Encrypt the return - return ret, {'fun': 'send'} + return ret, {u'fun': u'send'} class ClearFuncs(object): @@ -1662,9 +1644,9 @@ class ClearFuncs(object): self.opts = opts self.key = key # Create the event manager - self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False) + self.event = salt.utils.event.get_master_event(self.opts, self.opts[u'sock_dir'], listen=False) # Make a client - self.local = salt.client.get_local_client(self.opts['conf_file']) + self.local = salt.client.get_local_client(self.opts[u'conf_file']) # Make an minion checker object self.ckminions = salt.utils.minions.CkMinions(opts) # Make an Auth object @@ -1686,66 +1668,65 @@ class ClearFuncs(object): Send a master control function back to the runner system ''' # All runner ops pass through eauth - if 'token' in clear_load: + if u'token' in clear_load: # Authenticate token = self.loadauth.authenticate_token(clear_load) if not token: - return dict(error=dict(name='TokenAuthenticationError', - message='Authentication failure of type "token" occurred.')) + return dict(error=dict(name=u'TokenAuthenticationError', + message=u'Authentication failure of type "token" occurred.')) # Authorize - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] + if self.opts[u'keep_acl_in_token'] and u'auth_list' in token: + auth_list = token[u'auth_list'] else: - clear_load['eauth'] = token['eauth'] - clear_load['username'] = token['name'] + clear_load[u'eauth'] = token[u'eauth'] + clear_load[u'username'] = token[u'name'] auth_list = self.loadauth.get_auth_list(clear_load) - if not self.ckminions.runner_check(auth_list, clear_load['fun']): - return dict(error=dict(name='TokenAuthenticationError', - message=('Authentication failure of type "token" occurred for ' - 'user {0}.').format(token['name']))) - clear_load.pop('token') - username = token['name'] - elif 'eauth' in clear_load: + if not self.ckminions.runner_check(auth_list, clear_load[u'fun']): + return dict(error=dict(name=u'TokenAuthenticationError', + message=(u'Authentication failure of type "token" occurred for ' + u'user {0}.').format(token[u'name']))) + clear_load.pop(u'token') + username = token[u'name'] + elif u'eauth' in clear_load: if not self.loadauth.authenticate_eauth(clear_load): - return dict(error=dict(name='EauthAuthenticationError', - message=('Authentication failure of type "eauth" occurred for ' - 'user {0}.').format(clear_load.get('username', 'UNKNOWN')))) + return dict(error=dict(name=u'EauthAuthenticationError', + message=(u'Authentication failure of type "eauth" occurred for ' + u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN')))) auth_list = self.loadauth.get_auth_list(clear_load) - if not self.ckminions.runner_check(auth_list, clear_load['fun']): - return dict(error=dict(name='EauthAuthenticationError', - message=('Authentication failure of type "eauth" occurred for ' - 'user 
{0}.').format(clear_load.get('username', 'UNKNOWN')))) + if not self.ckminions.runner_check(auth_list, clear_load[u'fun']): + return dict(error=dict(name=u'EauthAuthenticationError', + message=(u'Authentication failure of type "eauth" occurred for ' + u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN')))) # No error occurred, consume the password from the clear_load if # passed - username = clear_load.pop('username', 'UNKNOWN') - clear_load.pop('password', None) + username = clear_load.pop(u'username', u'UNKNOWN') + clear_load.pop(u'password', None) else: if not self.loadauth.authenticate_key(clear_load, self.key): - return dict(error=dict(name='UserAuthenticationError', - message='Authentication failure of type "user" occurred')) + return dict(error=dict(name=u'UserAuthenticationError', + message=u'Authentication failure of type "user" occurred')) - if 'user' in clear_load: - username = clear_load['user'] + if u'user' in clear_load: + username = clear_load[u'user'] if salt.auth.AuthUser(username).is_sudo(): - username = self.opts.get('user', 'root') + username = self.opts.get(u'user', u'root') else: username = salt.utils.get_user() # Authorized. Do the job! try: - fun = clear_load.pop('fun') + fun = clear_load.pop(u'fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.async(fun, - clear_load.get('kwarg', {}), + clear_load.get(u'kwarg', {}), username) except Exception as exc: - log.error('Exception occurred while ' - 'introspecting {0}: {1}'.format(fun, exc)) + log.error(u'Exception occurred while introspecting %s: %s', fun, exc) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) @@ -1756,83 +1737,82 @@ class ClearFuncs(object): ''' # All wheel ops pass through eauth username = None - if 'token' in clear_load: + if u'token' in clear_load: # Authenticate token = self.loadauth.authenticate_token(clear_load) if not token: - return dict(error=dict(name='TokenAuthenticationError', - message='Authentication failure of type "token" occurred.')) + return dict(error=dict(name=u'TokenAuthenticationError', + message=u'Authentication failure of type "token" occurred.')) # Authorize - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] + if self.opts[u'keep_acl_in_token'] and u'auth_list' in token: + auth_list = token[u'auth_list'] else: - clear_load['eauth'] = token['eauth'] - clear_load['username'] = token['name'] + clear_load[u'eauth'] = token[u'eauth'] + clear_load[u'username'] = token[u'name'] auth_list = self.loadauth.get_auth_list(clear_load) - if not self.ckminions.wheel_check(auth_list, clear_load['fun']): - return dict(error=dict(name='TokenAuthenticationError', - message=('Authentication failure of type "token" occurred for ' - 'user {0}.').format(token['name']))) - clear_load.pop('token') - username = token['name'] - elif 'eauth' in clear_load: + if not self.ckminions.wheel_check(auth_list, clear_load[u'fun']): + return dict(error=dict(name=u'TokenAuthenticationError', + message=(u'Authentication failure of type "token" occurred for ' + u'user {0}.').format(token[u'name']))) + clear_load.pop(u'token') + username = token[u'name'] + elif u'eauth' in clear_load: if not self.loadauth.authenticate_eauth(clear_load): - return dict(error=dict(name='EauthAuthenticationError', - message=('Authentication failure of type "eauth" occurred for ' - 'user {0}.').format(clear_load.get('username', 'UNKNOWN')))) + return dict(error=dict(name=u'EauthAuthenticationError', + message=(u'Authentication 
failure of type "eauth" occurred for ' + u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN')))) auth_list = self.loadauth.get_auth_list(clear_load) - if not self.ckminions.wheel_check(auth_list, clear_load['fun']): - return dict(error=dict(name='EauthAuthenticationError', - message=('Authentication failure of type "eauth" occurred for ' - 'user {0}.').format(clear_load.get('username', 'UNKNOWN')))) + if not self.ckminions.wheel_check(auth_list, clear_load[u'fun']): + return dict(error=dict(name=u'EauthAuthenticationError', + message=(u'Authentication failure of type "eauth" occurred for ' + u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN')))) # No error occurred, consume the password from the clear_load if # passed - clear_load.pop('password', None) - username = clear_load.pop('username', 'UNKNOWN') + clear_load.pop(u'password', None) + username = clear_load.pop(u'username', u'UNKNOWN') else: if not self.loadauth.authenticate_key(clear_load, self.key): - return dict(error=dict(name='UserAuthenticationError', - message='Authentication failure of type "user" occurred')) + return dict(error=dict(name=u'UserAuthenticationError', + message=u'Authentication failure of type "user" occurred')) - if 'user' in clear_load: - username = clear_load['user'] + if u'user' in clear_load: + username = clear_load[u'user'] if salt.auth.AuthUser(username).is_sudo(): - username = self.opts.get('user', 'root') + username = self.opts.get(u'user', u'root') else: username = salt.utils.get_user() # Authorized. Do the job! try: jid = salt.utils.jid.gen_jid() - fun = clear_load.pop('fun') - tag = tagify(jid, prefix='wheel') - data = {'fun': "wheel.{0}".format(fun), - 'jid': jid, - 'tag': tag, - 'user': username} + fun = clear_load.pop(u'fun') + tag = tagify(jid, prefix=u'wheel') + data = {u'fun': u"wheel.{0}".format(fun), + u'jid': jid, + u'tag': tag, + u'user': username} - self.event.fire_event(data, tagify([jid, 'new'], 'wheel')) + self.event.fire_event(data, tagify([jid, u'new'], u'wheel')) ret = self.wheel_.call_func(fun, full_return=True, **clear_load) - data['return'] = ret['return'] - data['success'] = ret['success'] - self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) - return {'tag': tag, - 'data': data} + data[u'return'] = ret[u'return'] + data[u'success'] = ret[u'success'] + self.event.fire_event(data, tagify([jid, u'ret'], u'wheel')) + return {u'tag': tag, + u'data': data} except Exception as exc: - log.error('Exception occurred while ' - 'introspecting {0}: {1}'.format(fun, exc)) - data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format( + log.error(u'Exception occurred while introspecting %s: %s', fun, exc) + data[u'return'] = u'Exception occurred in wheel {0}: {1}: {2}'.format( fun, exc.__class__.__name__, exc, ) - data['success'] = False - self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) - return {'tag': tag, - 'data': data} + data[u'success'] = False + self.event.fire_event(data, tagify([jid, u'ret'], u'wheel')) + return {u'tag': tag, + u'data': data} def mk_token(self, clear_load): ''' @@ -1841,81 +1821,78 @@ class ClearFuncs(object): ''' token = self.loadauth.mk_token(clear_load) if not token: - log.warning('Authentication failure of type "eauth" occurred.') - return '' + log.warning(u'Authentication failure of type "eauth" occurred.') + return u'' return token def get_token(self, clear_load): ''' Return the name associated with a token or False if the token is invalid ''' - if 'token' not in clear_load: + if u'token' not in clear_load: return False 
- return self.loadauth.get_tok(clear_load['token']) + return self.loadauth.get_tok(clear_load[u'token']) def publish(self, clear_load): ''' This method sends out publications to the minions, it can only be used by the LocalClient. ''' - extra = clear_load.get('kwargs', {}) + extra = clear_load.get(u'kwargs', {}) - publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist']) + publisher_acl = salt.acl.PublisherACL(self.opts[u'publisher_acl_blacklist']) - if publisher_acl.user_is_blacklisted(clear_load['user']) or \ - publisher_acl.cmd_is_blacklisted(clear_load['fun']): + if publisher_acl.user_is_blacklisted(clear_load[u'user']) or \ + publisher_acl.cmd_is_blacklisted(clear_load[u'fun']): log.error( - '{user} does not have permissions to run {function}. Please ' - 'contact your local administrator if you believe this is in ' - 'error.\n'.format( - user=clear_load['user'], - function=clear_load['fun'] - ) + u'%s does not have permissions to run %s. Please contact ' + u'your local administrator if you believe this is in ' + u'error.\n', clear_load[u'user'], clear_load[u'fun'] ) - return '' + return u'' # Retrieve the minions list - delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM) + delimiter = clear_load.get(u'kwargs', {}).get(u'delimiter', DEFAULT_TARGET_DELIM) minions = self.ckminions.check_minions( - clear_load['tgt'], - clear_load.get('tgt_type', 'glob'), + clear_load[u'tgt'], + clear_load.get(u'tgt_type', u'glob'), delimiter ) # Check for external auth calls - if extra.get('token', False): + if extra.get(u'token', False): # Authenticate. token = self.loadauth.authenticate_token(extra) if not token: - return '' + return u'' # Get acl - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] + if self.opts[u'keep_acl_in_token'] and u'auth_list' in token: + auth_list = token[u'auth_list'] else: - extra['eauth'] = token['eauth'] - extra['username'] = token['name'] + extra[u'eauth'] = token[u'eauth'] + extra[u'username'] = token[u'name'] auth_list = self.loadauth.get_auth_list(extra) # Authorize the request if not self.ckminions.auth_check( auth_list, - clear_load['fun'], - clear_load['arg'], - clear_load['tgt'], - clear_load.get('tgt_type', 'glob'), + clear_load[u'fun'], + clear_load[u'arg'], + clear_load[u'tgt'], + clear_load.get(u'tgt_type', u'glob'), minions=minions, # always accept find_job - whitelist=['saltutil.find_job'], + whitelist=[u'saltutil.find_job'], ): - log.warning('Authentication failure of type "token" occurred.') - return '' - clear_load['user'] = token['name'] - log.debug('Minion tokenized user = "{0}"'.format(clear_load['user'])) - elif 'eauth' in extra: + log.warning(u'Authentication failure of type "token" occurred.') + return u'' + clear_load[u'user'] = token[u'name'] + log.debug(u'Minion tokenized user = "%s"', clear_load[u'user']) + elif u'eauth' in extra: # Authenticate. if not self.loadauth.authenticate_eauth(extra): - return '' + return u'' # Get acl from eauth module. 
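The publish hunk above consults PublisherACL's user_is_blacklisted and cmd_is_blacklisted before doing anything else. A hedged, much-simplified sketch of that kind of check is below; shell-glob matching here merely stands in for whatever matching Salt's PublisherACL actually performs, and the names are illustrative.

import fnmatch

def is_blacklisted(value, patterns):
    '''True when ``value`` matches any of the shell-glob ``patterns``.'''
    return any(fnmatch.fnmatch(value, pat) for pat in patterns)

blacklist = {'users': ['guest*'], 'modules': ['cmd.run', 'test.*']}
# is_blacklisted('guest01', blacklist['users'])         -> True
# is_blacklisted('state.apply', blacklist['modules'])   -> False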
auth_list = self.loadauth.get_auth_list(extra) @@ -1923,81 +1900,78 @@ class ClearFuncs(object): # Authorize the request if not self.ckminions.auth_check( auth_list, - clear_load['fun'], - clear_load['arg'], - clear_load['tgt'], - clear_load.get('tgt_type', 'glob'), + clear_load[u'fun'], + clear_load[u'arg'], + clear_load[u'tgt'], + clear_load.get(u'tgt_type', u'glob'), minions=minions, # always accept find_job - whitelist=['saltutil.find_job'], + whitelist=[u'saltutil.find_job'], ): - log.warning('Authentication failure of type "eauth" occurred.') - return '' - clear_load['user'] = self.loadauth.load_name(extra) # The username we are attempting to auth with + log.warning(u'Authentication failure of type "eauth" occurred.') + return u'' + clear_load[u'user'] = self.loadauth.load_name(extra) # The username we are attempting to auth with # Verify that the caller has root on master else: auth_ret = self.loadauth.authenticate_key(clear_load, self.key) if auth_ret is False: - return '' + return u'' if auth_ret is not True: - if salt.auth.AuthUser(clear_load['user']).is_sudo(): - if not self.opts['sudo_acl'] or not self.opts['publisher_acl']: + if salt.auth.AuthUser(clear_load[u'user']).is_sudo(): + if not self.opts[u'sudo_acl'] or not self.opts[u'publisher_acl']: auth_ret = True if auth_ret is not True: auth_list = salt.utils.get_values_of_matching_keys( - self.opts['publisher_acl'], + self.opts[u'publisher_acl'], auth_ret) if not auth_list: log.warning( - 'Authentication failure of type "user" occurred.' + u'Authentication failure of type "user" occurred.' ) - return '' + return u'' if not self.ckminions.auth_check( auth_list, - clear_load['fun'], - clear_load['arg'], - clear_load['tgt'], - clear_load.get('tgt_type', 'glob'), + clear_load[u'fun'], + clear_load[u'arg'], + clear_load[u'tgt'], + clear_load.get(u'tgt_type', u'glob'), minions=minions, # always accept find_job - whitelist=['saltutil.find_job'], + whitelist=[u'saltutil.find_job'], ): - log.warning('Authentication failure of type "user" occurred.') - return '' + log.warning(u'Authentication failure of type "user" occurred.') + return u'' # If we order masters (via a syndic), don't short circuit if no minions # are found - if not self.opts.get('order_masters'): + if not self.opts.get(u'order_masters'): # Check for no minions if not minions: return { - 'enc': 'clear', - 'load': { - 'jid': None, - 'minions': minions, - 'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt']) + u'enc': u'clear', + u'load': { + u'jid': None, + u'minions': minions, + u'error': u'Master could not resolve minions for target {0}'.format(clear_load[u'tgt']) } } jid = self._prep_jid(clear_load, extra) if jid is None: - return {'enc': 'clear', - 'load': { - 'error': 'Master failed to assign jid', - } - } + return {u'enc': u'clear', + u'load': {u'error': u'Master failed to assign jid'}} payload = self._prep_pub(minions, jid, clear_load, extra) # Send it! self._send_pub(payload) return { - 'enc': 'clear', - 'load': { - 'jid': clear_load['jid'], - 'minions': minions + u'enc': u'clear', + u'load': { + u'jid': clear_load[u'jid'], + u'minions': minions } } @@ -2007,11 +1981,11 @@ class ClearFuncs(object): ''' # the jid in clear_load can be None, '', or something else. 
this is an # attempt to clean up the value before passing to plugins - passed_jid = clear_load['jid'] if clear_load.get('jid') else None - nocache = extra.get('nocache', False) + passed_jid = clear_load[u'jid'] if clear_load.get(u'jid') else None + nocache = extra.get(u'nocache', False) # Retrieve the jid - fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) + fstr = u'{0}.prep_jid'.format(self.opts[u'master_job_cache']) try: # Retrieve the jid jid = self.mminion.returners[fstr](nocache=nocache, @@ -2019,11 +1993,11 @@ class ClearFuncs(object): except (KeyError, TypeError): # The returner is not present msg = ( - 'Failed to allocate a jid. The requested returner \'{0}\' ' - 'could not be loaded.'.format(fstr.split('.')[0]) + u'Failed to allocate a jid. The requested returner \'{0}\' ' + u'could not be loaded.'.format(fstr.split(u'.')[0]) ) log.error(msg) - return {'error': msg} + return {u'error': msg} return jid def _send_pub(self, load): @@ -2042,26 +2016,26 @@ class ClearFuncs(object): TODO: This is really only bound by temporal cohesion and thus should be refactored even further. ''' - clear_load['jid'] = jid - delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM) + clear_load[u'jid'] = jid + delimiter = clear_load.get(u'kwargs', {}).get(u'delimiter', DEFAULT_TARGET_DELIM) # TODO Error reporting over the master event bus - self.event.fire_event({'minions': minions}, clear_load['jid']) + self.event.fire_event({u'minions': minions}, clear_load[u'jid']) new_job_load = { - 'jid': clear_load['jid'], - 'tgt_type': clear_load['tgt_type'], - 'tgt': clear_load['tgt'], - 'user': clear_load['user'], - 'fun': clear_load['fun'], - 'arg': clear_load['arg'], - 'minions': minions, + u'jid': clear_load[u'jid'], + u'tgt_type': clear_load[u'tgt_type'], + u'tgt': clear_load[u'tgt'], + u'user': clear_load[u'user'], + u'fun': clear_load[u'fun'], + u'arg': clear_load[u'arg'], + u'minions': minions, } # Announce the job on the event bus - self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job')) + self.event.fire_event(new_job_load, tagify([clear_load[u'jid'], u'new'], u'job')) - if self.opts['ext_job_cache']: - fstr = '{0}.save_load'.format(self.opts['ext_job_cache']) + if self.opts[u'ext_job_cache']: + fstr = u'{0}.save_load'.format(self.opts[u'ext_job_cache']) save_load_func = True # Get the returner's save_load arg_spec. @@ -2070,50 +2044,46 @@ class ClearFuncs(object): # Check if 'minions' is included in returner's save_load arg_spec. # This may be missing in custom returners, which we should warn about. 
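The ext_job_cache handling above inspects the returner's save_load signature so it can warn when a custom returner lacks a ``minions`` kwarg. A minimal Python 3 sketch of that check, using a hypothetical helper name, could look like this (the master only warns; it does not skip the call):

import inspect

def save_load_accepts_minions(save_load_func):
    '''
    True when the returner's save_load callable can take a ``minions``
    argument, either explicitly or through **kwargs.
    '''
    spec = inspect.getfullargspec(save_load_func)
    return 'minions' in spec.args or spec.varkw is not None

# def save_load(jid, load, minions=None): ...
# save_load_accepts_minions(save_load)  -> True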
- if 'minions' not in arg_spec.args: + if u'minions' not in arg_spec.args: log.critical( - 'The specified returner used for the external job cache ' - '\'{0}\' does not have a \'minions\' kwarg in the returner\'s ' - 'save_load function.'.format( - self.opts['ext_job_cache'] - ) + u'The specified returner used for the external job cache ' + u'\'%s\' does not have a \'minions\' kwarg in the returner\'s ' + u'save_load function.', self.opts[u'ext_job_cache'] ) except (AttributeError, KeyError): save_load_func = False log.critical( - 'The specified returner used for the external job cache ' - '"{0}" does not have a save_load function!'.format( - self.opts['ext_job_cache'] - ) + u'The specified returner used for the external job cache ' + u'"%s" does not have a save_load function!', + self.opts[u'ext_job_cache'] ) if save_load_func: try: - self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions) + self.mminion.returners[fstr](clear_load[u'jid'], clear_load, minions=minions) except Exception: log.critical( - 'The specified returner threw a stack trace:\n', + u'The specified returner threw a stack trace:\n', exc_info=True ) # always write out to the master job caches try: - fstr = '{0}.save_load'.format(self.opts['master_job_cache']) - self.mminion.returners[fstr](clear_load['jid'], clear_load, minions) + fstr = u'{0}.save_load'.format(self.opts[u'master_job_cache']) + self.mminion.returners[fstr](clear_load[u'jid'], clear_load, minions) except KeyError: log.critical( - 'The specified returner used for the master job cache ' - '"{0}" does not have a save_load function!'.format( - self.opts['master_job_cache'] - ) + u'The specified returner used for the master job cache ' + u'"%s" does not have a save_load function!', + self.opts[u'master_job_cache'] ) except Exception: log.critical( - 'The specified returner threw a stack trace:\n', + u'The specified returner threw a stack trace:\n', exc_info=True ) # Set up the payload - payload = {'enc': 'aes'} + payload = {u'enc': u'aes'} # Altering the contents of the publish load is serious!! Changes here # break compatibility with minion/master versions and even tiny # additions can have serious implications on the performance of the @@ -2123,59 +2093,57 @@ class ClearFuncs(object): # touching this stuff, we can probably do what you want to do another # way that won't have a negative impact. 
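Both job-cache writes above wrap the returner call so a broken returner logs a traceback instead of aborting the publish. A sketch of that defensive pattern, assuming ``returners`` is a dict-like loader mapping 'modname.save_load' to callables as it is used above (the helper itself is illustrative):

import logging

log = logging.getLogger(__name__)

def store_in_job_cache(returners, cache_name, jid, load, minions):
    '''
    Call ``<cache_name>.save_load`` if present, logging instead of raising
    so a broken returner cannot block publishing.
    '''
    fstr = '{0}.save_load'.format(cache_name)
    if fstr not in returners:
        log.critical('The returner "%s" does not have a save_load function!', cache_name)
        return False
    try:
        returners[fstr](jid, load, minions=minions)
        return True
    except Exception:
        # exc_info=True attaches the full traceback to the log record.
        log.critical('The specified returner threw a stack trace:', exc_info=True)
        return False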
load = { - 'fun': clear_load['fun'], - 'arg': clear_load['arg'], - 'tgt': clear_load['tgt'], - 'jid': clear_load['jid'], - 'ret': clear_load['ret'], + u'fun': clear_load[u'fun'], + u'arg': clear_load[u'arg'], + u'tgt': clear_load[u'tgt'], + u'jid': clear_load[u'jid'], + u'ret': clear_load[u'ret'], } # if you specified a master id, lets put that in the load - if 'master_id' in self.opts: - load['master_id'] = self.opts['master_id'] + if u'master_id' in self.opts: + load[u'master_id'] = self.opts[u'master_id'] # if someone passed us one, use that - if 'master_id' in extra: - load['master_id'] = extra['master_id'] + if u'master_id' in extra: + load[u'master_id'] = extra[u'master_id'] # Only add the delimiter to the pub data if it is non-default if delimiter != DEFAULT_TARGET_DELIM: - load['delimiter'] = delimiter + load[u'delimiter'] = delimiter - if 'id' in extra: - load['id'] = extra['id'] - if 'tgt_type' in clear_load: - load['tgt_type'] = clear_load['tgt_type'] - if 'to' in clear_load: - load['to'] = clear_load['to'] + if u'id' in extra: + load[u'id'] = extra[u'id'] + if u'tgt_type' in clear_load: + load[u'tgt_type'] = clear_load[u'tgt_type'] + if u'to' in clear_load: + load[u'to'] = clear_load[u'to'] - if 'kwargs' in clear_load: - if 'ret_config' in clear_load['kwargs']: - load['ret_config'] = clear_load['kwargs'].get('ret_config') + if u'kwargs' in clear_load: + if u'ret_config' in clear_load[u'kwargs']: + load[u'ret_config'] = clear_load[u'kwargs'].get(u'ret_config') - if 'metadata' in clear_load['kwargs']: - load['metadata'] = clear_load['kwargs'].get('metadata') + if u'metadata' in clear_load[u'kwargs']: + load[u'metadata'] = clear_load[u'kwargs'].get(u'metadata') - if 'module_executors' in clear_load['kwargs']: - load['module_executors'] = clear_load['kwargs'].get('module_executors') + if u'module_executors' in clear_load[u'kwargs']: + load[u'module_executors'] = clear_load[u'kwargs'].get(u'module_executors') - if 'executor_opts' in clear_load['kwargs']: - load['executor_opts'] = clear_load['kwargs'].get('executor_opts') + if u'executor_opts' in clear_load[u'kwargs']: + load[u'executor_opts'] = clear_load[u'kwargs'].get(u'executor_opts') - if 'ret_kwargs' in clear_load['kwargs']: - load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs') + if u'ret_kwargs' in clear_load[u'kwargs']: + load[u'ret_kwargs'] = clear_load[u'kwargs'].get(u'ret_kwargs') - if 'user' in clear_load: + if u'user' in clear_load: log.info( - 'User {user} Published command {fun} with jid {jid}'.format( - **clear_load - ) + u'User %s Published command %s with jid %s', + clear_load[u'user'], clear_load[u'fun'], clear_load[u'jid'] ) - load['user'] = clear_load['user'] + load[u'user'] = clear_load[u'user'] else: log.info( - 'Published command {fun} with jid {jid}'.format( - **clear_load - ) + u'Published command %s with jid %s', + clear_load[u'fun'], clear_load[u'jid'] ) - log.debug('Published command details {0}'.format(load)) + log.debug(u'Published command details %s', load) return load def ping(self, clear_load): @@ -2207,15 +2175,15 @@ class FloMWorker(MWorker): self.aes_funcs = salt.master.AESFuncs(self.opts) self.context = zmq.Context(1) self.socket = self.context.socket(zmq.REP) - if self.opts.get('ipc_mode', '') == 'tcp': - self.w_uri = 'tcp://127.0.0.1:{0}'.format( - self.opts.get('tcp_master_workers', 4515) + if self.opts.get(u'ipc_mode', u'') == u'tcp': + self.w_uri = u'tcp://127.0.0.1:{0}'.format( + self.opts.get(u'tcp_master_workers', 4515) ) else: - self.w_uri = 'ipc://{0}'.format( - 
os.path.join(self.opts['sock_dir'], 'workers.ipc') + self.w_uri = u'ipc://{0}'.format( + os.path.join(self.opts[u'sock_dir'], u'workers.ipc') ) - log.info('ZMQ Worker binding to socket {0}'.format(self.w_uri)) + log.info(u'ZMQ Worker binding to socket %s', self.w_uri) self.poller = zmq.Poller() self.poller.register(self.socket, zmq.POLLIN) self.socket.connect(self.w_uri) diff --git a/salt/minion.py b/salt/minion.py index 309d6e926b..5713a0edb6 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -24,7 +24,7 @@ import salt.serializers.msgpack # Import Salt Libs # pylint: disable=import-error,no-name-in-module,redefined-builtin -import salt.ext.six as six +from salt.ext import six if six.PY3: import ipaddress else: @@ -38,7 +38,7 @@ try: # TODO: cleanup import zmq.eventloop.ioloop # support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x - if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'): + if not hasattr(zmq.eventloop.ioloop, u'ZMQIOLoop'): zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop HAS_ZMQ = True @@ -83,19 +83,20 @@ import salt.loader import salt.beacons import salt.engines import salt.payload +import salt.pillar import salt.syspaths import salt.utils +import salt.utils.args import salt.utils.context +import salt.utils.error +import salt.utils.event import salt.utils.files import salt.utils.jid -import salt.pillar -import salt.utils.args -import salt.utils.event -import salt.utils.network import salt.utils.minion import salt.utils.minions +import salt.utils.network +import salt.utils.platform import salt.utils.schedule -import salt.utils.error import salt.utils.zeromq import salt.defaults.exitcodes import salt.cli.daemons @@ -141,64 +142,66 @@ def resolve_dns(opts, fallback=True): ''' ret = {} check_dns = True - if (opts.get('file_client', 'remote') == 'local' and - not opts.get('use_master_when_local', False)): + if (opts.get(u'file_client', u'remote') == u'local' and + not opts.get(u'use_master_when_local', False)): check_dns = False if check_dns is True: # Because I import salt.log below I need to re-import salt.utils here import salt.utils try: - if opts['master'] == '': + if opts[u'master'] == u'': raise SaltSystemExit - ret['master_ip'] = \ - salt.utils.dns_check(opts['master'], int(opts['master_port']), True, opts['ipv6']) + ret[u'master_ip'] = \ + salt.utils.dns_check(opts[u'master'], int(opts[u'master_port']), True, opts[u'ipv6']) except SaltClientError: - if opts['retry_dns']: + if opts[u'retry_dns']: while True: import salt.log - msg = ('Master hostname: \'{0}\' not found or not responsive. ' - 'Retrying in {1} seconds').format(opts['master'], opts['retry_dns']) + msg = (u'Master hostname: \'{0}\' not found or not responsive. 
' + u'Retrying in {1} seconds').format(opts[u'master'], opts[u'retry_dns']) if salt.log.setup.is_console_configured(): log.error(msg) else: - print('WARNING: {0}'.format(msg)) - time.sleep(opts['retry_dns']) + print(u'WARNING: {0}'.format(msg)) + time.sleep(opts[u'retry_dns']) try: - ret['master_ip'] = salt.utils.dns_check( - opts['master'], int(opts['master_port']), True, opts['ipv6'] + ret[u'master_ip'] = salt.utils.dns_check( + opts[u'master'], int(opts[u'master_port']), True, opts[u'ipv6'] ) break except SaltClientError: pass else: if fallback: - ret['master_ip'] = '127.0.0.1' + ret[u'master_ip'] = u'127.0.0.1' else: raise except SaltSystemExit: - unknown_str = 'unknown address' - master = opts.get('master', unknown_str) - if master == '': + unknown_str = u'unknown address' + master = opts.get(u'master', unknown_str) + if master == u'': master = unknown_str - if opts.get('__role') == 'syndic': - err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ - 'Set \'syndic_master\' value in minion config.'.format(master) + if opts.get(u'__role') == u'syndic': + err = u'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ + u'Set \'syndic_master\' value in minion config.'.format(master) else: - err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ - 'Set \'master\' value in minion config.'.format(master) + err = u'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ + u'Set \'master\' value in minion config.'.format(master) log.error(err) raise SaltSystemExit(code=42, msg=err) else: - ret['master_ip'] = '127.0.0.1' + ret[u'master_ip'] = u'127.0.0.1' - if 'master_ip' in ret and 'master_ip' in opts: - if ret['master_ip'] != opts['master_ip']: - log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'], - ret['master_ip']) + if u'master_ip' in ret and u'master_ip' in opts: + if ret[u'master_ip'] != opts[u'master_ip']: + log.warning( + u'Master ip address changed from %s to %s', + opts[u'master_ip'], ret[u'master_ip'] ) - ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'], - port=opts['master_port']) + ret[u'master_uri'] = u'tcp://{ip}:{port}'.format( + ip=ret[u'master_ip'], port=opts[u'master_port']) + return ret @@ -207,23 +210,23 @@ def prep_ip_port(opts): # Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without # a port specified. The is_ipv6 check returns False if brackets are used in the IP # definition such as master: '[::1]:1234'. - if opts['master_uri_format'] == 'ip_only' or salt.utils.network.is_ipv6(opts['master']): - ret['master'] = opts['master'] + if opts[u'master_uri_format'] == u'ip_only' or salt.utils.network.is_ipv6(opts[u'master']): + ret[u'master'] = opts[u'master'] else: - ip_port = opts['master'].rsplit(":", 1) + ip_port = opts[u'master'].rsplit(u':', 1) if len(ip_port) == 1: # e.g. master: mysaltmaster - ret['master'] = ip_port[0] + ret[u'master'] = ip_port[0] else: # e.g. master: localhost:1234 # e.g. master: 127.0.0.1:1234 # e.g. master: [::1]:1234 # Strip off brackets for ipv6 support - ret['master'] = ip_port[0].strip('[]') + ret[u'master'] = ip_port[0].strip(u'[]') # Cast port back to an int! Otherwise a TypeError is thrown # on some of the socket calls elsewhere in the minion and utils code. - ret['master_port'] = int(ip_port[1]) + ret[u'master_port'] = int(ip_port[1]) return ret @@ -243,13 +246,13 @@ def get_proc_dir(cachedir, **kwargs): made. 
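The prep_ip_port change above keeps the compact host:port splitting, including the IPv6 bracket stripping. A standalone sketch of that parsing, with an illustrative function name, is shown here; bare IPv6 addresses without brackets are assumed to be caught by the is_ipv6/ip_only branch, as in the code above.

def split_master(master):
    '''
    Split a ``master`` setting into (host, port); port is None when no
    explicit port is given.
    '''
    ip_port = master.rsplit(':', 1)
    if len(ip_port) == 1:
        # e.g. master: mysaltmaster
        return ip_port[0], None
    # e.g. master: localhost:1234, 127.0.0.1:1234 or [::1]:1234
    return ip_port[0].strip('[]'), int(ip_port[1])

# split_master('mysaltmaster')  -> ('mysaltmaster', None)
# split_master('[::1]:4506')    -> ('::1', 4506)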
Same applies if the directory is already owned by this gid. Must be int. Works only on unix/unix like systems. ''' - fn_ = os.path.join(cachedir, 'proc') - mode = kwargs.pop('mode', None) + fn_ = os.path.join(cachedir, u'proc') + mode = kwargs.pop(u'mode', None) if mode is None: mode = {} else: - mode = {'mode': mode} + mode = {u'mode': mode} if not os.path.isdir(fn_): # proc_dir is not present, create it with mode settings @@ -261,13 +264,13 @@ def get_proc_dir(cachedir, **kwargs): # dir mode. So lets check if mode needs to be changed. if mode: mode_part = S_IMODE(d_stat.st_mode) - if mode_part != mode['mode']: - os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode']) + if mode_part != mode[u'mode']: + os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode[u'mode']) - if hasattr(os, 'chown'): + if hasattr(os, u'chown'): # only on unix/unix like systems - uid = kwargs.pop('uid', -1) - gid = kwargs.pop('gid', -1) + uid = kwargs.pop(u'uid', -1) + gid = kwargs.pop(u'gid', -1) # if uid and gid are both -1 then go ahead with # no changes at all @@ -289,7 +292,7 @@ def load_args_and_kwargs(func, args, data=None, ignore_invalid=False): invalid_kwargs = [] for arg in args: - if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True: + if isinstance(arg, dict) and arg.pop(u'__kwarg__', False) is True: # if the arg is a dict with __kwarg__ == True, then its a kwarg for key, val in six.iteritems(arg): if argspec.keywords or key in argspec.args: @@ -300,30 +303,32 @@ def load_args_and_kwargs(func, args, data=None, ignore_invalid=False): # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. - invalid_kwargs.append('{0}={1}'.format(key, val)) + invalid_kwargs.append(u'{0}={1}'.format(key, val)) continue else: string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632 if string_kwarg: - log.critical( - 'String kwarg(s) %s passed to ' - 'salt.minion.load_args_and_kwargs(). This is no longer ' - 'supported, so the kwarg(s) will be ignored. Arguments ' - 'passed to salt.minion.load_args_and_kwargs() should be ' - 'passed to salt.utils.args.parse_input() first to load ' - 'and condition them properly.', string_kwarg - ) + if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args: + # Function supports **kwargs or is a positional argument to + # the function. + _kwargs.update(string_kwarg) + else: + # **kwargs not in argspec and parsed argument name not in + # list of positional arguments. This keyword argument is + # invalid. 
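The load_args_and_kwargs hunk above separates positional arguments from kwargs delivered as dicts carrying a ``__kwarg__`` marker, and only accepts keywords the target function can take. A minimal Python 3 sketch of that convention follows; the function name is made up and the string-kwarg parsing path is omitted.

import inspect

def split_args_and_kwargs(func, args):
    '''
    Separate positional args from keyword args that arrive as dicts
    carrying a ``__kwarg__: True`` marker, keeping only keywords the
    target function can accept.
    '''
    spec = inspect.getfullargspec(func)
    _args, _kwargs, invalid = [], {}, []
    for arg in args:
        if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            for key, val in arg.items():
                if spec.varkw or key in spec.args:
                    _kwargs[key] = val
                else:
                    # Neither **kwargs nor a named parameter: invalid.
                    invalid.append('{0}={1}'.format(key, val))
        else:
            _args.append(arg)
    return _args, _kwargs, invalid

# def deploy(name, version=None): ...
# split_args_and_kwargs(deploy, ['web01', {'__kwarg__': True, 'version': '1.2'}])
# -> (['web01'], {'version': '1.2'}, [])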
+ for key, val in six.iteritems(string_kwarg): + invalid_kwargs.append(u'{0}={1}'.format(key, val)) else: _args.append(arg) if invalid_kwargs and not ignore_invalid: - salt.utils.invalid_kwargs(invalid_kwargs) + salt.utils.args.invalid_kwargs(invalid_kwargs) if argspec.keywords and isinstance(data, dict): # this function accepts **kwargs, pack in the publish data for key, val in six.iteritems(data): - _kwargs['__pub_{0}'.format(key)] = val + _kwargs[u'__pub_{0}'.format(key)] = val return _args, _kwargs @@ -333,40 +338,40 @@ def eval_master_func(opts): Evaluate master function if master type is 'func' and save it result in opts['master'] ''' - if '__master_func_evaluated' not in opts: + if u'__master_func_evaluated' not in opts: # split module and function and try loading the module - mod_fun = opts['master'] - mod, fun = mod_fun.split('.') + mod_fun = opts[u'master'] + mod, fun = mod_fun.split(u'.') try: master_mod = salt.loader.raw_mod(opts, mod, fun) if not master_mod: raise KeyError # we take whatever the module returns as master address - opts['master'] = master_mod[mod_fun]() + opts[u'master'] = master_mod[mod_fun]() # Check for valid types - if not isinstance(opts['master'], (str, list)): + if not isinstance(opts[u'master'], (six.string_types, list)): raise TypeError - opts['__master_func_evaluated'] = True + opts[u'__master_func_evaluated'] = True except KeyError: - log.error('Failed to load module {0}'.format(mod_fun)) + log.error(u'Failed to load module %s', mod_fun) sys.exit(salt.defaults.exitcodes.EX_GENERIC) except TypeError: - log.error('{0} returned from {1} is not a string'.format(opts['master'], mod_fun)) + log.error(u'%s returned from %s is not a string', opts[u'master'], mod_fun) sys.exit(salt.defaults.exitcodes.EX_GENERIC) - log.info('Evaluated master from module: {0}'.format(mod_fun)) + log.info(u'Evaluated master from module: %s', mod_fun) def master_event(type, master=None): ''' Centralized master event function which will return event type based on event_map ''' - event_map = {'connected': '__master_connected', - 'disconnected': '__master_disconnected', - 'failback': '__master_failback', - 'alive': '__master_alive'} + event_map = {u'connected': u'__master_connected', + u'disconnected': u'__master_disconnected', + u'failback': u'__master_failback', + u'alive': u'__master_alive'} - if type == 'alive' and master is not None: - return '{0}_{1}'.format(event_map.get(type), master) + if type == u'alive' and master is not None: + return u'{0}_{1}'.format(event_map.get(type), master) return event_map.get(type, None) @@ -378,22 +383,20 @@ class MinionBase(object): @staticmethod def process_schedule(minion, loop_interval): try: - if hasattr(minion, 'schedule'): + if hasattr(minion, u'schedule'): minion.schedule.eval() else: - log.error('Minion scheduler not initialized. Scheduled jobs will not be run.') + log.error(u'Minion scheduler not initialized. Scheduled jobs will not be run.') return # Check if scheduler requires lower loop interval than # the loop_interval setting if minion.schedule.loop_interval < loop_interval: loop_interval = minion.schedule.loop_interval log.debug( - 'Overriding loop_interval because of scheduled jobs.' + u'Overriding loop_interval because of scheduled jobs.' 
) except Exception as exc: - log.error( - 'Exception {0} occurred in scheduled job'.format(exc) - ) + log.error(u'Exception %s occurred in scheduled job', exc) return loop_interval def process_beacons(self, functions): @@ -401,10 +404,10 @@ class MinionBase(object): Evaluate all of the configured beacons, grab the config again in case the pillar or grains changed ''' - if 'config.merge' in functions: - b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True) + if u'config.merge' in functions: + b_conf = functions[u'config.merge'](u'beacons', self.opts[u'beacons'], omit_opts=True) if b_conf: - return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member + return self.beacons.process(b_conf, self.opts[u'grains']) # pylint: disable=no-member return [] @tornado.gen.coroutine @@ -430,38 +433,40 @@ class MinionBase(object): (possibly failed) master will then be removed from the list of masters. ''' # return early if we are not connecting to a master - if opts['master_type'] == 'disable': - log.warning('Master is set to disable, skipping connection') + if opts[u'master_type'] == u'disable': + log.warning(u'Master is set to disable, skipping connection') self.connected = False raise tornado.gen.Return((None, None)) # check if master_type was altered from its default - elif opts['master_type'] != 'str' and opts['__role'] != 'syndic': + elif opts[u'master_type'] != u'str' and opts[u'__role'] != u'syndic': # check for a valid keyword - if opts['master_type'] == 'func': + if opts[u'master_type'] == u'func': eval_master_func(opts) # if failover is set, master has to be of type list - elif opts['master_type'] == 'failover': - if isinstance(opts['master'], list): - log.info('Got list of available master addresses:' - ' {0}'.format(opts['master'])) - if opts['master_shuffle']: - if opts['master_failback']: - secondary_masters = opts['master'][1:] + elif opts[u'master_type'] == u'failover': + if isinstance(opts[u'master'], list): + log.info( + u'Got list of available master addresses: %s', + opts[u'master'] + ) + if opts[u'master_shuffle']: + if opts[u'master_failback']: + secondary_masters = opts[u'master'][1:] shuffle(secondary_masters) - opts['master'][1:] = secondary_masters + opts[u'master'][1:] = secondary_masters else: - shuffle(opts['master']) - opts['auth_tries'] = 0 - if opts['master_failback'] and opts['master_failback_interval'] == 0: - opts['master_failback_interval'] = opts['master_alive_interval'] + shuffle(opts[u'master']) + opts[u'auth_tries'] = 0 + if opts[u'master_failback'] and opts[u'master_failback_interval'] == 0: + opts[u'master_failback_interval'] = opts[u'master_alive_interval'] # if opts['master'] is a str and we have never created opts['master_list'] - elif isinstance(opts['master'], str) and ('master_list' not in opts): + elif isinstance(opts[u'master'], six.string_types) and (u'master_list' not in opts): # We have a string, but a list was what was intended. Convert. 
# See issue 23611 for details - opts['master'] = [opts['master']] - elif opts['__role'] == 'syndic': - log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master'])) + opts[u'master'] = [opts[u'master']] + elif opts[u'__role'] == u'syndic': + log.info(u'Syndic setting master_syndic to \'%s\'', opts[u'master']) # if failed=True, the minion was previously connected # we're probably called from the minions main-event-loop @@ -470,90 +475,94 @@ class MinionBase(object): elif failed: if failback: # failback list of masters to original config - opts['master'] = opts['master_list'] + opts[u'master'] = opts[u'master_list'] else: - log.info('Moving possibly failed master {0} to the end of' - ' the list of masters'.format(opts['master'])) - if opts['master'] in opts['local_masters']: + log.info( + u'Moving possibly failed master %s to the end of ' + u'the list of masters', opts[u'master'] + ) + if opts[u'master'] in opts[u'local_masters']: # create new list of master with the possibly failed # one moved to the end - failed_master = opts['master'] - opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x] - opts['master'].append(failed_master) + failed_master = opts[u'master'] + opts[u'master'] = [x for x in opts[u'local_masters'] if opts[u'master'] != x] + opts[u'master'].append(failed_master) else: - opts['master'] = opts['master_list'] + opts[u'master'] = opts[u'master_list'] else: - msg = ('master_type set to \'failover\' but \'master\' ' - 'is not of type list but of type ' - '{0}'.format(type(opts['master']))) + msg = (u'master_type set to \'failover\' but \'master\' ' + u'is not of type list but of type ' + u'{0}'.format(type(opts[u'master']))) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve. # See issue 21082 for details - if opts['retry_dns']: - msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. ' - 'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.') + if opts[u'retry_dns']: + msg = (u'\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. 
' + u'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.') log.critical(msg) - opts['retry_dns'] = 0 + opts[u'retry_dns'] = 0 else: - msg = ('Invalid keyword \'{0}\' for variable ' - '\'master_type\''.format(opts['master_type'])) + msg = (u'Invalid keyword \'{0}\' for variable ' + u'\'master_type\''.format(opts[u'master_type'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # FIXME: if SMinion don't define io_loop, it can't switch master see #29088 # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop # (The channel factories will set a default if the kwarg isn't passed) - factory_kwargs = {'timeout': timeout, 'safe': safe} - if getattr(self, 'io_loop', None): - factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member + factory_kwargs = {u'timeout': timeout, u'safe': safe} + if getattr(self, u'io_loop', None): + factory_kwargs[u'io_loop'] = self.io_loop # pylint: disable=no-member - tries = opts.get('master_tries', 1) + tries = opts.get(u'master_tries', 1) attempts = 0 # if we have a list of masters, loop through them and be # happy with the first one that allows us to connect - if isinstance(opts['master'], list): + if isinstance(opts[u'master'], list): conn = False # shuffle the masters and then loop through them - opts['local_masters'] = copy.copy(opts['master']) - if opts['random_master']: - shuffle(opts['local_masters']) + opts[u'local_masters'] = copy.copy(opts[u'master']) + if opts[u'random_master']: + shuffle(opts[u'local_masters']) last_exc = None - opts['master_uri_list'] = list() + opts[u'master_uri_list'] = list() # This sits outside of the connection loop below because it needs to set # up a list of master URIs regardless of which masters are available # to connect _to_. This is primarily used for masterless mode, when # we need a list of master URIs to fire calls back to. - for master in opts['local_masters']: - opts['master'] = master + for master in opts[u'local_masters']: + opts[u'master'] = master opts.update(prep_ip_port(opts)) - opts['master_uri_list'].append(resolve_dns(opts)['master_uri']) + opts[u'master_uri_list'].append(resolve_dns(opts)[u'master_uri']) while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. - yield tornado.gen.sleep(opts['acceptance_wait_time']) + yield tornado.gen.sleep(opts[u'acceptance_wait_time']) attempts += 1 if tries > 0: - log.debug('Connecting to master. Attempt {0} ' - 'of {1}'.format(attempts, tries) + log.debug( + u'Connecting to master. Attempt %s of %s', + attempts, tries ) else: - log.debug('Connecting to master. Attempt {0} ' - '(infinite attempts)'.format(attempts) + log.debug( + u'Connecting to master. 
Attempt %s (infinite attempts)', + attempts ) - for master in opts['local_masters']: - opts['master'] = master + for master in opts[u'local_masters']: + opts[u'master'] = master opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) # on first run, update self.opts with the whole master list # to enable a minion to re-use old masters if they get fixed - if 'master_list' not in opts: - opts['master_list'] = copy.copy(opts['local_masters']) + if u'master_list' not in opts: + opts[u'master_list'] = copy.copy(opts[u'local_masters']) self.opts = opts @@ -564,66 +573,70 @@ class MinionBase(object): break except SaltClientError as exc: last_exc = exc - msg = ('Master {0} could not be reached, trying ' - 'next master (if any)'.format(opts['master'])) - log.info(msg) + log.info( + u'Master %s could not be reached, trying next ' + u'next master (if any)', opts[u'master'] + ) continue if not conn: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False - self.opts['master'] = copy.copy(self.opts['local_masters']) - msg = ('No master could be reached or all masters ' - 'denied the minions connection attempt.') - log.error(msg) + self.opts[u'master'] = copy.copy(self.opts[u'local_masters']) + log.error( + u'No master could be reached or all masters ' + u'denied the minion\'s connection attempt.' + ) # If the code reaches this point, 'last_exc' # should already be set. raise last_exc # pylint: disable=E0702 else: - self.tok = pub_channel.auth.gen_token('salt') + self.tok = pub_channel.auth.gen_token(u'salt') self.connected = True - raise tornado.gen.Return((opts['master'], pub_channel)) + raise tornado.gen.Return((opts[u'master'], pub_channel)) # single master sign in else: - if opts['random_master']: - log.warning('random_master is True but there is only one master specified. Ignoring.') + if opts[u'random_master']: + log.warning(u'random_master is True but there is only one master specified. Ignoring.') while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. - yield tornado.gen.sleep(opts['acceptance_wait_time']) + yield tornado.gen.sleep(opts[u'acceptance_wait_time']) attempts += 1 if tries > 0: - log.debug('Connecting to master. Attempt {0} ' - 'of {1}'.format(attempts, tries) + log.debug( + u'Connecting to master. Attempt %s of %s', + attempts, tries ) else: - log.debug('Connecting to master. Attempt {0} ' - '(infinite attempts)'.format(attempts) + log.debug( + u'Connecting to master. 
Attempt %s (infinite attempts)', + attempts ) opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) try: - if self.opts['transport'] == 'detect': - self.opts['detect_mode'] = True - for trans in ('zeromq', 'tcp'): - if trans == 'zeromq' and not HAS_ZMQ: + if self.opts[u'transport'] == u'detect': + self.opts[u'detect_mode'] = True + for trans in (u'zeromq', u'tcp'): + if trans == u'zeromq' and not HAS_ZMQ: continue - self.opts['transport'] = trans + self.opts[u'transport'] = trans pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() if not pub_channel.auth.authenticated: continue - del self.opts['detect_mode'] + del self.opts[u'detect_mode'] break else: pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() - self.tok = pub_channel.auth.gen_token('salt') + self.tok = pub_channel.auth.gen_token(u'salt') self.connected = True - raise tornado.gen.Return((opts['master'], pub_channel)) + raise tornado.gen.Return((opts[u'master'], pub_channel)) except SaltClientError as exc: if attempts == tries: # Exhausted all attempts. Return exception. @@ -640,13 +653,13 @@ class SMinion(MinionBase): ''' def __init__(self, opts): # Late setup of the opts grains, so we can log from the grains module - opts['grains'] = salt.loader.grains(opts) + opts[u'grains'] = salt.loader.grains(opts) super(SMinion, self).__init__(opts) # Clean out the proc directory (default /var/cache/salt/minion/proc) - if (self.opts.get('file_client', 'remote') == 'remote' - or self.opts.get('use_master_when_local', False)): - if self.opts['transport'] == 'zeromq' and HAS_ZMQ: + if (self.opts.get(u'file_client', u'remote') == u'remote' + or self.opts.get(u'use_master_when_local', False)): + if self.opts[u'transport'] == u'zeromq' and HAS_ZMQ: io_loop = zmq.eventloop.ioloop.ZMQIOLoop() else: io_loop = LOOP_CLASS.current() @@ -656,19 +669,19 @@ class SMinion(MinionBase): self.gen_modules(initial_load=True) # If configured, cache pillar data on the minion - if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False): + if self.opts[u'file_client'] == u'remote' and self.opts.get(u'minion_pillar_cache', False): import yaml from salt.utils.yamldumper import SafeOrderedDumper - pdir = os.path.join(self.opts['cachedir'], 'pillar') + pdir = os.path.join(self.opts[u'cachedir'], u'pillar') if not os.path.isdir(pdir): os.makedirs(pdir, 0o700) - ptop = os.path.join(pdir, 'top.sls') - if self.opts['environment'] is not None: - penv = self.opts['environment'] + ptop = os.path.join(pdir, u'top.sls') + if self.opts[u'environment'] is not None: + penv = self.opts[u'environment'] else: - penv = 'base' - cache_top = {penv: {self.opts['id']: ['cache']}} - with salt.utils.files.fopen(ptop, 'wb') as fp_: + penv = u'base' + cache_top = {penv: {self.opts[u'id']: [u'cache']}} + with salt.utils.files.fopen(ptop, u'wb') as fp_: fp_.write( yaml.dump( cache_top, @@ -676,11 +689,11 @@ class SMinion(MinionBase): ) ) os.chmod(ptop, 0o600) - cache_sls = os.path.join(pdir, 'cache.sls') - with salt.utils.files.fopen(cache_sls, 'wb') as fp_: + cache_sls = os.path.join(pdir, u'cache.sls') + with salt.utils.files.fopen(cache_sls, u'wb') as fp_: fp_.write( yaml.dump( - self.opts['pillar'], + self.opts[u'pillar'], Dumper=SafeOrderedDumper ) ) @@ -696,12 +709,12 @@ class SMinion(MinionBase): salt '*' sys.reload_modules ''' - self.opts['pillar'] = salt.pillar.get_pillar( + self.opts[u'pillar'] = salt.pillar.get_pillar( 
self.opts, - self.opts['grains'], - self.opts['id'], - self.opts['environment'], - pillarenv=self.opts.get('pillarenv'), + self.opts[u'grains'], + self.opts[u'id'], + self.opts[u'environment'], + pillarenv=self.opts.get(u'pillarenv'), ).compile_pillar() self.utils = salt.loader.utils(self.opts) @@ -717,7 +730,7 @@ class SMinion(MinionBase): self.serializers) self.rend = salt.loader.render(self.opts, self.functions) self.matcher = Matcher(self.opts, self.functions) - self.functions['sys.reload_modules'] = self.gen_modules + self.functions[u'sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts) @@ -736,11 +749,11 @@ class MasterMinion(object): matcher=True, whitelist=None, ignore_config_errors=True): - self.opts = salt.config.minion_config(opts['conf_file'], ignore_config_errors=ignore_config_errors) + self.opts = salt.config.minion_config(opts[u'conf_file'], ignore_config_errors=ignore_config_errors) self.opts.update(opts) self.whitelist = whitelist - self.opts['grains'] = salt.loader.grains(opts) - self.opts['pillar'] = {} + self.opts[u'grains'] = salt.loader.grains(opts) + self.opts[u'pillar'] = {} self.mk_returners = returners self.mk_states = states self.mk_rend = rend @@ -775,7 +788,7 @@ class MasterMinion(object): self.rend = salt.loader.render(self.opts, self.functions) if self.mk_matcher: self.matcher = Matcher(self.opts, self.functions) - self.functions['sys.reload_modules'] = self.gen_modules + self.functions[u'sys.reload_modules'] = self.gen_modules class MinionManager(MinionBase): @@ -786,15 +799,15 @@ class MinionManager(MinionBase): ''' def __init__(self, opts): super(MinionManager, self).__init__(opts) - self.auth_wait = self.opts['acceptance_wait_time'] - self.max_auth_wait = self.opts['acceptance_wait_time_max'] + self.auth_wait = self.opts[u'acceptance_wait_time'] + self.max_auth_wait = self.opts[u'acceptance_wait_time_max'] self.minions = [] self.jid_queue = [] if HAS_ZMQ: zmq.eventloop.ioloop.install() self.io_loop = LOOP_CLASS.current() - self.process_manager = ProcessManager(name='MultiMinionProcessManager') + self.process_manager = ProcessManager(name=u'MultiMinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, async=True) def __del__(self): @@ -806,8 +819,8 @@ class MinionManager(MinionBase): self.opts, io_loop=self.io_loop, ) - self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop) - self.event.subscribe('') + self.event = salt.utils.event.get_event(u'minion', opts=self.opts, io_loop=self.io_loop) + self.event.subscribe(u'') self.event.set_event_handler(self.handle_event) @tornado.gen.coroutine @@ -831,19 +844,19 @@ class MinionManager(MinionBase): ''' Spawn all the coroutines which will sign in to masters ''' - masters = self.opts['master'] - if self.opts['master_type'] == 'failover' or not isinstance(self.opts['master'], list): + masters = self.opts[u'master'] + if self.opts[u'master_type'] == u'failover' or not isinstance(self.opts[u'master'], list): masters = [masters] for master in masters: s_opts = copy.deepcopy(self.opts) - s_opts['master'] = master - s_opts['multimaster'] = True + s_opts[u'master'] = master + s_opts[u'multimaster'] = True minion = self._create_minion_object(s_opts, - s_opts['auth_timeout'], + s_opts[u'auth_timeout'], False, io_loop=self.io_loop, - loaded_base_name='salt.loader.{0}'.format(s_opts['master']), + loaded_base_name=u'salt.loader.{0}'.format(s_opts[u'master']), jid_queue=self.jid_queue, ) self.minions.append(minion) @@ -855,7 +868,7 @@ class 
MinionManager(MinionBase): Create a minion, and asynchronously connect it to a master ''' last = 0 # never have we signed in - auth_wait = minion.opts['acceptance_wait_time'] + auth_wait = minion.opts[u'acceptance_wait_time'] failed = False while True: try: @@ -864,14 +877,20 @@ class MinionManager(MinionBase): break except SaltClientError as exc: failed = True - log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(minion.opts['master'])) + log.error( + u'Error while bringing up minion for multi-master. Is ' + u'master at %s responding?', minion.opts[u'master'] + ) last = time.time() if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait yield tornado.gen.sleep(auth_wait) # TODO: log? except Exception as e: failed = True - log.critical('Unexpected error while connecting to {0}'.format(minion.opts['master']), exc_info=True) + log.critical( + u'Unexpected error while connecting to %s', + minion.opts[u'master'], exc_info=True + ) # Multi Master Tune In def tune_in(self): @@ -949,36 +968,38 @@ class Minion(MinionBase): # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to # using zmq.zmq_version() and build a version info tuple. zmq_version_info = tuple( - [int(x) for x in zmq.zmq_version().split('.')] # pylint: disable=no-member + [int(x) for x in zmq.zmq_version().split(u'.')] # pylint: disable=no-member ) if zmq_version_info < (3, 2): log.warning( - 'You have a version of ZMQ less than ZMQ 3.2! There are ' - 'known connection keep-alive issues with ZMQ < 3.2 which ' - 'may result in loss of contact with minions. Please ' - 'upgrade your ZMQ!' + u'You have a version of ZMQ less than ZMQ 3.2! There are ' + u'known connection keep-alive issues with ZMQ < 3.2 which ' + u'may result in loss of contact with minions. Please ' + u'upgrade your ZMQ!' ) # Late setup the of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. 
We do this for proxies in the # post_master_init - if not salt.utils.is_proxy(): - self.opts['grains'] = salt.loader.grains(opts) + if not salt.utils.platform.is_proxy(): + self.opts[u'grains'] = salt.loader.grains(opts) - log.info('Creating minion process manager') + log.info(u'Creating minion process manager') - if self.opts['random_startup_delay']: - sleep_time = random.randint(0, self.opts['random_startup_delay']) - log.info('Minion sleeping for {0} seconds due to configured ' - 'startup_delay between 0 and {1} seconds'.format(sleep_time, - self.opts['random_startup_delay'])) + if self.opts[u'random_startup_delay']: + sleep_time = random.randint(0, self.opts[u'random_startup_delay']) + log.info( + u'Minion sleeping for %s seconds due to configured ' + u'startup_delay between 0 and %s seconds', + sleep_time, self.opts[u'random_startup_delay'] + ) time.sleep(sleep_time) - self.process_manager = ProcessManager(name='MinionProcessManager') + self.process_manager = ProcessManager(name=u'MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, async=True) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ - if not salt.utils.is_proxy(): + if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) @@ -1006,7 +1027,7 @@ class Minion(MinionBase): Block until we are connected to a master ''' self._sync_connect_master_success = False - log.debug("sync_connect_master") + log.debug(u"sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True @@ -1029,7 +1050,7 @@ class Minion(MinionBase): # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: - raise SaltDaemonNotRunning('Failed to connect to the salt-master') + raise SaltDaemonNotRunning(u'Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): @@ -1056,15 +1077,15 @@ class Minion(MinionBase): functions. 
''' if self.connected: - self.opts['master'] = master + self.opts[u'master'] = master # Initialize pillar before loader to make pillar accessible in modules - self.opts['pillar'] = yield salt.pillar.get_async_pillar( + self.opts[u'pillar'] = yield salt.pillar.get_async_pillar( self.opts, - self.opts['grains'], - self.opts['id'], - self.opts['environment'], - pillarenv=self.opts.get('pillarenv') + self.opts[u'grains'], + self.opts[u'id'], + self.opts[u'environment'], + pillarenv=self.opts.get(u'pillarenv') ).compile_pillar() self.functions, self.returners, self.function_errors, self.executors = self._load_modules() @@ -1072,68 +1093,68 @@ class Minion(MinionBase): self.mod_opts = self._prep_mod_opts() self.matcher = Matcher(self.opts, self.functions) self.beacons = salt.beacons.Beacon(self.opts, self.functions) - uid = salt.utils.get_uid(user=self.opts.get('user', None)) - self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) + uid = salt.utils.get_uid(user=self.opts.get(u'user', None)) + self.proc_dir = get_proc_dir(self.opts[u'cachedir'], uid=uid) self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, - cleanup=[master_event(type='alive')]) + cleanup=[master_event(type=u'alive')]) # add default scheduling jobs to the minions scheduler - if self.opts['mine_enabled'] and 'mine.update' in self.functions: + if self.opts[u'mine_enabled'] and u'mine.update' in self.functions: self.schedule.add_job({ - '__mine_interval': + u'__mine_interval': { - 'function': 'mine.update', - 'minutes': self.opts['mine_interval'], - 'jid_include': True, - 'maxrunning': 2, - 'return_job': self.opts.get('mine_return_job', False) + u'function': u'mine.update', + u'minutes': self.opts[u'mine_interval'], + u'jid_include': True, + u'maxrunning': 2, + u'return_job': self.opts.get(u'mine_return_job', False) } }, persist=True) - log.info('Added mine.update to scheduler') + log.info(u'Added mine.update to scheduler') else: - self.schedule.delete_job('__mine_interval', persist=True) + self.schedule.delete_job(u'__mine_interval', persist=True) # add master_alive job if enabled - if (self.opts['transport'] != 'tcp' and - self.opts['master_alive_interval'] > 0 and + if (self.opts[u'transport'] != u'tcp' and + self.opts[u'master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ - master_event(type='alive', master=self.opts['master']): + master_event(type=u'alive', master=self.opts[u'master']): { - 'function': 'status.master', - 'seconds': self.opts['master_alive_interval'], - 'jid_include': True, - 'maxrunning': 1, - 'return_job': False, - 'kwargs': {'master': self.opts['master'], - 'connected': True} + u'function': u'status.master', + u'seconds': self.opts[u'master_alive_interval'], + u'jid_include': True, + u'maxrunning': 1, + u'return_job': False, + u'kwargs': {u'master': self.opts[u'master'], + u'connected': True} } }, persist=True) - if self.opts['master_failback'] and \ - 'master_list' in self.opts and \ - self.opts['master'] != self.opts['master_list'][0]: + if self.opts[u'master_failback'] and \ + u'master_list' in self.opts and \ + self.opts[u'master'] != self.opts[u'master_list'][0]: self.schedule.add_job({ - master_event(type='failback'): + master_event(type=u'failback'): { - 'function': 'status.ping_master', - 'seconds': self.opts['master_failback_interval'], - 'jid_include': True, - 'maxrunning': 1, - 'return_job': False, - 'kwargs': {'master': self.opts['master_list'][0]} + u'function': u'status.ping_master', + u'seconds': 
self.opts[u'master_failback_interval'], + u'jid_include': True, + u'maxrunning': 1, + u'return_job': False, + u'kwargs': {u'master': self.opts[u'master_list'][0]} } }, persist=True) else: - self.schedule.delete_job(master_event(type='failback'), persist=True) + self.schedule.delete_job(master_event(type=u'failback'), persist=True) else: - self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) - self.schedule.delete_job(master_event(type='failback'), persist=True) + self.schedule.delete_job(master_event(type=u'alive', master=self.opts[u'master']), persist=True) + self.schedule.delete_job(master_event(type=u'failback'), persist=True) - self.grains_cache = self.opts['grains'] + self.grains_cache = self.opts[u'grains'] self.ready = True def _return_retry_timer(self): @@ -1141,26 +1162,28 @@ class Minion(MinionBase): Based on the minion configuration, either return a randomized timer or just return the value of the return_retry_timer. ''' - msg = 'Minion return retry timer set to {0} seconds' - if self.opts.get('return_retry_timer_max'): + msg = u'Minion return retry timer set to {0} seconds' + # future lint: disable=str-format-in-logging + if self.opts.get(u'return_retry_timer_max'): try: - random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max']) - log.debug(msg.format(random_retry) + ' (randomized)') + random_retry = randint(self.opts[u'return_retry_timer'], self.opts[u'return_retry_timer_max']) + log.debug(msg.format(random_retry) + u' (randomized)') return random_retry except ValueError: # Catch wiseguys using negative integers here log.error( - 'Invalid value (return_retry_timer: {0} or return_retry_timer_max: {1})' - 'both must be a positive integers'.format( - self.opts['return_retry_timer'], - self.opts['return_retry_timer_max'], - ) + u'Invalid value (return_retry_timer: %s or ' + u'return_retry_timer_max: %s). 
Both must be positive ' + u'integers.', + self.opts[u'return_retry_timer'], + self.opts[u'return_retry_timer_max'], ) - log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer'])) - return DEFAULT_MINION_OPTS['return_retry_timer'] + log.debug(msg.format(DEFAULT_MINION_OPTS[u'return_retry_timer'])) + return DEFAULT_MINION_OPTS[u'return_retry_timer'] else: - log.debug(msg.format(self.opts.get('return_retry_timer'))) - return self.opts.get('return_retry_timer') + log.debug(msg.format(self.opts.get(u'return_retry_timer'))) + return self.opts.get(u'return_retry_timer') + # future lint: enable=str-format-in-logging def _prep_mod_opts(self): ''' @@ -1168,7 +1191,7 @@ class Minion(MinionBase): ''' mod_opts = {} for key, val in six.iteritems(self.opts): - if key == 'logger': + if key == u'logger': continue mod_opts[key] = val return mod_opts @@ -1182,30 +1205,33 @@ class Minion(MinionBase): # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False - if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: - log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory'])) + if self.opts.get(u'modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: + log.debug( + u'modules_max_memory set, enforcing a maximum of %s', + self.opts[u'modules_max_memory'] + ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info() - mem_limit = rss + vms + self.opts['modules_max_memory'] + mem_limit = rss + vms + self.opts[u'modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) - elif self.opts.get('modules_max_memory', -1) > 0: + elif self.opts.get(u'modules_max_memory', -1) > 0: if not HAS_PSUTIL: - log.error('Unable to enforce modules_max_memory because psutil is missing') + log.error(u'Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: - log.error('Unable to enforce modules_max_memory because resource is missing') + log.error(u'Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion - if hasattr(self, 'proxy'): + if hasattr(self, u'proxy'): proxy = self.proxy else: proxy = None if grains is None: - self.opts['grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy) + self.opts[u'grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(self.opts, proxy=proxy) - if self.opts.get('multimaster', False): + if self.opts.get(u'multimaster', False): s_opts = copy.deepcopy(self.opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) @@ -1213,9 +1239,9 @@ class Minion(MinionBase): functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(self.opts, functions, proxy=proxy) errors = {} - if '_errors' in functions: - errors = functions['_errors'] - functions.pop('_errors') + if u'_errors' in functions: + errors = functions[u'_errors'] + functions.pop(u'_errors') # we're done, reset the limits! 
if modules_max_memory is True: @@ -1227,11 +1253,11 @@ class Minion(MinionBase): def _send_req_sync(self, load, timeout): - if self.opts['minion_sign_messages']: - log.trace('Signing event to be published onto the bus.') - minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') + if self.opts[u'minion_sign_messages']: + log.trace(u'Signing event to be published onto the bus.') + minion_privkey_path = os.path.join(self.opts[u'pki_dir'], u'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) - load['sig'] = sig + load[u'sig'] = sig channel = salt.transport.Channel.factory(self.opts) return channel.send(load, timeout=timeout) @@ -1239,49 +1265,51 @@ class Minion(MinionBase): @tornado.gen.coroutine def _send_req_async(self, load, timeout): - if self.opts['minion_sign_messages']: - log.trace('Signing event to be published onto the bus.') - minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') + if self.opts[u'minion_sign_messages']: + log.trace(u'Signing event to be published onto the bus.') + minion_privkey_path = os.path.join(self.opts[u'pki_dir'], u'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) - load['sig'] = sig + load[u'sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) - def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True): + def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' - load = {'id': self.opts['id'], - 'cmd': '_minion_event', - 'pretag': pretag, - 'tok': self.tok} + load = {u'id': self.opts[u'id'], + u'cmd': u'_minion_event', + u'pretag': pretag, + u'tok': self.tok} if events: - load['events'] = events + load[u'events'] = events elif data and tag: - load['data'] = data - load['tag'] = tag + load[u'data'] = data + load[u'tag'] = tag elif not data and tag: - load['data'] = {} - load['tag'] = tag + load[u'data'] = {} + load[u'tag'] = tag else: return - def timeout_handler(*_): - log.info('fire_master failed: master could not be contacted. Request timed out.') - return True - if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: - log.info('fire_master failed: master could not be contacted. Request timed out.') + log.info(u'fire_master failed: master could not be contacted. Request timed out.') return False except Exception: - log.info('fire_master failed: {0}'.format(traceback.format_exc())) + log.info(u'fire_master failed: %s', traceback.format_exc()) return False else: + if timeout_handler is None: + def handle_timeout(*_): + log.info(u'fire_master failed: master could not be contacted. Request timed out.') + return True + timeout_handler = handle_timeout + with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @@ -1291,29 +1319,30 @@ class Minion(MinionBase): Override this method if you wish to handle the decoded data differently. 
''' - if 'user' in data: + if u'user' in data: log.info( - 'User {0[user]} Executing command {0[fun]} with jid ' - '{0[jid]}'.format(data) + u'User %s Executing command %s with jid %s', + data[u'user'], data[u'fun'], data[u'jid'] ) else: log.info( - 'Executing command {0[fun]} with jid {0[jid]}'.format(data) + u'Executing command %s with jid %s', + data[u'fun'], data[u'jid'] ) - log.debug('Command details {0}'.format(data)) + log.debug(u'Command details %s', data) # Don't duplicate jobs - log.trace('Started JIDs: {0}'.format(self.jid_queue)) + log.trace(u'Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: - if data['jid'] in self.jid_queue: + if data[u'jid'] in self.jid_queue: return else: - self.jid_queue.append(data['jid']) - if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: + self.jid_queue.append(data[u'jid']) + if len(self.jid_queue) > self.opts[u'minion_jid_queue_hwm']: self.jid_queue.pop(0) - if isinstance(data['fun'], six.string_types): - if data['fun'] == 'sys.reload_modules': + if isinstance(data[u'fun'], six.string_types): + if data[u'fun'] == u'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners @@ -1322,9 +1351,9 @@ class Minion(MinionBase): # python needs to be able to reconstruct the reference on the other # side. instance = self - multiprocessing_enabled = self.opts.get('multiprocessing', True) + multiprocessing_enabled = self.opts.get(u'multiprocessing', True) if multiprocessing_enabled: - if sys.platform.startswith('win'): + if sys.platform.startswith(u'win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None @@ -1336,7 +1365,7 @@ class Minion(MinionBase): process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), - name=data['jid'] + name=data[u'jid'] ) if multiprocessing_enabled: @@ -1348,14 +1377,15 @@ class Minion(MinionBase): process.start() # TODO: remove the windows specific check? 
- if multiprocessing_enabled and not salt.utils.is_windows(): + if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() else: self.win_proc.append(process) def ctx(self): - '''Return a single context manager for the minion's data + ''' + Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( @@ -1375,24 +1405,24 @@ class Minion(MinionBase): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected - if not hasattr(minion_instance, 'functions'): + if not hasattr(minion_instance, u'functions'): functions, returners, function_errors, executors = ( - minion_instance._load_modules(grains=opts['grains']) + minion_instance._load_modules(grains=opts[u'grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors - if not hasattr(minion_instance, 'serial'): + if not hasattr(minion_instance, u'serial'): minion_instance.serial = salt.payload.Serial(opts) - if not hasattr(minion_instance, 'proc_dir'): - uid = salt.utils.get_uid(user=opts.get('user', None)) + if not hasattr(minion_instance, u'proc_dir'): + uid = salt.utils.get_uid(user=opts.get(u'user', None)) minion_instance.proc_dir = ( - get_proc_dir(opts['cachedir'], uid=uid) + get_proc_dir(opts[u'cachedir'], uid=uid) ) with tornado.stack_context.StackContext(minion_instance.ctx): - if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): + if isinstance(data[u'fun'], tuple) or isinstance(data[u'fun'], list): Minion._thread_multi_return(minion_instance, opts, data) else: Minion._thread_return(minion_instance, opts, data) @@ -1403,9 +1433,9 @@ class Minion(MinionBase): This method should be used as a threading target, start the actual minion side execution. ''' - fn_ = os.path.join(minion_instance.proc_dir, data['jid']) + fn_ = os.path.join(minion_instance.proc_dir, data[u'jid']) - if opts['multiprocessing'] and not salt.utils.is_windows(): + if opts[u'multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() @@ -1414,45 +1444,53 @@ class Minion(MinionBase): # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() - salt.utils.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) + salt.utils.appendproctitle(u'{0}._thread_return {1}'.format(cls.__name__, data[u'jid'])) - sdata = {'pid': os.getpid()} + sdata = {u'pid': os.getpid()} sdata.update(data) - log.info('Starting a new job with PID {0}'.format(sdata['pid'])) - with salt.utils.files.fopen(fn_, 'w+b') as fp_: + log.info(u'Starting a new job with PID %s', sdata[u'pid']) + with salt.utils.files.fopen(fn_, u'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) - ret = {'success': False} - function_name = data['fun'] + ret = {u'success': False} + function_name = data[u'fun'] if function_name in minion_instance.functions: try: - if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): - # this minion is blacked out. Only allow saltutil.refresh_pillar - if function_name != 'saltutil.refresh_pillar' and \ - function_name not in minion_instance.opts['pillar'].get('minion_blackout_whitelist', []): - raise SaltInvocationError('Minion in blackout mode. 
Set \'minion_blackout\' ' - 'to False in pillar to resume operations. Only ' - 'saltutil.refresh_pillar allowed in blackout mode.') + minion_blackout_violation = False + if minion_instance.connected and minion_instance.opts[u'pillar'].get(u'minion_blackout', False): + whitelist = minion_instance.opts[u'pillar'].get(u'minion_blackout_whitelist', []) + # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist + if function_name != u'saltutil.refresh_pillar' and function_name not in whitelist: + minion_blackout_violation = True + elif minion_instance.opts[u'grains'].get(u'minion_blackout', False): + whitelist = minion_instance.opts[u'grains'].get(u'minion_blackout_whitelist', []) + if function_name != u'saltutil.refresh_pillar' and function_name not in whitelist: + minion_blackout_violation = True + if minion_blackout_violation: + raise SaltInvocationError(u'Minion in blackout mode. Set \'minion_blackout\' ' + u'to False in pillar or grains to resume operations. Only ' + u'saltutil.refresh_pillar allowed in blackout mode.') + func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, - data['arg'], + data[u'arg'], data) - minion_instance.functions.pack['__context__']['retcode'] = 0 + minion_instance.functions.pack[u'__context__'][u'retcode'] = 0 - executors = data.get('module_executors') or opts.get('module_executors', ['direct_call']) + executors = data.get(u'module_executors') or opts.get(u'module_executors', [u'direct_call']) if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: - raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". + raise SaltInvocationError(u"Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) - if opts.get('sudo_user', '') and executors[-1] != 'sudo': - executors[-1] = 'sudo' # replace the last one with sudo - log.trace('Executors list {0}'.format(executors)) # pylint: disable=no-member + if opts.get(u'sudo_user', u'') and executors[-1] != u'sudo': + executors[-1] = u'sudo' # replace the last one with sudo + log.trace(u'Executors list %s', executors) # pylint: disable=no-member for name in executors: - fname = '{0}.execute'.format(name) + fname = u'{0}.execute'.format(name) if fname not in minion_instance.executors: - raise SaltInvocationError("Executor '{0}' is not available".format(name)) + raise SaltInvocationError(u"Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break @@ -1467,79 +1505,80 @@ class Minion(MinionBase): if not iret: iret = [] iret.append(single) - tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job') - event_data = {'return': single} + tag = tagify([data[u'jid'], u'prog', opts[u'id'], str(ind)], u'job') + event_data = {u'return': single} minion_instance._fire_master(event_data, tag) ind += 1 - ret['return'] = iret + ret[u'return'] = iret else: - ret['return'] = return_data - ret['retcode'] = minion_instance.functions.pack['__context__'].get( - 'retcode', + ret[u'return'] = return_data + ret[u'retcode'] = minion_instance.functions.pack[u'__context__'].get( + u'retcode', 0 ) - ret['success'] = True + ret[u'success'] = True except CommandNotFoundError as exc: - msg = 'Command required for \'{0}\' not found'.format( + msg = u'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) - ret['return'] = '{0}: {1}'.format(msg, exc) - ret['out'] = 'nested' + ret[u'return'] = u'{0}: {1}'.format(msg, exc) + ret[u'out'] = u'nested' except CommandExecutionError as exc: log.error( - 'A command in \'{0}\' had a problem: {1}'.format( - function_name, - exc - ), + u'A command in \'%s\' had a problem: %s', + function_name, exc, exc_info_on_loglevel=logging.DEBUG ) - ret['return'] = 'ERROR: {0}'.format(exc) - ret['out'] = 'nested' + ret[u'return'] = u'ERROR: {0}'.format(exc) + ret[u'out'] = u'nested' except SaltInvocationError as exc: log.error( - 'Problem executing \'{0}\': {1}'.format( - function_name, - exc - ), + u'Problem executing \'%s\': %s', + function_name, exc, exc_info_on_loglevel=logging.DEBUG ) - ret['return'] = 'ERROR executing \'{0}\': {1}'.format( + ret[u'return'] = u'ERROR executing \'{0}\': {1}'.format( function_name, exc ) - ret['out'] = 'nested' + ret[u'out'] = u'nested' except TypeError as exc: - msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__, ) + msg = u'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) - ret['return'] = msg - ret['out'] = 'nested' + ret[u'return'] = msg + ret[u'out'] = u'nested' except Exception: - msg = 'The minion function caused an exception' + msg = u'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) - ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) - ret['out'] = 'nested' + ret[u'return'] = u'{0}: {1}'.format(msg, traceback.format_exc()) + ret[u'out'] = u'nested' else: - ret['return'] = minion_instance.functions.missing_fun_string(function_name) - mod_name = function_name.split('.')[0] - if mod_name in 
minion_instance.function_errors: - ret['return'] += ' Possible reasons: \'{0}\''.format( - minion_instance.function_errors[mod_name] - ) - ret['success'] = False - ret['retcode'] = 254 - ret['out'] = 'nested' - - ret['jid'] = data['jid'] - ret['fun'] = data['fun'] - ret['fun_args'] = data['arg'] - if 'master_id' in data: - ret['master_id'] = data['master_id'] - if 'metadata' in data: - if isinstance(data['metadata'], dict): - ret['metadata'] = data['metadata'] + docs = minion_instance.functions[u'sys.doc'](u'{0}*'.format(function_name)) + if docs: + docs[function_name] = minion_instance.functions.missing_fun_string(function_name) + ret[u'return'] = docs else: - log.warning('The metadata parameter must be a dictionary. Ignoring.') + ret[u'return'] = minion_instance.functions.missing_fun_string(function_name) + mod_name = function_name.split('.')[0] + if mod_name in minion_instance.function_errors: + ret[u'return'] += u' Possible reasons: \'{0}\''.format( + minion_instance.function_errors[mod_name] + ) + ret[u'success'] = False + ret[u'retcode'] = 254 + ret[u'out'] = u'nested' + + ret[u'jid'] = data[u'jid'] + ret[u'fun'] = data[u'fun'] + ret[u'fun_args'] = data[u'arg'] + if u'master_id' in data: + ret[u'master_id'] = data[u'master_id'] + if u'metadata' in data: + if isinstance(data[u'metadata'], dict): + ret[u'metadata'] = data[u'metadata'] + else: + log.warning(u'The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, @@ -1548,37 +1587,35 @@ class Minion(MinionBase): # Add default returners from minion config # Should have been coverted to comma-delimited string already - if isinstance(opts.get('return'), six.string_types): - if data['ret']: - data['ret'] = ','.join((data['ret'], opts['return'])) + if isinstance(opts.get(u'return'), six.string_types): + if data[u'ret']: + data[u'ret'] = u','.join((data[u'ret'], opts[u'return'])) else: - data['ret'] = opts['return'] + data[u'ret'] = opts[u'return'] - log.debug('minion return: %s', ret) + log.debug(u'minion return: %s', ret) # TODO: make a list? 
Seems odd to split it this late :/ - if data['ret'] and isinstance(data['ret'], six.string_types): - if 'ret_config' in data: - ret['ret_config'] = data['ret_config'] - if 'ret_kwargs' in data: - ret['ret_kwargs'] = data['ret_kwargs'] - ret['id'] = opts['id'] - for returner in set(data['ret'].split(',')): + if data[u'ret'] and isinstance(data[u'ret'], six.string_types): + if u'ret_config' in data: + ret[u'ret_config'] = data[u'ret_config'] + if u'ret_kwargs' in data: + ret[u'ret_kwargs'] = data[u'ret_kwargs'] + ret[u'id'] = opts[u'id'] + for returner in set(data[u'ret'].split(u',')): try: - returner_str = '{0}.returner'.format(returner) + returner_str = u'{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) - log.error('Returner {0} could not be loaded: {1}'.format( - returner_str, returner_err)) - except Exception as exc: - log.error( - 'The return failed for job {0} {1}'.format( - data['jid'], - exc + log.error( + u'Returner %s could not be loaded: %s', + returner_str, returner_err ) + except Exception as exc: + log.exception( + u'The return failed for job %s: %s', data[u'jid'], exc ) - log.error(traceback.format_exc()) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): @@ -1586,78 +1623,81 @@ class Minion(MinionBase): This method should be used as a threading target, start the actual minion side execution. ''' - salt.utils.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) + salt.utils.appendproctitle(u'{0}._thread_multi_return {1}'.format(cls.__name__, data[u'jid'])) ret = { - 'return': {}, - 'retcode': {}, - 'success': {} + u'return': {}, + u'retcode': {}, + u'success': {} } - for ind in range(0, len(data['fun'])): - ret['success'][data['fun'][ind]] = False + for ind in range(0, len(data[u'fun'])): + ret[u'success'][data[u'fun'][ind]] = False try: - if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): - # this minion is blacked out. Only allow saltutil.refresh_pillar - if data['fun'][ind] != 'saltutil.refresh_pillar' and \ - data['fun'][ind] not in minion_instance.opts['pillar'].get('minion_blackout_whitelist', []): - raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' - 'to False in pillar to resume operations. Only ' - 'saltutil.refresh_pillar allowed in blackout mode.') - func = minion_instance.functions[data['fun'][ind]] + minion_blackout_violation = False + if minion_instance.connected and minion_instance.opts[u'pillar'].get(u'minion_blackout', False): + whitelist = minion_instance.opts[u'pillar'].get(u'minion_blackout_whitelist', []) + # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist + if data[u'fun'][ind] != u'saltutil.refresh_pillar' and data[u'fun'][ind] not in whitelist: + minion_blackout_violation = True + elif minion_instance.opts[u'grains'].get(u'minion_blackout', False): + whitelist = minion_instance.opts[u'grains'].get(u'minion_blackout_whitelist', []) + if data[u'fun'][ind] != u'saltutil.refresh_pillar' and data[u'fun'][ind] not in whitelist: + minion_blackout_violation = True + if minion_blackout_violation: + raise SaltInvocationError(u'Minion in blackout mode. Set \'minion_blackout\' ' + u'to False in pillar or grains to resume operations. 
Only '
+                                              u'saltutil.refresh_pillar allowed in blackout mode.')
+
+                func = minion_instance.functions[data[u'fun'][ind]]
+
                 args, kwargs = load_args_and_kwargs(
                     func,
-                    data['arg'][ind],
+                    data[u'arg'][ind],
                     data)
-                minion_instance.functions.pack['__context__']['retcode'] = 0
-                ret['return'][data['fun'][ind]] = func(*args, **kwargs)
-                ret['retcode'][data['fun'][ind]] = minion_instance.functions.pack['__context__'].get(
-                    'retcode',
+                minion_instance.functions.pack[u'__context__'][u'retcode'] = 0
+                ret[u'return'][data[u'fun'][ind]] = func(*args, **kwargs)
+                ret[u'retcode'][data[u'fun'][ind]] = minion_instance.functions.pack[u'__context__'].get(
+                    u'retcode',
                     0
                 )
-                ret['success'][data['fun'][ind]] = True
+                ret[u'success'][data[u'fun'][ind]] = True
             except Exception as exc:
                 trb = traceback.format_exc()
-                log.warning(
-                    'The minion function caused an exception: {0}'.format(
-                        exc
-                    )
-                )
-                ret['return'][data['fun'][ind]] = trb
-        ret['jid'] = data['jid']
-        ret['fun'] = data['fun']
-        ret['fun_args'] = data['arg']
-        if 'metadata' in data:
-            ret['metadata'] = data['metadata']
+                log.warning(u'The minion function caused an exception: %s', exc)
+                ret[u'return'][data[u'fun'][ind]] = trb
+        ret[u'jid'] = data[u'jid']
+        ret[u'fun'] = data[u'fun']
+        ret[u'fun_args'] = data[u'arg']
+        if u'metadata' in data:
+            ret[u'metadata'] = data[u'metadata']
         if minion_instance.connected:
             minion_instance._return_pub(
                 ret,
                 timeout=minion_instance._return_retry_timer()
             )
-        if data['ret']:
-            if 'ret_config' in data:
-                ret['ret_config'] = data['ret_config']
-            if 'ret_kwargs' in data:
-                ret['ret_kwargs'] = data['ret_kwargs']
-            for returner in set(data['ret'].split(',')):
-                ret['id'] = opts['id']
+        if data[u'ret']:
+            if u'ret_config' in data:
+                ret[u'ret_config'] = data[u'ret_config']
+            if u'ret_kwargs' in data:
+                ret[u'ret_kwargs'] = data[u'ret_kwargs']
+            for returner in set(data[u'ret'].split(u',')):
+                ret[u'id'] = opts[u'id']
                 try:
-                    minion_instance.returners['{0}.returner'.format(
+                    minion_instance.returners[u'{0}.returner'.format(
                         returner
                     )](ret)
                 except Exception as exc:
                     log.error(
-                        'The return failed for job {0} {1}'.format(
-                            data['jid'],
-                            exc
-                        )
+                        u'The return failed for job %s: %s',
+                        data[u'jid'], exc
                     )
 
-    def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
+    def _return_pub(self, ret, ret_cmd=u'_return', timeout=60, sync=True):
         '''
         Return the data from the executed command to the master server
         '''
-        jid = ret.get('jid', ret.get('__jid__'))
-        fun = ret.get('fun', ret.get('__fun__'))
-        if self.opts['multiprocessing']:
+        jid = ret.get(u'jid', ret.get(u'__jid__'))
+        fun = ret.get(u'fun', ret.get(u'__fun__'))
+        if self.opts[u'multiprocessing']:
             fn_ = os.path.join(self.proc_dir, jid)
             if os.path.isfile(fn_):
                 try:
@@ -1665,35 +1705,37 @@ class Minion(MinionBase):
                 except (OSError, IOError):
                     # The file is gone already
                     pass
-        log.info('Returning information for job: {0}'.format(jid))
-        if ret_cmd == '_syndic_return':
-            load = {'cmd': ret_cmd,
-                    'id': self.opts['id'],
-                    'jid': jid,
-                    'fun': fun,
-                    'arg': ret.get('arg'),
-                    'tgt': ret.get('tgt'),
-                    'tgt_type': ret.get('tgt_type'),
-                    'load': ret.get('__load__')}
-            if '__master_id__' in ret:
-                load['master_id'] = ret['__master_id__']
-            load['return'] = {}
+        log.info(u'Returning information for job: %s', jid)
+        if ret_cmd == u'_syndic_return':
+            load = {u'cmd': ret_cmd,
+                    u'id': self.opts[u'id'],
+                    u'jid': jid,
+                    u'fun': fun,
+                    u'arg': ret.get(u'arg'),
+                    u'tgt': ret.get(u'tgt'),
+                    u'tgt_type': ret.get(u'tgt_type'),
+                    u'load': ret.get(u'__load__')}
+            if 
u'__master_id__' in ret: + load[u'master_id'] = ret[u'__master_id__'] + load[u'return'] = {} for key, value in six.iteritems(ret): - if key.startswith('__'): + if key.startswith(u'__'): continue - load['return'][key] = value + load[u'return'][key] = value else: - load = {'cmd': ret_cmd, - 'id': self.opts['id']} + load = {u'cmd': ret_cmd, + u'id': self.opts[u'id']} for key, value in six.iteritems(ret): load[key] = value - if 'out' in ret: - if isinstance(ret['out'], six.string_types): - load['out'] = ret['out'] + if u'out' in ret: + if isinstance(ret[u'out'], six.string_types): + load[u'out'] = ret[u'out'] else: - log.error('Invalid outputter {0}. This is likely a bug.' - .format(ret['out'])) + log.error( + u'Invalid outputter %s. This is likely a bug.', + ret[u'out'] + ) else: try: oput = self.functions[fun].__outputter__ @@ -1701,20 +1743,21 @@ class Minion(MinionBase): pass else: if isinstance(oput, six.string_types): - load['out'] = oput - if self.opts['cache_jobs']: + load[u'out'] = oput + if self.opts[u'cache_jobs']: # Local job cache has been enabled - salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) + salt.utils.minion.cache_jobs(self.opts, load[u'jid'], ret) - if not self.opts['pub_ret']: - return '' + if not self.opts[u'pub_ret']: + return u'' def timeout_handler(*_): - msg = ('The minion failed to return the job information for job ' - '{0}. This is often due to the master being shut down or ' - 'overloaded. If the master is running consider increasing ' - 'the worker_threads value.').format(jid) - log.warning(msg) + log.warning( + u'The minion failed to return the job information for job %s. ' + u'This is often due to the master being shut down or ' + u'overloaded. If the master is running, consider increasing ' + u'the worker_threads value.', jid + ) return True if sync: @@ -1722,35 +1765,37 @@ class Minion(MinionBase): ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() - return '' + return u'' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg - log.trace('ret_val = {0}'.format(ret_val)) # pylint: disable=no-member + log.trace(u'ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' - if self.opts['startup_states']: - if self.opts.get('master_type', 'str') == 'disable' and \ - self.opts.get('file_client', 'remote') == 'remote': - log.warning('Cannot run startup_states when \'master_type\' is ' - 'set to \'disable\' and \'file_client\' is set to ' - '\'remote\'. Skipping.') + if self.opts[u'startup_states']: + if self.opts.get(u'master_type', u'str') == u'disable' and \ + self.opts.get(u'file_client', u'remote') == u'remote': + log.warning( + u'Cannot run startup_states when \'master_type\' is set ' + u'to \'disable\' and \'file_client\' is set to ' + u'\'remote\'. Skipping.' 
+ ) else: - data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} - if self.opts['startup_states'] == 'sls': - data['fun'] = 'state.sls' - data['arg'] = [self.opts['sls_list']] - elif self.opts['startup_states'] == 'top': - data['fun'] = 'state.top' - data['arg'] = [self.opts['top_file']] + data = {u'jid': u'req', u'ret': self.opts.get(u'ext_job_cache', u'')} + if self.opts[u'startup_states'] == u'sls': + data[u'fun'] = u'state.sls' + data[u'arg'] = [self.opts[u'sls_list']] + elif self.opts[u'startup_states'] == u'top': + data[u'fun'] = u'state.top' + data[u'arg'] = [self.opts[u'top_file']] else: - data['fun'] = 'state.highstate' - data['arg'] = [] + data[u'fun'] = u'state.highstate' + data[u'arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): @@ -1759,41 +1804,41 @@ class Minion(MinionBase): :param refresh_interval_in_minutes: :return: None ''' - if '__update_grains' not in self.opts.get('schedule', {}): - if 'schedule' not in self.opts: - self.opts['schedule'] = {} - self.opts['schedule'].update({ - '__update_grains': + if u'__update_grains' not in self.opts.get(u'schedule', {}): + if u'schedule' not in self.opts: + self.opts[u'schedule'] = {} + self.opts[u'schedule'].update({ + u'__update_grains': { - 'function': 'event.fire', - 'args': [{}, 'grains_refresh'], - 'minutes': refresh_interval_in_minutes + u'function': u'event.fire', + u'args': [{}, u'grains_refresh'], + u'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live self._fire_master( - 'Minion {0} started at {1}'.format( - self.opts['id'], + u'Minion {0} started at {1}'.format( + self.opts[u'id'], time.asctime() ), - 'minion_start' + u'minion_start' ) # dup name spaced event self._fire_master( - 'Minion {0} started at {1}'.format( - self.opts['id'], + u'Minion {0} started at {1}'.format( + self.opts[u'id'], time.asctime() ), - tagify([self.opts['id'], 'start'], 'minion'), + tagify([self.opts[u'id'], u'start'], u'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' - log.debug('Refreshing modules. Notify={0}'.format(notify)) + log.debug(u'Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions @@ -1803,7 +1848,7 @@ class Minion(MinionBase): ''' Refresh the functions and returners. ''' - log.debug('Refreshing beacons.') + log.debug(u'Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) # TODO: only allow one future in flight at a time? @@ -1813,77 +1858,77 @@ class Minion(MinionBase): Refresh the pillar ''' if self.connected: - log.debug('Refreshing pillar') + log.debug(u'Refreshing pillar') try: - self.opts['pillar'] = yield salt.pillar.get_async_pillar( + self.opts[u'pillar'] = yield salt.pillar.get_async_pillar( self.opts, - self.opts['grains'], - self.opts['id'], - self.opts['environment'], - pillarenv=self.opts.get('pillarenv'), + self.opts[u'grains'], + self.opts[u'id'], + self.opts[u'environment'], + pillarenv=self.opts.get(u'pillarenv'), ).compile_pillar() except SaltClientError: # Do not exit if a pillar refresh fails. - log.error('Pillar data could not be refreshed. ' - 'One or more masters may be down!') + log.error(u'Pillar data could not be refreshed. 
' + u'One or more masters may be down!') self.module_refresh(force_refresh) def manage_schedule(self, tag, data): ''' Refresh the functions and returners. ''' - func = data.get('func', None) - name = data.get('name', None) - schedule = data.get('schedule', None) - where = data.get('where', None) - persist = data.get('persist', None) + func = data.get(u'func', None) + name = data.get(u'name', None) + schedule = data.get(u'schedule', None) + where = data.get(u'where', None) + persist = data.get(u'persist', None) - if func == 'delete': + if func == u'delete': self.schedule.delete_job(name, persist) - elif func == 'add': + elif func == u'add': self.schedule.add_job(schedule, persist) - elif func == 'modify': + elif func == u'modify': self.schedule.modify_job(name, schedule, persist) - elif func == 'enable': + elif func == u'enable': self.schedule.enable_schedule() - elif func == 'disable': + elif func == u'disable': self.schedule.disable_schedule() - elif func == 'enable_job': + elif func == u'enable_job': self.schedule.enable_job(name, persist) - elif func == 'run_job': + elif func == u'run_job': self.schedule.run_job(name) - elif func == 'disable_job': + elif func == u'disable_job': self.schedule.disable_job(name, persist) - elif func == 'reload': + elif func == u'reload': self.schedule.reload(schedule) - elif func == 'list': + elif func == u'list': self.schedule.list(where) - elif func == 'save_schedule': + elif func == u'save_schedule': self.schedule.save_schedule() def manage_beacons(self, tag, data): ''' Manage Beacons ''' - func = data.get('func', None) - name = data.get('name', None) - beacon_data = data.get('beacon_data', None) + func = data.get(u'func', None) + name = data.get(u'name', None) + beacon_data = data.get(u'beacon_data', None) - if func == 'add': + if func == u'add': self.beacons.add_beacon(name, beacon_data) - elif func == 'modify': + elif func == u'modify': self.beacons.modify_beacon(name, beacon_data) - elif func == 'delete': + elif func == u'delete': self.beacons.delete_beacon(name) - elif func == 'enable': + elif func == u'enable': self.beacons.enable_beacons() - elif func == 'disable': + elif func == u'disable': self.beacons.disable_beacons() - elif func == 'enable_beacon': + elif func == u'enable_beacon': self.beacons.enable_beacon(name) - elif func == 'disable_beacon': + elif func == u'disable_beacon': self.beacons.disable_beacon(name) - elif func == 'list': + elif func == u'list': self.beacons.list_beacons() def environ_setenv(self, tag, data): @@ -1891,11 +1936,11 @@ class Minion(MinionBase): Set the salt-minion main process environment according to the data contained in the minion event data ''' - environ = data.get('environ', None) + environ = data.get(u'environ', None) if environ is None: return False - false_unsets = data.get('false_unsets', False) - clear_all = data.get('clear_all', False) + false_unsets = data.get(u'false_unsets', False) + clear_all = data.get(u'clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) @@ -1908,32 +1953,29 @@ class Minion(MinionBase): self._running = True elif self._running is False: log.error( - 'This {0} was scheduled to stop. Not running ' - '{0}.tune_in()'.format(self.__class__.__name__) + u'This %s was scheduled to stop. Not running %s.tune_in()', + self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( - 'This {0} is already running. 
Not running ' - '{0}.tune_in()'.format(self.__class__.__name__) + u'This %s is already running. Not running %s.tune_in()', + self.__class__.__name__, self.__class__.__name__ ) return try: log.info( - '{0} is starting as user \'{1}\''.format( - self.__class__.__name__, - salt.utils.get_user() - ) + u'%s is starting as user \'%s\'', + self.__class__.__name__, salt.utils.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. log.log( - salt.utils.is_windows() and logging.DEBUG or logging.ERROR, - 'Failed to get the user who is starting {0}'.format( - self.__class__.__name__ - ), + salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, + u'Failed to get the user who is starting %s', + self.__class__.__name__, exc_info=err ) @@ -1942,12 +1984,12 @@ class Minion(MinionBase): Send mine data to the master ''' channel = salt.transport.Channel.factory(self.opts) - data['tok'] = self.tok + data[u'tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: - log.warning('Unable to send mine data to master.') + log.warning(u'Unable to send mine data to master.') return None @tornado.gen.coroutine @@ -1958,79 +2000,83 @@ class Minion(MinionBase): if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) - log.debug('Minion of "{0}" is handling event tag \'{1}\''.format(self.opts['master'], tag)) - if tag.startswith('module_refresh'): + log.debug( + u'Minion of \'%s\' is handling event tag \'%s\'', + self.opts[u'master'], tag + ) + if tag.startswith(u'module_refresh'): self.module_refresh( - force_refresh=data.get('force_refresh', False), - notify=data.get('notify', False) + force_refresh=data.get(u'force_refresh', False), + notify=data.get(u'notify', False) ) - elif tag.startswith('pillar_refresh'): + elif tag.startswith(u'pillar_refresh'): yield self.pillar_refresh( - force_refresh=data.get('force_refresh', False) + force_refresh=data.get(u'force_refresh', False) ) - elif tag.startswith('beacons_refresh'): + elif tag.startswith(u'beacons_refresh'): self.beacons_refresh() - elif tag.startswith('manage_schedule'): + elif tag.startswith(u'manage_schedule'): self.manage_schedule(tag, data) - elif tag.startswith('manage_beacons'): + elif tag.startswith(u'manage_beacons'): self.manage_beacons(tag, data) - elif tag.startswith('grains_refresh'): - if (data.get('force_refresh', False) or - self.grains_cache != self.opts['grains']): + elif tag.startswith(u'grains_refresh'): + if (data.get(u'force_refresh', False) or + self.grains_cache != self.opts[u'grains']): self.pillar_refresh(force_refresh=True) - self.grains_cache = self.opts['grains'] - elif tag.startswith('environ_setenv'): + self.grains_cache = self.opts[u'grains'] + elif tag.startswith(u'environ_setenv'): self.environ_setenv(tag, data) - elif tag.startswith('_minion_mine'): + elif tag.startswith(u'_minion_mine'): self._mine_send(tag, data) - elif tag.startswith('fire_master'): - log.debug('Forwarding master event tag={tag}'.format(tag=data['tag'])) - self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) - elif tag.startswith(master_event(type='disconnected')) or tag.startswith(master_event(type='failback')): + elif tag.startswith(u'fire_master'): + if self.connected: + log.debug(u'Forwarding master event tag=%s', data[u'tag']) + self._fire_master(data[u'data'], data[u'tag'], data[u'events'], data[u'pretag']) + elif tag.startswith(master_event(type=u'disconnected')) or 
tag.startswith(master_event(type=u'failback')): # if the master disconnect event is for a different master, raise an exception - if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: + if tag.startswith(master_event(type=u'disconnected')) and data[u'master'] != self.opts[u'master']: # not mine master, ignore return - if tag.startswith(master_event(type='failback')): + if tag.startswith(master_event(type=u'failback')): # if the master failback event is not for the top master, raise an exception - if data['master'] != self.opts['master_list'][0]: - raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( - data['master'], self.opts['master'])) + if data[u'master'] != self.opts[u'master_list'][0]: + raise SaltException(u'Bad master \'{0}\' when mine failback is \'{1}\''.format( + data[u'master'], self.opts[u'master'])) # if the master failback event is for the current master, raise an exception - elif data['master'] == self.opts['master'][0]: - raise SaltException('Already connected to \'{0}\''.format(data['master'])) + elif data[u'master'] == self.opts[u'master'][0]: + raise SaltException(u'Already connected to \'{0}\''.format(data[u'master'])) if self.connected: # we are not connected anymore self.connected = False - log.info('Connection to master {0} lost'.format(self.opts['master'])) + log.info(u'Connection to master %s lost', self.opts[u'master']) - if self.opts['master_type'] != 'failover': + if self.opts[u'master_type'] != u'failover': # modify the scheduled job to fire on reconnect - if self.opts['transport'] != 'tcp': + if self.opts[u'transport'] != u'tcp': schedule = { - 'function': 'status.master', - 'seconds': self.opts['master_alive_interval'], - 'jid_include': True, - 'maxrunning': 1, - 'return_job': False, - 'kwargs': {'master': self.opts['master'], - 'connected': False} + u'function': u'status.master', + u'seconds': self.opts[u'master_alive_interval'], + u'jid_include': True, + u'maxrunning': 1, + u'return_job': False, + u'kwargs': {u'master': self.opts[u'master'], + u'connected': False} } - self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), + self.schedule.modify_job(name=master_event(type=u'alive', master=self.opts[u'master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process - if self.opts['transport'] != 'tcp': - self.schedule.delete_job(name=master_event(type='alive')) + if self.opts[u'transport'] != u'tcp': + self.schedule.delete_job(name=master_event(type=u'alive')) - log.info('Trying to tune in to next master from master-list') + log.info(u'Trying to tune in to next master from master-list') - if hasattr(self, 'pub_channel'): + if hasattr(self, u'pub_channel'): self.pub_channel.on_recv(None) - if hasattr(self.pub_channel, 'auth'): + if hasattr(self.pub_channel, u'auth'): self.pub_channel.auth.invalidate() - if hasattr(self.pub_channel, 'close'): + if hasattr(self.pub_channel, u'close'): self.pub_channel.close() del self.pub_channel @@ -2040,95 +2086,102 @@ class Minion(MinionBase): master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, - failback=tag.startswith(master_event(type='failback'))) + failback=tag.startswith(master_event(type=u'failback'))) except SaltClientError: pass if self.connected: - self.opts['master'] = master + self.opts[u'master'] = master # re-init the subsystems to work with the new master - log.info('Re-initialising subsystems for new ' - 'master 
{0}'.format(self.opts['master'])) + log.info( + u'Re-initialising subsystems for new master %s', + self.opts[u'master'] + ) # put the current schedule into the new loaders - self.opts['schedule'] = self.schedule.option('schedule') + self.opts[u'schedule'] = self.schedule.option(u'schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() - log.info('Minion is ready to receive requests!') + log.info(u'Minion is ready to receive requests!') # update scheduled job to run with the new master addr - if self.opts['transport'] != 'tcp': + if self.opts[u'transport'] != u'tcp': schedule = { - 'function': 'status.master', - 'seconds': self.opts['master_alive_interval'], - 'jid_include': True, - 'maxrunning': 1, - 'return_job': False, - 'kwargs': {'master': self.opts['master'], - 'connected': True} + u'function': u'status.master', + u'seconds': self.opts[u'master_alive_interval'], + u'jid_include': True, + u'maxrunning': 1, + u'return_job': False, + u'kwargs': {u'master': self.opts[u'master'], + u'connected': True} } - self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), + self.schedule.modify_job(name=master_event(type=u'alive', master=self.opts[u'master']), schedule=schedule) - if self.opts['master_failback'] and 'master_list' in self.opts: - if self.opts['master'] != self.opts['master_list'][0]: + if self.opts[u'master_failback'] and u'master_list' in self.opts: + if self.opts[u'master'] != self.opts[u'master_list'][0]: schedule = { - 'function': 'status.ping_master', - 'seconds': self.opts['master_failback_interval'], - 'jid_include': True, - 'maxrunning': 1, - 'return_job': False, - 'kwargs': {'master': self.opts['master_list'][0]} + u'function': u'status.ping_master', + u'seconds': self.opts[u'master_failback_interval'], + u'jid_include': True, + u'maxrunning': 1, + u'return_job': False, + u'kwargs': {u'master': self.opts[u'master_list'][0]} } - self.schedule.modify_job(name=master_event(type='failback'), + self.schedule.modify_job(name=master_event(type=u'failback'), schedule=schedule) else: - self.schedule.delete_job(name=master_event(type='failback'), persist=True) + self.schedule.delete_job(name=master_event(type=u'failback'), persist=True) else: self.restart = True self.io_loop.stop() - elif tag.startswith(master_event(type='connected')): + elif tag.startswith(master_event(type=u'connected')): # handle this event only once. 
otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure - if not self.connected and self.opts['master_type'] != 'failover': - log.info('Connection to master {0} re-established'.format(self.opts['master'])) + if not self.connected and self.opts[u'master_type'] != u'failover': + log.info(u'Connection to master %s re-established', self.opts[u'master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again - if self.opts['transport'] != 'tcp': + if self.opts[u'transport'] != u'tcp': schedule = { - 'function': 'status.master', - 'seconds': self.opts['master_alive_interval'], - 'jid_include': True, - 'maxrunning': 1, - 'return_job': False, - 'kwargs': {'master': self.opts['master'], - 'connected': True} + u'function': u'status.master', + u'seconds': self.opts[u'master_alive_interval'], + u'jid_include': True, + u'maxrunning': 1, + u'return_job': False, + u'kwargs': {u'master': self.opts[u'master'], + u'connected': True} } - self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), + self.schedule.modify_job(name=master_event(type=u'alive', master=self.opts[u'master']), schedule=schedule) - elif tag.startswith('__schedule_return'): + elif tag.startswith(u'__schedule_return'): # reporting current connection with master - if data['schedule'].startswith(master_event(type='alive', master='')): - if data['return']: - log.debug('Connected to master {0}'.format(data['schedule'].split(master_event(type='alive', master=''))[1])) - self._return_pub(data, ret_cmd='_return', sync=False) - elif tag.startswith('_salt_error'): + if data[u'schedule'].startswith(master_event(type=u'alive', master=u'')): + if data[u'return']: + log.debug( + u'Connected to master %s', + data[u'schedule'].split(master_event(type=u'alive', master=u''))[1] + ) + self._return_pub(data, ret_cmd=u'_return', sync=False) + elif tag.startswith(u'_salt_error'): if self.connected: - log.debug('Forwarding salt error event tag={tag}'.format(tag=tag)) + log.debug(u'Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) - elif tag.startswith('salt/auth/creds'): - key = tuple(data['key']) - log.debug('Updating auth data for {0}: {1} -> {2}'.format( - key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'])) - salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] + elif tag.startswith(u'salt/auth/creds'): + key = tuple(data[u'key']) + log.debug( + u'Updating auth data for %s: %s -> %s', + key, salt.crypt.AsyncAuth.creds_map.get(key), data[u'creds'] + ) + salt.crypt.AsyncAuth.creds_map[tuple(data[u'key'])] = data[u'creds'] def _fallback_cleanups(self): ''' @@ -2138,7 +2191,7 @@ class Minion(MinionBase): multiprocessing.active_children() # Cleanup Windows threads - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): @@ -2157,13 +2210,13 @@ class Minion(MinionBase): ''' self._pre_tune() - log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id'])) + log.debug(u'Minion \'%s\' trying to tune in', self.opts[u'id']) if start: self.sync_connect_master() if self.connected: self._fire_master_minion_start() - log.info('Minion is ready to receive requests!') + log.info(u'Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() @@ -2174,48 +2227,45 @@ class 
Minion(MinionBase): # On first startup execute a state run if configured to do so self._state_run() - loop_interval = self.opts['loop_interval'] + loop_interval = self.opts[u'loop_interval'] try: - if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds! - if self.opts['grains_refresh_every'] > 1: - log.debug( - 'Enabling the grains refresher. Will run every {0} minutes.'.format( - self.opts['grains_refresh_every']) - ) - else: # Clean up minute vs. minutes in log message - log.debug( - 'Enabling the grains refresher. Will run every {0} minute.'.format( - self.opts['grains_refresh_every']) - - ) + if self.opts[u'grains_refresh_every']: # If exists and is not zero. In minutes, not seconds! + log.debug( + u'Enabling the grains refresher. Will run every %s ' + u'minute%s.', + self.opts[u'grains_refresh_every'], + u's' if self.opts[u'grains_refresh_every'] > 1 else u'' + ) self._refresh_grains_watcher( - abs(self.opts['grains_refresh_every']) + abs(self.opts[u'grains_refresh_every']) ) except Exception as exc: log.error( - 'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format( - exc) + u'Exception occurred in attempt to initialize grain refresh ' + u'routine during minion tune-in: %s', exc ) self.periodic_callbacks = {} # schedule the stuff that runs every interval - ping_interval = self.opts.get('ping_interval', 0) * 60 + ping_interval = self.opts.get(u'ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: - if not self._fire_master('ping', 'minion_ping'): - if not self.opts.get('auth_safemode', True): - log.error('** Master Ping failed. Attempting to restart minion**') - delay = self.opts.get('random_reauth_delay', 5) - log.info('delaying random_reauth_delay {0}s'.format(delay)) + def ping_timeout_handler(*_): + if not self.opts.get(u'auth_safemode', True): + log.error(u'** Master Ping failed. 
Attempting to restart minion**') + delay = self.opts.get(u'random_reauth_delay', 5) + log.info(u'delaying random_reauth_delay %ss', delay) # regular sys.exit raises an exception -- which isn't sufficient in a thread os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE) - except Exception: - log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) - self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop) - self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop) + self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) + except Exception: + log.warning(u'Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) + self.periodic_callbacks[u'ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop) + + self.periodic_callbacks[u'cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop) def handle_beacons(): # Process Beacons @@ -2223,27 +2273,27 @@ class Minion(MinionBase): try: beacons = self.process_beacons(self.functions) except Exception: - log.critical('The beacon errored: ', exc_info=True) + log.critical(u'The beacon errored: ', exc_info=True) if beacons and self.connected: - self._fire_master(events=beacons) + self._fire_master(events=beacons, sync=False) - self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop) + self.periodic_callbacks[u'beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) - if hasattr(self, 'schedule'): - self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop) + if hasattr(self, u'schedule'): + self.periodic_callbacks[u'schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop) # start all the other callbacks for periodic_cb in six.itervalues(self.periodic_callbacks): periodic_cb.start() # add handler to subscriber - if hasattr(self, 'pub_channel') and self.pub_channel is not None: + if hasattr(self, u'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) - elif self.opts.get('master_type') != 'disable': - log.error('No connection to master found. Scheduled jobs will not run.') + elif self.opts.get(u'master_type') != u'disable': + log.error(u'No connection to master found. 
Scheduled jobs will not run.') if start: try: @@ -2254,20 +2304,23 @@ class Minion(MinionBase): self.destroy() def _handle_payload(self, payload): - if payload is not None and payload['enc'] == 'aes': - if self._target_load(payload['load']): - self._handle_decoded_payload(payload['load']) - elif self.opts['zmq_filtering']: + if payload is not None and payload[u'enc'] == u'aes': + if self._target_load(payload[u'load']): + self._handle_decoded_payload(payload[u'load']) + elif self.opts[u'zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt - log.trace('Broadcast message received not for this minion, Load: {0}'.format(payload['load'])) + log.trace( + u'Broadcast message received not for this minion, Load: %s', + payload[u'load'] + ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid - if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ - or 'arg' not in load: + if u'tgt' not in load or u'jid' not in load or u'fun' not in load \ + or u'arg' not in load: return False # Verify that the publication applies to this minion @@ -2277,19 +2330,19 @@ class Minion(MinionBase): # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. - if 'tgt_type' in load: + if u'tgt_type' in load: match_func = getattr(self.matcher, - '{0}_match'.format(load['tgt_type']), None) + u'{0}_match'.format(load[u'tgt_type']), None) if match_func is None: return False - if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): - delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) - if not match_func(load['tgt'], delimiter=delimiter): + if load[u'tgt_type'] in (u'grain', u'grain_pcre', u'pillar'): + delimiter = load.get(u'delimiter', DEFAULT_TARGET_DELIM) + if not match_func(load[u'tgt'], delimiter=delimiter): return False - elif not match_func(load['tgt']): + elif not match_func(load[u'tgt']): return False else: - if not self.matcher.glob_match(load['tgt']): + if not self.matcher.glob_match(load[u'tgt']): return False return True @@ -2299,14 +2352,14 @@ class Minion(MinionBase): Tear down the minion ''' self._running = False - if hasattr(self, 'schedule'): + if hasattr(self, u'schedule'): del self.schedule - if hasattr(self, 'pub_channel') and self.pub_channel is not None: + if hasattr(self, u'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) - if hasattr(self.pub_channel, 'close'): + if hasattr(self.pub_channel, u'close'): self.pub_channel.close() del self.pub_channel - if hasattr(self, 'periodic_callbacks'): + if hasattr(self, u'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() @@ -2320,11 +2373,11 @@ class Syndic(Minion): master to authenticate with a higher level master. ''' def __init__(self, opts, **kwargs): - self._syndic_interface = opts.get('interface') + self._syndic_interface = opts.get(u'interface') self._syndic = True # force auth_safemode True because Syndic don't support autorestart - opts['auth_safemode'] = True - opts['loop_interval'] = 1 + opts[u'auth_safemode'] = True + opts[u'loop_interval'] = 1 super(Syndic, self).__init__(opts, **kwargs) self.mminion = salt.minion.MasterMinion(opts) self.jid_forward_cache = set() @@ -2338,9 +2391,9 @@ class Syndic(Minion): differently. ''' # TODO: even do this?? 
- data['to'] = int(data.get('to', self.opts['timeout'])) - 1 + data[u'to'] = int(data.get(u'to', self.opts[u'timeout'])) - 1 # Only forward the command if it didn't originate from ourselves - if data.get('master_id', 0) != self.opts.get('master_id', 1): + if data.get(u'master_id', 0) != self.opts.get(u'master_id', 1): self.syndic_cmd(data) def syndic_cmd(self, data): @@ -2348,29 +2401,29 @@ class Syndic(Minion): Take the now clear load and forward it on to the client cmd ''' # Set up default tgt_type - if 'tgt_type' not in data: - data['tgt_type'] = 'glob' + if u'tgt_type' not in data: + data[u'tgt_type'] = u'glob' kwargs = {} # optionally add a few fields to the publish data - for field in ('master_id', # which master the job came from - 'user', # which user ran the job + for field in (u'master_id', # which master the job came from + u'user', # which user ran the job ): if field in data: kwargs[field] = data[field] def timeout_handler(*args): - log.warning('Unable to forward pub data: {0}'.format(args[1])) + log.warning(u'Unable to forward pub data: %s', args[1]) return True with tornado.stack_context.ExceptionStackContext(timeout_handler): - self.local.pub_async(data['tgt'], - data['fun'], - data['arg'], - data['tgt_type'], - data['ret'], - data['jid'], - data['to'], + self.local.pub_async(data[u'tgt'], + data[u'fun'], + data[u'arg'], + data[u'tgt_type'], + data[u'ret'], + data[u'jid'], + data[u'to'], io_loop=self.io_loop, callback=lambda _: None, **kwargs) @@ -2378,19 +2431,19 @@ class Syndic(Minion): def fire_master_syndic_start(self): # Send an event to the master that the minion is live self._fire_master( - 'Syndic {0} started at {1}'.format( - self.opts['id'], + u'Syndic {0} started at {1}'.format( + self.opts[u'id'], time.asctime() ), - 'syndic_start', + u'syndic_start', sync=False, ) self._fire_master( - 'Syndic {0} started at {1}'.format( - self.opts['id'], + u'Syndic {0} started at {1}'.format( + self.opts[u'id'], time.asctime() ), - tagify([self.opts['id'], 'start'], 'syndic'), + tagify([self.opts[u'id'], u'start'], u'syndic'), sync=False, ) @@ -2403,15 +2456,15 @@ class Syndic(Minion): ''' # Instantiate the local client self.local = salt.client.get_local_client( - self.opts['_minion_conf_file'], io_loop=self.io_loop) + self.opts[u'_minion_conf_file'], io_loop=self.io_loop) # add handler to subscriber self.pub_channel.on_recv(self._process_cmd_socket) def _process_cmd_socket(self, payload): - if payload is not None and payload['enc'] == 'aes': - log.trace('Handling payload') - self._handle_decoded_payload(payload['load']) + if payload is not None and payload[u'enc'] == u'aes': + log.trace(u'Handling payload') + self._handle_decoded_payload(payload[u'load']) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the syndic currently has no need. 
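
Note on the logging convention applied throughout these minion.py hunks: messages are converted from eagerly formatted strings built with str.format() to lazy %-style arguments, so the interpolation only happens when a record is actually emitted. A minimal sketch of the difference follows; it is not part of the patch, and the master value is a placeholder chosen for illustration:

    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)

    master = 'salt-master.example.com'  # placeholder value, illustration only

    # Eager: the message string is built even though DEBUG is disabled
    # here and the record is simply discarded.
    log.debug('Connection to master {0} lost'.format(master))

    # Lazy (the style these hunks standardise on): the '%s' interpolation
    # is deferred until the record is actually emitted, so disabled levels
    # skip the string-building work.
    log.debug('Connection to master %s lost', master)
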
@@ -2420,15 +2473,15 @@ class Syndic(Minion): def _return_pub_multi(self, values): for value in values: yield self._return_pub(value, - '_syndic_return', + u'_syndic_return', timeout=self._return_retry_timer(), sync=False) @tornado.gen.coroutine def reconnect(self): - if hasattr(self, 'pub_channel'): + if hasattr(self, u'pub_channel'): self.pub_channel.on_recv(None) - if hasattr(self.pub_channel, 'close'): + if hasattr(self.pub_channel, u'close'): self.pub_channel.close() del self.pub_channel @@ -2437,9 +2490,9 @@ class Syndic(Minion): master, self.pub_channel = yield self.eval_master(opts=self.opts) if self.connected: - self.opts['master'] = master + self.opts[u'master'] = master self.pub_channel.on_recv(self._process_cmd_socket) - log.info('Minion is ready to receive requests!') + log.info(u'Minion is ready to receive requests!') raise tornado.gen.Return(self) @@ -2450,10 +2503,10 @@ class Syndic(Minion): # We borrowed the local clients poller so give it back before # it's destroyed. Reset the local poller reference. super(Syndic, self).destroy() - if hasattr(self, 'local'): + if hasattr(self, u'local'): del self.local - if hasattr(self, 'forward_events'): + if hasattr(self, u'forward_events'): self.forward_events.stop() @@ -2482,15 +2535,15 @@ class SyndicManager(MinionBase): SYNDIC_EVENT_TIMEOUT = 5 def __init__(self, opts, io_loop=None): - opts['loop_interval'] = 1 + opts[u'loop_interval'] = 1 super(SyndicManager, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) # sync (old behavior), cluster (only returns and publishes) - self.syndic_mode = self.opts.get('syndic_mode', 'sync') - self.syndic_failover = self.opts.get('syndic_failover', 'random') + self.syndic_mode = self.opts.get(u'syndic_mode', u'sync') + self.syndic_failover = self.opts.get(u'syndic_failover', u'random') - self.auth_wait = self.opts['acceptance_wait_time'] - self.max_auth_wait = self.opts['acceptance_wait_time_max'] + self.auth_wait = self.opts[u'acceptance_wait_time'] + self.max_auth_wait = self.opts[u'acceptance_wait_time_max'] self._has_master = threading.Event() self.jid_forward_cache = set() @@ -2517,12 +2570,12 @@ class SyndicManager(MinionBase): Spawn all the coroutines which will sign in the syndics ''' self._syndics = OrderedDict() # mapping of opts['master'] -> syndic - masters = self.opts['master'] + masters = self.opts[u'master'] if not isinstance(masters, list): masters = [masters] for master in masters: s_opts = copy.copy(self.opts) - s_opts['master'] = master + s_opts[u'master'] = master self._syndics[master] = self._connect_syndic(s_opts) @tornado.gen.coroutine @@ -2531,10 +2584,13 @@ class SyndicManager(MinionBase): Create a syndic, and asynchronously connect it to a master ''' last = 0 # never have we signed in - auth_wait = opts['acceptance_wait_time'] + auth_wait = opts[u'acceptance_wait_time'] failed = False while True: - log.debug('Syndic attempting to connect to {0}'.format(opts['master'])) + log.debug( + u'Syndic attempting to connect to %s', + opts[u'master'] + ) try: syndic = Syndic(opts, timeout=self.SYNDIC_CONNECT_TIMEOUT, @@ -2548,11 +2604,17 @@ class SyndicManager(MinionBase): # Send an event to the master that the minion is live syndic.fire_master_syndic_start() - log.info('Syndic successfully connected to {0}'.format(opts['master'])) + log.info( + u'Syndic successfully connected to %s', + opts[u'master'] + ) break except SaltClientError as exc: failed = True - log.error('Error while bringing up syndic for multi-syndic. 
Is master at {0} responding?'.format(opts['master'])) + log.error( + u'Error while bringing up syndic for multi-syndic. Is the ' + u'master at %s responding?', opts[u'master'] + ) last = time.time() if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait @@ -2561,7 +2623,10 @@ class SyndicManager(MinionBase): raise except: # pylint: disable=W0702 failed = True - log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True) + log.critical( + u'Unexpected error while connecting to %s', + opts[u'master'], exc_info=True + ) raise tornado.gen.Return(syndic) @@ -2574,7 +2639,11 @@ class SyndicManager(MinionBase): syndic = self._syndics[master].result() # pylint: disable=no-member self._syndics[master] = syndic.reconnect() else: - log.info('Attempting to mark {0} as dead, although it is already marked dead'.format(master)) # TODO: debug? + # TODO: debug? + log.info( + u'Attempting to mark %s as dead, although it is already ' + u'marked dead', master + ) def _call_syndic(self, func, args=(), kwargs=None, master_id=None): ''' @@ -2584,26 +2653,35 @@ class SyndicManager(MinionBase): kwargs = {} for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): - log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master)) + log.error( + u'Unable to call %s on %s, that syndic is not connected', + func, master + ) continue try: getattr(syndic_future.result(), func)(*args, **kwargs) return except SaltClientError: - log.error('Unable to call {0} on {1}, trying another...'.format(func, master)) + log.error( + u'Unable to call %s on %s, trying another...', + func, master + ) self._mark_master_dead(master) continue - log.critical('Unable to call {0} on any masters!'.format(func)) + log.critical(u'Unable to call %s on any masters!', func) def _return_pub_syndic(self, values, master_id=None): ''' Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for ''' - func = '_return_pub_multi' + func = u'_return_pub_multi' for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): - log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master)) + log.error( + u'Unable to call %s on %s, that syndic is not connected', + func, master + ) continue future, data = self.pub_futures.get(master, (None, None)) @@ -2617,7 +2695,10 @@ class SyndicManager(MinionBase): continue elif future.exception(): # Previous execution on this master returned an error - log.error('Unable to call {0} on {1}, trying another...'.format(func, master)) + log.error( + u'Unable to call %s on %s, trying another...', + func, master + ) self._mark_master_dead(master) del self.pub_futures[master] # Add not sent data to the delayed list and try the next master @@ -2634,7 +2715,7 @@ class SyndicManager(MinionBase): Iterate (in order) over your options for master ''' masters = list(self._syndics.keys()) - if self.opts['syndic_failover'] == 'random': + if self.opts[u'syndic_failover'] == u'random': shuffle(masters) if master_id not in self._syndics: master_id = masters.pop(0) @@ -2663,10 +2744,10 @@ class SyndicManager(MinionBase): self._spawn_syndics() # Instantiate the local client self.local = salt.client.get_local_client( - self.opts['_minion_conf_file'], io_loop=self.io_loop) - self.local.event.subscribe('') + self.opts[u'_minion_conf_file'], io_loop=self.io_loop) + self.local.event.subscribe(u'') - 
log.debug('SyndicManager \'{0}\' trying to tune in'.format(self.opts['id'])) + log.debug(u'SyndicManager \'%s\' trying to tune in', self.opts[u'id']) # register the event sub to the poller self.job_rets = {} @@ -2677,7 +2758,7 @@ class SyndicManager(MinionBase): # forward events every syndic_event_forward_timeout self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events, - self.opts['syndic_event_forward_timeout'] * 1000, + self.opts[u'syndic_event_forward_timeout'] * 1000, io_loop=self.io_loop) self.forward_events.start() @@ -2689,65 +2770,65 @@ class SyndicManager(MinionBase): def _process_event(self, raw): # TODO: cleanup: Move down into event class mtag, data = self.local.event.unpack(raw, self.local.event.serial) - log.trace('Got event {0}'.format(mtag)) # pylint: disable=no-member + log.trace(u'Got event %s', mtag) # pylint: disable=no-member - tag_parts = mtag.split('/') - if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \ - salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \ - 'return' in data: - if 'jid' not in data: + tag_parts = mtag.split(u'/') + if len(tag_parts) >= 4 and tag_parts[1] == u'job' and \ + salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == u'ret' and \ + u'return' in data: + if u'jid' not in data: # Not a job return return - if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1): - log.debug('Return received with matching master_id, not forwarding') + if self.syndic_mode == u'cluster' and data.get(u'master_id', 0) == self.opts.get(u'master_id', 1): + log.debug(u'Return received with matching master_id, not forwarding') return - master = data.get('master_id') + master = data.get(u'master_id') jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {}) if not jdict: - jdict['__fun__'] = data.get('fun') - jdict['__jid__'] = data['jid'] - jdict['__load__'] = {} - fstr = '{0}.get_load'.format(self.opts['master_job_cache']) + jdict[u'__fun__'] = data.get(u'fun') + jdict[u'__jid__'] = data[u'jid'] + jdict[u'__load__'] = {} + fstr = u'{0}.get_load'.format(self.opts[u'master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! - if data['jid'] not in self.jid_forward_cache: - jdict['__load__'].update( - self.mminion.returners[fstr](data['jid']) + if data[u'jid'] not in self.jid_forward_cache: + jdict[u'__load__'].update( + self.mminion.returners[fstr](data[u'jid']) ) - self.jid_forward_cache.add(data['jid']) - if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: + self.jid_forward_cache.add(data[u'jid']) + if len(self.jid_forward_cache) > self.opts[u'syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) if master is not None: # __'s to make sure it doesn't print out on the master cli - jdict['__master_id__'] = master + jdict[u'__master_id__'] = master ret = {} - for key in 'return', 'retcode', 'success': + for key in u'return', u'retcode', u'success': if key in data: ret[key] = data[key] - jdict[data['id']] = ret + jdict[data[u'id']] = ret else: # TODO: config to forward these? 
If so we'll have to keep track of who # has seen them # if we are the top level masters-- don't forward all the minion events - if self.syndic_mode == 'sync': + if self.syndic_mode == u'sync': # Add generic event aggregation here - if 'retcode' not in data: - self.raw_events.append({'data': data, 'tag': mtag}) + if u'retcode' not in data: + self.raw_events.append({u'data': data, u'tag': mtag}) def _forward_events(self): - log.trace('Forwarding events') # pylint: disable=no-member + log.trace(u'Forwarding events') # pylint: disable=no-member if self.raw_events: events = self.raw_events self.raw_events = [] - self._call_syndic('_fire_master', - kwargs={'events': events, - 'pretag': tagify(self.opts['id'], base='syndic'), - 'timeout': self.SYNDIC_EVENT_TIMEOUT, - 'sync': False, + self._call_syndic(u'_fire_master', + kwargs={u'events': events, + u'pretag': tagify(self.opts[u'id'], base=u'syndic'), + u'timeout': self.SYNDIC_EVENT_TIMEOUT, + u'sync': False, }, ) if self.delayed: @@ -2774,24 +2855,22 @@ class Matcher(object): Takes the data passed to a top file environment and determines if the data matches this minion ''' - matcher = 'compound' + matcher = u'compound' if not data: - log.error('Received bad data when setting the match from the top ' - 'file') + log.error(u'Received bad data when setting the match from the top ' + u'file') return False for item in data: if isinstance(item, dict): - if 'match' in item: - matcher = item['match'] - if hasattr(self, matcher + '_match'): - funcname = '{0}_match'.format(matcher) - if matcher == 'nodegroup': + if u'match' in item: + matcher = item[u'match'] + if hasattr(self, matcher + u'_match'): + funcname = u'{0}_match'.format(matcher) + if matcher == u'nodegroup': return getattr(self, funcname)(match, nodegroups) return getattr(self, funcname)(match) else: - log.error('Attempting to match with unknown matcher: {0}'.format( - matcher - )) + log.error(u'Attempting to match with unknown matcher: %s', matcher) return False def glob_match(self, tgt): @@ -2801,45 +2880,45 @@ class Matcher(object): if not isinstance(tgt, six.string_types): return False - return fnmatch.fnmatch(self.opts['id'], tgt) + return fnmatch.fnmatch(self.opts[u'id'], tgt) def pcre_match(self, tgt): ''' Returns true if the passed pcre regex matches ''' - return bool(re.match(tgt, self.opts['id'])) + return bool(re.match(tgt, self.opts[u'id'])) def list_match(self, tgt): ''' Determines if this host is on the list ''' if isinstance(tgt, six.string_types): - tgt = tgt.split(',') - return bool(self.opts['id'] in tgt) + tgt = tgt.split(u',') + return bool(self.opts[u'id'] in tgt) def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM): ''' Reads in the grains glob match ''' - log.debug('grains target: {0}'.format(tgt)) + log.debug(u'grains target: %s', tgt) if delimiter not in tgt: - log.error('Got insufficient arguments for grains match ' - 'statement from master') + log.error(u'Got insufficient arguments for grains match ' + u'statement from master') return False return salt.utils.subdict_match( - self.opts['grains'], tgt, delimiter=delimiter + self.opts[u'grains'], tgt, delimiter=delimiter ) def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM): ''' Matches a grain based on regex ''' - log.debug('grains pcre target: {0}'.format(tgt)) + log.debug(u'grains pcre target: %s', tgt) if delimiter not in tgt: - log.error('Got insufficient arguments for grains pcre match ' - 'statement from master') + log.error(u'Got insufficient arguments for grains pcre match ' + u'statement from 
master') return False - return salt.utils.subdict_match(self.opts['grains'], tgt, + return salt.utils.subdict_match(self.opts[u'grains'], tgt, delimiter=delimiter, regex_match=True) def data_match(self, tgt): @@ -2849,10 +2928,10 @@ class Matcher(object): if self.functions is None: utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=utils) - comps = tgt.split(':') + comps = tgt.split(u':') if len(comps) < 2: return False - val = self.functions['data.getval'](comps[0]) + val = self.functions[u'data.getval'](comps[0]) if val is None: # The value is not defined return False @@ -2875,38 +2954,38 @@ class Matcher(object): ''' Reads in the pillar glob match ''' - log.debug('pillar target: {0}'.format(tgt)) + log.debug(u'pillar target: %s', tgt) if delimiter not in tgt: - log.error('Got insufficient arguments for pillar match ' - 'statement from master') + log.error(u'Got insufficient arguments for pillar match ' + u'statement from master') return False return salt.utils.subdict_match( - self.opts['pillar'], tgt, delimiter=delimiter + self.opts[u'pillar'], tgt, delimiter=delimiter ) def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM): ''' Reads in the pillar pcre match ''' - log.debug('pillar PCRE target: {0}'.format(tgt)) + log.debug(u'pillar PCRE target: %s', tgt) if delimiter not in tgt: - log.error('Got insufficient arguments for pillar PCRE match ' - 'statement from master') + log.error(u'Got insufficient arguments for pillar PCRE match ' + u'statement from master') return False return salt.utils.subdict_match( - self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True + self.opts[u'pillar'], tgt, delimiter=delimiter, regex_match=True ) - def pillar_exact_match(self, tgt, delimiter=':'): + def pillar_exact_match(self, tgt, delimiter=u':'): ''' Reads in the pillar match, no globbing, no PCRE ''' - log.debug('pillar target: {0}'.format(tgt)) + log.debug(u'pillar target: %s', tgt) if delimiter not in tgt: - log.error('Got insufficient arguments for pillar match ' - 'statement from master') + log.error(u'Got insufficient arguments for pillar match ' + u'statement from master') return False - return salt.utils.subdict_match(self.opts['pillar'], + return salt.utils.subdict_match(self.opts[u'pillar'], tgt, delimiter=delimiter, exact_match=True) @@ -2923,11 +3002,11 @@ class Matcher(object): # Target is a network? 
tgt = ipaddress.ip_network(tgt) except: # pylint: disable=bare-except - log.error('Invalid IP/CIDR target: {0}'.format(tgt)) + log.error(u'Invalid IP/CIDR target: %s', tgt) return [] - proto = 'ipv{0}'.format(tgt.version) + proto = u'ipv{0}'.format(tgt.version) - grains = self.opts['grains'] + grains = self.opts[u'grains'] if proto not in grains: match = False @@ -2943,11 +3022,11 @@ class Matcher(object): Matches based on range cluster ''' if HAS_RANGE: - range_ = seco.range.Range(self.opts['range_server']) + range_ = seco.range.Range(self.opts[u'range_server']) try: - return self.opts['grains']['fqdn'] in range_.expand(tgt) + return self.opts[u'grains'][u'fqdn'] in range_.expand(tgt) except seco.range.RangeException as exc: - log.debug('Range exception in compound match: {0}'.format(exc)) + log.debug(u'Range exception in compound match: %s', exc) return False return False @@ -2956,22 +3035,22 @@ class Matcher(object): Runs the compound target check ''' if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)): - log.error('Compound target received that is neither string, list nor tuple') + log.error(u'Compound target received that is neither string, list nor tuple') return False - log.debug('compound_match: {0} ? {1}'.format(self.opts['id'], tgt)) - ref = {'G': 'grain', - 'P': 'grain_pcre', - 'I': 'pillar', - 'J': 'pillar_pcre', - 'L': 'list', - 'N': None, # Nodegroups should already be expanded - 'S': 'ipcidr', - 'E': 'pcre'} + log.debug(u'compound_match: %s ? %s', self.opts[u'id'], tgt) + ref = {u'G': u'grain', + u'P': u'grain_pcre', + u'I': u'pillar', + u'J': u'pillar_pcre', + u'L': u'list', + u'N': None, # Nodegroups should already be expanded + u'S': u'ipcidr', + u'E': u'pcre'} if HAS_RANGE: - ref['R'] = 'range' + ref[u'R'] = u'range' results = [] - opers = ['and', 'or', 'not', '(', ')'] + opers = [u'and', u'or', u'not', u'(', u')'] if isinstance(tgt, six.string_types): words = tgt.split() @@ -2984,55 +3063,55 @@ class Matcher(object): # Easy check first if word in opers: if results: - if results[-1] == '(' and word in ('and', 'or'): - log.error('Invalid beginning operator after "(": {0}'.format(word)) + if results[-1] == u'(' and word in (u'and', u'or'): + log.error(u'Invalid beginning operator after "(": %s', word) return False - if word == 'not': - if not results[-1] in ('and', 'or', '('): - results.append('and') + if word == u'not': + if not results[-1] in (u'and', u'or', u'('): + results.append(u'and') results.append(word) else: # seq start with binary oper, fail - if word not in ['(', 'not']: - log.error('Invalid beginning operator: {0}'.format(word)) + if word not in [u'(', u'not']: + log.error(u'Invalid beginning operator: %s', word) return False results.append(word) - elif target_info and target_info['engine']: - if 'N' == target_info['engine']: + elif target_info and target_info[u'engine']: + if u'N' == target_info[u'engine']: # Nodegroups should already be expanded/resolved to other engines - log.error('Detected nodegroup expansion failure of "{0}"'.format(word)) + log.error( + u'Detected nodegroup expansion failure of "%s"', word) return False - engine = ref.get(target_info['engine']) + engine = ref.get(target_info[u'engine']) if not engine: # If an unknown engine is called at any time, fail out - log.error('Unrecognized target engine "{0}" for' - ' target expression "{1}"'.format( - target_info['engine'], - word, - ) - ) + log.error( + u'Unrecognized target engine "%s" for target ' + u'expression "%s"', target_info[u'engine'], word + ) return False 
- engine_args = [target_info['pattern']] + engine_args = [target_info[u'pattern']] engine_kwargs = {} - if target_info['delimiter']: - engine_kwargs['delimiter'] = target_info['delimiter'] + if target_info[u'delimiter']: + engine_kwargs[u'delimiter'] = target_info[u'delimiter'] results.append( - str(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs)) + str(getattr(self, u'{0}_match'.format(engine))(*engine_args, **engine_kwargs)) ) else: # The match is not explicitly defined, evaluate it as a glob results.append(str(self.glob_match(word))) - results = ' '.join(results) - log.debug('compound_match {0} ? "{1}" => "{2}"'.format(self.opts['id'], tgt, results)) + results = u' '.join(results) + log.debug(u'compound_match %s ? "%s" => "%s"', self.opts[u'id'], tgt, results) try: return eval(results) # pylint: disable=W0123 except Exception: - log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results)) + log.error( + u'Invalid compound target: %s for results: %s', tgt, results) return False return False @@ -3069,7 +3148,7 @@ class ProxyMinionManager(MinionManager): class ProxyMinion(Minion): ''' - This class instantiates a 'proxy' minion--a minion that does not manipulate + This class instantiates a u'proxy' minion--a minion that does not manipulate the host it runs on, but instead manipulates a device that cannot run a minion. ''' @@ -3089,30 +3168,30 @@ class ProxyMinion(Minion): which is why the differences are not factored out into separate helper functions. ''' - log.debug("subclassed _post_master_init") + log.debug(u"subclassed _post_master_init") if self.connected: - self.opts['master'] = master + self.opts[u'master'] = master - self.opts['pillar'] = yield salt.pillar.get_async_pillar( + self.opts[u'pillar'] = yield salt.pillar.get_async_pillar( self.opts, - self.opts['grains'], - self.opts['id'], - saltenv=self.opts['environment'], - pillarenv=self.opts.get('pillarenv'), + self.opts[u'grains'], + self.opts[u'id'], + saltenv=self.opts[u'environment'], + pillarenv=self.opts.get(u'pillarenv'), ).compile_pillar() - if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts: - errmsg = 'No proxy key found in pillar or opts for id '+self.opts['id']+'. '+\ - 'Check your pillar/opts configuration and contents. Salt-proxy aborted.' + if u'proxy' not in self.opts[u'pillar'] and u'proxy' not in self.opts: + errmsg = u'No proxy key found in pillar or opts for id ' + self.opts[u'id'] + u'. ' + \ + u'Check your pillar/opts configuration and contents. Salt-proxy aborted.' log.error(errmsg) self._running = False raise SaltSystemExit(code=-1, msg=errmsg) - if 'proxy' not in self.opts: - self.opts['proxy'] = self.opts['pillar']['proxy'] + if u'proxy' not in self.opts: + self.opts[u'proxy'] = self.opts[u'pillar'][u'proxy'] - fq_proxyname = self.opts['proxy']['proxytype'] + fq_proxyname = self.opts[u'proxy'][u'proxytype'] # Need to load the modules so they get all the dunder variables self.functions, self.returners, self.function_errors, self.executors = self._load_modules() @@ -3120,7 +3199,7 @@ class ProxyMinion(Minion): # we can then sync any proxymodules down from the master # we do a sync_all here in case proxy code was installed by # SPM or was manually placed in /srv/salt/_modules etc. 
- self.functions['saltutil.sync_all'](saltenv=self.opts['environment']) + self.functions[u'saltutil.sync_all'](saltenv=self.opts[u'environment']) # Pull in the utils self.utils = salt.loader.utils(self.opts) @@ -3130,14 +3209,14 @@ class ProxyMinion(Minion): # And re-load the modules so the __proxy__ variable gets injected self.functions, self.returners, self.function_errors, self.executors = self._load_modules() - self.functions.pack['__proxy__'] = self.proxy - self.proxy.pack['__salt__'] = self.functions - self.proxy.pack['__ret__'] = self.returners - self.proxy.pack['__pillar__'] = self.opts['pillar'] + self.functions.pack[u'__proxy__'] = self.proxy + self.proxy.pack[u'__salt__'] = self.functions + self.proxy.pack[u'__ret__'] = self.returners + self.proxy.pack[u'__pillar__'] = self.opts[u'pillar'] # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__ self.utils = salt.loader.utils(self.opts, proxy=self.proxy) - self.proxy.pack['__utils__'] = self.utils + self.proxy.pack[u'__utils__'] = self.utils # Reload all modules so all dunder variables are injected self.proxy.reload_modules() @@ -3148,119 +3227,119 @@ class ProxyMinion(Minion): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager, proxy=self.proxy) - if ('{0}.init'.format(fq_proxyname) not in self.proxy - or '{0}.shutdown'.format(fq_proxyname) not in self.proxy): - errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname)+\ - 'Check your proxymodule. Salt-proxy aborted.' + if (u'{0}.init'.format(fq_proxyname) not in self.proxy + or u'{0}.shutdown'.format(fq_proxyname) not in self.proxy): + errmsg = u'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \ + u'Check your proxymodule. Salt-proxy aborted.' log.error(errmsg) self._running = False raise SaltSystemExit(code=-1, msg=errmsg) - proxy_init_fn = self.proxy[fq_proxyname+'.init'] + proxy_init_fn = self.proxy[fq_proxyname + u'.init'] proxy_init_fn(self.opts) - self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy) + self.opts[u'grains'] = salt.loader.grains(self.opts, proxy=self.proxy) self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() self.matcher = Matcher(self.opts, self.functions) self.beacons = salt.beacons.Beacon(self.opts, self.functions) - uid = salt.utils.get_uid(user=self.opts.get('user', None)) - self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) + uid = salt.utils.get_uid(user=self.opts.get(u'user', None)) + self.proc_dir = get_proc_dir(self.opts[u'cachedir'], uid=uid) - if self.connected and self.opts['pillar']: + if self.connected and self.opts[u'pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() - if hasattr(self, 'schedule'): + if hasattr(self, u'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners - if not hasattr(self, 'schedule'): + if not hasattr(self, u'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, - cleanup=[master_event(type='alive')], + cleanup=[master_event(type=u'alive')], proxy=self.proxy) # add default scheduling jobs to the minions scheduler - if self.opts['mine_enabled'] and 'mine.update' in self.functions: + if self.opts[u'mine_enabled'] and u'mine.update' in self.functions: self.schedule.add_job({ - '__mine_interval': + u'__mine_interval': { - 'function': 'mine.update', - 'minutes': self.opts['mine_interval'], - 'jid_include': True, - 'maxrunning': 2, - 'return_job': self.opts.get('mine_return_job', False) + u'function': u'mine.update', + u'minutes': self.opts[u'mine_interval'], + u'jid_include': True, + u'maxrunning': 2, + u'return_job': self.opts.get(u'mine_return_job', False) } }, persist=True) - log.info('Added mine.update to scheduler') + log.info(u'Added mine.update to scheduler') else: - self.schedule.delete_job('__mine_interval', persist=True) + self.schedule.delete_job(u'__mine_interval', persist=True) # add master_alive job if enabled - if (self.opts['transport'] != 'tcp' and - self.opts['master_alive_interval'] > 0): + if (self.opts[u'transport'] != u'tcp' and + self.opts[u'master_alive_interval'] > 0): self.schedule.add_job({ - master_event(type='alive', master=self.opts['master']): + master_event(type=u'alive', master=self.opts[u'master']): { - 'function': 'status.master', - 'seconds': self.opts['master_alive_interval'], - 'jid_include': True, - 'maxrunning': 1, - 'return_job': False, - 'kwargs': {'master': self.opts['master'], - 'connected': True} + u'function': u'status.master', + u'seconds': self.opts[u'master_alive_interval'], + u'jid_include': True, + u'maxrunning': 1, + u'return_job': False, + u'kwargs': {u'master': self.opts[u'master'], + u'connected': True} } }, persist=True) - if self.opts['master_failback'] and \ - 'master_list' in self.opts and \ - self.opts['master'] != self.opts['master_list'][0]: + if self.opts[u'master_failback'] and \ + u'master_list' in self.opts and \ + self.opts[u'master'] != self.opts[u'master_list'][0]: self.schedule.add_job({ - master_event(type='failback'): + master_event(type=u'failback'): { - 'function': 'status.ping_master', - 'seconds': self.opts['master_failback_interval'], - 'jid_include': True, - 'maxrunning': 1, - 'return_job': False, - 'kwargs': {'master': self.opts['master_list'][0]} + u'function': u'status.ping_master', + u'seconds': self.opts[u'master_failback_interval'], + u'jid_include': True, + u'maxrunning': 1, + u'return_job': False, + u'kwargs': {u'master': self.opts[u'master_list'][0]} } }, persist=True) else: - self.schedule.delete_job(master_event(type='failback'), persist=True) + self.schedule.delete_job(master_event(type=u'failback'), persist=True) else: - self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) - self.schedule.delete_job(master_event(type='failback'), persist=True) + self.schedule.delete_job(master_event(type=u'alive', master=self.opts[u'master']), persist=True) + self.schedule.delete_job(master_event(type=u'failback'), persist=True) # proxy keepalive - proxy_alive_fn = fq_proxyname+'.alive' + proxy_alive_fn = fq_proxyname+u'.alive' if 
(proxy_alive_fn in self.proxy - and 'status.proxy_reconnect' in self.functions - and self.opts.get('proxy_keep_alive', True)): + and u'status.proxy_reconnect' in self.functions + and self.opts.get(u'proxy_keep_alive', True)): # if `proxy_keep_alive` is either not specified, either set to False does not retry reconnecting self.schedule.add_job({ - '__proxy_keepalive': + u'__proxy_keepalive': { - 'function': 'status.proxy_reconnect', - 'minutes': self.opts.get('proxy_keep_alive_interval', 1), # by default, check once per minute - 'jid_include': True, - 'maxrunning': 1, - 'return_job': False, - 'kwargs': { - 'proxy_name': fq_proxyname + u'function': u'status.proxy_reconnect', + u'minutes': self.opts.get(u'proxy_keep_alive_interval', 1), # by default, check once per minute + u'jid_include': True, + u'maxrunning': 1, + u'return_job': False, + u'kwargs': { + u'proxy_name': fq_proxyname } } }, persist=True) self.schedule.enable_schedule() else: - self.schedule.delete_job('__proxy_keepalive', persist=True) + self.schedule.delete_job(u'__proxy_keepalive', persist=True) # Sync the grains here so the proxy can communicate them to the master - self.functions['saltutil.sync_grains'](saltenv='base') - self.grains_cache = self.opts['grains'] + self.functions[u'saltutil.sync_grains'](saltenv=u'base') + self.grains_cache = self.opts[u'grains'] self.ready = True @classmethod @@ -3268,10 +3347,10 @@ class ProxyMinion(Minion): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected - if not hasattr(minion_instance, 'functions'): + if not hasattr(minion_instance, u'functions'): # Need to load the modules so they get all the dunder variables functions, returners, function_errors, executors = ( - minion_instance._load_modules(grains=opts['grains']) + minion_instance._load_modules(grains=opts[u'grains']) ) minion_instance.functions = functions minion_instance.returners = returners @@ -3286,38 +3365,38 @@ class ProxyMinion(Minion): # And re-load the modules so the __proxy__ variable gets injected functions, returners, function_errors, executors = ( - minion_instance._load_modules(grains=opts['grains']) + minion_instance._load_modules(grains=opts[u'grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors - minion_instance.functions.pack['__proxy__'] = minion_instance.proxy - minion_instance.proxy.pack['__salt__'] = minion_instance.functions - minion_instance.proxy.pack['__ret__'] = minion_instance.returners - minion_instance.proxy.pack['__pillar__'] = minion_instance.opts['pillar'] + minion_instance.functions.pack[u'__proxy__'] = minion_instance.proxy + minion_instance.proxy.pack[u'__salt__'] = minion_instance.functions + minion_instance.proxy.pack[u'__ret__'] = minion_instance.returners + minion_instance.proxy.pack[u'__pillar__'] = minion_instance.opts[u'pillar'] # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__ minion_instance.utils = salt.loader.utils(minion_instance.opts, proxy=minion_instance.proxy) - minion_instance.proxy.pack['__utils__'] = minion_instance.utils + minion_instance.proxy.pack[u'__utils__'] = minion_instance.utils # Reload all modules so all dunder variables are injected minion_instance.proxy.reload_modules() - fq_proxyname = opts['proxy']['proxytype'] - proxy_init_fn = minion_instance.proxy[fq_proxyname+'.init'] + fq_proxyname = opts[u'proxy'][u'proxytype'] + proxy_init_fn = 
minion_instance.proxy[fq_proxyname + u'.init'] proxy_init_fn(opts) - if not hasattr(minion_instance, 'serial'): + if not hasattr(minion_instance, u'serial'): minion_instance.serial = salt.payload.Serial(opts) - if not hasattr(minion_instance, 'proc_dir'): - uid = salt.utils.get_uid(user=opts.get('user', None)) + if not hasattr(minion_instance, u'proc_dir'): + uid = salt.utils.get_uid(user=opts.get(u'user', None)) minion_instance.proc_dir = ( - get_proc_dir(opts['cachedir'], uid=uid) + get_proc_dir(opts[u'cachedir'], uid=uid) ) with tornado.stack_context.StackContext(minion_instance.ctx): - if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): + if isinstance(data[u'fun'], tuple) or isinstance(data[u'fun'], list): Minion._thread_multi_return(minion_instance, opts, data) else: Minion._thread_return(minion_instance, opts, data) diff --git a/salt/modules/acme.py b/salt/modules/acme.py index b07fab8197..af0ab0175f 100644 --- a/salt/modules/acme.py +++ b/salt/modules/acme.py @@ -31,11 +31,11 @@ import datetime import os # Import salt libs -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) -LEA = salt.utils.which_bin(['certbot', 'letsencrypt', +LEA = salt.utils.path.which_bin(['certbot', 'letsencrypt', 'certbot-auto', 'letsencrypt-auto', '/opt/letsencrypt/letsencrypt-auto']) LE_LIVE = '/etc/letsencrypt/live/' @@ -125,7 +125,8 @@ def cert(name, salt 'gitlab.example.com' acme.cert dev.example.com "[gitlab.example.com]" test_cert=True renew=14 webroot=/opt/gitlab/embedded/service/gitlab-rails/public ''' - cmd = [LEA, 'certonly', '--quiet'] + # cmd = [LEA, 'certonly', '--quiet'] + cmd = [LEA, 'certonly'] cert_file = _cert_file(name, 'cert') if not __salt__['file.file_exists'](cert_file): diff --git a/salt/modules/aliases.py b/salt/modules/aliases.py index 2ef10dcce4..b4621a123d 100644 --- a/salt/modules/aliases.py +++ b/salt/modules/aliases.py @@ -12,11 +12,11 @@ import tempfile # Import salt libs import salt.utils.files -from salt.utils import which as _which +import salt.utils.path from salt.exceptions import SaltInvocationError # Import third party libs -import salt.ext.six as six +from salt.ext import six __outputter__ = { 'rm_alias': 'txt', @@ -97,7 +97,7 @@ def __write_aliases_file(lines): os.rename(out.name, afn) # Search $PATH for the newalises command - newaliases = _which('newaliases') + newaliases = salt.utils.path.which('newaliases') if newaliases is not None: __salt__['cmd.run'](newaliases) diff --git a/salt/modules/alternatives.py b/salt/modules/alternatives.py index 1b444c1c6b..b1a5ecb4a8 100644 --- a/salt/modules/alternatives.py +++ b/salt/modules/alternatives.py @@ -14,7 +14,7 @@ import logging import salt.utils.files # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six __outputter__ = { diff --git a/salt/modules/apache.py b/salt/modules/apache.py index bb776bf533..e44e496709 100644 --- a/salt/modules/apache.py +++ b/salt/modules/apache.py @@ -16,7 +16,7 @@ import logging # Import 3rd-party libs # pylint: disable=import-error,no-name-in-module -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import cStringIO from salt.ext.six.moves.urllib.error import URLError from salt.ext.six.moves.urllib.request import ( @@ -29,8 +29,8 @@ from salt.ext.six.moves.urllib.request import ( # pylint: enable=import-error,no-name-in-module # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path log = logging.getLogger(__name__) @@ -40,7 +40,7 @@ def __virtual__(): Only load the 
module if apache is installed ''' cmd = _detect_os() - if salt.utils.which(cmd): + if salt.utils.path.which(cmd): return 'apache' return (False, 'The apache execution module cannot be loaded: apache is not installed.') @@ -399,7 +399,7 @@ def server_status(profile='default'): def _parse_config(conf, slot=None): ret = cStringIO() - if isinstance(conf, str): + if isinstance(conf, six.string_types): if slot: print('{0} {1}'.format(slot, conf), file=ret, end='') else: @@ -414,7 +414,7 @@ def _parse_config(conf, slot=None): ) del conf['this'] for key, value in six.iteritems(conf): - if isinstance(value, str): + if isinstance(value, six.string_types): print('{0} {1}'.format(key, value), file=ret) elif isinstance(value, list): print(_parse_config(value, key), file=ret) diff --git a/salt/modules/apcups.py b/salt/modules/apcups.py index 68abb7f565..647ae78280 100644 --- a/salt/modules/apcups.py +++ b/salt/modules/apcups.py @@ -9,6 +9,7 @@ import logging # Import Salt libs import salt.utils +import salt.utils.path import salt.utils.decorators as decorators log = logging.getLogger(__name__) @@ -22,7 +23,7 @@ def _check_apcaccess(): ''' Looks to see if apcaccess is present on the system ''' - return salt.utils.which('apcaccess') + return salt.utils.path.which('apcaccess') def __virtual__(): diff --git a/salt/modules/apf.py b/salt/modules/apf.py index 55e8a401b2..2a007eff36 100644 --- a/salt/modules/apf.py +++ b/salt/modules/apf.py @@ -18,15 +18,15 @@ except ImportError: # Import Salt Libs +import salt.utils.path from salt.exceptions import CommandExecutionError -import salt.utils def __virtual__(): ''' Only load if apf exists on the system ''' - if salt.utils.which('apf') is None: + if salt.utils.path.which('apf') is None: return (False, 'The apf execution module cannot be loaded: apf unavailable.') elif not IPTC_IMPORTED: @@ -40,7 +40,7 @@ def __apf_cmd(cmd): ''' Return the apf location ''' - apf_cmd = '{0} {1}'.format(salt.utils.which('apf'), cmd) + apf_cmd = '{0} {1}'.format(salt.utils.path.which('apf'), cmd) out = __salt__['cmd.run_all'](apf_cmd) if out['retcode'] != 0: diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py index a13a3ee9f8..c5f9d33650 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py @@ -28,7 +28,7 @@ import json # Import third party libs import yaml # pylint: disable=no-name-in-module,import-error,redefined-builtin -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range from salt.ext.six.moves.urllib.error import HTTPError from salt.ext.six.moves.urllib.request import Request as _Request, urlopen as _urlopen @@ -39,11 +39,14 @@ import salt.config import salt.syspaths from salt.modules.cmdmod import _parse_env import salt.utils +import salt.utils.args import salt.utils.files import salt.utils.itertools +import salt.utils.path import salt.utils.pkg import salt.utils.pkg.deb import salt.utils.systemd +import salt.utils.versions from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) @@ -250,7 +253,7 @@ def latest_version(*names, **kwargs): if 'repo' in kwargs: # Remember to kill _get_repo() too when removing this warning. - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Hydrogen', 'The \'repo\' argument to apt.latest_version is deprecated, and ' 'will be removed in Salt {version}. 
Please use \'fromrepo\' ' @@ -317,7 +320,7 @@ def latest_version(*names, **kwargs): # to the install candidate, then the candidate is an upgrade, so # add it to the return dict if not any( - (salt.utils.compare_versions(ver1=x, + (salt.utils.versions.compare(ver1=x, oper='>=', ver2=candidate, cmp_func=version_cmp) @@ -762,12 +765,12 @@ def install(name=None, cver = old.get(pkgname, '') if reinstall and cver \ - and salt.utils.compare_versions(ver1=version_num, + and salt.utils.versions.compare(ver1=version_num, oper='==', ver2=cver, cmp_func=version_cmp): to_reinstall[pkgname] = pkgstr - elif not cver or salt.utils.compare_versions(ver1=version_num, + elif not cver or salt.utils.versions.compare(ver1=version_num, oper='>=', ver2=cver, cmp_func=version_cmp): @@ -2198,7 +2201,7 @@ def mod_repo(repo, saltenv='base', **kwargs): # secure PPAs cannot be supported as of the time of this code # implementation via apt-add-repository. The code path for # secure PPAs should be the same as urllib method - if salt.utils.which('apt-add-repository') \ + if salt.utils.path.which('apt-add-repository') \ and 'ppa_auth' not in kwargs: repo_info = get_repo(repo) if repo_info: @@ -2813,10 +2816,10 @@ def info_installed(*names, **kwargs): salt '*' pkg.info_installed ... salt '*' pkg.info_installed failhard=false ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) failhard = kwargs.pop('failhard', True) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) ret = dict() for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names, failhard=failhard).items(): diff --git a/salt/modules/archive.py b/salt/modules/archive.py index 048e2bf625..70ef0bdecc 100644 --- a/salt/modules/archive.py +++ b/salt/modules/archive.py @@ -23,7 +23,7 @@ except ImportError: from pipes import quote as _quote # Import third party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module try: import rarfile @@ -33,11 +33,16 @@ except ImportError: # Import salt libs from salt.exceptions import SaltInvocationError, CommandExecutionError -import salt.utils +import salt.utils.decorators +import salt.utils.decorators.path import salt.utils.files -import salt.utils.itertools +import salt.utils.path +import salt.utils.platform import salt.utils.templates +if salt.utils.platform.is_windows(): + import win32file + # TODO: Check that the passed arguments are correct # Don't shadow built-in's. @@ -188,7 +193,7 @@ def list_(name, info={'error': stderr} ) else: - if not salt.utils.which('tar'): + if not salt.utils.path.which('tar'): raise CommandExecutionError('\'tar\' command not available') if decompress_cmd is not None: # Guard against shell injection @@ -199,7 +204,7 @@ def list_(name, except AttributeError: raise CommandExecutionError('Invalid CLI options') else: - if salt.utils.which('xz') \ + if salt.utils.path.which('xz') \ and __salt__['cmd.retcode'](['xz', '-t', cached], python_shell=False, ignore_retcode=True) == 0: @@ -234,7 +239,7 @@ def list_(name, with contextlib.closing(zipfile.ZipFile(cached)) as zip_archive: for member in zip_archive.infolist(): path = member.filename - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): if path.endswith('/'): # zipfile.ZipInfo objects on windows use forward # slash at end of the directory name. 
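
Illustrative sketch only, not part of the patch above: the archive.py hunks (like many others in this changeset) move from salt.utils.which/salt.utils.is_windows to the relocated salt.utils.path.which and salt.utils.platform.is_windows helpers. The _tar_available helper below is hypothetical and only shows the new import locations in use.

    import salt.utils.path
    import salt.utils.platform

    def _tar_available():
        # which() returns the absolute path to the binary, or None when it is
        # not on PATH; is_windows() gates the platform-specific branch.
        if salt.utils.platform.is_windows():
            return None
        return salt.utils.path.which('tar')
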
@@ -279,7 +284,7 @@ def list_(name, else: files.append(path) else: - if not salt.utils.which('rar'): + if not salt.utils.path.which('rar'): raise CommandExecutionError( 'rar command not available, is it installed?' ) @@ -445,7 +450,7 @@ def _expand_sources(sources): for path in _glob(source)] -@salt.utils.decorators.which('tar') +@salt.utils.decorators.path.which('tar') def tar(options, tarfile, sources=None, dest=None, cwd=None, template=None, runas=None): ''' @@ -533,7 +538,7 @@ def tar(options, tarfile, sources=None, dest=None, python_shell=False).splitlines() -@salt.utils.decorators.which('gzip') +@salt.utils.decorators.path.which('gzip') def gzip(sourcefile, template=None, runas=None, options=None): ''' Uses the gzip command to create gzip files @@ -573,7 +578,7 @@ def gzip(sourcefile, template=None, runas=None, options=None): python_shell=False).splitlines() -@salt.utils.decorators.which('gunzip') +@salt.utils.decorators.path.which('gunzip') def gunzip(gzipfile, template=None, runas=None, options=None): ''' Uses the gunzip command to unpack gzip files @@ -613,7 +618,7 @@ def gunzip(gzipfile, template=None, runas=None, options=None): python_shell=False).splitlines() -@salt.utils.decorators.which('zip') +@salt.utils.decorators.path.which('zip') def cmd_zip(zip_file, sources, template=None, cwd=None, runas=None): ''' .. versionadded:: 2015.5.0 @@ -784,10 +789,9 @@ def zip_(zip_file, sources, template=None, cwd=None, runas=None): if os.path.isdir(src): for dir_name, sub_dirs, files in os.walk(src): if cwd and dir_name.startswith(cwd): - arc_dir = salt.utils.relpath(dir_name, cwd) + arc_dir = os.path.relpath(dir_name, cwd) else: - arc_dir = salt.utils.relpath(dir_name, - rel_root) + arc_dir = os.path.relpath(dir_name, rel_root) if arc_dir: archived_files.append(arc_dir + '/') zfile.write(dir_name, arc_dir) @@ -798,9 +802,9 @@ def zip_(zip_file, sources, template=None, cwd=None, runas=None): zfile.write(abs_name, arc_name) else: if cwd and src.startswith(cwd): - arc_name = salt.utils.relpath(src, cwd) + arc_name = os.path.relpath(src, cwd) else: - arc_name = salt.utils.relpath(src, rel_root) + arc_name = os.path.relpath(src, rel_root) archived_files.append(arc_name) zfile.write(src, arc_name) except Exception as exc: @@ -820,7 +824,7 @@ def zip_(zip_file, sources, template=None, cwd=None, runas=None): return archived_files -@salt.utils.decorators.which('unzip') +@salt.utils.decorators.path.which('unzip') def cmd_unzip(zip_file, dest, excludes=None, @@ -1050,7 +1054,7 @@ def unzip(zip_file, cleaned_files.extend([x for x in files if x not in excludes]) for target in cleaned_files: if target not in excludes: - if salt.utils.is_windows() is False: + if salt.utils.platform.is_windows() is False: info = zfile.getinfo(target) # Check if zipped file is a symbolic link if stat.S_ISLNK(info.external_attr >> 16): @@ -1059,15 +1063,19 @@ def unzip(zip_file, continue zfile.extract(target, dest, password) if extract_perms: - perm = zfile.getinfo(target).external_attr >> 16 - if perm == 0: - umask_ = os.umask(0) - os.umask(umask_) - if target.endswith('/'): - perm = 0o777 & ~umask_ - else: - perm = 0o666 & ~umask_ - os.chmod(os.path.join(dest, target), perm) + if not salt.utils.platform.is_windows(): + perm = zfile.getinfo(target).external_attr >> 16 + if perm == 0: + umask_ = os.umask(0) + os.umask(umask_) + if target.endswith('/'): + perm = 0o777 & ~umask_ + else: + perm = 0o666 & ~umask_ + os.chmod(os.path.join(dest, target), perm) + else: + win32_attr = zfile.getinfo(target).external_attr & 0xFF + 
win32file.SetFileAttributes(os.path.join(dest, target), win32_attr) except Exception as exc: if runas: os.seteuid(euid) @@ -1149,7 +1157,7 @@ def is_encrypted(name, clean=False, saltenv='base'): return ret -@salt.utils.decorators.which('rar') +@salt.utils.decorators.path.which('rar') def rar(rarfile, sources, template=None, cwd=None, runas=None): ''' Uses `rar for Linux`_ to create rar files @@ -1200,7 +1208,7 @@ def rar(rarfile, sources, template=None, cwd=None, runas=None): python_shell=False).splitlines() -@salt.utils.decorators.which_bin(('unrar', 'rar')) +@salt.utils.decorators.path.which_bin(('unrar', 'rar')) def unrar(rarfile, dest, excludes=None, template=None, runas=None, trim_output=False): ''' Uses `rar for Linux`_ to unpack rar files @@ -1235,7 +1243,7 @@ def unrar(rarfile, dest, excludes=None, template=None, runas=None, trim_output=F if isinstance(excludes, six.string_types): excludes = [entry.strip() for entry in excludes.split(',')] - cmd = [salt.utils.which_bin(('unrar', 'rar')), + cmd = [salt.utils.path.which_bin(('unrar', 'rar')), 'x', '-idp', '{0}'.format(rarfile)] if excludes is not None: for exclude in excludes: diff --git a/salt/modules/artifactory.py b/salt/modules/artifactory.py index d38ed9cc1e..284532f201 100644 --- a/salt/modules/artifactory.py +++ b/salt/modules/artifactory.py @@ -10,7 +10,7 @@ import base64 import logging # Import Salt libs -import salt.utils.files +import salt.utils import salt.ext.six.moves.http_client # pylint: disable=import-error,redefined-builtin,no-name-in-module from salt.ext.six.moves import urllib # pylint: disable=no-name-in-module from salt.ext.six.moves.urllib.error import HTTPError, URLError # pylint: disable=no-name-in-module @@ -38,7 +38,7 @@ def __virtual__(): return True -def get_latest_snapshot(artifactory_url, repository, group_id, artifact_id, packaging, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None): +def get_latest_snapshot(artifactory_url, repository, group_id, artifact_id, packaging, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None, use_literal_group_id=False): ''' Gets latest snapshot of the given artifact @@ -69,15 +69,15 @@ def get_latest_snapshot(artifactory_url, repository, group_id, artifact_id, pack headers = {} if username and password: headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', '')) - artifact_metadata = _get_artifact_metadata(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers) + artifact_metadata = _get_artifact_metadata(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers, use_literal_group_id=use_literal_group_id) version = artifact_metadata['latest_version'] - snapshot_url, file_name = _get_snapshot_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, packaging=packaging, classifier=classifier, headers=headers) + snapshot_url, file_name = _get_snapshot_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, packaging=packaging, classifier=classifier, headers=headers, use_literal_group_id=use_literal_group_id) target_file = __resolve_target_file(file_name, target_dir, target_file) return __save_artifact(snapshot_url, target_file, headers) -def get_snapshot(artifactory_url, repository, group_id, artifact_id, 
packaging, version, snapshot_version=None, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None): +def get_snapshot(artifactory_url, repository, group_id, artifact_id, packaging, version, snapshot_version=None, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None, use_literal_group_id=False): ''' Gets snapshot of the desired version of the artifact @@ -109,13 +109,13 @@ def get_snapshot(artifactory_url, repository, group_id, artifact_id, packaging, headers = {} if username and password: headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', '')) - snapshot_url, file_name = _get_snapshot_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, packaging=packaging, snapshot_version=snapshot_version, classifier=classifier, headers=headers) + snapshot_url, file_name = _get_snapshot_url(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, packaging=packaging, snapshot_version=snapshot_version, classifier=classifier, headers=headers, use_literal_group_id=use_literal_group_id) target_file = __resolve_target_file(file_name, target_dir, target_file) return __save_artifact(snapshot_url, target_file, headers) -def get_latest_release(artifactory_url, repository, group_id, artifact_id, packaging, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None): +def get_latest_release(artifactory_url, repository, group_id, artifact_id, packaging, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None, use_literal_group_id=False): ''' Gets the latest release of the artifact @@ -146,13 +146,13 @@ def get_latest_release(artifactory_url, repository, group_id, artifact_id, packa if username and password: headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', '')) version = __find_latest_version(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers) - release_url, file_name = _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier) + release_url, file_name = _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier, use_literal_group_id) target_file = __resolve_target_file(file_name, target_dir, target_file) return __save_artifact(release_url, target_file, headers) -def get_release(artifactory_url, repository, group_id, artifact_id, packaging, version, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None): +def get_release(artifactory_url, repository, group_id, artifact_id, packaging, version, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None, use_literal_group_id=False): ''' Gets the specified release of the artifact @@ -184,7 +184,7 @@ def get_release(artifactory_url, repository, group_id, artifact_id, packaging, v headers = {} if username and password: headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', '')) - release_url, file_name = _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier) + release_url, file_name = _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier, 
use_literal_group_id) target_file = __resolve_target_file(file_name, target_dir, target_file) return __save_artifact(release_url, target_file, headers) @@ -196,7 +196,7 @@ def __resolve_target_file(file_name, target_dir, target_file=None): return target_file -def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, version, packaging, snapshot_version=None, classifier=None, headers=None): +def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, version, packaging, snapshot_version=None, classifier=None, headers=None, use_literal_group_id=False): if headers is None: headers = {} has_classifier = classifier is not None and classifier != "" @@ -242,7 +242,7 @@ def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, versio snapshot_version = snapshot_version_metadata['snapshot_versions'][packaging] - group_url = __get_group_id_subpath(group_id) + group_url = __get_group_id_subpath(group_id, use_literal_group_id) file_name = '{artifact_id}-{snapshot_version}{classifier}.{packaging}'.format( artifact_id=artifact_id, @@ -262,8 +262,8 @@ def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, versio return snapshot_url, file_name -def _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier=None): - group_url = __get_group_id_subpath(group_id) +def _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier=None, use_literal_group_id=False): + group_url = __get_group_id_subpath(group_id, use_literal_group_id) # for released versions the suffix for the file is same as version file_name = '{artifact_id}-{version}{classifier}.{packaging}'.format( @@ -283,8 +283,8 @@ def _get_release_url(repository, group_id, artifact_id, packaging, version, arti return release_url, file_name -def _get_artifact_metadata_url(artifactory_url, repository, group_id, artifact_id): - group_url = __get_group_id_subpath(group_id) +def _get_artifact_metadata_url(artifactory_url, repository, group_id, artifact_id, use_literal_group_id=False): + group_url = __get_group_id_subpath(group_id, use_literal_group_id) # for released versions the suffix for the file is same as version artifact_metadata_url = '{artifactory_url}/{repository}/{group_url}/{artifact_id}/maven-metadata.xml'.format( artifactory_url=artifactory_url, @@ -295,13 +295,14 @@ def _get_artifact_metadata_url(artifactory_url, repository, group_id, artifact_i return artifact_metadata_url -def _get_artifact_metadata_xml(artifactory_url, repository, group_id, artifact_id, headers): +def _get_artifact_metadata_xml(artifactory_url, repository, group_id, artifact_id, headers, use_literal_group_id=False): artifact_metadata_url = _get_artifact_metadata_url( artifactory_url=artifactory_url, repository=repository, group_id=group_id, - artifact_id=artifact_id + artifact_id=artifact_id, + use_literal_group_id=use_literal_group_id ) try: @@ -318,8 +319,8 @@ def _get_artifact_metadata_xml(artifactory_url, repository, group_id, artifact_i return artifact_metadata_xml -def _get_artifact_metadata(artifactory_url, repository, group_id, artifact_id, headers): - metadata_xml = _get_artifact_metadata_xml(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers) +def _get_artifact_metadata(artifactory_url, repository, group_id, artifact_id, headers, use_literal_group_id=False): + metadata_xml = _get_artifact_metadata_xml(artifactory_url=artifactory_url, repository=repository, 
group_id=group_id, artifact_id=artifact_id, headers=headers, use_literal_group_id=use_literal_group_id) root = ET.fromstring(metadata_xml) assert group_id == root.find('groupId').text @@ -331,8 +332,8 @@ def _get_artifact_metadata(artifactory_url, repository, group_id, artifact_id, h # functions for handling snapshots -def _get_snapshot_version_metadata_url(artifactory_url, repository, group_id, artifact_id, version): - group_url = __get_group_id_subpath(group_id) +def _get_snapshot_version_metadata_url(artifactory_url, repository, group_id, artifact_id, version, use_literal_group_id=False): + group_url = __get_group_id_subpath(group_id, use_literal_group_id) # for released versions the suffix for the file is same as version snapshot_version_metadata_url = '{artifactory_url}/{repository}/{group_url}/{artifact_id}/{version}/maven-metadata.xml'.format( artifactory_url=artifactory_url, @@ -344,14 +345,15 @@ def _get_snapshot_version_metadata_url(artifactory_url, repository, group_id, ar return snapshot_version_metadata_url -def _get_snapshot_version_metadata_xml(artifactory_url, repository, group_id, artifact_id, version, headers): +def _get_snapshot_version_metadata_xml(artifactory_url, repository, group_id, artifact_id, version, headers, use_literal_group_id=False): snapshot_version_metadata_url = _get_snapshot_version_metadata_url( artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, - version=version + version=version, + use_literal_group_id=use_literal_group_id ) try: @@ -388,8 +390,8 @@ def _get_snapshot_version_metadata(artifactory_url, repository, group_id, artifa } -def __get_latest_version_url(artifactory_url, repository, group_id, artifact_id): - group_url = __get_group_id_subpath(group_id) +def __get_latest_version_url(artifactory_url, repository, group_id, artifact_id, use_literal_group_id=False): + group_url = __get_group_id_subpath(group_id, use_literal_group_id) # for released versions the suffix for the file is same as version latest_version_url = '{artifactory_url}/api/search/latestVersion?g={group_url}&a={artifact_id}&repos={repository}'.format( artifactory_url=artifactory_url, @@ -400,13 +402,14 @@ def __get_latest_version_url(artifactory_url, repository, group_id, artifact_id) return latest_version_url -def __find_latest_version(artifactory_url, repository, group_id, artifact_id, headers): +def __find_latest_version(artifactory_url, repository, group_id, artifact_id, headers, use_literal_group_id=False): latest_version_url = __get_latest_version_url( artifactory_url=artifactory_url, repository=repository, group_id=group_id, - artifact_id=artifact_id + artifact_id=artifact_id, + use_literal_group_id=use_literal_group_id ) try: @@ -478,9 +481,11 @@ def __save_artifact(artifact_url, target_file, headers): return result -def __get_group_id_subpath(group_id): - group_url = group_id.replace('.', '/') - return group_url +def __get_group_id_subpath(group_id, use_literal_group_id=False): + if not use_literal_group_id: + group_url = group_id.replace('.', '/') + return group_url + return group_id def __get_classifier_url(classifier): diff --git a/salt/modules/at.py b/salt/modules/at.py index 5a27e5b5ee..d6700f07ab 100644 --- a/salt/modules/at.py +++ b/salt/modules/at.py @@ -23,7 +23,8 @@ from salt.ext.six.moves import map from salt.exceptions import CommandNotFoundError # Import salt libs -import salt.utils +import salt.utils.path +import salt.utils.platform # OS Families that should work (Ubuntu and Debian are the default) # TODO: 
Refactor some of this module to remove the checks for binaries @@ -38,9 +39,9 @@ def __virtual__(): ''' Most everything has the ability to support at(1) ''' - if salt.utils.is_windows() or salt.utils.is_sunos(): + if salt.utils.platform.is_windows() or salt.utils.platform.is_sunos(): return (False, 'The at module could not be loaded: unsupported platform') - if salt.utils.which('at') is None: + if salt.utils.path.which('at') is None: return (False, 'The at module could not be loaded: at command not found') return __virtualname__ @@ -49,7 +50,7 @@ def _cmd(binary, *args): ''' Wrapper to run at(1) or return None. ''' - binary = salt.utils.which(binary) + binary = salt.utils.path.which(binary) if not binary: raise CommandNotFoundError('{0}: command not found'.format(binary)) cmd = [binary] + list(args) @@ -163,7 +164,7 @@ def atrm(*args): ''' # Need to do this here also since we use atq() - if not salt.utils.which('at'): + if not salt.utils.path.which('at'): return '\'at.atrm\' is not available.' if not args: @@ -211,7 +212,7 @@ def at(*args, **kwargs): # pylint: disable=C0103 # Shim to produce output similar to what __virtual__() should do # but __salt__ isn't available in __virtual__() - binary = salt.utils.which('at') + binary = salt.utils.path.which('at') if not binary: return '\'at.at\' is not available.' diff --git a/salt/modules/at_solaris.py b/salt/modules/at_solaris.py index c93fe5da9c..2a16625df5 100644 --- a/salt/modules/at_solaris.py +++ b/salt/modules/at_solaris.py @@ -25,8 +25,9 @@ import logging from salt.ext.six.moves import map # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path +import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = 'at' @@ -36,11 +37,11 @@ def __virtual__(): ''' We only deal with Solaris' specific version of at ''' - if not salt.utils.is_sunos(): + if not salt.utils.platform.is_sunos(): return (False, 'The at module could not be loaded: unsupported platform') - if not salt.utils.which('at') or \ - not salt.utils.which('atq') or \ - not salt.utils.which('atrm'): + if not salt.utils.path.which('at') or \ + not salt.utils.path.which('atq') or \ + not salt.utils.path.which('atrm'): return (False, 'The at module could not be loaded: at command not found') return __virtualname__ diff --git a/salt/modules/augeas_cfg.py b/salt/modules/augeas_cfg.py index 6b1f1e7b1b..9631958667 100644 --- a/salt/modules/augeas_cfg.py +++ b/salt/modules/augeas_cfg.py @@ -30,7 +30,7 @@ import os import re import logging from salt.ext.six.moves import zip -import salt.ext.six as six +from salt.ext import six # Make sure augeas python interface is installed HAS_AUGEAS = False @@ -42,6 +42,7 @@ except ImportError: # Import salt libs import salt.utils +import salt.utils.args from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) @@ -199,7 +200,7 @@ def execute(context=None, lens=None, commands=(), load_path=None): method = METHOD_MAP[cmd] nargs = arg_map[method] - parts = salt.utils.shlex_split(arg) + parts = salt.utils.args.shlex_split(arg, posix=False) if len(parts) not in nargs: err = '{0} takes {1} args: {2}'.format(method, nargs, parts) diff --git a/salt/modules/aws_sqs.py b/salt/modules/aws_sqs.py index 3c615573bd..a906899718 100644 --- a/salt/modules/aws_sqs.py +++ b/salt/modules/aws_sqs.py @@ -10,7 +10,8 @@ import json # Import salt libs import salt.utils -import salt.ext.six as six +import salt.utils.path +from salt.ext import six log = logging.getLogger(__name__) @@ -18,7 +19,7 @@ _OUTPUT = 
'--output json' def __virtual__(): - if salt.utils.which('aws'): + if salt.utils.path.which('aws'): # awscli is installed, load the module return True return (False, 'The module aws_sqs could not be loaded: aws command not found') diff --git a/salt/modules/bamboohr.py b/salt/modules/bamboohr.py index 353e736c63..0fc4acd8fb 100644 --- a/salt/modules/bamboohr.py +++ b/salt/modules/bamboohr.py @@ -20,7 +20,7 @@ import logging # Import salt libs import salt.utils.http -import salt.ext.six as six +from salt.ext import six from salt._compat import ElementTree as ET log = logging.getLogger(__name__) diff --git a/salt/modules/bcache.py b/salt/modules/bcache.py index 8e87256dbd..23baba0034 100644 --- a/salt/modules/bcache.py +++ b/salt/modules/bcache.py @@ -25,7 +25,7 @@ import re from salt.ext import six # Import salt libs -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -44,14 +44,14 @@ __func_alias__ = { 'super_': 'super', } -HAS_BLKDISCARD = salt.utils.which('blkdiscard') is not None +HAS_BLKDISCARD = salt.utils.path.which('blkdiscard') is not None def __virtual__(): ''' Only work when make-bcache is installed ''' - return salt.utils.which('make-bcache') is not None + return salt.utils.path.which('make-bcache') is not None def uuid(dev=None): diff --git a/salt/modules/beacons.py b/salt/modules/beacons.py index 2734973268..0b6cd342f4 100644 --- a/salt/modules/beacons.py +++ b/salt/modules/beacons.py @@ -81,7 +81,7 @@ def add(name, beacon_data, **kwargs): .. code-block:: bash - salt '*' beacons.add ps "{'salt-master': 'stopped', 'apache2': 'stopped'}" + salt '*' beacons.add ps "[{'salt-master': 'stopped', 'apache2': 'stopped'}]" ''' ret = {'comment': 'Failed to add beacon {0}.'.format(name), @@ -125,6 +125,7 @@ def add(name, beacon_data, **kwargs): res = __salt__['event.fire']({'name': name, 'beacon_data': beacon_data, 'func': 'add'}, 'manage_beacons') if res: event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_add_complete', wait=30) + log.debug('=== event_ret {} ==='.format(event_ret)) if event_ret and event_ret['complete']: beacons = event_ret['beacons'] if name in beacons and beacons[name] == beacon_data: @@ -149,7 +150,7 @@ def modify(name, beacon_data, **kwargs): .. 
code-block:: bash - salt '*' beacons.modify ps "{'salt-master': 'stopped', 'apache2': 'stopped'}" + salt '*' beacons.modify ps "[{'salt-master': 'stopped', 'apache2': 'stopped'}]" ''' ret = {'comment': '', @@ -252,6 +253,7 @@ def delete(name, **kwargs): if res: event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_delete_complete', wait=30) if event_ret and event_ret['complete']: + log.debug('== event_ret {} =='.format(event_ret)) beacons = event_ret['beacons'] if name not in beacons: ret['result'] = True diff --git a/salt/modules/bigip.py b/salt/modules/bigip.py index 8715e2ccd9..6b1d964e26 100644 --- a/salt/modules/bigip.py +++ b/salt/modules/bigip.py @@ -19,7 +19,7 @@ except ImportError: HAS_LIBS = False # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Import salt libs import salt.utils @@ -970,7 +970,7 @@ def replace_pool_members(hostname, username, password, name, members): #specify members if provided if members is not None: - if isinstance(members, str): + if isinstance(members, six.string_types): members = members.split(',') pool_members = [] @@ -1473,7 +1473,7 @@ def create_virtual(hostname, username, password, name, destination, payload['vlans'] = 'none' elif vlans == 'default': payload['vlans'] = 'default' - elif isinstance(vlans, str) and (vlans.startswith('enabled') or vlans.startswith('disabled')): + elif isinstance(vlans, six.string_types) and (vlans.startswith('enabled') or vlans.startswith('disabled')): try: vlans_setting = vlans.split(':')[0] payload['vlans'] = vlans.split(':')[1].split(',') diff --git a/salt/modules/boto_apigateway.py b/salt/modules/boto_apigateway.py index 701a217ee2..6b42350b5a 100644 --- a/salt/modules/boto_apigateway.py +++ b/salt/modules/boto_apigateway.py @@ -82,7 +82,7 @@ import json import datetime # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.boto3 import salt.utils.compat from salt.utils.versions import LooseVersion as _LooseVersion diff --git a/salt/modules/boto_asg.py b/salt/modules/boto_asg.py index b183a0b243..294c00b8f9 100644 --- a/salt/modules/boto_asg.py +++ b/salt/modules/boto_asg.py @@ -58,7 +58,7 @@ DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ" # Import third party libs import yaml -import salt.ext.six as six +from salt.ext import six try: import boto import boto.ec2 diff --git a/salt/modules/boto_cloudtrail.py b/salt/modules/boto_cloudtrail.py index 0140deabfc..99a1191ec8 100644 --- a/salt/modules/boto_cloudtrail.py +++ b/salt/modules/boto_cloudtrail.py @@ -54,7 +54,7 @@ from __future__ import absolute_import import logging # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.boto3 import salt.utils.compat import salt.utils diff --git a/salt/modules/boto_dynamodb.py b/salt/modules/boto_dynamodb.py index a7b23df5a3..ccbc90449c 100644 --- a/salt/modules/boto_dynamodb.py +++ b/salt/modules/boto_dynamodb.py @@ -53,7 +53,7 @@ logger = logging.getLogger(__name__) logging.getLogger('boto').setLevel(logging.INFO) # Import third party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin from salt.exceptions import SaltInvocationError try: diff --git a/salt/modules/boto_ec2.py b/salt/modules/boto_ec2.py index 4a7001a56c..cd77b8f129 100644 --- a/salt/modules/boto_ec2.py +++ b/salt/modules/boto_ec2.py @@ -54,7 +54,7 @@ import json # Import Salt libs import salt.utils import salt.utils.compat -import salt.ext.six as six +from salt.ext import six 
from salt.exceptions import SaltInvocationError, CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion diff --git a/salt/modules/boto_efs.py b/salt/modules/boto_efs.py index 0bb20c31d2..8ae17f5bf3 100644 --- a/salt/modules/boto_efs.py +++ b/salt/modules/boto_efs.py @@ -55,7 +55,7 @@ import logging # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six try: import boto3 HAS_BOTO3 = True @@ -97,7 +97,7 @@ def _get_conn(key=None, ''' client = None if profile: - if isinstance(profile, str): + if isinstance(profile, six.string_types): if profile in __pillar__: profile = __pillar__[profile] elif profile in __opts__: @@ -158,7 +158,7 @@ def create_file_system(name, import os import base64 creation_token = base64.b64encode(os.urandom(46), ['-', '_']) - tags = {"Key": "Name", "Value": name} + tags = [{"Key": "Name", "Value": name}] client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) @@ -223,10 +223,23 @@ def create_mount_target(filesystemid, client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) - return client.create_mount_point(FileSystemId=filesystemid, - SubnetId=subnetid, - IpAddress=ipaddress, - SecurityGroups=securitygroups) + if ipaddress is None and securitygroups is None: + return client.create_mount_target(FileSystemId=filesystemid, + SubnetId=subnetid) + + if ipaddress is None: + return client.create_mount_target(FileSystemId=filesystemid, + SubnetId=subnetid, + SecurityGroups=securitygroups) + if securitygroups is None: + return client.create_mount_target(FileSystemId=filesystemid, + SubnetId=subnetid, + IpAddress=ipaddress) + + return client.create_mount_target(FileSystemId=filesystemid, + SubnetId=subnetid, + IpAddress=ipaddress, + SecurityGroups=securitygroups) def create_tags(filesystemid, diff --git a/salt/modules/boto_elasticache.py b/salt/modules/boto_elasticache.py index 1cafff7d32..21de556b4c 100644 --- a/salt/modules/boto_elasticache.py +++ b/salt/modules/boto_elasticache.py @@ -51,7 +51,7 @@ import logging import time # Import Salt libs -import salt.ext.six as six +from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict diff --git a/salt/modules/boto_elasticsearch_domain.py b/salt/modules/boto_elasticsearch_domain.py index fce241f1c3..d4a156db5e 100644 --- a/salt/modules/boto_elasticsearch_domain.py +++ b/salt/modules/boto_elasticsearch_domain.py @@ -80,7 +80,7 @@ import logging import json # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.boto3 import salt.utils.compat import salt.utils diff --git a/salt/modules/boto_elb.py b/salt/modules/boto_elb.py index eed2759ca0..ddbb543d74 100644 --- a/salt/modules/boto_elb.py +++ b/salt/modules/boto_elb.py @@ -57,7 +57,7 @@ import salt.utils.odict as odict from salt.utils.versions import LooseVersion as _LooseVersion # Import third party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six import string_types try: import boto @@ -706,7 +706,7 @@ def register_instances(name, instances, region=None, key=None, keyid=None, ''' # convert instances to list type, enabling consistent use of instances # variable throughout the register_instances method - if isinstance(instances, str) or isinstance(instances, six.text_type): + if isinstance(instances, six.string_types) or isinstance(instances, six.text_type): instances = [instances] conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) @@ -750,7 +750,7 @@ def 
deregister_instances(name, instances, region=None, key=None, keyid=None, ''' # convert instances to list type, enabling consistent use of instances # variable throughout the deregister_instances method - if isinstance(instances, str) or isinstance(instances, six.text_type): + if isinstance(instances, six.string_types) or isinstance(instances, six.text_type): instances = [instances] conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) diff --git a/salt/modules/boto_elbv2.py b/salt/modules/boto_elbv2.py index 5af42e532d..11121b1960 100644 --- a/salt/modules/boto_elbv2.py +++ b/salt/modules/boto_elbv2.py @@ -47,7 +47,7 @@ log = logging.getLogger(__name__) # Import Salt libs # Import third party libs -import salt.ext.six as six +from salt.ext import six try: # pylint: disable=unused-import @@ -253,7 +253,7 @@ def register_targets(name, targets, region=None, key=None, keyid=None, salt myminion boto_elbv2.register_targets myelb "[instance_id,instance_id]" ''' targetsdict = [] - if isinstance(targets, str) or isinstance(targets, six.text_type): + if isinstance(targets, six.string_types) or isinstance(targets, six.text_type): targetsdict.append({"Id": targets}) else: for target in targets: @@ -290,7 +290,7 @@ def deregister_targets(name, targets, region=None, key=None, keyid=None, salt myminion boto_elbv2.deregister_targets myelb "[instance_id,instance_id]" ''' targetsdict = [] - if isinstance(targets, str) or isinstance(targets, six.text_type): + if isinstance(targets, six.string_types) or isinstance(targets, six.text_type): targetsdict.append({"Id": targets}) else: for target in targets: diff --git a/salt/modules/boto_iam.py b/salt/modules/boto_iam.py index 97eccd6616..a4d1ab9c1e 100644 --- a/salt/modules/boto_iam.py +++ b/salt/modules/boto_iam.py @@ -44,7 +44,7 @@ import json import yaml # Import salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.compat import salt.utils.odict as odict @@ -1955,7 +1955,7 @@ def list_policy_versions(policy_name, return ret.get('list_policy_versions_response', {}).get('list_policy_versions_result', {}).get('versions') except boto.exception.BotoServerError as e: log.debug(e) - msg = 'Failed to list {0} policy vesions.' + msg = 'Failed to list {0} policy versions.' 
log.error(msg.format(policy_name)) return [] diff --git a/salt/modules/boto_lambda.py b/salt/modules/boto_lambda.py index a30b28e27e..2b0e8bb95d 100644 --- a/salt/modules/boto_lambda.py +++ b/salt/modules/boto_lambda.py @@ -87,7 +87,7 @@ import time import random # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.compat import salt.utils.files from salt.utils.versions import LooseVersion as _LooseVersion diff --git a/salt/modules/boto_rds.py b/salt/modules/boto_rds.py index fbfcfecf7b..f57b9633de 100644 --- a/salt/modules/boto_rds.py +++ b/salt/modules/boto_rds.py @@ -63,7 +63,7 @@ from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) # Import third party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error try: #pylint: disable=unused-import diff --git a/salt/modules/boto_route53.py b/salt/modules/boto_route53.py index 97a5c0f3d2..23683d76ee 100644 --- a/salt/modules/boto_route53.py +++ b/salt/modules/boto_route53.py @@ -53,6 +53,7 @@ import time # Import salt libs import salt.utils.compat +import salt.utils.versions import salt.utils.odict as odict from salt.exceptions import SaltInvocationError from salt.utils.versions import LooseVersion as _LooseVersion @@ -255,7 +256,7 @@ def zone_exists(zone, region=None, key=None, keyid=None, profile=None, conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if retry_on_rate_limit or rate_limit_retries is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Neon', 'The \'retry_on_rate_limit\' and \'rate_limit_retries\' arguments ' 'have been deprecated in favor of \'retry_on_errors\' and ' @@ -387,7 +388,7 @@ def get_record(name, zone, record_type, fetch_all=False, region=None, key=None, conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if retry_on_rate_limit or rate_limit_retries is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Neon', 'The \'retry_on_rate_limit\' and \'rate_limit_retries\' arguments ' 'have been deprecated in favor of \'retry_on_errors\' and ' @@ -468,7 +469,7 @@ def add_record(name, value, zone, record_type, identifier=None, ttl=None, conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if retry_on_rate_limit or rate_limit_retries is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Neon', 'The \'retry_on_rate_limit\' and \'rate_limit_retries\' arguments ' 'have been deprecated in favor of \'retry_on_errors\' and ' @@ -555,7 +556,7 @@ def update_record(name, value, zone, record_type, identifier=None, ttl=None, _type = record_type.upper() if retry_on_rate_limit or rate_limit_retries is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Neon', 'The \'retry_on_rate_limit\' and \'rate_limit_retries\' arguments ' 'have been deprecated in favor of \'retry_on_errors\' and ' @@ -617,7 +618,7 @@ def delete_record(name, zone, record_type, identifier=None, all_records=False, _type = record_type.upper() if retry_on_rate_limit or rate_limit_retries is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Neon', 'The \'retry_on_rate_limit\' and \'rate_limit_retries\' arguments ' 'have been deprecated in favor of \'retry_on_errors\' and ' diff --git a/salt/modules/boto_secgroup.py b/salt/modules/boto_secgroup.py index 8cfba94e62..ac8c915f04 100644 --- a/salt/modules/boto_secgroup.py +++ b/salt/modules/boto_secgroup.py @@ -52,7 +52,7 @@ import logging log = 
logging.getLogger(__name__) # Import third party libs -import salt.ext.six as six +from salt.ext import six try: # pylint: disable=unused-import import boto @@ -270,9 +270,9 @@ def get_all_security_groups(groupnames=None, group_ids=None, filters=None, ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) - if isinstance(groupnames, str): + if isinstance(groupnames, six.string_types): groupnames = [groupnames] - if isinstance(group_ids, str): + if isinstance(group_ids, six.string_types): groupnames = [group_ids] interesting = ['description', 'id', 'instances', 'name', 'owner_id', diff --git a/salt/modules/boto_sqs.py b/salt/modules/boto_sqs.py index bc02e66d68..eb44f4a949 100644 --- a/salt/modules/boto_sqs.py +++ b/salt/modules/boto_sqs.py @@ -7,7 +7,7 @@ Connection module for Amazon SQS :configuration: This module accepts explicit sqs credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further - configuration is necessary. More Information available at: + configuration is necessary. More information available at: .. code-block:: text @@ -39,44 +39,68 @@ Connection module for Amazon SQS key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 -:depends: boto +:depends: boto3 ''' # keep lint from choking on _get_conn and _cache_id -#pylint: disable=E0602 +# pylint: disable=E0602 from __future__ import absolute_import # Import Python libs import logging import json -import salt.ext.six as six + +# Import 3rd-party libs +from salt.ext import six +from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module log = logging.getLogger(__name__) +__func_alias__ = { + 'list_': 'list', +} + # Import third party libs try: # pylint: disable=unused-import - import boto - import boto.sqs + import boto3 + import botocore # pylint: enable=unused-import - logging.getLogger('boto').setLevel(logging.CRITICAL) - HAS_BOTO = True + logging.getLogger('boto3').setLevel(logging.CRITICAL) + HAS_BOTO3 = True except ImportError: - HAS_BOTO = False - -from salt.ext.six import string_types + HAS_BOTO3 = False def __virtual__(): ''' - Only load if boto libraries exist. + Only load if boto3 libraries exist. ''' - if not HAS_BOTO: - return (False, 'The boto_sqs module could not be loaded: boto libraries not found') - __utils__['boto.assign_funcs'](__name__, 'sqs', pack=__salt__) + if not HAS_BOTO3: + return (False, 'The boto_sqs module could not be loaded: boto3 libraries not found') + __utils__['boto3.assign_funcs'](__name__, 'sqs', pack=__salt__) return True +def _preprocess_attributes(attributes): + ''' + Pre-process incoming queue attributes before setting them + ''' + if isinstance(attributes, six.string_types): + attributes = json.loads(attributes) + + def stringified(val): + # Some attributes take full json policy documents, but they take them + # as json strings. Convert the value back into a json string. + if isinstance(val, dict): + return json.dumps(val) + return val + + return dict( + (attr, stringified(val)) for attr, val in six.iteritems(attributes) + ) + + def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a queue exists. 
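
Illustrative sketch only, not part of the patch: the hunk that follows rewrites boto_sqs.exists() on boto3, treating the AWS.SimpleQueueService.NonExistentQueue error code from get_queue_url as "queue missing" and returning a result/error dict instead of a bare boolean. A standalone version of that check, with the queue name and region as placeholders, could look like this.

    import boto3
    import botocore.exceptions

    def queue_exists(name, region='us-east-1'):
        # get_queue_url raises a ClientError with a specific error code when
        # the queue does not exist; any other error is re-raised.
        conn = boto3.client('sqs', region_name=region)
        try:
            conn.get_queue_url(QueueName=name)
        except botocore.exceptions.ClientError as exc:
            missing = 'AWS.SimpleQueueService.NonExistentQueue'
            if exc.response.get('Error', {}).get('Code') == missing:
                return False
            raise
        return True

    queue_exists('myqueue')  # True or False, given valid AWS credentials
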
@@ -89,13 +113,24 @@ def exists(name, region=None, key=None, keyid=None, profile=None): ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) - if conn.get_queue(name): - return True - else: - return False + try: + conn.get_queue_url(QueueName=name) + except botocore.exceptions.ClientError as e: + missing_code = 'AWS.SimpleQueueService.NonExistentQueue' + if e.response.get('Error', {}).get('Code') == missing_code: + return {'result': False} + return {'error': __utils__['boto3.get_error'](e)} + return {'result': True} -def create(name, region=None, key=None, keyid=None, profile=None): +def create( + name, + attributes=None, + region=None, + key=None, + keyid=None, + profile=None, +): ''' Create an SQS queue. @@ -107,15 +142,15 @@ def create(name, region=None, key=None, keyid=None, profile=None): ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) - if not conn.get_queue(name): - try: - conn.create_queue(name) - except boto.exception.SQSError: - msg = 'Failed to create queue {0}'.format(name) - log.error(msg) - return False - log.info('Created queue {0}'.format(name)) - return True + if attributes is None: + attributes = {} + attributes = _preprocess_attributes(attributes) + + try: + conn.create_queue(QueueName=name, Attributes=attributes) + except botocore.exceptions.ClientError as e: + return {'error': __utils__['boto3.get_error'](e)} + return {'result': True} def delete(name, region=None, key=None, keyid=None, profile=None): @@ -130,37 +165,15 @@ def delete(name, region=None, key=None, keyid=None, profile=None): ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) - queue_obj = conn.get_queue(name) - if queue_obj: - deleted_queue = conn.delete_queue(queue_obj) - if not deleted_queue: - msg = 'Failed to delete queue {0}'.format(name) - log.error(msg) - return False - return True - - -def get_all_queues(prefix=None, region=None, key=None, keyid=None, profile=None): - ''' - Return a list of Queue() objects describing all visible queues. - - .. versionadded:: 2016.11.0 - - CLI Example: - - .. code-block:: bash - - salt myminion boto_sqs.get_all_queues region=us-east-1 --output yaml - ''' - conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: - return conn.get_all_queues(prefix=prefix) - except boto.exception.SQSError: - log.error('Error listing queues') - return [] + url = conn.get_queue_url(QueueName=name)['QueueUrl'] + conn.delete_queue(QueueUrl=url) + except botocore.exceptions.ClientError as e: + return {'error': __utils__['boto3.get_error'](e)} + return {'result': True} -def list(prefix=None, region=None, key=None, keyid=None, profile=None): +def list_(prefix='', region=None, key=None, keyid=None, profile=None): ''' Return a list of the names of all visible queues. 
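
The hunk that follows replaces get_all_queues()/list() with a boto3-backed list_() (exposed as "list" via __func_alias__) that derives queue names from the queue URLs returned by list_queues. A standalone sketch of that extraction, with a made-up URL, is shown below; it mirrors the extract_name helper in the hunk.

    from six.moves.urllib.parse import urlparse

    def extract_name(queue_url):
        # An SQS queue URL has the form
        # https://sqs.<region>.amazonaws.com/<account-id>/<queue-name>,
        # so the queue name is the third path component.
        return urlparse(queue_url).path.split('/')[2]

    print(extract_name('https://sqs.us-east-1.amazonaws.com/123456789012/myqueue'))
    # myqueue
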
@@ -172,8 +185,19 @@ def list(prefix=None, region=None, key=None, keyid=None, profile=None): salt myminion boto_sqs.list region=us-east-1 ''' - return [r.name for r in get_all_queues(prefix=prefix, region=region, key=key, - keyid=keyid, profile=profile)] + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + + def extract_name(queue_url): + # Note: this logic taken from boto, so should be safe + return _urlparse(queue_url).path.split('/')[2] + + try: + r = conn.list_queues(QueueNamePrefix=prefix) + # The 'QueueUrls' attribute is missing if there are no queues + urls = r.get('QueueUrls', []) + return {'result': [extract_name(url) for url in urls]} + except botocore.exceptions.ClientError as e: + return {'error': __utils__['boto3.get_error'](e)} def get_attributes(name, region=None, key=None, keyid=None, profile=None): @@ -187,17 +211,23 @@ def get_attributes(name, region=None, key=None, keyid=None, profile=None): salt myminion boto_sqs.get_attributes myqueue ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) - if not conn: - return {} - queue_obj = conn.get_queue(name) - if not queue_obj: - log.error('Queue {0} does not exist.'.format(name)) - return {} - return conn.get_queue_attributes(queue_obj) + + try: + url = conn.get_queue_url(QueueName=name)['QueueUrl'] + r = conn.get_queue_attributes(QueueUrl=url, AttributeNames=['All']) + return {'result': r['Attributes']} + except botocore.exceptions.ClientError as e: + return {'error': __utils__['boto3.get_error'](e)} -def set_attributes(name, attributes, region=None, key=None, keyid=None, - profile=None): +def set_attributes( + name, + attributes, + region=None, + key=None, + keyid=None, + profile=None, +): ''' Set attributes on an SQS queue. @@ -207,22 +237,13 @@ def set_attributes(name, attributes, region=None, key=None, keyid=None, salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1 ''' - ret = True conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) - queue_obj = conn.get_queue(name) - if not queue_obj: - log.error('Queue {0} does not exist.'.format(name)) - ret = False - if isinstance(attributes, string_types): - attributes = json.loads(attributes) - for attr, val in six.iteritems(attributes): - attr_set = queue_obj.set_attribute(attr, val) - if not attr_set: - msg = 'Failed to set attribute {0} = {1} on queue {2}' - log.error(msg.format(attr, val, name)) - ret = False - else: - msg = 'Set attribute {0} = {1} on queue {2}' - log.info(msg.format(attr, val, name)) - return ret + attributes = _preprocess_attributes(attributes) + + try: + url = conn.get_queue_url(QueueName=name)['QueueUrl'] + conn.set_queue_attributes(QueueUrl=url, Attributes=attributes) + except botocore.exceptions.ClientError as e: + return {'error': __utils__['boto3.get_error'](e)} + return {'result': True} diff --git a/salt/modules/boto_vpc.py b/salt/modules/boto_vpc.py index a7effb3c3a..75abc06a23 100644 --- a/salt/modules/boto_vpc.py +++ b/salt/modules/boto_vpc.py @@ -137,6 +137,7 @@ import random import salt.utils.boto import salt.utils.boto3 import salt.utils.compat +import salt.utils.versions from salt.exceptions import SaltInvocationError, CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion @@ -150,7 +151,7 @@ ACTIVE = 'active' log = logging.getLogger(__name__) # Import third party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error # pylint: 
disable=import-error try: @@ -2456,9 +2457,10 @@ def describe_route_table(route_table_id=None, route_table_name=None, ''' - salt.utils.warn_until('Oxygen', - 'The \'describe_route_table\' method has been deprecated and ' - 'replaced by \'describe_route_tables\'.' + salt.utils.versions.warn_until( + 'Oxygen', + 'The \'describe_route_table\' method has been deprecated and ' + 'replaced by \'describe_route_tables\'.' ) if not any((route_table_id, route_table_name, tags)): raise SaltInvocationError('At least one of the following must be specified: ' diff --git a/salt/modules/bower.py b/salt/modules/bower.py index 1b88d80446..ecf7e06134 100644 --- a/salt/modules/bower.py +++ b/salt/modules/bower.py @@ -16,7 +16,7 @@ import logging import shlex # Import salt libs -import salt.utils +import salt.utils.path from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion @@ -33,7 +33,7 @@ def __virtual__(): ''' Only work when Bower is installed ''' - if salt.utils.which('bower') is None: + if salt.utils.path.which('bower') is None: return (False, 'The bower module could not be loaded: bower command not found') return True diff --git a/salt/modules/bridge.py b/salt/modules/bridge.py index 37799619b8..c37e6331f2 100644 --- a/salt/modules/bridge.py +++ b/salt/modules/bridge.py @@ -6,7 +6,7 @@ from __future__ import absolute_import import sys import re -import salt.utils +import salt.utils.path __func_alias__ = { @@ -31,7 +31,7 @@ def __virtual__(): } cur_os = __grains__['kernel'] for _os in supported_os_tool: - if cur_os == _os and salt.utils.which(supported_os_tool[cur_os]): + if cur_os == _os and salt.utils.path.which(supported_os_tool[cur_os]): return True return (False, 'The bridge execution module failed to load: requires one of the following tool/os' ' combinations: ifconfig on FreeBSD/OpenBSD, brctl on Linux or brconfig on NetBSD.') @@ -41,7 +41,7 @@ def _tool_path(ostool): ''' Internal, returns tools path ''' - return salt.utils.which(ostool) + return salt.utils.path.which(ostool) def _linux_brshow(br=None): diff --git a/salt/modules/bsd_shadow.py b/salt/modules/bsd_shadow.py index da39dbffa0..4194a7f196 100644 --- a/salt/modules/bsd_shadow.py +++ b/salt/modules/bsd_shadow.py @@ -17,7 +17,7 @@ except ImportError: pass # Import salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.files from salt.exceptions import SaltInvocationError diff --git a/salt/modules/btrfs.py b/salt/modules/btrfs.py index 8e37ed452e..ed4359cba5 100644 --- a/salt/modules/btrfs.py +++ b/salt/modules/btrfs.py @@ -27,12 +27,12 @@ import logging # Import Salt libs -import salt.utils import salt.utils.fsutils +import salt.utils.platform from salt.exceptions import CommandExecutionError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -41,7 +41,7 @@ def __virtual__(): ''' Only work on POSIX-like systems ''' - return not salt.utils.is_windows() and __grains__.get('kernel') == 'Linux' + return not salt.utils.platform.is_windows() and __grains__.get('kernel') == 'Linux' def version(): diff --git a/salt/modules/cabal.py b/salt/modules/cabal.py index ed6029f337..3260ba442b 100644 --- a/salt/modules/cabal.py +++ b/salt/modules/cabal.py @@ -10,7 +10,7 @@ from __future__ import absolute_import import logging -import salt.utils +import salt.utils.path from salt.exceptions import CommandExecutionError logger = logging.getLogger(__name__) @@ -25,8 +25,8 @@ def __virtual__(): ''' Only work when 
cabal-install is installed. ''' - return (salt.utils.which('cabal') is not None) and \ - (salt.utils.which('ghc-pkg') is not None) + return (salt.utils.path.which('cabal') is not None) and \ + (salt.utils.path.which('ghc-pkg') is not None) def update(user=None, env=None): diff --git a/salt/modules/capirca_acl.py b/salt/modules/capirca_acl.py index 594b730eec..884c0c1e7e 100644 --- a/salt/modules/capirca_acl.py +++ b/salt/modules/capirca_acl.py @@ -32,15 +32,15 @@ import datetime log = logging.getLogger(__file__) # Import third party libs +from salt.ext import six try: import aclgen HAS_CAPIRCA = True except ImportError: HAS_CAPIRCA = False -# Import Salt modules +# Import Salt libs import salt.utils.files -from salt.ext import six # ------------------------------------------------------------------------------ # module properties diff --git a/salt/modules/cassandra.py b/salt/modules/cassandra.py index 4cd790fcfa..ba5433120d 100644 --- a/salt/modules/cassandra.py +++ b/salt/modules/cassandra.py @@ -18,7 +18,7 @@ import logging log = logging.getLogger(__name__) # Import salt libs -import salt.utils +import salt.utils.path HAS_PYCASSA = False try: @@ -35,7 +35,7 @@ def __virtual__(): if not HAS_PYCASSA: return (False, 'The cassandra execution module cannot be loaded: pycassa not installed.') - if HAS_PYCASSA and salt.utils.which('nodetool'): + if HAS_PYCASSA and salt.utils.path.which('nodetool'): return 'cassandra' return (False, 'The cassandra execution module cannot be loaded: nodetool not found.') diff --git a/salt/modules/cassandra_cql.py b/salt/modules/cassandra_cql.py index 67887308e4..b377a49556 100644 --- a/salt/modules/cassandra_cql.py +++ b/salt/modules/cassandra_cql.py @@ -85,7 +85,7 @@ import ssl # Import Salt Libs from salt.exceptions import CommandExecutionError -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range SSL_VERSION = 'ssl_version' diff --git a/salt/modules/chassis.py b/salt/modules/chassis.py index 9d53de7290..6176070f87 100644 --- a/salt/modules/chassis.py +++ b/salt/modules/chassis.py @@ -17,7 +17,7 @@ from __future__ import absolute_import # Import python libs import logging -import salt.utils +import salt.utils.platform log = logging.getLogger(__name__) @@ -30,7 +30,7 @@ def __virtual__(): ''' Only work on proxy ''' - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return __virtualname__ return (False, 'The chassis execution module cannot be loaded: ' 'this only works in proxy minions.') diff --git a/salt/modules/chef.py b/salt/modules/chef.py index e2e1345ff9..45ec239e1d 100644 --- a/salt/modules/chef.py +++ b/salt/modules/chef.py @@ -10,11 +10,12 @@ import os import tempfile # Import Salt libs -import salt.utils -import salt.utils.decorators as decorators +import salt.utils.path +import salt.utils.platform +import salt.utils.decorators.path # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -23,7 +24,7 @@ def __virtual__(): ''' Only load if chef is installed ''' - if not salt.utils.which('chef-client'): + if not salt.utils.path.which('chef-client'): return (False, 'Cannot load chef module: chef-client not found') return True @@ -32,7 +33,7 @@ def _default_logfile(exe_name): ''' Retrieve the logfile name ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): tmp_dir = os.path.join(__opts__['cachedir'], 'tmp') if not os.path.isdir(tmp_dir): os.mkdir(tmp_dir) @@ -43,7 +44,7 @@ def _default_logfile(exe_name): logfile = 
logfile_tmp.name logfile_tmp.close() else: - logfile = salt.utils.path_join( + logfile = salt.utils.path.join( '/var/log', '{0}.log'.format(exe_name) ) @@ -51,7 +52,7 @@ def _default_logfile(exe_name): return logfile -@decorators.which('chef-client') +@salt.utils.decorators.path.which('chef-client') def client(whyrun=False, localmode=False, logfile=None, @@ -140,7 +141,7 @@ def client(whyrun=False, return _exec_cmd(*args, **kwargs) -@decorators.which('chef-solo') +@salt.utils.decorators.path.which('chef-solo') def solo(whyrun=False, logfile=None, **kwargs): diff --git a/salt/modules/chocolatey.py b/salt/modules/chocolatey.py index 87895af323..1226505f47 100644 --- a/salt/modules/chocolatey.py +++ b/salt/modules/chocolatey.py @@ -15,6 +15,7 @@ import tempfile # Import salt libs import salt.utils +import salt.utils.platform from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import CommandExecutionError, CommandNotFoundError, \ SaltInvocationError @@ -36,7 +37,7 @@ def __virtual__(): for simulating UAC forces a GUI prompt, and is not compatible with salt-minion running as SYSTEM. ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return (False, 'Cannot load module chocolatey: Chocolatey requires ' 'Windows') diff --git a/salt/modules/chronos.py b/salt/modules/chronos.py index 6b4e53c337..066e0b80ca 100644 --- a/salt/modules/chronos.py +++ b/salt/modules/chronos.py @@ -10,8 +10,8 @@ from __future__ import absolute_import import json import logging -import salt.utils import salt.utils.http +import salt.utils.platform from salt.exceptions import get_error_message @@ -21,7 +21,7 @@ log = logging.getLogger(__file__) def __virtual__(): # only valid in proxy minions for now - return salt.utils.is_proxy() and 'proxy' in __opts__ + return salt.utils.platform.is_proxy() and 'proxy' in __opts__ def _base_url(): diff --git a/salt/modules/cisconso.py b/salt/modules/cisconso.py index a6d1f2b0d9..6b83086714 100644 --- a/salt/modules/cisconso.py +++ b/salt/modules/cisconso.py @@ -9,15 +9,15 @@ for :mod:`salt.proxy.cisconso`. 
''' from __future__ import absolute_import -import salt.utils -import salt.ext.six as six +import salt.utils.platform +from salt.ext import six __proxyenabled__ = ['cisconso'] __virtualname__ = 'cisconso' def __virtual__(): - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return __virtualname__ return (False, 'The cisconso execution module failed to load: ' 'only available on proxy minions.') diff --git a/salt/modules/cloud.py b/salt/modules/cloud.py index 062386b72b..796da4c622 100644 --- a/salt/modules/cloud.py +++ b/salt/modules/cloud.py @@ -21,7 +21,7 @@ import salt.utils from salt.exceptions import SaltCloudConfigError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index a864e8a7be..862b1d4b70 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -25,13 +25,18 @@ import tempfile # Import salt libs import salt.utils +import salt.utils.args import salt.utils.files +import salt.utils.path +import salt.utils.platform import salt.utils.powershell -import salt.utils.timed_subprocess -import salt.grains.extra -import salt.ext.six as six -from salt.utils import vt +import salt.utils.stringutils import salt.utils.templates +import salt.utils.timed_subprocess +import salt.utils.versions +import salt.utils.vt +import salt.grains.extra +from salt.ext import six from salt.exceptions import CommandExecutionError, TimedProcTimeoutError, \ SaltInvocationError from salt.log import LOG_LEVELS @@ -45,7 +50,7 @@ try: except ImportError: pass -if salt.utils.is_windows(): +if salt.utils.platform.is_windows(): from salt.utils.win_runas import runas as win_runas HAS_WIN_RUNAS = True else: @@ -294,6 +299,9 @@ def _run(cmd, if runas is None and '__context__' in globals(): runas = __context__.get('runas') + if password is None and '__context__' in globals(): + password = __context__.get('runas_password') + # Set the default working directory to the home directory of the user # salt-minion is running as. Defaults to home directory of user under which # the minion is running. @@ -306,18 +314,18 @@ def _run(cmd, # the euid might not have access to it. See issue #1844 if not os.access(cwd, os.R_OK): cwd = '/' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): cwd = os.path.abspath(os.sep) else: # Handle edge cases where numeric/other input is entered, and would be # yaml-ified into non-string types - cwd = str(cwd) + cwd = six.text_type(cwd) - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): if not os.path.isfile(shell) or not os.access(shell, os.X_OK): msg = 'The shell {0} is not available'.format(shell) raise CommandExecutionError(msg) - if salt.utils.is_windows() and use_vt: # Memozation so not much overhead + if salt.utils.platform.is_windows() and use_vt: # Memozation so not much overhead raise CommandExecutionError('VT not available on windows') if shell.lower().strip() == 'powershell': @@ -362,8 +370,8 @@ def _run(cmd, def _get_stripped(cmd): # Return stripped command string copies to improve logging. if isinstance(cmd, list): - return [x.strip() if isinstance(x, str) else x for x in cmd] - elif isinstance(cmd, str): + return [x.strip() if isinstance(x, six.string_types) else x for x in cmd] + elif isinstance(cmd, six.string_types): return cmd.strip() else: return cmd @@ -373,7 +381,7 @@ def _run(cmd, # requested. The command output is what will be controlled by the # 'loglevel' parameter. 
msg = ( - 'Executing command {0}{1}{0} {2}in directory \'{3}\'{4}'.format( + u'Executing command {0}{1}{0} {2}in directory \'{3}\'{4}'.format( '\'' if not isinstance(cmd, list) else '', _get_stripped(cmd), 'as user \'{0}\' '.format(runas) if runas else '', @@ -384,7 +392,7 @@ def _run(cmd, ) log.info(log_callback(msg)) - if runas and salt.utils.is_windows(): + if runas and salt.utils.platform.is_windows(): if not password: msg = 'password is a required argument for runas on Windows' raise CommandExecutionError(msg) @@ -394,7 +402,7 @@ def _run(cmd, raise CommandExecutionError(msg) if not isinstance(cmd, list): - cmd = salt.utils.shlex_split(cmd, posix=False) + cmd = salt.utils.args.shlex_split(cmd, posix=False) cmd = ' '.join(cmd) @@ -457,7 +465,7 @@ def _run(cmd, ) if reset_system_locale is True: - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): # Default to C! # Salt only knows how to parse English words # Don't override if the user has passed LC_ALL @@ -520,7 +528,7 @@ def _run(cmd, runas, _umask) - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): # close_fds is not supported on Windows platforms if you redirect # stdin/stdout/stderr if kwargs['shell'] is True: @@ -535,9 +543,9 @@ def _run(cmd, if python_shell is not True and not isinstance(cmd, list): posix = True - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): posix = False - cmd = salt.utils.shlex_split(cmd, posix=posix) + cmd = salt.utils.args.shlex_split(cmd, posix=posix) if not use_vt: # This is where the magic happens try: @@ -574,17 +582,21 @@ def _run(cmd, ret['retcode'] = 1 return ret - out, err = proc.stdout, proc.stderr - if err is None: - # Will happen if redirect_stderr is True, since stderr was sent to - # stdout. - err = '' + try: + out = proc.stdout.decode(__salt_system_encoding__) + except AttributeError: + out = u'' + + try: + err = proc.stderr.decode(__salt_system_encoding__) + except AttributeError: + err = u'' if rstrip: if out is not None: - out = salt.utils.to_str(out).rstrip() + out = out.rstrip() if err is not None: - err = salt.utils.to_str(err).rstrip() + err = err.rstrip() ret['pid'] = proc.process.pid ret['retcode'] = proc.process.returncode ret['stdout'] = out @@ -603,7 +615,7 @@ def _run(cmd, else: will_timeout = -1 try: - proc = vt.Terminal(cmd, + proc = salt.utils.vt.Terminal(cmd, shell=True, log_stdout=True, log_stderr=True, @@ -642,7 +654,7 @@ def _run(cmd, ret['stderr'] = 'SALT: User break\n{0}'.format(stderr) ret['retcode'] = 1 break - except vt.TerminalException as exc: + except salt.utils.vt.TerminalException as exc: log.error( 'VT: {0}'.format(exc), exc_info_on_loglevel=logging.DEBUG) @@ -770,6 +782,7 @@ def run(cmd, bg=False, password=None, encoded_cmd=False, + raise_err=False, **kwargs): r''' Execute the passed command and return the output as a string @@ -873,6 +886,10 @@ def run(cmd, :param bool encoded_cmd: Specify if the supplied command is encoded. Only applies to shell 'powershell'. + :param bool raise_err: Specifies whether to raise a CommandExecutionError. + If False, the error will be logged, but no exception will be raised. + Default is False. + .. warning:: This function does not process commands through a shell unless the python_shell flag is set to True. 
This means that any @@ -962,7 +979,9 @@ def run(cmd, ) ) log.error(log_callback(msg)) - log.log(lvl, 'output: {0}'.format(log_callback(ret['stdout']))) + if raise_err: + raise CommandExecutionError(log_callback(ret['stdout'])) + log.log(lvl, u'output: %s', log_callback(ret['stdout'])) return ret['stdout'] @@ -1720,9 +1739,9 @@ def run_all(cmd, ) log.error(log_callback(msg)) if ret['stdout']: - log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout']))) + log.log(lvl, u'stdout: {0}'.format(log_callback(ret['stdout']))) if ret['stderr']: - log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr']))) + log.log(lvl, u'stderr: {0}'.format(log_callback(ret['stderr']))) if ret['retcode']: log.log(lvl, 'retcode: {0}'.format(ret['retcode'])) return ret @@ -2101,7 +2120,7 @@ def script(source, ) if '__env__' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'__env__\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -2109,7 +2128,7 @@ def script(source, ) kwargs.pop('__env__') - if salt.utils.is_windows() and runas and cwd is None: + if salt.utils.platform.is_windows() and runas and cwd is None: cwd = tempfile.mkdtemp(dir=__opts__['cachedir']) __salt__['win_dacl.add_ace']( cwd, 'File', runas, 'READ&EXECUTE', 'ALLOW', @@ -2127,7 +2146,7 @@ def script(source, saltenv, **kwargs) if not fn_: - if salt.utils.is_windows() and runas: + if salt.utils.platform.is_windows() and runas: _cleanup_tempfile(cwd) else: _cleanup_tempfile(path) @@ -2139,7 +2158,7 @@ def script(source, else: fn_ = __salt__['cp.cache_file'](source, saltenv) if not fn_: - if salt.utils.is_windows() and runas: + if salt.utils.platform.is_windows() and runas: _cleanup_tempfile(cwd) else: _cleanup_tempfile(path) @@ -2149,7 +2168,7 @@ def script(source, 'stderr': '', 'cache_error': True} shutil.copyfile(fn_, path) - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): os.chmod(path, 320) os.chown(path, __salt__['file.user_to_uid'](runas), -1) ret = _run(path + ' ' + str(args) if args else path, @@ -2169,7 +2188,7 @@ def script(source, bg=bg, password=password, **kwargs) - if salt.utils.is_windows() and runas: + if salt.utils.platform.is_windows() and runas: _cleanup_tempfile(cwd) else: _cleanup_tempfile(path) @@ -2317,7 +2336,7 @@ def script_retcode(source, salt '*' cmd.script_retcode salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' ''' if '__env__' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'__env__\' has been detected in the argument list. 
This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -2355,7 +2374,7 @@ def which(cmd): salt '*' cmd.which cat ''' - return salt.utils.which(cmd) + return salt.utils.path.which(cmd) def which_bin(cmds): @@ -2368,7 +2387,7 @@ def which_bin(cmds): salt '*' cmd.which_bin '[pip2, pip, pip-python]' ''' - return salt.utils.which_bin(cmds) + return salt.utils.path.which_bin(cmds) def has_exec(cmd): @@ -2451,7 +2470,7 @@ def tty(device, echo=''): return {'Error': 'The specified device is not a valid TTY'} try: with salt.utils.files.fopen(teletype, 'wb') as tty_device: - tty_device.write(salt.utils.to_bytes(echo)) + tty_device.write(salt.utils.stringutils.to_bytes(echo)) return { 'Success': 'Message was successfully echoed to {0}'.format(teletype) } @@ -2656,7 +2675,7 @@ def _is_valid_shell(shell): Attempts to search for valid shells on a system and see if a given shell is in the list ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return True # Don't even try this for Windows shells = '/etc/shells' available_shells = [] @@ -2767,7 +2786,7 @@ def shell_info(shell, list_modules=False): } # Ensure ret['installed'] always as a value of True, False or None (not sure) ret = {'installed': False} - if salt.utils.is_windows() and shell == 'powershell': + if salt.utils.platform.is_windows() and shell == 'powershell': pw_keys = __salt__['reg.list_keys']( 'HKEY_LOCAL_MACHINE', 'Software\\Microsoft\\PowerShell') @@ -2831,7 +2850,7 @@ def shell_info(shell, list_modules=False): # We need to assume ports of unix shells to windows will look after # themselves in setting HOME as they do it in many different ways newenv = os.environ - if ('HOME' not in newenv) and (not salt.utils.is_windows()): + if ('HOME' not in newenv) and (not salt.utils.platform.is_windows()): newenv['HOME'] = os.path.expanduser('~') log.debug('HOME environment set to {0}'.format(newenv['HOME'])) try: diff --git a/salt/modules/composer.py b/salt/modules/composer.py index f880dbb18c..a7a38f28bc 100644 --- a/salt/modules/composer.py +++ b/salt/modules/composer.py @@ -9,7 +9,8 @@ import logging import os.path # Import salt libs -import salt.utils +import salt.utils.args +import salt.utils.path from salt.exceptions import ( CommandExecutionError, CommandNotFoundError, @@ -35,7 +36,7 @@ def _valid_composer(composer): ''' Validate the composer file is indeed there. 
''' -    if salt.utils.which(composer): +    if salt.utils.path.which(composer): return True return False @@ -152,7 +153,7 @@ def _run_composer(action, cmd = [composer, action, '--no-interaction', '--no-ansi'] if extra_flags is not None: -        cmd.extend(salt.utils.shlex_split(extra_flags)) +        cmd.extend(salt.utils.args.shlex_split(extra_flags)) # If php is set, prepend it if php is not None: diff --git a/salt/modules/config.py b/salt/modules/config.py index 9916950472..e7b1445a8d 100644 --- a/salt/modules/config.py +++ b/salt/modules/config.py @@ -13,6 +13,7 @@ import logging # Import salt libs import salt.config import salt.utils +import salt.utils.platform try: # Gated for salt-ssh (salt.utils.cloud imports msgpack) import salt.utils.cloud @@ -25,9 +26,9 @@ import salt.syspaths as syspaths import salt.utils.sdb as sdb # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six -if salt.utils.is_windows(): +if salt.utils.platform.is_windows(): _HOSTS_FILE = os.path.join( os.environ['SystemRoot'], 'System32', 'drivers', 'etc', 'hosts') else: @@ -176,14 +177,14 @@ def merge(value, if not omit_opts: if value in __opts__: ret = __opts__[value] -            if isinstance(ret, str): +            if isinstance(ret, six.string_types): return ret if not omit_master: if value in __pillar__.get('master', {}): tmp = __pillar__['master'][value] if ret is None: ret = tmp -            if isinstance(ret, str): +            if isinstance(ret, six.string_types): return ret elif isinstance(ret, dict) and isinstance(tmp, dict): tmp.update(ret) @@ -196,7 +197,7 @@ def merge(value, tmp = __pillar__[value] if ret is None: ret = tmp -            if isinstance(ret, str): +            if isinstance(ret, six.string_types): return ret elif isinstance(ret, dict) and isinstance(tmp, dict): tmp.update(ret) diff --git a/salt/modules/consul.py b/salt/modules/consul.py index 049831b082..68a4bd3288 100644 --- a/salt/modules/consul.py +++ b/salt/modules/consul.py @@ -205,7 +205,11 @@ def get(consul_url=None, key=None, recurse=False, decode=False, raw=False): if ret['res']: if decode: for item in ret['data']: -                item['Value'] = base64.b64decode(item['Value']) +                if item['Value'] is not None: +                    item['Value'] = base64.b64decode(item['Value']) +                else: +                    item['Value'] = "" + return ret @@ -1978,6 +1982,8 @@ def acl_create(consul_url=None, **kwargs): Create a new ACL token. :param consul_url: The Consul server URL. +    :param id: Unique identifier for the ACL to create +        leave it blank to let consul server generate one :param name: Meaningful indicator of the ACL's purpose. :param type: Type is either client or management. 
A management token is comparable to a root user and has the @@ -2008,6 +2014,9 @@ def acl_create(consul_url=None, **kwargs): else: raise SaltInvocationError('Required argument "name" is missing.') + if 'id' in kwargs: + data['ID'] = kwargs['id'] + if 'type' in kwargs: data['Type'] = kwargs['type'] @@ -2126,7 +2135,7 @@ def acl_delete(consul_url=None, **kwargs): ret['res'] = False return ret - function = 'acl/delete/{0}'.format(kwargs['id']) + function = 'acl/destroy/{0}'.format(kwargs['id']) res = _query(consul_url=consul_url, data=data, method='PUT', diff --git a/salt/modules/container_resource.py b/salt/modules/container_resource.py index 4148a90659..b3ddc46dfa 100644 --- a/salt/modules/container_resource.py +++ b/salt/modules/container_resource.py @@ -21,9 +21,10 @@ import time import traceback # Import salt libs -import salt.utils +import salt.utils.args +import salt.utils.path +import salt.utils.vt from salt.exceptions import CommandExecutionError, SaltInvocationError -from salt.utils import vt log = logging.getLogger(__name__) @@ -54,12 +55,12 @@ def _validate(wrapped): 'Invalid command execution driver. Valid drivers are: {0}' .format(', '.join(valid_driver[container_type])) ) - if exec_driver == 'lxc-attach' and not salt.utils.which('lxc-attach'): + if exec_driver == 'lxc-attach' and not salt.utils.path.which('lxc-attach'): raise SaltInvocationError( 'The \'lxc-attach\' execution driver has been chosen, but ' 'lxc-attach is not available. LXC may not be installed.' ) - return wrapped(*args, **salt.utils.clean_kwargs(**kwargs)) + return wrapped(*args, **salt.utils.args.clean_kwargs(**kwargs)) return wrapper @@ -220,7 +221,7 @@ def run(name, ignore_retcode=ignore_retcode) else: stdout, stderr = '', '' - proc = vt.Terminal( + proc = salt.utils.vt.Terminal( full_cmd, shell=python_shell, log_stdin_level='quiet' if output_loglevel == 'quiet' else 'info', @@ -251,7 +252,7 @@ def run(name, 'pid': 2, 'stdout': stdout, 'stderr': stderr} - except vt.TerminalException: + except salt.utils.vt.TerminalException: trace = traceback.format_exc() log.error(trace) ret = stdout if output is None \ diff --git a/salt/modules/cp.py b/salt/modules/cp.py index b525b25c53..2b6ecae96b 100644 --- a/salt/modules/cp.py +++ b/salt/modules/cp.py @@ -17,6 +17,7 @@ import salt.fileclient import salt.utils import salt.utils.files import salt.utils.gzip_util +import salt.utils.locales import salt.utils.templates import salt.utils.url import salt.crypt @@ -25,7 +26,7 @@ from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -428,7 +429,11 @@ def cache_file(path, saltenv='base'): It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. 
''' - contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) + path = salt.utils.locales.sdecode(path) + saltenv = salt.utils.locales.sdecode(saltenv) + + contextkey = u'{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) + path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: @@ -454,9 +459,8 @@ def cache_file(path, saltenv='base'): result = _client().cache_file(path, saltenv) if not result: log.error( - 'Unable to cache file \'{0}\' from saltenv \'{1}\'.'.format( - path, saltenv - ) + u'Unable to cache file \'%s\' from saltenv \'%s\'.', + path, saltenv ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent diff --git a/salt/modules/cpan.py b/salt/modules/cpan.py index 5dff7c307f..509ce78913 100644 --- a/salt/modules/cpan.py +++ b/salt/modules/cpan.py @@ -12,8 +12,8 @@ import os.path import logging # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path log = logging.getLogger(__name__) @@ -27,7 +27,7 @@ def __virtual__(): ''' Only work on supported POSIX-like systems ''' - if salt.utils.which('cpan'): + if salt.utils.path.which('cpan'): return True return (False, 'Unable to locate cpan. Make sure it is installed and in the PATH.') diff --git a/salt/modules/cron.py b/salt/modules/cron.py index 49a6c71f10..c387da3d63 100644 --- a/salt/modules/cron.py +++ b/salt/modules/cron.py @@ -16,6 +16,8 @@ import random # Import salt libs import salt.utils import salt.utils.files +import salt.utils.path +import salt.utils.stringutils from salt.ext.six.moves import range from salt.utils.locales import sdecode @@ -25,7 +27,7 @@ SALT_CRON_NO_IDENTIFIER = 'NO ID SET' def __virtual__(): - if salt.utils.which('crontab'): + if salt.utils.path.which('crontab'): return True else: return (False, 'Cannot load cron module: crontab command not found') @@ -33,7 +35,7 @@ def __virtual__(): def _encode(string): try: - string = salt.utils.to_str(string) + string = salt.utils.stringutils.to_str(string) except TypeError: if not string: string = '' diff --git a/salt/modules/cryptdev.py b/salt/modules/cryptdev.py index 605f2f5058..3063bb1936 100644 --- a/salt/modules/cryptdev.py +++ b/salt/modules/cryptdev.py @@ -12,12 +12,12 @@ import os import re # Import salt libraries -import salt.utils import salt.utils.files +import salt.utils.platform from salt.exceptions import CommandExecutionError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import filter, zip # pylint: disable=import-error,redefined-builtin # Set up logger @@ -31,7 +31,7 @@ def __virtual__(): ''' Only load on POSIX-like systems ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return (False, 'The cryptdev module cannot be loaded: not a POSIX-like system') return True diff --git a/salt/modules/csf.py b/salt/modules/csf.py index 353462c6a1..264a159cde 100644 --- a/salt/modules/csf.py +++ b/salt/modules/csf.py @@ -12,8 +12,8 @@ from __future__ import absolute_import import re # Import Salt Libs +import salt.utils.path from salt.exceptions import CommandExecutionError, SaltInvocationError -import salt.utils from salt.ext.six.moves import map @@ -21,7 +21,7 @@ def __virtual__(): ''' Only load if csf exists on the system ''' - if salt.utils.which('csf') is None: + if salt.utils.path.which('csf') is None: return (False, 'The csf execution module cannot be loaded: csf unavailable.') else: @@ -77,7 +77,7 @@ def __csf_cmd(cmd): ''' Execute csf 
command ''' - csf_cmd = '{0} {1}'.format(salt.utils.which('csf'), cmd) + csf_cmd = '{0} {1}'.format(salt.utils.path.which('csf'), cmd) out = __salt__['cmd.run_all'](csf_cmd) if out['retcode'] != 0: diff --git a/salt/modules/cyg.py b/salt/modules/cyg.py index 3bce460079..2174be017f 100644 --- a/salt/modules/cyg.py +++ b/salt/modules/cyg.py @@ -16,10 +16,13 @@ import bz2 from salt.ext.six.moves.urllib.request import urlopen as _urlopen # pylint: disable=no-name-in-module,import-error # Import Salt libs -import salt.utils import salt.utils.files +import salt.utils.platform from salt.exceptions import SaltInvocationError +# Import 3rd-party libs +from salt.ext import six + LOG = logging.getLogger(__name__) @@ -31,8 +34,10 @@ __virtualname__ = 'cyg' def __virtual__(): - """Only works on Windows systems.""" - if salt.utils.is_windows(): + ''' + Only works on Windows systems + ''' + if salt.utils.platform.is_windows(): return __virtualname__ return (False, 'Module cyg: module only works on Windows systems.') @@ -306,7 +311,7 @@ def list_(package='', cyg_arch='x86_64'): args = ' '.join(['-c', '-d', package]) stdout = _cygcheck(args, cyg_arch=cyg_arch) lines = [] - if isinstance(stdout, str): + if isinstance(stdout, six.string_types): lines = str(stdout).splitlines() for line in lines: match = re.match(r'^([^ ]+) *([^ ]+)', line) diff --git a/salt/modules/daemontools.py b/salt/modules/daemontools.py index 367bb6cee3..326e97086e 100644 --- a/salt/modules/daemontools.py +++ b/salt/modules/daemontools.py @@ -21,7 +21,7 @@ import os.path import re # Import salt libs -import salt.utils +import salt.utils.path from salt.exceptions import CommandExecutionError # Function alias to not shadow built-ins. @@ -48,7 +48,7 @@ for service_dir in VALID_SERVICE_DIRS: def __virtual__(): # Ensure that daemontools is installed properly. BINS = frozenset(('svc', 'supervise', 'svok')) - if all(salt.utils.which(b) for b in BINS) and SERVICE_DIR: + if all(salt.utils.path.which(b) for b in BINS) and SERVICE_DIR: return __virtualname__ return False diff --git a/salt/modules/data.py b/salt/modules/data.py index 80b7540224..548016203a 100644 --- a/salt/modules/data.py +++ b/salt/modules/data.py @@ -15,7 +15,7 @@ import salt.utils.files import salt.payload # Import 3rd-party lib -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/modules/datadog_api.py b/salt/modules/datadog_api.py index 127d16264a..e59ba305ff 100644 --- a/salt/modules/datadog_api.py +++ b/salt/modules/datadog_api.py @@ -2,10 +2,7 @@ ''' An execution module that interacts with the Datadog API -Common parameters: - -scope - The scope of the request +The following parameters are required for all functions. api_key The datadog API key @@ -45,6 +42,10 @@ def _initialize_connection(api_key, app_key): ''' Initialize Datadog connection ''' + if api_key is None: + raise SaltInvocationError('api_key must be specified') + if app_key is None: + raise SaltInvocationError('app_key must be specified') options = { 'api_key': api_key, 'app_key': app_key @@ -52,31 +53,36 @@ def _initialize_connection(api_key, app_key): datadog.initialize(**options) -def schedule_downtime(scope, api_key=None, app_key=None, monitor_id=None, - start=None, end=None, message=None, recurrence=None, - timezone=None, test=False): +def schedule_downtime(scope, + api_key=None, + app_key=None, + monitor_id=None, + start=None, + end=None, + message=None, + recurrence=None, + timezone=None, + test=False): ''' Schedule downtime for a scope of monitors. 
-    monitor_id -        The ID of the monitor -    start -        Start time in seconds since the epoch -    end -        End time in seconds since the epoch -    message -        A message to send in a notification for this downtime -    recurrence -        Repeat this downtime periodically -    timezone -        Specify the timezone - CLI Example: .. code-block:: bash -        salt-call datadog.schedule_downtime 'host:app2' stop=$(date --date='30 -        minutes' +%s) app_key= api_key= +        salt-call datadog.schedule_downtime 'host:app2' \\ +            stop=$(date --date='30 minutes' +%s) \\ +            app_key='0123456789' \\ +            api_key='9876543210' + +    Optional arguments + +    :param monitor_id: The ID of the monitor +    :param start: Start time in seconds since the epoch +    :param end: End time in seconds since the epoch +    :param message: A message to send in a notification for this downtime +    :param recurrence: Repeat this downtime periodically +    :param timezone: Specify the timezone ''' ret = {'result': False, 'response': None, @@ -114,21 +120,25 @@ def schedule_downtime(scope, api_key=None, app_key=None, monitor_id=None, return ret -def cancel_downtime(api_key=None, app_key=None, scope=None, id=None): +def cancel_downtime(api_key=None, +                    app_key=None, +                    scope=None, +                    id=None): ''' Cancel a downtime by id or by scope. -    Either scope or id is required. - -    id -        The ID of the downtime - CLI Example: .. code-block:: bash -        salt-call datadog.cancel_downtime scope='host:app01' api_key= -        app_key=` +        salt-call datadog.cancel_downtime scope='host:app01' \\ +            api_key='0123456789' \\ +            app_key='9876543210' + +    Arguments - Either scope or id is required. + +    :param id: The downtime ID +    :param scope: The downtime scope ''' if api_key is None: raise SaltInvocationError('api_key must be specified') @@ -153,9 +163,9 @@ def cancel_downtime(api_key=None, app_key=None, scope=None, id=None): 'scope': scope } response = requests.post( -        'https://app.datadoghq.com/api/v1/downtime/cancel/by_scope', -        params=params -    ) +            'https://app.datadoghq.com/api/v1/downtime/cancel/by_scope', +            params=params +        ) if response.status_code == 200: ret['result'] = True ret['response'] = response.json() @@ -168,3 +178,87 @@ def cancel_downtime(api_key=None, app_key=None, scope=None, id=None): raise SaltInvocationError('One of id or scope must be specified') return ret + + +def post_event(api_key=None, +               app_key=None, +               title=None, +               text=None, +               date_happened=None, +               priority=None, +               host=None, +               tags=None, +               alert_type=None, +               aggregation_key=None, +               source_type_name=None): +    ''' +    Post an event to the Datadog stream. + +    CLI Example + +    .. code-block:: bash + +        salt-call datadog.post_event api_key='0123456789' \\ +            app_key='9876543210' \\ +            title='Salt Highstate' \\ +            text="Salt highstate was run on $(salt-call grains.get id)" \\ +            tags='["service:salt", "event:highstate"]' + +    Required arguments + +    :param title: The event title. Limited to 100 characters. +    :param text: The body of the event. Limited to 4000 characters. The text +        supports markdown. + +    Optional arguments + +    :param date_happened: POSIX timestamp of the event. +    :param priority: The priority of the event ('normal' or 'low'). +    :param host: Host name to associate with the event. +    :param tags: A list of tags to apply to the event. +    :param alert_type: "error", "warning", "info" or "success". +    :param aggregation_key: An arbitrary string to use for aggregation, +        max length of 100 characters. +    :param source_type_name: The type of event being posted. 
+ ''' + _initialize_connection(api_key, app_key) + if title is None: + raise SaltInvocationError('title must be specified') + if text is None: + raise SaltInvocationError('text must be specified') + if alert_type not in [None, 'error', 'warning', 'info', 'success']: + # Datadog only supports these alert types but the API doesn't return an + # error for an incorrect alert_type, so we can do it here for now. + # https://github.com/DataDog/datadogpy/issues/215 + message = ('alert_type must be one of "error", "warning", "info", or ' + '"success"') + raise SaltInvocationError(message) + + ret = {'result': False, + 'response': None, + 'comment': ''} + + try: + response = datadog.api.Event.create(title=title, + text=text, + date_happened=date_happened, + priority=priority, + host=host, + tags=tags, + alert_type=alert_type, + aggregation_key=aggregation_key, + source_type_name=source_type_name + ) + except ValueError: + comment = ('Unexpected exception in Datadog Post Event API ' + 'call. Are your keys correct?') + ret['comment'] = comment + return ret + + ret['response'] = response + if 'status' in response.keys(): + ret['result'] = True + ret['comment'] = 'Successfully sent event' + else: + ret['comment'] = 'Error in posting event.' + return ret diff --git a/salt/modules/deb_apache.py b/salt/modules/deb_apache.py index 191d8e4080..2321b9cdcc 100644 --- a/salt/modules/deb_apache.py +++ b/salt/modules/deb_apache.py @@ -13,7 +13,8 @@ import os import logging # Import salt libs -import salt.utils +import salt.utils.decorators.path +import salt.utils.path log = logging.getLogger(__name__) @@ -27,7 +28,7 @@ def __virtual__(): Only load the module if apache is installed ''' cmd = _detect_os() - if salt.utils.which(cmd) and __grains__['os_family'] == 'Debian': + if salt.utils.path.which(cmd) and __grains__['os_family'] == 'Debian': return __virtualname__ return (False, 'apache execution module not loaded: apache not installed.') @@ -253,7 +254,7 @@ def check_conf_enabled(conf): return os.path.islink('/etc/apache2/conf-enabled/{0}'.format(conf_file)) -@salt.utils.decorators.which('a2enconf') +@salt.utils.decorators.path.which('a2enconf') def a2enconf(conf): ''' .. versionadded:: 2016.3.0 @@ -290,7 +291,7 @@ def a2enconf(conf): return ret -@salt.utils.decorators.which('a2disconf') +@salt.utils.decorators.path.which('a2disconf') def a2disconf(conf): ''' .. 
versionadded:: 2016.3.0 diff --git a/salt/modules/deb_postgres.py b/salt/modules/deb_postgres.py index 0f6976b6b3..d77ea9d236 100644 --- a/salt/modules/deb_postgres.py +++ b/salt/modules/deb_postgres.py @@ -10,7 +10,7 @@ import logging import pipes # Import salt libs -import salt.utils +import salt.utils.path # Import 3rd-party libs @@ -23,7 +23,7 @@ def __virtual__(): ''' Only load this module if the pg_createcluster bin exists ''' - if salt.utils.which('pg_createcluster'): + if salt.utils.path.which('pg_createcluster'): return __virtualname__ return (False, 'postgres execution module not loaded: pg_createcluste command not found.') @@ -52,7 +52,7 @@ def cluster_create(version, salt '*' postgres.cluster_create '9.3' locale='fr_FR' ''' - cmd = [salt.utils.which('pg_createcluster')] + cmd = [salt.utils.path.which('pg_createcluster')] if port: cmd += ['--port', str(port)] if locale: @@ -83,7 +83,7 @@ def cluster_list(verbose=False): salt '*' postgres.cluster_list verbose=True ''' - cmd = [salt.utils.which('pg_lsclusters'), '--no-header'] + cmd = [salt.utils.path.which('pg_lsclusters'), '--no-header'] ret = __salt__['cmd.run_all'](' '.join([pipes.quote(c) for c in cmd])) if ret.get('retcode', 0) != 0: log.error('Error listing clusters') @@ -127,7 +127,7 @@ def cluster_remove(version, salt '*' postgres.cluster_remove '9.3' 'main' stop=True ''' - cmd = [salt.utils.which('pg_dropcluster')] + cmd = [salt.utils.path.which('pg_dropcluster')] if stop: cmd += ['--stop'] cmd += [version, name] diff --git a/salt/modules/debbuild.py b/salt/modules/debbuild.py index a41cf09c50..37c7da1123 100644 --- a/salt/modules/debbuild.py +++ b/salt/modules/debbuild.py @@ -22,11 +22,14 @@ import time import traceback # Import salt libs -from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module,import-error -from salt.exceptions import SaltInvocationError, CommandExecutionError -import salt.utils import salt.utils.files +import salt.utils.path import salt.utils.vt +from salt.exceptions import SaltInvocationError, CommandExecutionError + +# Import 3rd-party libs +from salt.ext import six +from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module,import-error HAS_LIBS = False @@ -51,7 +54,7 @@ def __virtual__(): missing_util = False utils_reqd = ['gpg', 'debuild', 'pbuilder', 'reprepro'] for named_util in utils_reqd: - if not salt.utils.which(named_util): + if not salt.utils.path.which(named_util): missing_util = True break if HAS_LIBS and not missing_util: @@ -66,7 +69,7 @@ def _check_repo_sign_utils_support(name): ''' Check for specified command name in search path ''' - if salt.utils.which(name): + if salt.utils.path.which(name): return True else: raise CommandExecutionError( @@ -314,7 +317,7 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base spec_pathfile = _get_spec(tree_base, spec, template, saltenv) # build salt equivalents from scratch - if isinstance(sources, str): + if isinstance(sources, six.string_types): sources = sources.split(',') for src in sources: _get_src(tree_base, src, saltenv) @@ -633,7 +636,7 @@ def make_repo(repodir, if not older_gnupg: _check_repo_sign_utils_support('gpg2') - cmd = '{0} --with-keygrip --list-secret-keys'.format(salt.utils.which('gpg2')) + cmd = '{0} --with-keygrip --list-secret-keys'.format(salt.utils.path.which('gpg2')) local_keys2_keygrip = __salt__['cmd.run'](cmd, runas=runas) local_keys2 = iter(local_keys2_keygrip.splitlines()) try: diff --git 
a/salt/modules/debconfmod.py b/salt/modules/debconfmod.py index 98fd1aaaa4..2185b2af21 100644 --- a/salt/modules/debconfmod.py +++ b/salt/modules/debconfmod.py @@ -11,7 +11,9 @@ import re # Import salt libs import salt.utils +import salt.utils.path import salt.utils.files +import salt.utils.versions log = logging.getLogger(__name__) @@ -32,7 +34,7 @@ def __virtual__(): return (False, 'The debconfmod module could not be loaded: ' 'unsupported OS family') - if salt.utils.which('debconf-get-selections') is None: + if salt.utils.path.which('debconf-get-selections') is None: return (False, 'The debconfmod module could not be loaded: ' 'debconf-utils is not installed.') @@ -184,7 +186,7 @@ def set_file(path, saltenv='base', **kwargs): salt '*' debconf.set_file salt://pathto/pkg.selections ''' if '__env__' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'__env__\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' diff --git a/salt/modules/debian_ip.py b/salt/modules/debian_ip.py index 59ccc7309e..60b60f893c 100644 --- a/salt/modules/debian_ip.py +++ b/salt/modules/debian_ip.py @@ -19,13 +19,13 @@ import time # Import third party libs import jinja2 import jinja2.exceptions -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import StringIO # pylint: disable=import-error,no-name-in-module # Import salt libs -import salt.utils import salt.utils.files import salt.utils.odict +import salt.utils.stringutils import salt.utils.templates import salt.utils.validate.net @@ -221,7 +221,7 @@ def _read_file(path): ''' try: with salt.utils.files.flopen(path, 'rb') as contents: - return [salt.utils.to_str(line) for line in contents.readlines()] + return [salt.utils.stringutils.to_str(line) for line in contents.readlines()] except (OSError, IOError): return '' diff --git a/salt/modules/defaults.py b/salt/modules/defaults.py index 9aead53611..8056aef431 100644 --- a/salt/modules/defaults.py +++ b/salt/modules/defaults.py @@ -7,11 +7,10 @@ import yaml import salt.fileclient import salt.utils +import salt.utils.dictupdate as dictupdate import salt.utils.files import salt.utils.url -from salt.utils import dictupdate - __virtualname__ = 'defaults' diff --git a/salt/modules/dig.py b/salt/modules/dig.py index 397bf6538b..e30e9d57c4 100644 --- a/salt/modules/dig.py +++ b/salt/modules/dig.py @@ -6,8 +6,8 @@ The 'dig' command line tool must be installed in order to use this module. 
from __future__ import absolute_import # Import salt libs -import salt.utils import salt.utils.network +import salt.utils.path # Import python libs import logging @@ -22,7 +22,7 @@ def __virtual__(): ''' Only load module if dig binary is present ''' - if salt.utils.which('dig'): + if salt.utils.path.which('dig'): return __virtualname__ return (False, 'The dig execution module cannot be loaded: ' 'the dig binary is not in the path.') diff --git a/salt/modules/disk.py b/salt/modules/disk.py index 968a9e9b7a..1e9ba7d95e 100644 --- a/salt/modules/disk.py +++ b/salt/modules/disk.py @@ -17,22 +17,23 @@ from salt.ext import six from salt.ext.six.moves import zip # Import salt libs -import salt.utils -import salt.utils.decorators as decorators -from salt.utils.decorators import depends +import salt.utils.decorators +import salt.utils.decorators.path +import salt.utils.path +import salt.utils.platform from salt.exceptions import CommandExecutionError log = logging.getLogger(__name__) -HAS_HDPARM = salt.utils.which('hdparm') is not None -HAS_IOSTAT = salt.utils.which('iostat') is not None +HAS_HDPARM = salt.utils.path.which('hdparm') is not None +HAS_IOSTAT = salt.utils.path.which('iostat') is not None def __virtual__(): ''' Only work on POSIX-like systems ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return False, 'This module doesn\'t work on Windows.' return True @@ -253,7 +254,7 @@ def percent(args=None): return ret -@decorators.which('blkid') +@salt.utils.decorators.path.which('blkid') def blkid(device=None): ''' Return block device attributes: UUID, LABEL, etc. This function only works @@ -402,8 +403,8 @@ def resize2fs(device): return True -@decorators.which('sync') -@decorators.which('mkfs') +@salt.utils.decorators.path.which('sync') +@salt.utils.decorators.path.which('mkfs') def format_(device, fs_type='ext4', inode_size=None, @@ -470,7 +471,7 @@ def format_(device, return all([mkfs_success, sync_success]) -@decorators.which_bin(['lsblk', 'df']) +@salt.utils.decorators.path.which_bin(['lsblk', 'df']) def fstype(device): ''' Return the filesystem name of the specified device @@ -486,14 +487,14 @@ def fstype(device): salt '*' disk.fstype /dev/sdX1 ''' - if salt.utils.which('lsblk'): + if salt.utils.path.which('lsblk'): lsblk_out = __salt__['cmd.run']('lsblk -o fstype {0}'.format(device)).splitlines() if len(lsblk_out) > 1: fs_type = lsblk_out[1].strip() if fs_type: return fs_type - if salt.utils.which('df'): + if salt.utils.path.which('df'): # the fstype was not set on the block device, so inspect the filesystem # itself for its type if __grains__['kernel'] == 'AIX' and os.path.isfile('/usr/sysv/bin/df'): @@ -512,7 +513,7 @@ def fstype(device): return '' -@depends(HAS_HDPARM) +@salt.utils.decorators.depends(HAS_HDPARM) def _hdparm(args, failhard=True): ''' Execute hdparm @@ -531,7 +532,7 @@ def _hdparm(args, failhard=True): return result['stdout'] -@depends(HAS_HDPARM) +@salt.utils.decorators.depends(HAS_HDPARM) def hdparms(disks, args=None): ''' Retrieve all info's for all disks @@ -608,7 +609,7 @@ def hdparms(disks, args=None): return out -@depends(HAS_HDPARM) +@salt.utils.decorators.depends(HAS_HDPARM) def hpa(disks, size=None): ''' Get/set Host Protected Area settings @@ -737,7 +738,7 @@ def smart_attributes(dev, attributes=None, values=None): return smart_attr -@depends(HAS_IOSTAT) +@salt.utils.decorators.depends(HAS_IOSTAT) def iostat(interval=1, count=5, disks=None): ''' Gather and return (averaged) IO stats. 
@@ -753,11 +754,11 @@ def iostat(interval=1, count=5, disks=None): salt '*' disk.iostat 1 5 disks=sda ''' - if salt.utils.is_linux(): + if salt.utils.platform.is_linux(): return _iostat_linux(interval, count, disks) - elif salt.utils.is_freebsd(): + elif salt.utils.platform.is_freebsd(): return _iostat_fbsd(interval, count, disks) - elif salt.utils.is_aix(): + elif salt.utils.platform.is_aix(): return _iostat_aix(interval, count, disks) diff --git a/salt/modules/djangomod.py b/salt/modules/djangomod.py index 76cb536d91..e74d263dc4 100644 --- a/salt/modules/djangomod.py +++ b/salt/modules/djangomod.py @@ -9,11 +9,11 @@ from __future__ import absolute_import import os # Import Salt libs -import salt.utils +import salt.utils.path import salt.exceptions # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Define the module's virtual name __virtualname__ = 'django' @@ -28,9 +28,9 @@ def _get_django_admin(bin_env): Return the django admin ''' if not bin_env: - if salt.utils.which('django-admin.py'): + if salt.utils.path.which('django-admin.py'): return 'django-admin.py' - elif salt.utils.which('django-admin'): + elif salt.utils.path.which('django-admin'): return 'django-admin' else: raise salt.exceptions.CommandExecutionError( diff --git a/salt/modules/dnsmasq.py b/salt/modules/dnsmasq.py index ebdac2524b..b62fcce90b 100644 --- a/salt/modules/dnsmasq.py +++ b/salt/modules/dnsmasq.py @@ -9,10 +9,13 @@ import logging import os # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.platform from salt.exceptions import CommandExecutionError +# Import 3rd-party libs +from salt.ext import six + log = logging.getLogger(__name__) @@ -20,8 +23,12 @@ def __virtual__(): ''' Only work on POSIX-like systems. ''' - if salt.utils.is_windows(): - return (False, 'dnsmasq execution module cannot be loaded: only works on non-Windows systems.') + if salt.utils.platform.is_windows(): + return ( + False, + 'dnsmasq execution module cannot be loaded: only works on ' + 'non-Windows systems.' + ) return True @@ -110,7 +117,7 @@ def set_config(config_file='/etc/dnsmasq.conf', follow=True, **kwargs): ret_kwargs[key] = kwargs[key] if key in dnsopts: - if isinstance(dnsopts[key], str): + if isinstance(dnsopts[key], six.string_types): for config in includes: __salt__['file.sed'](path=config, before='^{0}=.*'.format(key), @@ -173,7 +180,7 @@ def _parse_dnamasq(filename): if '=' in line: comps = line.split('=') if comps[0] in fileopts: - if isinstance(fileopts[comps[0]], str): + if isinstance(fileopts[comps[0]], six.string_types): temp = fileopts[comps[0]] fileopts[comps[0]] = [temp] fileopts[comps[0]].append(comps[1].strip()) diff --git a/salt/modules/dnsutil.py b/salt/modules/dnsutil.py index 9018060b75..f05e15f903 100644 --- a/salt/modules/dnsutil.py +++ b/salt/modules/dnsutil.py @@ -1,6 +1,10 @@ # -*- coding: utf-8 -*- ''' -Compendium of generic DNS utilities +Compendium of generic DNS utilities. + +.. note:: + + Some functions in the ``dnsutil`` execution module depend on ``dig``. ''' # Import python libs from __future__ import absolute_import @@ -9,8 +13,8 @@ import socket import time # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path log = logging.getLogger(__name__) @@ -221,7 +225,7 @@ def _has_dig(): because they are also DNS utilities, a compatibility layer exists. This function helps add that layer. 
''' - return salt.utils.which('dig') is not None + return salt.utils.path.which('dig') is not None def check_ip(ip_addr): @@ -232,7 +236,7 @@ def check_ip(ip_addr): .. code-block:: bash - salt ns1 dig.check_ip 127.0.0.1 + salt ns1 dnsutil.check_ip 127.0.0.1 ''' if _has_dig(): return __salt__['dig.check_ip'](ip_addr) @@ -242,7 +246,7 @@ def check_ip(ip_addr): def A(host, nameserver=None): ''' - Return the A record(s) for `host`. + Return the A record(s) for ``host``. Always returns a list. @@ -267,7 +271,7 @@ def A(host, nameserver=None): def AAAA(host, nameserver=None): ''' - Return the AAAA record(s) for `host`. + Return the AAAA record(s) for ``host``. Always returns a list. @@ -302,7 +306,7 @@ def NS(domain, resolve=True, nameserver=None): .. code-block:: bash - salt ns1 dig.NS google.com + salt ns1 dnsutil.NS google.com ''' if _has_dig(): @@ -323,7 +327,7 @@ def SPF(domain, record='SPF', nameserver=None): .. code-block:: bash - salt ns1 dig.SPF google.com + salt ns1 dnsutil.SPF google.com ''' if _has_dig(): return __salt__['dig.SPF'](domain, record, nameserver) @@ -346,7 +350,7 @@ def MX(domain, resolve=False, nameserver=None): .. code-block:: bash - salt ns1 dig.MX google.com + salt ns1 dnsutil.MX google.com ''' if _has_dig(): return __salt__['dig.MX'](domain, resolve, nameserver) diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py index fde75cb8fd..13820dd5c4 100644 --- a/salt/modules/dockermod.py +++ b/salt/modules/dockermod.py @@ -199,12 +199,15 @@ import subprocess # Import Salt libs from salt.exceptions import CommandExecutionError, SaltInvocationError -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import map # pylint: disable=import-error,redefined-builtin import salt.utils +import salt.utils.args import salt.utils.decorators import salt.utils.docker import salt.utils.files +import salt.utils.path +import salt.utils.stringutils import salt.utils.thin import salt.pillar import salt.exceptions @@ -230,7 +233,7 @@ except ImportError: HAS_LZMA = False # pylint: enable=import-error -HAS_NSENTER = bool(salt.utils.which('nsenter')) +HAS_NSENTER = bool(salt.utils.path.which('nsenter')) # Set up logging log = logging.getLogger(__name__) @@ -343,7 +346,7 @@ def _get_client(timeout=NOTSET, **kwargs): python_shell=False) try: docker_machine_json = \ - json.loads(salt.utils.to_str(docker_machine_json)) + json.loads(salt.utils.stringutils.to_str(docker_machine_json)) docker_machine_tls = \ docker_machine_json['HostOptions']['AuthOptions'] docker_machine_ip = docker_machine_json['Driver']['IPAddress'] @@ -389,7 +392,7 @@ def _docker_client(wrapped): ''' Ensure that the client is present ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) timeout = kwargs.pop('client_timeout', NOTSET) if 'docker.client' not in __context__ \ or not hasattr(__context__['docker.client'], 'timeout'): @@ -421,7 +424,7 @@ def _ensure_exists(wrapped): raise CommandExecutionError( 'Container \'{0}\' does not exist'.format(name) ) - return wrapped(name, *args, **salt.utils.clean_kwargs(**kwargs)) + return wrapped(name, *args, **salt.utils.args.clean_kwargs(**kwargs)) return wrapper @@ -434,7 +437,7 @@ def _refresh_mine_cache(wrapped): ''' refresh salt mine on exit. 
''' - returned = wrapped(*args, **salt.utils.clean_kwargs(**kwargs)) + returned = wrapped(*args, **salt.utils.args.clean_kwargs(**kwargs)) __salt__['mine.send']('docker.ps', verbose=True, all=True, host=True) return returned return wrapper @@ -559,6 +562,21 @@ def _prep_pull(): __context__['docker._pull_status'] = [x[:12] for x in images(all=True)] +def _scrub_links(links, name): + ''' + Remove container name from HostConfig:Links values to enable comparing + container configurations correctly. + ''' + if isinstance(links, list): + ret = [] + for l in links: + ret.append(l.replace('/{0}/'.format(name), '/', 1)) + else: + ret = links + + return ret + + def _size_fmt(num): ''' Format bytes as human-readable file sizes @@ -619,7 +637,7 @@ def _client_wrapper(attr, *args, **kwargs): api_events = [] try: for event in ret: - api_events.append(json.loads(salt.utils.to_str(event))) + api_events.append(json.loads(salt.utils.stringutils.to_str(event))) except Exception as exc: raise CommandExecutionError( 'Unable to interpret API event: \'{0}\''.format(event), @@ -884,8 +902,15 @@ def compare_container(first, second, ignore=None): continue val1 = result1[conf_dict][item] val2 = result2[conf_dict].get(item) - if val1 != val2: - ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} + if item in ('OomKillDisable',): + if bool(val1) != bool(val2): + ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} + else: + if item == 'Links': + val1 = _scrub_links(val1, first) + val2 = _scrub_links(val2, second) + if val1 != val2: + ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} # Check for optionally-present items that were in the second container # and not the first. for item in result2[conf_dict]: @@ -895,8 +920,15 @@ def compare_container(first, second, ignore=None): continue val1 = result1[conf_dict].get(item) val2 = result2[conf_dict][item] - if val1 != val2: - ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} + if item in ('OomKillDisable',): + if bool(val1) != bool(val2): + ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} + else: + if item == 'Links': + val1 = _scrub_links(val1, first) + val2 = _scrub_links(val2, second) + if val1 != val2: + ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} return ret @@ -978,6 +1010,10 @@ def login(*registries): cmd = ['docker', 'login', '-u', username, '-p', password] if registry.lower() != 'hub': cmd.append(registry) + log.debug( + 'Attempting to login to docker registry \'%s\' as user \'%s\'', + registry, username + ) login_cmd = __salt__['cmd.run_all']( cmd, python_shell=False, @@ -1803,7 +1839,7 @@ def create(image, generate one for you (it will be included in the return data). skip_translate - This function translates Salt CLI input into the format which + This function translates Salt CLI or SLS input into the format which docker-py_ expects. However, in the event that Salt's translation logic fails (due to potential changes in the Docker Remote API, or to bugs in the translation code), this argument can be used to exert granular @@ -2071,9 +2107,9 @@ def create(image, - ``dns_search="[foo1.domain.tld, foo2.domain.tld]"`` domainname - Set custom DNS search domains + The domain name to use for the container - Example: ``domainname=domain.tld,domain2.tld`` + Example: ``domainname=domain.tld`` entrypoint Entrypoint for the container. Either a string (e.g. 
``"mycmd --arg1 @@ -2942,10 +2978,10 @@ def rm_(name, force=False, volumes=False, **kwargs): salt myminion docker.rm mycontainer salt myminion docker.rm mycontainer force=True ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) stop_ = kwargs.pop('stop', False) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) if state(name) == 'running' and not (force or stop_): raise CommandExecutionError( @@ -3094,7 +3130,7 @@ def build(path=None, stream_data = [] for line in response: stream_data.extend( - json.loads(salt.utils.to_str(line), cls=DockerJSONDecoder) + json.loads(salt.utils.stringutils.to_str(line), cls=DockerJSONDecoder) ) errors = [] # Iterate through API response and collect information @@ -3812,7 +3848,6 @@ def save(name, if os.path.exists(path) and not overwrite: raise CommandExecutionError('{0} already exists'.format(path)) - compression = kwargs.get('compression') if compression is None: if path.endswith('.tar.gz') or path.endswith('.tgz'): compression = 'gzip' @@ -3918,7 +3953,7 @@ def save(name, ret['Size_Human'] = _size_fmt(ret['Size']) # Process push - if kwargs.get(push, False): + if kwargs.get('push', False): ret['Push'] = __salt__['cp.push'](path) return ret @@ -5396,7 +5431,7 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base', salt myminion docker.sls_build imgname base=mybase mods=rails,web ''' - create_kwargs = salt.utils.clean_kwargs(**copy.deepcopy(kwargs)) + create_kwargs = salt.utils.args.clean_kwargs(**copy.deepcopy(kwargs)) for key in ('image', 'name', 'cmd', 'interactive', 'tty'): try: del create_kwargs[key] diff --git a/salt/modules/dpkg.py b/salt/modules/dpkg.py index dac43fdf02..0ca210a6e8 100644 --- a/salt/modules/dpkg.py +++ b/salt/modules/dpkg.py @@ -11,8 +11,10 @@ import re import datetime # Import salt libs -import salt.utils +import salt.utils # Can be removed once compare_dicts is moved +import salt.utils.args import salt.utils.files +import salt.utils.path from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) @@ -258,10 +260,10 @@ def _get_pkg_info(*packages, **kwargs): :param failhard: Throw an exception if no packages found. :return: ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) failhard = kwargs.pop('failhard', True) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) if __grains__['os'] == 'Ubuntu' and __grains__['osrelease_info'] < (12, 4): bin_var = '${binary}' @@ -355,7 +357,7 @@ def _get_pkg_ds_avail(): :return: ''' avail = "/var/lib/dpkg/available" - if not salt.utils.which('dselect') or not os.path.exists(avail): + if not salt.utils.path.which('dselect') or not os.path.exists(avail): return dict() # Do not update with dselect, just read what is. @@ -406,10 +408,10 @@ def info(*packages, **kwargs): # However, this file is operated by dselect which has to be installed. 
dselect_pkg_avail = _get_pkg_ds_avail() - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) failhard = kwargs.pop('failhard', True) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) ret = dict() for pkg in _get_pkg_info(*packages, failhard=failhard): diff --git a/salt/modules/drac.py b/salt/modules/drac.py index fa481f91cf..51d44cab75 100644 --- a/salt/modules/drac.py +++ b/salt/modules/drac.py @@ -8,17 +8,17 @@ from __future__ import absolute_import import logging # Import Salt libs -import salt.utils +import salt.utils.path # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin log = logging.getLogger(__name__) def __virtual__(): - if salt.utils.which('racadm'): + if salt.utils.path.which('racadm'): return True return (False, 'The drac execution module cannot be loaded: racadm binary not in path.') diff --git a/salt/modules/dracr.py b/salt/modules/dracr.py index 0324382513..d42ae4b83d 100644 --- a/salt/modules/dracr.py +++ b/salt/modules/dracr.py @@ -13,10 +13,10 @@ import re # Import Salt libs from salt.exceptions import CommandExecutionError -import salt.utils +import salt.utils.path # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin from salt.ext.six.moves import map @@ -34,7 +34,7 @@ except (NameError, KeyError): def __virtual__(): - if salt.utils.which('racadm'): + if salt.utils.path.which('racadm'): return True return (False, 'The drac execution module cannot be loaded: racadm binary not in path.') diff --git a/salt/modules/dummyproxy_package.py b/salt/modules/dummyproxy_package.py index 64f5dacda1..66f0c43637 100644 --- a/salt/modules/dummyproxy_package.py +++ b/salt/modules/dummyproxy_package.py @@ -7,6 +7,7 @@ from __future__ import absolute_import # Import python libs import logging import salt.utils +import salt.utils.platform log = logging.getLogger(__name__) @@ -20,12 +21,21 @@ def __virtual__(): Only work on systems that are a proxy minion ''' try: - if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'dummy': + if salt.utils.platform.is_proxy() \ + and __opts__['proxy']['proxytype'] == 'dummy': return __virtualname__ except KeyError: - return (False, 'The dummyproxy_package execution module failed to load. Check the proxy key in pillar or /etc/salt/proxy.') + return ( + False, + 'The dummyproxy_package execution module failed to load. Check ' + 'the proxy key in pillar or /etc/salt/proxy.' + ) - return (False, 'The dummyproxy_package execution module failed to load: only works on a dummy proxy minion.') + return ( + False, + 'The dummyproxy_package execution module failed to load: only works ' + 'on a dummy proxy minion.' 
+ ) def list_pkgs(versions_as_list=False, **kwargs): diff --git a/salt/modules/dummyproxy_service.py b/salt/modules/dummyproxy_service.py index c085768b2c..1be19ff582 100644 --- a/salt/modules/dummyproxy_service.py +++ b/salt/modules/dummyproxy_service.py @@ -5,10 +5,11 @@ Provide the service module for the dummy proxy used in integration tests ''' # Import python libs from __future__ import absolute_import -import salt.utils - import logging +# Import Salt libs +import salt.utils.platform + log = logging.getLogger(__name__) __func_alias__ = { @@ -25,12 +26,21 @@ def __virtual__(): Only work on systems that are a proxy minion ''' try: - if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'dummy': + if salt.utils.platform.is_proxy() \ + and __opts__['proxy']['proxytype'] == 'dummy': return __virtualname__ except KeyError: - return (False, 'The dummyproxy_service execution module failed to load. Check the proxy key in pillar or /etc/salt/proxy.') + return ( + False, + 'The dummyproxy_service execution module failed to load. Check ' + 'the proxy key in pillar or /etc/salt/proxy.' + ) - return (False, 'The dummyproxy_service execution module failed to load: only works on the integration testsuite dummy proxy minion.') + return ( + False, + 'The dummyproxy_service execution module failed to load: only works ' + 'on the integration testsuite dummy proxy minion.' + ) def get_all(): diff --git a/salt/modules/ebuild.py b/salt/modules/ebuild.py index 2f1e4e28f8..ea41656eaf 100644 --- a/salt/modules/ebuild.py +++ b/salt/modules/ebuild.py @@ -22,10 +22,13 @@ import re # Import salt libs import salt.utils +import salt.utils.args +import salt.utils.path import salt.utils.pkg import salt.utils.systemd +import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError -import salt.ext.six as six +from salt.ext import six # Import third party libs HAS_PORTAGE = False @@ -234,7 +237,7 @@ def latest_version(*names, **kwargs): ret[name] = '' installed = _cpv_to_version(_vartree().dep_bestmatch(name)) avail = _cpv_to_version(_porttree().dep_bestmatch(name)) - if avail and (not installed or salt.utils.compare_versions(ver1=installed, oper='<', ver2=avail, cmp_func=version_cmp)): + if avail and (not installed or salt.utils.versions.compare(ver1=installed, oper='<', ver2=avail, cmp_func=version_cmp)): ret[name] = avail # Return a string if only one package name passed @@ -422,7 +425,7 @@ def refresh_db(): # GPG sign verify is supported only for "webrsync" cmd = 'emerge-webrsync -q' # We prefer 'delta-webrsync' to 'webrsync' - if salt.utils.which('emerge-delta-webrsync'): + if salt.utils.path.which('emerge-delta-webrsync'): cmd = 'emerge-delta-webrsync -q' return __salt__['cmd.retcode'](cmd, python_shell=False) == 0 else: @@ -432,7 +435,7 @@ def refresh_db(): # We fall back to "webrsync" if "rsync" fails for some reason cmd = 'emerge-webrsync -q' # We prefer 'delta-webrsync' to 'webrsync' - if salt.utils.which('emerge-delta-webrsync'): + if salt.utils.path.which('emerge-delta-webrsync'): cmd = 'emerge-delta-webrsync -q' return __salt__['cmd.retcode'](cmd, python_shell=False) == 0 @@ -1109,10 +1112,10 @@ def version_cmp(pkg1, pkg2, **kwargs): # definition (and thus have it show up in the docs), we just pop it out of # the kwargs dict and then raise an exception if any kwargs other than # ignore_epoch were passed. 
- kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) kwargs.pop('ignore_epoch', None) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) regex = r'^~?([^:\[]+):?[^\[]*\[?.*$' ver1 = re.match(regex, pkg1) diff --git a/salt/modules/eix.py b/salt/modules/eix.py index 728d237ba3..d9748792db 100644 --- a/salt/modules/eix.py +++ b/salt/modules/eix.py @@ -5,14 +5,14 @@ Support for Eix from __future__ import absolute_import # Import salt libs -import salt.utils +import salt.utils.path def __virtual__(): ''' Only work on Gentoo systems with eix installed ''' - if __grains__['os'] == 'Gentoo' and salt.utils.which('eix'): + if __grains__['os'] == 'Gentoo' and salt.utils.path.which('eix'): return 'eix' return (False, 'The eix execution module cannot be loaded: either the system is not Gentoo or the eix binary is not in the path.') @@ -30,7 +30,7 @@ def sync(): cmd = 'eix-sync -q -C "--ask" -C "n"' if 'makeconf.features_contains'in __salt__ and __salt__['makeconf.features_contains']('webrsync-gpg'): # GPG sign verify is supported only for "webrsync" - if salt.utils.which('emerge-delta-webrsync'): # We prefer 'delta-webrsync' to 'webrsync' + if salt.utils.path.which('emerge-delta-webrsync'): # We prefer 'delta-webrsync' to 'webrsync' cmd += ' -W' else: cmd += ' -w' @@ -39,7 +39,7 @@ def sync(): if __salt__['cmd.retcode'](cmd) == 0: return True # We fall back to "webrsync" if "rsync" fails for some reason - if salt.utils.which('emerge-delta-webrsync'): # We prefer 'delta-webrsync' to 'webrsync' + if salt.utils.path.which('emerge-delta-webrsync'): # We prefer 'delta-webrsync' to 'webrsync' cmd += ' -W' else: cmd += ' -w' diff --git a/salt/modules/elasticsearch.py b/salt/modules/elasticsearch.py index df9eeb9a19..9be0ce20f5 100644 --- a/salt/modules/elasticsearch.py +++ b/salt/modules/elasticsearch.py @@ -50,7 +50,7 @@ Module to provide Elasticsearch compatibility to Salt ''' from __future__ import absolute_import -from salt.exceptions import CommandExecutionError +from salt.exceptions import CommandExecutionError, SaltInvocationError # Import Python libs import logging @@ -83,10 +83,11 @@ def _get_instance(hosts=None, profile=None): Return the elasticsearch instance ''' es = None - proxies = {} + proxies = None use_ssl = False - ca_certs = False - verify_certs = False + ca_certs = None + verify_certs = True + http_auth = None if profile is None: profile = 'elasticsearch' @@ -96,37 +97,25 @@ def _get_instance(hosts=None, profile=None): elif isinstance(profile, dict): _profile = profile if _profile: - hosts = _profile.get('host', None) + hosts = _profile.get('host', hosts) if not hosts: - hosts = _profile.get('hosts', None) - proxies = _profile.get('proxies', {}) + hosts = _profile.get('hosts', hosts) + proxies = _profile.get('proxies', None) use_ssl = _profile.get('use_ssl', False) - ca_certs = _profile.get('ca_certs', False) - verify_certs = _profile.get('verify_certs', False) + ca_certs = _profile.get('ca_certs', None) + verify_certs = _profile.get('verify_certs', True) username = _profile.get('username', None) password = _profile.get('password', None) + if username and password: + http_auth = (username, password) + if not hosts: hosts = ['127.0.0.1:9200'] if isinstance(hosts, string_types): hosts = [hosts] try: - if proxies == {}: - es = elasticsearch.Elasticsearch( - hosts, - use_ssl=use_ssl, - ca_certs=ca_certs, - verify_certs=verify_certs, - ) - elif username and password: - es = elasticsearch.Elasticsearch( - hosts, - 
use_ssl=use_ssl, - ca_certs=ca_certs, - verify_certs=verify_certs, - http_auth=(username, password) - ) - else: + if proxies: # Custom connection class to use requests module with proxies class ProxyConnection(RequestsHttpConnection): def __init__(self, *args, **kwargs): @@ -137,14 +126,23 @@ def _get_instance(hosts=None, profile=None): es = elasticsearch.Elasticsearch( hosts, connection_class=ProxyConnection, - proxies=_profile.get('proxies', {}), + proxies=proxies, use_ssl=use_ssl, ca_certs=ca_certs, verify_certs=verify_certs, + http_auth=http_auth, ) + else: + es = elasticsearch.Elasticsearch( + hosts, + use_ssl=use_ssl, + ca_certs=ca_certs, + verify_certs=verify_certs, + http_auth=http_auth, + ) - if not es.ping(): - raise CommandExecutionError('Could not connect to Elasticsearch host/ cluster {0}, is it unhealthy?'.format(hosts)) + # Try the connection + es.info() except elasticsearch.exceptions.TransportError as e: raise CommandExecutionError('Could not connect to Elasticsearch host/ cluster {0} due to {1}'.format(hosts, str(e))) return es @@ -261,7 +259,7 @@ def cluster_stats(nodes=None, hosts=None, profile=None): raise CommandExecutionError("Cannot retrieve cluster stats, server returned code {0} with message {1}".format(e.status_code, e.error)) -def alias_create(indices, alias, hosts=None, body=None, profile=None): +def alias_create(indices, alias, hosts=None, body=None, profile=None, source=None): ''' Create an alias for a specific index/indices @@ -271,13 +269,21 @@ def alias_create(indices, alias, hosts=None, body=None, profile=None): Alias name body Optional definition such as routing or filter as defined in https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html + source + URL of file specifying optional definition such as routing or filter. Cannot be used in combination with ``body``. CLI example:: salt myminion elasticsearch.alias_create testindex_v1 testindex ''' es = _get_instance(hosts, profile) - + if source and body: + message = 'Either body or source should be specified but not both.' + raise SaltInvocationError(message) + if source: + body = __salt__['cp.get_file_str']( + source, + saltenv=__opts__.get('saltenv', 'base')) try: result = es.indices.put_alias(index=indices, name=alias, body=body) return result.get('acknowledged', False) @@ -285,7 +291,7 @@ def alias_create(indices, alias, hosts=None, body=None, profile=None): raise CommandExecutionError("Cannot create alias {0} in index {1}, server returned code {2} with message {3}".format(alias, indices, e.status_code, e.error)) -def alias_delete(indices, aliases, hosts=None, body=None, profile=None): +def alias_delete(indices, aliases, hosts=None, body=None, profile=None, source=None): ''' Delete an alias of an index @@ -299,7 +305,13 @@ def alias_delete(indices, aliases, hosts=None, body=None, profile=None): salt myminion elasticsearch.alias_delete testindex_v1 testindex ''' es = _get_instance(hosts, profile) - + if source and body: + message = 'Either body or source should be specified but not both.' 
+ raise SaltInvocationError(message) + if source: + body = __salt__['cp.get_file_str']( + source, + saltenv=__opts__.get('saltenv', 'base')) try: result = es.indices.delete_alias(index=indices, name=aliases) @@ -355,7 +367,7 @@ def alias_get(indices=None, aliases=None, hosts=None, profile=None): raise CommandExecutionError("Cannot get alias {0} in index {1}, server returned code {2} with message {3}".format(aliases, indices, e.status_code, e.error)) -def document_create(index, doc_type, body=None, id=None, hosts=None, profile=None): +def document_create(index, doc_type, body=None, id=None, hosts=None, profile=None, source=None): ''' Create a document in a specified index @@ -365,6 +377,8 @@ def document_create(index, doc_type, body=None, id=None, hosts=None, profile=Non Type of the document body Document to store + source + URL of file specifying document to store. Cannot be used in combination with ``body``. id Optional unique document identifier for specified doc_type (empty for random) @@ -373,7 +387,13 @@ def document_create(index, doc_type, body=None, id=None, hosts=None, profile=Non salt myminion elasticsearch.document_create testindex doctype1 '{}' ''' es = _get_instance(hosts, profile) - + if source and body: + message = 'Either body or source should be specified but not both.' + raise SaltInvocationError(message) + if source: + body = __salt__['cp.get_file_str']( + source, + saltenv=__opts__.get('saltenv', 'base')) try: return es.index(index=index, doc_type=doc_type, body=body, id=id) except elasticsearch.TransportError as e: @@ -455,7 +475,7 @@ def document_get(index, id, doc_type='_all', hosts=None, profile=None): raise CommandExecutionError("Cannot retrieve document {0} from index {1}, server returned code {2} with message {3}".format(id, index, e.status_code, e.error)) -def index_create(index, body=None, hosts=None, profile=None): +def index_create(index, body=None, hosts=None, profile=None, source=None): ''' Create an index @@ -463,6 +483,8 @@ def index_create(index, body=None, hosts=None, profile=None): Index name body Index definition, such as settings and mappings as defined in https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html + source + URL to file specifying index definition. Cannot be used in combination with ``body``. CLI example:: @@ -470,7 +492,13 @@ def index_create(index, body=None, hosts=None, profile=None): salt myminion elasticsearch.index_create testindex2 '{"settings" : {"index" : {"number_of_shards" : 3, "number_of_replicas" : 2}}}' ''' es = _get_instance(hosts, profile) - + if source and body: + message = 'Either body or source should be specified but not both.' 
+ raise SaltInvocationError(message) + if source: + body = __salt__['cp.get_file_str']( + source, + saltenv=__opts__.get('saltenv', 'base')) try: result = es.indices.create(index=index, body=body) return result.get('acknowledged', False) and result.get("shards_acknowledged", True) @@ -604,7 +632,7 @@ def index_close(index, allow_no_indices=True, expand_wildcards='open', ignore_un raise CommandExecutionError("Cannot close index {0}, server returned code {1} with message {2}".format(index, e.status_code, e.error)) -def mapping_create(index, doc_type, body, hosts=None, profile=None): +def mapping_create(index, doc_type, body=None, hosts=None, profile=None, source=None): ''' Create a mapping in a given index @@ -614,12 +642,21 @@ def mapping_create(index, doc_type, body, hosts=None, profile=None): Name of the document type body Mapping definition as specified in https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html + source + URL to file specifying mapping definition. Cannot be used in combination with ``body``. CLI example:: salt myminion elasticsearch.mapping_create testindex user '{ "user" : { "properties" : { "message" : {"type" : "string", "store" : true } } } }' ''' es = _get_instance(hosts, profile) + if source and body: + message = 'Either body or source should be specified but not both.' + raise SaltInvocationError(message) + if source: + body = __salt__['cp.get_file_str']( + source, + saltenv=__opts__.get('saltenv', 'base')) try: result = es.indices.put_mapping(index=index, doc_type=doc_type, body=body) @@ -677,23 +714,33 @@ def mapping_get(index, doc_type, hosts=None, profile=None): raise CommandExecutionError("Cannot retrieve mapping {0}, server returned code {1} with message {2}".format(index, e.status_code, e.error)) -def index_template_create(name, body, hosts=None, profile=None): +def index_template_create(name, body=None, hosts=None, profile=None, source=None): ''' Create an index template name Index template name + body Template definition as specified in http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html + source + URL to file specifying template definition. Cannot be used in combination with ``body``. + CLI example:: salt myminion elasticsearch.index_template_create testindex_templ '{ "template": "logstash-*", "order": 1, "settings": { "number_of_shards": 1 } }' ''' es = _get_instance(hosts, profile) + if source and body: + message = 'Either body or source should be specified but not both.' + raise SaltInvocationError(message) + if source: + body = __salt__['cp.get_file_str']( + source, + saltenv=__opts__.get('saltenv', 'base')) try: result = es.indices.put_template(name=name, body=body) - return result.get('acknowledged', False) except elasticsearch.TransportError as e: raise CommandExecutionError("Cannot create template {0}, server returned code {1} with message {2}".format(name, e.status_code, e.error)) diff --git a/salt/modules/environ.py b/salt/modules/environ.py index 112c176c3c..26972f4ca9 100644 --- a/salt/modules/environ.py +++ b/salt/modules/environ.py @@ -3,17 +3,16 @@ Support for getting and setting the environment variables of the current salt process. 
''' -from __future__ import absolute_import - -# Import salt libs -import salt.utils as utils - # Import python libs +from __future__ import absolute_import import os import logging +# Import Salt libs +import salt.utils.platform + # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -61,7 +60,7 @@ def setval(key, val, false_unsets=False, permanent=False): salt '*' environ.setval baz bar permanent=True salt '*' environ.setval baz bar permanent=HKLM ''' - is_windows = utils.is_windows() + is_windows = salt.utils.platform.is_windows() if is_windows: permanent_hive = 'HKCU' permanent_key = 'Environment' diff --git a/salt/modules/eselect.py b/salt/modules/eselect.py index 09b04fd735..6766462227 100644 --- a/salt/modules/eselect.py +++ b/salt/modules/eselect.py @@ -7,7 +7,7 @@ from __future__ import absolute_import import logging # Import salt libs -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -16,7 +16,7 @@ def __virtual__(): ''' Only work on Gentoo systems with eselect installed ''' - if __grains__['os'] == 'Gentoo' and salt.utils.which('eselect'): + if __grains__['os'] == 'Gentoo' and salt.utils.path.which('eselect'): return 'eselect' return (False, 'The eselect execution module cannot be loaded: either the system is not Gentoo or the eselect binary is not in the path.') diff --git a/salt/modules/esxdatacenter.py b/salt/modules/esxdatacenter.py new file mode 100644 index 0000000000..f36ff27928 --- /dev/null +++ b/salt/modules/esxdatacenter.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +''' +Module used to access the esxdatacenter proxy connection methods +''' +from __future__ import absolute_import + +# Import python libs +import logging +import salt.utils.platform + + +log = logging.getLogger(__name__) + +__proxyenabled__ = ['esxdatacenter'] +# Define the module's virtual name +__virtualname__ = 'esxdatacenter' + + +def __virtual__(): + ''' + Only work on proxy + ''' + if salt.utils.platform.is_proxy(): + return __virtualname__ + return (False, 'Must be run on a proxy minion') + + +def get_details(): + return __proxy__['esxdatacenter.get_details']() diff --git a/salt/modules/esxi.py b/salt/modules/esxi.py index 144a84f242..a4c1f8ddcc 100644 --- a/salt/modules/esxi.py +++ b/salt/modules/esxi.py @@ -30,7 +30,9 @@ from __future__ import absolute_import # Import python libs import logging -import salt.utils + +# Import Salt libs +import salt.utils.platform log = logging.getLogger(__name__) @@ -43,7 +45,7 @@ def __virtual__(): ''' Only work on proxy ''' - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return __virtualname__ return (False, 'The esxi execution module failed to load: ' 'only available on proxy minions.') diff --git a/salt/modules/event.py b/salt/modules/event.py index 08b551fc3a..4c5259ec23 100644 --- a/salt/modules/event.py +++ b/salt/modules/event.py @@ -16,7 +16,7 @@ import salt.crypt import salt.utils.event import salt.payload import salt.transport -import salt.ext.six as six +from salt.ext import six __proxyenabled__ = ['*'] log = logging.getLogger(__name__) diff --git a/salt/modules/extfs.py b/salt/modules/extfs.py index eabcf5f116..87ca37e53c 100644 --- a/salt/modules/extfs.py +++ b/salt/modules/extfs.py @@ -8,7 +8,7 @@ from __future__ import absolute_import import logging # Import salt libs -import salt.utils +import salt.utils.platform log = logging.getLogger(__name__) @@ -17,8 +17,12 @@ def __virtual__(): ''' Only work on POSIX-like systems ''' - if 
salt.utils.is_windows(): - return (False, 'The extfs execution module cannot be loaded: only available on non-Windows systems.') + if salt.utils.platform.is_windows(): + return ( + False, + 'The extfs execution module cannot be loaded: only available on ' + 'non-Windows systems.' + ) return True diff --git a/salt/modules/file.py b/salt/modules/file.py index 6e0d68c178..c43a371afe 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -34,7 +34,7 @@ from collections import Iterable, Mapping from functools import reduce # pylint: disable=redefined-builtin # pylint: disable=import-error,no-name-in-module,redefined-builtin -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range, zip from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: enable=import-error,no-name-in-module,redefined-builtin @@ -47,11 +47,16 @@ except ImportError: # Import salt libs import salt.utils +import salt.utils.args import salt.utils.atomicfile -import salt.utils.find import salt.utils.filebuffer import salt.utils.files +import salt.utils.find +import salt.utils.itertools import salt.utils.locales +import salt.utils.path +import salt.utils.platform +import salt.utils.stringutils import salt.utils.templates import salt.utils.url from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError, get_error_message as _get_error_message @@ -78,8 +83,12 @@ def __virtual__(): Only work on POSIX-like systems ''' # win_file takes care of windows - if salt.utils.is_windows(): - return (False, 'The file execution module cannot be loaded: only available on non-Windows systems - use win_file instead.') + if salt.utils.platform.is_windows(): + return ( + False, + 'The file execution module cannot be loaded: only available on ' + 'non-Windows systems - use win_file instead.' + ) return True @@ -121,12 +130,12 @@ def _binary_replace(old, new): new_isbin = not salt.utils.istextfile(new) if any((old_isbin, new_isbin)): if all((old_isbin, new_isbin)): - return 'Replace binary file' + return u'Replace binary file' elif old_isbin: - return 'Replace binary file with text file' + return u'Replace binary file with text file' elif new_isbin: - return 'Replace text file with binary file' - return '' + return u'Replace text file with binary file' + return u'' def _get_bkroot(): @@ -549,17 +558,13 @@ def lsattr(path): if not os.path.exists(path): raise SaltInvocationError("File or directory does not exist.") - # Put quotes around path to ensure we can handle spaces in filename. - if not salt.utils.is_quoted(path): - path = '"{0}"'.format(path) - - cmd = 'lsattr {0}'.format(path) + cmd = ['lsattr', path] result = __salt__['cmd.run'](cmd, python_shell=False) results = {} for line in result.splitlines(): if not line.startswith('lsattr'): - vals = line.split() + vals = line.split(None, 1) results[vals[1]] = re.findall(r"[acdijstuADST]", vals[0]) return results @@ -598,7 +603,7 @@ def chattr(*args, **kwargs): salt '*' file.chattr foo1.txt foo2.txt operator=add attributes=ai salt '*' file.chattr foo3.txt operator=remove attributes=i version=2 ''' - args = [arg if salt.utils.is_quoted(arg) else '"{0}"'.format(arg) + args = [arg if salt.utils.stringutils.is_quoted(arg) else '"{0}"'.format(arg) for arg in args] operator = kwargs.pop('operator', None) @@ -844,18 +849,21 @@ def check_hash(path, file_hash): hash The hash to check against the file specified in the ``path`` argument. - For versions 2016.11.4 and newer, the hash can be specified without an + + .. 
versionchanged:: 2016.11.4 + + For this and newer versions the hash can be specified without an accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``), but for earlier releases it is necessary to also specify the hash type - in the format ``:`` (e.g. - ``md5:e138491e9d5b97023cea823fe17bac22``). + in the format ``=`` (e.g. + ``md5=e138491e9d5b97023cea823fe17bac22``). CLI Example: .. code-block:: bash salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22 - salt '*' file.check_hash /etc/fstab md5:e138491e9d5b97023cea823fe17bac22 + salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22 ''' path = os.path.expanduser(path) @@ -1099,7 +1107,7 @@ def sed(path, cmd = ['sed'] cmd.append('-i{0}'.format(backup) if backup else '-i') - cmd.extend(salt.utils.shlex_split(options)) + cmd.extend(salt.utils.args.shlex_split(options)) cmd.append( r'{limit}{negate_match}s/{before}/{after}/{flags}'.format( limit='/{0}/ '.format(limit) if limit else '', @@ -1146,7 +1154,7 @@ def sed_contains(path, options = options.replace('-r', '-E') cmd = ['sed'] - cmd.extend(salt.utils.shlex_split(options)) + cmd.extend(salt.utils.args.shlex_split(options)) cmd.append( r'{limit}s/{before}/$/{flags}'.format( limit='/{0}/ '.format(limit) if limit else '', @@ -1468,7 +1476,7 @@ def comment_line(path, if not found: return False - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): pre_user = get_user(path) pre_group = get_group(path) pre_mode = salt.utils.normalize_mode(get_mode(path)) @@ -1533,7 +1541,7 @@ def comment_line(path, else: os.remove(temp_file) - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): check_perms(path, None, pre_user, pre_group, pre_mode) # Return a diff using the two dictionaries @@ -1859,7 +1867,7 @@ def line(path, content=None, match=None, mode=None, location=None, with salt.utils.files.fopen(path, mode='r') as fp_: body = fp_.read() - body_before = hashlib.sha256(salt.utils.to_bytes(body)).hexdigest() + body_before = hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest() after = _regex_to_static(body, after) before = _regex_to_static(body, before) match = _regex_to_static(body, match) @@ -1987,7 +1995,7 @@ def line(path, content=None, match=None, mode=None, location=None, "Unable to ensure line without knowing " "where to put it before and/or after.") - changed = body_before != hashlib.sha256(salt.utils.to_bytes(body)).hexdigest() + changed = body_before != hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest() if backup and changed and __opts__['test'] is False: try: @@ -2032,6 +2040,7 @@ def replace(path, show_changes=True, ignore_if_missing=False, preserve_inode=True, + backslash_literal=False, ): ''' .. versionadded:: 0.17.0 @@ -2132,6 +2141,14 @@ def replace(path, filename. Hard links will then share an inode with the backup, instead (if using ``backup`` to create a backup copy). + backslash_literal : False + .. versionadded:: 2016.11.7 + + Interpret backslashes as literal backslashes for the repl and not + escape characters. This will help when using append/prepend so that + the backslashes are not interpreted for the repl on the second run of + the state. + If an equal sign (``=``) appears in an argument to a Salt command it is interpreted as a keyword argument in the format ``key=val``. 
That processing can be bypassed in order to pass an equal sign through to the @@ -2180,7 +2197,7 @@ def replace(path, ) flags_num = _get_flags(flags) - cpattern = re.compile(salt.utils.to_bytes(pattern), flags_num) + cpattern = re.compile(salt.utils.stringutils.to_bytes(pattern), flags_num) filesize = os.path.getsize(path) if bufsize == 'file': bufsize = filesize @@ -2189,23 +2206,23 @@ def replace(path, has_changes = False orig_file = [] # used for show_changes and change detection new_file = [] # used for show_changes and change detection - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): pre_user = get_user(path) pre_group = get_group(path) pre_mode = salt.utils.normalize_mode(get_mode(path)) # Avoid TypeErrors by forcing repl to be bytearray related to mmap # Replacement text may contains integer: 123 for example - repl = salt.utils.to_bytes(str(repl)) + repl = salt.utils.stringutils.to_bytes(str(repl)) if not_found_content: - not_found_content = salt.utils.to_bytes(not_found_content) + not_found_content = salt.utils.stringutils.to_bytes(not_found_content) found = False temp_file = None - content = salt.utils.to_str(not_found_content) if not_found_content and \ + content = salt.utils.stringutils.to_str(not_found_content) if not_found_content and \ (prepend_if_not_found or append_if_not_found) \ - else salt.utils.to_str(repl) + else salt.utils.stringutils.to_str(repl) try: # First check the whole file, determine whether to make the replacement @@ -2222,13 +2239,16 @@ def replace(path, access=mmap.ACCESS_READ) except (ValueError, mmap.error): # size of file in /proc is 0, but contains data - r_data = salt.utils.to_bytes("".join(r_file)) + r_data = salt.utils.stringutils.to_bytes("".join(r_file)) if search_only: # Just search; bail as early as a match is found if re.search(cpattern, r_data): return True # `with` block handles file closure else: - result, nrepl = re.subn(cpattern, repl, r_data, count) + result, nrepl = re.subn(cpattern, + repl.replace('\\', '\\\\') if backslash_literal else repl, + r_data, + count) # found anything? (even if no change) if nrepl > 0: @@ -2239,7 +2259,7 @@ def replace(path, if prepend_if_not_found or append_if_not_found: # Search for content, to avoid pre/appending the # content if it was pre/appended in a previous run. - if re.search(salt.utils.to_bytes('^{0}$'.format(re.escape(content))), + if re.search(salt.utils.stringutils.to_bytes('^{0}$'.format(re.escape(content))), r_data, flags=flags_num): # Content was found, so set found. @@ -2282,10 +2302,12 @@ def replace(path, r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ) - result, nrepl = re.subn(cpattern, repl, - r_data, count) + result, nrepl = re.subn(cpattern, + repl.replace('\\', '\\\\') if backslash_literal else repl, + r_data, + count) try: - w_file.write(salt.utils.to_str(result)) + w_file.write(salt.utils.stringutils.to_str(result)) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to write file '{0}'. 
Contents may " @@ -2325,7 +2347,7 @@ def replace(path, try: fh_ = salt.utils.atomicfile.atomic_open(path, 'w') for line in new_file: - fh_.write(salt.utils.to_str(line)) + fh_.write(salt.utils.stringutils.to_str(line)) finally: fh_.close() @@ -2367,12 +2389,12 @@ def replace(path, "Exception: {1}".format(temp_file, exc) ) - if not dry_run and not salt.utils.is_windows(): + if not dry_run and not salt.utils.platform.is_windows(): check_perms(path, None, pre_user, pre_group, pre_mode) def get_changes(): - orig_file_as_str = [salt.utils.to_str(x) for x in orig_file] - new_file_as_str = [salt.utils.to_str(x) for x in new_file] + orig_file_as_str = [salt.utils.stringutils.to_str(x) for x in orig_file] + new_file_as_str = [salt.utils.stringutils.to_str(x) for x in new_file] return ''.join(difflib.unified_diff(orig_file_as_str, new_file_as_str)) if show_changes: @@ -2689,7 +2711,7 @@ def patch(originalfile, patchfile, options='', dry_run=False): salt '*' file.patch /opt/file.txt /tmp/file.txt.patch ''' - patchpath = salt.utils.which('patch') + patchpath = salt.utils.path.which('patch') if not patchpath: raise CommandExecutionError( 'patch executable not found. Is the distribution\'s patch ' @@ -2697,7 +2719,7 @@ def patch(originalfile, patchfile, options='', dry_run=False): ) cmd = [patchpath] - cmd.extend(salt.utils.shlex_split(options)) + cmd.extend(salt.utils.args.shlex_split(options)) if dry_run: if __grains__['kernel'] in ('FreeBSD', 'OpenBSD'): cmd.append('-C') @@ -2877,7 +2899,7 @@ def append(path, *args, **kwargs): # Make sure we have a newline at the end of the file. Do this in binary # mode so SEEK_END with nonzero offset will work. with salt.utils.files.fopen(path, 'rb+') as ofile: - linesep = salt.utils.to_bytes(os.linesep) + linesep = salt.utils.stringutils.to_bytes(os.linesep) try: ofile.seek(-len(linesep), os.SEEK_END) except IOError as exc: @@ -3272,7 +3294,7 @@ def copy(src, dst, recurse=False, remove_existing=False): if not os.path.exists(src): raise CommandExecutionError('No such file or directory \'{0}\''.format(src)) - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): pre_user = get_user(src) pre_group = get_group(src) pre_mode = salt.utils.normalize_mode(get_mode(src)) @@ -3295,7 +3317,7 @@ def copy(src, dst, recurse=False, remove_existing=False): 'Could not copy \'{0}\' to \'{1}\''.format(src, dst) ) - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): check_perms(dst, None, pre_user, pre_group, pre_mode) return True @@ -4309,7 +4331,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) perms['lmode'] = salt.utils.normalize_mode(cur['mode']) is_dir = os.path.isdir(name) - if not salt.utils.is_windows and not is_dir: + if not salt.utils.platform.is_windows() and not is_dir: # List attributes on file perms['lattrs'] = ''.join(lsattr(name)[name]) # Remove attributes on file so changes can be enforced. 
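The check_perms hunks immediately above and below fix more than the import path: the removed lines test ``not salt.utils.is_windows`` without calling the function, and a function object is always truthy, so that branch could never be taken and the guarded lsattr bookkeeping never ran. The replacement actually calls ``salt.utils.platform.is_windows()``. A minimal sketch of the difference (the function below is an illustrative stand-in, not code from the patch):

    def is_windows():
        return False

    # Referencing the function object: functions are always truthy, so the
    # negation is always False and a guard written this way can never fire.
    print(not is_windows)      # False

    # Calling it: the guard now depends on the real return value.
    print(not is_windows())    # True, because this sketch returns False
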
@@ -4341,20 +4363,20 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) if user: if isinstance(user, int): user = uid_to_user(user) - if (salt.utils.is_windows() and + if (salt.utils.platform.is_windows() and user_to_uid(user) != user_to_uid(perms['luser']) ) or ( - not salt.utils.is_windows() and user != perms['luser'] + not salt.utils.platform.is_windows() and user != perms['luser'] ): perms['cuser'] = user if group: if isinstance(group, int): group = gid_to_group(group) - if (salt.utils.is_windows() and + if (salt.utils.platform.is_windows() and group_to_gid(group) != group_to_gid(perms['lgroup']) ) or ( - not salt.utils.is_windows() and group != perms['lgroup'] + not salt.utils.platform.is_windows() and group != perms['lgroup'] ): perms['cgroup'] = group @@ -4376,12 +4398,12 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) if user: if isinstance(user, int): user = uid_to_user(user) - if (salt.utils.is_windows() and + if (salt.utils.platform.is_windows() and user_to_uid(user) != user_to_uid( get_user(name, follow_symlinks=follow_symlinks)) and user != '' ) or ( - not salt.utils.is_windows() and + not salt.utils.platform.is_windows() and user != get_user(name, follow_symlinks=follow_symlinks) and user != '' ): @@ -4396,11 +4418,11 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) if group: if isinstance(group, int): group = gid_to_group(group) - if (salt.utils.is_windows() and + if (salt.utils.platform.is_windows() and group_to_gid(group) != group_to_gid( get_group(name, follow_symlinks=follow_symlinks)) and user != '') or ( - not salt.utils.is_windows() and + not salt.utils.platform.is_windows() and group != get_group(name, follow_symlinks=follow_symlinks) and user != '' ): @@ -4420,7 +4442,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) if __opts__['test'] is True and ret['changes']: ret['result'] = None - if not salt.utils.is_windows and not is_dir: + if not salt.utils.platform.is_windows() and not is_dir: # Replace attributes on file if it had been removed if perms['lattrs']: chattr(name, operator='add', attributes=perms['lattrs']) @@ -4663,20 +4685,11 @@ def check_file_meta( if not sfn and source: sfn = __salt__['cp.cache_file'](source, saltenv) if sfn: - if __salt__['config.option']('obfuscate_templates'): - changes['diff'] = '' - else: - # Check to see if the files are bins - bdiff = _binary_replace(name, sfn) - if bdiff: - changes['diff'] = bdiff - else: - with salt.utils.files.fopen(sfn, 'r') as src: - slines = src.readlines() - with salt.utils.files.fopen(name, 'r') as name_: - nlines = name_.readlines() - changes['diff'] = \ - ''.join(difflib.unified_diff(nlines, slines)) + try: + changes['diff'] = get_diff( + sfn, name, template=True, show_filenames=False) + except CommandExecutionError as exc: + changes['diff'] = exc.strerror else: changes['sum'] = 'Checksum differs' @@ -4684,28 +4697,25 @@ def check_file_meta( # Write a tempfile with the static contents tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX, text=True) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): contents = os.linesep.join( _splitlines_preserving_trailing_newline(contents)) with salt.utils.files.fopen(tmp, 'w') as tmp_: - tmp_.write(str(contents)) + tmp_.write(salt.utils.stringutils.to_str(contents)) # Compare the static contents with the named file - with salt.utils.files.fopen(tmp, 'r') as src: - slines = src.readlines() - with 
salt.utils.files.fopen(name, 'r') as name_: - nlines = name_.readlines() + try: + differences = get_diff(name, tmp, show_filenames=False) + except CommandExecutionError as exc: + log.error('Failed to diff files: {0}'.format(exc)) + differences = exc.strerror __clean_tmp(tmp) - if ''.join(nlines) != ''.join(slines): + if differences: if __salt__['config.option']('obfuscate_templates'): changes['diff'] = '' else: - if salt.utils.istextfile(name): - changes['diff'] = \ - ''.join(difflib.unified_diff(nlines, slines)) - else: - changes['diff'] = 'Replace binary file with text file' + changes['diff'] = differences - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): # Check owner if (user is not None and user != lstats['user'] @@ -4735,44 +4745,113 @@ def check_file_meta( return changes -def get_diff( - minionfile, - masterfile, - saltenv='base'): +def get_diff(file1, + file2, + saltenv='base', + show_filenames=True, + show_changes=True, + template=False): ''' - Return unified diff of file compared to file on master + Return unified diff of two files - CLI Example: + file1 + The first file to feed into the diff utility + + .. versionchanged:: Oxygen + Can now be either a local or remote file. In earlier releases, + thuis had to be a file local to the minion. + + file2 + The second file to feed into the diff utility + + .. versionchanged:: Oxygen + Can now be either a local or remote file. In earlier releases, this + had to be a file on the salt fileserver (i.e. + ``salt://somefile.txt``) + + show_filenames : True + Set to ``False`` to hide the filenames in the top two lines of the + diff. + + show_changes : True + If set to ``False``, and there are differences, then instead of a diff + a simple message stating that show_changes is set to ``False`` will be + returned. + + template : False + Set to ``True`` if two templates are being compared. This is not useful + except for within states, with the ``obfuscate_templates`` option set + to ``True``. + + .. versionadded:: Oxygen + + CLI Examples: .. code-block:: bash salt '*' file.get_diff /home/fred/.vimrc salt://users/fred/.vimrc + salt '*' file.get_diff /tmp/foo.txt /tmp/bar.txt ''' - minionfile = os.path.expanduser(minionfile) + files = (file1, file2) + paths = [] + errors = [] - ret = '' + for filename in files: + try: + # Local file paths will just return the same path back when passed + # to cp.cache_file. 
+ cached_path = __salt__['cp.cache_file'](filename, saltenv) + if cached_path is False: + errors.append( + u'File {0} not found'.format( + salt.utils.stringutils.to_unicode(filename) + ) + ) + continue + paths.append(cached_path) + except MinionError as exc: + errors.append(salt.utils.stringutils.to_unicode(exc.__str__())) + continue - if not os.path.exists(minionfile): - ret = 'File {0} does not exist on the minion'.format(minionfile) + if errors: + raise CommandExecutionError( + 'Failed to cache one or more files', + info=errors + ) + + args = [] + for idx, filename in enumerate(files): + try: + with salt.utils.files.fopen(filename, 'r') as fp_: + args.append(fp_.readlines()) + except (IOError, OSError) as exc: + raise CommandExecutionError( + 'Failed to read {0}: {1}'.format( + salt.utils.stringutils.to_str(filename), + exc.strerror + ) + ) + + if args[0] != args[1]: + if template and __salt__['config.option']('obfuscate_templates'): + ret = u'' + elif not show_changes: + ret = u'' + else: + bdiff = _binary_replace(*files) + if bdiff: + ret = bdiff + else: + if show_filenames: + args.extend( + [salt.utils.stringutils.to_str(x) for x in files] + ) + ret = salt.utils.locales.sdecode( + ''.join(difflib.unified_diff(*args)) # pylint: disable=no-value-for-parameter + ) return ret - sfn = __salt__['cp.cache_file'](masterfile, saltenv) - if sfn: - with salt.utils.files.fopen(sfn, 'r') as src: - slines = src.readlines() - with salt.utils.files.fopen(minionfile, 'r') as name_: - nlines = name_.readlines() - if ''.join(nlines) != ''.join(slines): - bdiff = _binary_replace(minionfile, sfn) - if bdiff: - ret += bdiff - else: - ret += ''.join(difflib.unified_diff(nlines, slines, - minionfile, masterfile)) - else: - ret = 'Failed to copy file from master' - - return ret + return u'' def manage_file(name, @@ -4978,19 +5057,11 @@ def manage_file(name, elif not show_changes: ret['changes']['diff'] = '' else: - # Check to see if the files are bins - bdiff = _binary_replace(real_name, sfn) - if bdiff: - ret['changes']['diff'] = bdiff - else: - with salt.utils.files.fopen(sfn, 'r') as src: - slines = src.readlines() - with salt.utils.files.fopen(real_name, 'r') as name_: - nlines = name_.readlines() - - sndiff = ''.join(difflib.unified_diff(nlines, slines)) - if sndiff: - ret['changes']['diff'] = sndiff + try: + ret['changes']['diff'] = get_diff( + real_name, sfn, show_filenames=False) + except CommandExecutionError as exc: + ret['changes']['diff'] = exc.strerror # Pre requisites are met, and the file needs to be replaced, do it try: @@ -5007,7 +5078,7 @@ def manage_file(name, # Write the static contents to a temporary file tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX, text=True) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): contents = os.linesep.join( _splitlines_preserving_trailing_newline(contents)) with salt.utils.files.fopen(tmp, 'w') as tmp_: @@ -5015,27 +5086,21 @@ def manage_file(name, log.debug('File will be encoded with {0}'.format(encoding)) tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors)) else: - tmp_.write(str(contents)) + tmp_.write(salt.utils.stringutils.to_str(contents)) - # Compare contents of files to know if we need to replace - with salt.utils.files.fopen(tmp, 'r') as src: - slines = src.readlines() - with salt.utils.files.fopen(real_name, 'r') as name_: - nlines = name_.readlines() - different = ''.join(slines) != ''.join(nlines) + try: + differences = get_diff( + real_name, tmp, show_filenames=False, + 
show_changes=show_changes, template=True) - if different: - if __salt__['config.option']('obfuscate_templates'): - ret['changes']['diff'] = '' - elif not show_changes: - ret['changes']['diff'] = '' - else: - if salt.utils.istextfile(real_name): - ret['changes']['diff'] = \ - ''.join(difflib.unified_diff(nlines, slines)) - else: - ret['changes']['diff'] = \ - 'Replace binary file with text file' + except CommandExecutionError as exc: + ret.setdefault('warnings', []).append( + 'Failed to detect changes to file: {0}'.format(exc.strerror) + ) + differences = '' + + if differences: + ret['changes']['diff'] = differences # Pre requisites are met, the file needs to be replaced, do it try: @@ -5086,7 +5151,7 @@ def manage_file(name, ret['changes']['diff'] = \ 'Replace symbolic link with regular file' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): ret = check_perms(name, ret, kwargs.get('win_owner'), @@ -5098,7 +5163,9 @@ def manage_file(name, ret, _ = check_perms(name, ret, user, group, mode, attrs, follow_symlinks) if ret['changes']: - ret['comment'] = 'File {0} updated'.format(name) + ret['comment'] = u'File {0} updated'.format( + salt.utils.locales.sdecode(name) + ) elif not ret['changes'] and ret['result']: ret['comment'] = u'File {0} is in the correct state'.format( @@ -5112,7 +5179,7 @@ def manage_file(name, def _set_mode_and_make_dirs(name, dir_mode, mode, user, group): # check for existence of windows drive letter - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): drive, _ = os.path.splitdrive(name) if drive and not os.path.exists(drive): __clean_tmp(sfn) @@ -5129,7 +5196,7 @@ def manage_file(name, mode_list[idx] = str(int(mode_list[idx]) | 1) dir_mode = ''.join(mode_list) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # This function resides in win_file.py and will be available # on Windows. The local function will be overridden # pylint: disable=E1121 @@ -5217,7 +5284,7 @@ def manage_file(name, # Write the static contents to a temporary file tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX, text=True) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): contents = os.linesep.join( _splitlines_preserving_trailing_newline(contents)) with salt.utils.files.fopen(tmp, 'w') as tmp_: @@ -5225,7 +5292,7 @@ def manage_file(name, log.debug('File will be encoded with {0}'.format(encoding)) tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors)) else: - tmp_.write(str(contents)) + tmp_.write(salt.utils.stringutils.to_str(contents)) # Copy into place salt.utils.files.copyfile(tmp, @@ -5243,14 +5310,14 @@ def manage_file(name, # This is a new file, if no mode specified, use the umask to figure # out what mode to use for the new file. 
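# Worked example for the umask-to-mode calculation that follows (an
# illustrative aside; 0o022 is only an assumed, typical umask): reading the
# umask and computing oct((0o777 ^ 0o022) & 0o666) gives 0o644, so a newly
# managed file defaults to rw-r--r-- when no explicit mode was requested.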
- if mode is None and not salt.utils.is_windows(): + if mode is None and not salt.utils.platform.is_windows(): # Get current umask mask = os.umask(0) os.umask(mask) # Calculate the mode value that results from the umask mode = oct((0o777 ^ mask) & 0o666) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): ret = check_perms(name, ret, kwargs.get('win_owner'), @@ -5710,7 +5777,7 @@ def list_backups(path, limit=None): bkroot = _get_bkroot() parent_dir, basename = os.path.split(path) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # ':' is an illegal filesystem path character on Windows src_dir = parent_dir.replace(':', '_') else: @@ -5724,7 +5791,7 @@ def list_backups(path, limit=None): files = {} for fname in [x for x in os.listdir(bkdir) if os.path.isfile(os.path.join(bkdir, x))]: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # ':' is an illegal filesystem path character on Windows strpfmt = '{0}_%a_%b_%d_%H-%M-%S_%f_%Y'.format(basename) else: @@ -5735,7 +5802,7 @@ def list_backups(path, limit=None): # File didn't match the strp format string, so it's not a backup # for this file. Move on to the next one. continue - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): str_format = '%a %b %d %Y %H-%M-%S.%f' else: str_format = '%a %b %d %Y %H:%M:%S.%f' @@ -5865,7 +5932,7 @@ def restore_backup(path, backup_id): '{1}'.format(backup['Location'], path) # Try to set proper ownership - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): try: fstat = os.stat(path) except (OSError, IOError): @@ -5969,9 +6036,9 @@ def grep(path, split_opts = [] for opt in opts: try: - split = salt.utils.shlex_split(opt) + split = salt.utils.args.shlex_split(opt) except AttributeError: - split = salt.utils.shlex_split(str(opt)) + split = salt.utils.args.shlex_split(str(opt)) if len(split) > 1: raise SaltInvocationError( 'Passing multiple command line arguments in a single string ' diff --git a/salt/modules/firewalld.py b/salt/modules/firewalld.py index 4cb0831b90..8cf75b9f88 100644 --- a/salt/modules/firewalld.py +++ b/salt/modules/firewalld.py @@ -12,7 +12,7 @@ import re # Import Salt Libs from salt.exceptions import CommandExecutionError -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -21,7 +21,7 @@ def __virtual__(): ''' Check to see if firewall-cmd exists ''' - if salt.utils.which('firewall-cmd'): + if salt.utils.path.which('firewall-cmd'): return True return (False, 'The firewalld execution module cannot be loaded: the firewall-cmd binary is not in the path.') @@ -31,7 +31,7 @@ def __firewall_cmd(cmd): ''' Return the firewall-cmd location ''' - firewall_cmd = '{0} {1}'.format(salt.utils.which('firewall-cmd'), cmd) + firewall_cmd = '{0} {1}'.format(salt.utils.path.which('firewall-cmd'), cmd) out = __salt__['cmd.run_all'](firewall_cmd) if out['retcode'] != 0: diff --git a/salt/modules/freebsd_update.py b/salt/modules/freebsd_update.py index e59d43a288..9ffb2ac8ea 100644 --- a/salt/modules/freebsd_update.py +++ b/salt/modules/freebsd_update.py @@ -16,8 +16,8 @@ from __future__ import absolute_import import logging # Import salt libs -import salt -import salt.ext.six as six +import salt.utils.path +from salt.ext import six from salt.exceptions import CommandNotFoundError log = logging.getLogger(__name__) @@ -49,7 +49,7 @@ def _cmd(**kwargs): executed. It checks if any arguments are given to freebsd-update and appends them accordingly. 
''' - update_cmd = salt.utils.which('freebsd-update') + update_cmd = salt.utils.path.which('freebsd-update') if not update_cmd: raise CommandNotFoundError('"freebsd-update" command not found') diff --git a/salt/modules/freebsdjail.py b/salt/modules/freebsdjail.py index 17e8be6db5..9b2e749680 100644 --- a/salt/modules/freebsdjail.py +++ b/salt/modules/freebsdjail.py @@ -10,8 +10,9 @@ import re import subprocess # Import salt libs -import salt.utils +import salt.utils.args import salt.utils.files +import salt.utils.stringutils # Define the module's virtual name __virtualname__ = 'jail' @@ -125,7 +126,7 @@ def show_config(jail): ret = {} if subprocess.call(["jls", "-nq", "-j", jail]) == 0: jls = subprocess.check_output(["jls", "-nq", "-j", jail]) # pylint: disable=minimum-python-version - jailopts = salt.utils.shlex_split(salt.utils.to_str(jls)) + jailopts = salt.utils.args.shlex_split(salt.utils.stringutils.to_str(jls)) for jailopt in jailopts: if '=' not in jailopt: ret[jailopt.strip().rstrip(";")] = '1' diff --git a/salt/modules/freebsdpkg.py b/salt/modules/freebsdpkg.py index 8d9241a77a..34d242864f 100644 --- a/salt/modules/freebsdpkg.py +++ b/salt/modules/freebsdpkg.py @@ -83,7 +83,7 @@ import re import salt.utils import salt.utils.pkg from salt.exceptions import CommandExecutionError, MinionError -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/modules/freebsdports.py b/salt/modules/freebsdports.py index 1776a0dad1..1f2c0c3e32 100644 --- a/salt/modules/freebsdports.py +++ b/salt/modules/freebsdports.py @@ -27,7 +27,7 @@ import salt.utils import salt.utils.files from salt.ext.six import string_types from salt.exceptions import SaltInvocationError, CommandExecutionError -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/modules/freebsdservice.py b/salt/modules/freebsdservice.py index 552cb424a2..f5d791b0d5 100644 --- a/salt/modules/freebsdservice.py +++ b/salt/modules/freebsdservice.py @@ -17,7 +17,7 @@ import fnmatch import re # Import salt libs -import salt.utils +import salt.utils.path import salt.utils.decorators as decorators import salt.utils.files from salt.exceptions import CommandNotFoundError @@ -51,11 +51,11 @@ def _cmd(jail=None): Support for jail (representing jid or jail name) keyword argument in kwargs ''' - service = salt.utils.which('service') + service = salt.utils.path.which('service') if not service: raise CommandNotFoundError('\'service\' command not found') if jail: - jexec = salt.utils.which('jexec') + jexec = salt.utils.path.which('jexec') if not jexec: raise CommandNotFoundError('\'jexec\' command not found') service = '{0} {1} {2}'.format(jexec, jail, service) @@ -71,7 +71,7 @@ def _get_jail_path(jail): jail The jid or jail name ''' - jls = salt.utils.which('jls') + jls = salt.utils.path.which('jls') if not jls: raise CommandNotFoundError('\'jls\' command not found') jails = __salt__['cmd.run_stdout']('{0} -n jid name path'.format(jls)) diff --git a/salt/modules/gem.py b/salt/modules/gem.py index 32e1e4ba9c..ade9fa1881 100644 --- a/salt/modules/gem.py +++ b/salt/modules/gem.py @@ -8,13 +8,11 @@ from __future__ import absolute_import import re import logging -# Import salt libs +# Import Salt libs import salt.utils.itertools +import salt.utils.platform from salt.exceptions import CommandExecutionError -# Import salt libs -import salt.utils - __func_alias__ = { 'list_': 'list' } @@ -49,7 +47,7 @@ def _gem(command, ruby=None, runas=None, gem_bin=None): if 
__salt__['rvm.is_installed'](runas=runas): return __salt__['rvm.do'](ruby, cmdline, runas=runas) - if not salt.utils.is_windows() \ + if not salt.utils.platform.is_windows() \ and __salt__['rbenv.is_installed'](runas=runas): if ruby is None: return __salt__['rbenv.do'](cmdline, runas=runas) diff --git a/salt/modules/genesis.py b/salt/modules/genesis.py index 5680f00829..a1a45025c9 100644 --- a/salt/modules/genesis.py +++ b/salt/modules/genesis.py @@ -17,7 +17,7 @@ except ImportError: from pipes import quote as _cmd_quote # Import salt libs -import salt.utils +import salt.utils.path import salt.utils.yast import salt.utils.preseed import salt.utils.kickstart @@ -549,7 +549,7 @@ def avail_platforms(): for platform in CMD_MAP: ret[platform] = True for cmd in CMD_MAP[platform]: - if not salt.utils.which(cmd): + if not salt.utils.path.which(cmd): ret[platform] = False return ret @@ -663,7 +663,7 @@ def ldd_deps(filename, ret=None): salt myminion genesis.ldd_deps /bin/bash ''' if not os.path.exists(filename): - filename = salt.utils.which(filename) + filename = salt.utils.path.which(filename) if ret is None: ret = [] diff --git a/salt/modules/git.py b/salt/modules/git.py index 6c54a374ce..d52922ea34 100644 --- a/salt/modules/git.py +++ b/salt/modules/git.py @@ -12,8 +12,11 @@ import re # Import salt libs import salt.utils +import salt.utils.args import salt.utils.files import salt.utils.itertools +import salt.utils.path +import salt.utils.platform import salt.utils.url from salt.exceptions import SaltInvocationError, CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion @@ -30,7 +33,7 @@ def __virtual__(): ''' Only load if git exists on the system ''' - if salt.utils.which('git') is None: + if salt.utils.path.which('git') is None: return (False, 'The git execution module cannot be loaded: git unavailable.') else: @@ -64,10 +67,10 @@ def _config_getter(get_opt, Common code for config.get_* functions, builds and runs the git CLI command and returns the result dict for the calling function to parse. 
''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) if cwd is None: if not global_: @@ -133,7 +136,7 @@ def _format_opts(opts): if not isinstance(opts, six.string_types): opts = [str(opts)] else: - opts = salt.utils.shlex_split(opts) + opts = salt.utils.args.shlex_split(opts) try: if opts[-1] == '--': # Strip the '--' if it was passed at the end of the opts string, @@ -185,15 +188,24 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None, identity = [identity] # try each of the identities, independently + tmp_identity_file = None for id_file in identity: if 'salt://' in id_file: - _id_file = id_file - id_file = __salt__['cp.cache_file'](id_file, saltenv) + with salt.utils.files.set_umask(0o077): + tmp_identity_file = salt.utils.mkstemp() + _id_file = id_file + id_file = __salt__['cp.get_file'](id_file, + tmp_identity_file, + saltenv) if not id_file: log.error('identity {0} does not exist.'.format(_id_file)) + __salt__['file.remove'](tmp_identity_file) continue else: - __salt__['file.set_mode'](id_file, '0600') + if user: + os.chown(id_file, + __salt__['file.user_to_uid'](user), + -1) else: if not __salt__['file.file_exists'](id_file): missing_keys.append(id_file) @@ -210,7 +222,7 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None, salt.utils.templates.TEMPLATE_DIRNAME, 'git/ssh-id-wrapper' ) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): for suffix in ('', ' (x86)'): ssh_exe = ( 'C:\\Program Files{0}\\Git\\bin\\ssh.exe' @@ -261,9 +273,14 @@ def _git_run(command, cwd=None, user=None, password=None, identity=None, redirect_stderr=redirect_stderr, **kwargs) finally: - if not salt.utils.is_windows() and 'GIT_SSH' in env: + if not salt.utils.platform.is_windows() and 'GIT_SSH' in env: os.remove(env['GIT_SSH']) + # Cleanup the temporary identity file + if tmp_identity_file and os.path.exists(tmp_identity_file): + log.debug('Removing identity file {0}'.format(tmp_identity_file)) + __salt__['file.remove'](tmp_identity_file) + # If the command was successful, no need to try additional IDs if result['retcode'] == 0: return result @@ -560,10 +577,10 @@ def archive(cwd, # allows us to accept 'format' as an argument to this function without # shadowing the format() global, while also not allowing unwanted arguments # to be passed. - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) format_ = kwargs.pop('format', None) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) command = ['git'] + _format_git_opts(git_opts) command.append('archive') @@ -921,7 +938,7 @@ def clone(cwd, # https://github.com/saltstack/salt/issues/15519#issuecomment-128531310 # On Windows, just fall back to None (runs git clone command using the # home directory as the cwd). 
- clone_cwd = '/tmp' if not salt.utils.is_windows() else None + clone_cwd = '/tmp' if not salt.utils.platform.is_windows() else None _git_run(command, cwd=clone_cwd, user=user, @@ -1267,11 +1284,11 @@ def config_set(key, salt myminion git.config_set user.email me@example.com cwd=/path/to/repo salt myminion git.config_set user.email foo@bar.com global=True ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) add_ = kwargs.pop('add', False) global_ = kwargs.pop('global', False) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) if cwd is None: if not global_: @@ -1394,11 +1411,11 @@ def config_unset(key, salt myminion git.config_unset /path/to/repo foo.bar salt myminion git.config_unset /path/to/repo foo.bar all=True ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) all_ = kwargs.pop('all', False) global_ = kwargs.pop('global', False) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) if cwd is None: if not global_: @@ -2222,10 +2239,10 @@ def list_worktrees(cwd, if not _check_worktree_support(failhard=True): return {} cwd = _expand_path(cwd, user) - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) all_ = kwargs.pop('all', False) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) if all_ and stale: raise CommandExecutionError( @@ -2679,9 +2696,9 @@ def merge(cwd, # .. or merge another rev salt myminion git.merge /path/to/repo rev=upstream/foo ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) cwd = _expand_path(cwd, user) command = ['git'] + _format_git_opts(git_opts) @@ -2811,10 +2828,10 @@ def merge_base(cwd, salt myminion git.merge_base /path/to/repo refs=mybranch fork_point=upstream/master ''' cwd = _expand_path(cwd, user) - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) all_ = kwargs.pop('all', False) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) if all_ and (independent or is_ancestor or fork_point): raise SaltInvocationError( @@ -3177,9 +3194,9 @@ def push(cwd, # Delete remote branch 'upstream/temp' salt myminion git.push /path/to/repo upstream :temp ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) cwd = _expand_path(cwd, user) command = ['git'] + _format_git_opts(git_opts) @@ -3269,7 +3286,7 @@ def rebase(cwd, command.extend(opts) if not isinstance(rev, six.string_types): rev = str(rev) - command.extend(salt.utils.shlex_split(rev)) + command.extend(salt.utils.args.shlex_split(rev)) return _git_run(command, cwd=cwd, user=user, @@ -4189,10 +4206,10 @@ def submodule(cwd, # Unregister submodule (2015.8.0 and later) salt myminion git.submodule /path/to/repo/sub/repo deinit ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) init_ = kwargs.pop('init', False) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) cwd = _expand_path(cwd, user) if init_: @@ -4454,10 +4471,10 @@ def worktree_add(cwd, salt myminion git.worktree_add /path/to/repo/main ../hotfix branch=hotfix21 ref=v2.1.9.3 ''' 
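Several functions in these git.py hunks (``_config_getter``, ``archive``, ``config_set``, ``config_unset``, ``list_worktrees``, ``submodule``, ``worktree_add``) keep options such as ``global``, ``all``, ``format``, ``branch`` and ``init`` out of their signatures and pop them from ``**kwargs`` instead — ``global`` is a Python keyword and ``all``/``format`` shadow builtins — with anything left over rejected as invalid. A rough, self-contained sketch of that pattern (a simplified stand-in for the ``salt.utils.args.clean_kwargs`` / ``invalid_kwargs`` pair used in the diff; the function name here is invented):

    def config_unset_sketch(key, **kwargs):
        # 'global' is a Python keyword and 'all' shadows a builtin, so neither
        # can be a normal parameter name; pull them out of kwargs instead.
        global_ = kwargs.pop('global', False)
        all_ = kwargs.pop('all', False)
        if kwargs:
            # Same idea as salt.utils.args.invalid_kwargs(): reject leftovers.
            raise TypeError('Invalid kwargs: {0}'.format(', '.join(sorted(kwargs))))
        return key, global_, all_

Called as ``config_unset_sketch('foo.bar', **{'global': True})``, the reserved word still reaches the function as a keyword argument without ever appearing in its signature, which is the point of the pattern.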
_check_worktree_support() - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) branch_ = kwargs.pop('branch', None) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) cwd = _expand_path(cwd, user) if branch_ and detach: diff --git a/salt/modules/github.py b/salt/modules/github.py index b8ea1fcb77..8bbf155d6a 100644 --- a/salt/modules/github.py +++ b/salt/modules/github.py @@ -36,7 +36,7 @@ import logging # Import Salt Libs from salt.exceptions import CommandExecutionError -import salt.ext.six as six +from salt.ext import six import salt.utils.http # Import third party libs diff --git a/salt/modules/glusterfs.py b/salt/modules/glusterfs.py index fb26e1c610..ce25ed63d8 100644 --- a/salt/modules/glusterfs.py +++ b/salt/modules/glusterfs.py @@ -10,10 +10,13 @@ import sys import xml.etree.ElementTree as ET # Import salt libs -import salt.utils -import salt.utils.cloud as suc +import salt.utils.cloud +import salt.utils.path from salt.exceptions import SaltInvocationError, CommandExecutionError +# Import 3rd-party libs +from salt.ext import six + log = logging.getLogger(__name__) @@ -21,7 +24,7 @@ def __virtual__(): ''' Only load this module if the gluster command exists ''' - if salt.utils.which('gluster'): + if salt.utils.path.which('gluster'): return True return (False, 'glusterfs server is not installed') @@ -201,7 +204,7 @@ def peer(name): ''' - if suc.check_name(name, 'a-zA-Z0-9._-'): + if salt.utils.cloud.check_name(name, 'a-zA-Z0-9._-'): raise SaltInvocationError( 'Invalid characters in peer name "{0}"'.format(name)) @@ -254,7 +257,7 @@ def create_volume(name, bricks, stripe=False, replica=False, device_vg=False, "gluster2:/export/vol2/brick"]' replica=2 start=True ''' # If single brick given as a string, accept it - if isinstance(bricks, str): + if isinstance(bricks, six.string_types): bricks = [bricks] # Error for block devices with multiple bricks @@ -531,7 +534,7 @@ def add_volume_bricks(name, bricks): cmd = 'volume add-brick {0}'.format(name) - if isinstance(bricks, str): + if isinstance(bricks, six.string_types): bricks = [bricks] volume_bricks = [x['path'] for x in volinfo[name]['bricks'].values()] diff --git a/salt/modules/gpg.py b/salt/modules/gpg.py index 57d875b16f..ac3427edd2 100644 --- a/salt/modules/gpg.py +++ b/salt/modules/gpg.py @@ -21,13 +21,13 @@ import re import time # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path from salt.exceptions import SaltInvocationError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Set up logging log = logging.getLogger(__name__) @@ -86,7 +86,7 @@ def _gpg(): Returns the path to the gpg binary ''' # Get the path to the gpg binary. 
- return salt.utils.which('gpg') + return salt.utils.path.which('gpg') def __virtual__(): diff --git a/salt/modules/grains.py b/salt/modules/grains.py index bfd06b0f39..e59b4b1005 100644 --- a/salt/modules/grains.py +++ b/salt/modules/grains.py @@ -16,7 +16,7 @@ import yaml from functools import reduce # pylint: disable=redefined-builtin # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils import salt.utils.compat import salt.utils.files diff --git a/salt/modules/guestfs.py b/salt/modules/guestfs.py index d521a935e6..a9f80a693f 100644 --- a/salt/modules/guestfs.py +++ b/salt/modules/guestfs.py @@ -13,7 +13,7 @@ import hashlib import logging # Import Salt libs -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -22,7 +22,7 @@ def __virtual__(): ''' Only load if libguestfs python bindings are installed ''' - if salt.utils.which('guestmount'): + if salt.utils.path.which('guestmount'): return 'guestfs' return (False, 'The guestfs execution module cannot be loaded: guestmount binary not in path.') diff --git a/salt/modules/hadoop.py b/salt/modules/hadoop.py index 3a29c49821..65668619bb 100644 --- a/salt/modules/hadoop.py +++ b/salt/modules/hadoop.py @@ -12,7 +12,7 @@ Support for hadoop from __future__ import absolute_import # Import salt libs -import salt.utils +import salt.utils.path __authorized_modules__ = ['version', 'namenode', 'dfsadmin', 'dfs', 'fs'] @@ -21,7 +21,7 @@ def __virtual__(): ''' Check if hadoop is present, then load the module ''' - if salt.utils.which('hadoop') or salt.utils.which('hdfs'): + if salt.utils.path.which('hadoop') or salt.utils.path.which('hdfs'): return 'hadoop' return (False, 'The hadoop execution module cannot be loaded: hadoop or hdfs binary not in path.') @@ -40,7 +40,7 @@ def _hadoop_cmd(module, command, *args): E.g.: hadoop dfs -ls / ''' tool = 'hadoop' - if salt.utils.which('hdfs'): + if salt.utils.path.which('hdfs'): tool = 'hdfs' out = None diff --git a/salt/modules/hashutil.py b/salt/modules/hashutil.py index 40e1f55cb7..f7fb98cc1d 100644 --- a/salt/modules/hashutil.py +++ b/salt/modules/hashutil.py @@ -11,10 +11,11 @@ import hmac # Import Salt libs import salt.exceptions -import salt.ext.six as six +from salt.ext import six import salt.utils import salt.utils.files import salt.utils.hashutils +import salt.utils.stringutils if six.PY2: import StringIO @@ -281,7 +282,7 @@ def github_signature(string, shared_secret, challenge_hmac): key = shared_secret hashtype, challenge = challenge_hmac.split('=') if six.PY3: - msg = salt.utils.to_bytes(msg) - key = salt.utils.to_bytes(key) + msg = salt.utils.stringutils.to_bytes(msg) + key = salt.utils.stringutils.to_bytes(key) hmac_hash = hmac.new(key, msg, getattr(hashlib, hashtype)) return hmac_hash.hexdigest() == challenge diff --git a/salt/modules/heat.py b/salt/modules/heat.py index 81474b67c4..d3e01f8d9a 100644 --- a/salt/modules/heat.py +++ b/salt/modules/heat.py @@ -48,7 +48,7 @@ import logging import yaml # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.files from salt.exceptions import SaltInvocationError diff --git a/salt/modules/hg.py b/salt/modules/hg.py index 3ff524cf32..d4992eb34a 100644 --- a/salt/modules/hg.py +++ b/salt/modules/hg.py @@ -10,6 +10,7 @@ import logging # Import salt libs from salt.exceptions import CommandExecutionError import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -18,7 +19,7 @@ def __virtual__(): ''' Only load if hg is installed ''' - if 
salt.utils.which('hg') is None: + if salt.utils.path.which('hg') is None: return (False, 'The hg execution module cannot be loaded: hg unavailable.') else: diff --git a/salt/modules/hosts.py b/salt/modules/hosts.py index 4dfdb635a7..953345fa35 100644 --- a/salt/modules/hosts.py +++ b/salt/modules/hosts.py @@ -12,7 +12,7 @@ import salt.utils.files import salt.utils.odict as odict # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin diff --git a/salt/modules/htpasswd.py b/salt/modules/htpasswd.py index 02f48457c2..60e2ad7c10 100644 --- a/salt/modules/htpasswd.py +++ b/salt/modules/htpasswd.py @@ -14,7 +14,7 @@ import os import logging # Import salt libs -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -25,7 +25,7 @@ def __virtual__(): ''' Only load the module if htpasswd is installed ''' - if salt.utils.which('htpasswd'): + if salt.utils.path.which('htpasswd'): return __virtualname__ return (False, 'The htpasswd execution mdule cannot be loaded: htpasswd binary not in path.') @@ -35,8 +35,6 @@ def useradd(pwfile, user, password, opts='', runas=None): Add a user to htpasswd file using the htpasswd command. If the htpasswd file does not exist, it will be created. - .. deprecated:: 2016.3.0 - pwfile Path to htpasswd file diff --git a/salt/modules/icinga2.py b/salt/modules/icinga2.py index 81f442dfc5..899c39f7cf 100644 --- a/salt/modules/icinga2.py +++ b/salt/modules/icinga2.py @@ -13,7 +13,8 @@ import logging import subprocess # Import Salt libs -import salt.utils +import salt.utils.path +import salt.utils.platform log = logging.getLogger(__name__) @@ -23,10 +24,10 @@ def __virtual__(): Only load this module if the mysql libraries exist ''' # TODO: This could work on windows with some love - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return (False, 'The module cannot be loaded on windows.') - if salt.utils.which('icinga2'): + if salt.utils.path.which('icinga2'): return True return (False, 'Icinga2 not installed.') diff --git a/salt/modules/ilo.py b/salt/modules/ilo.py index a740d9c0e5..6a4139afec 100644 --- a/salt/modules/ilo.py +++ b/salt/modules/ilo.py @@ -7,7 +7,7 @@ Manage HP ILO from __future__ import absolute_import from salt._compat import ElementTree as ET -import salt.utils +import salt.utils.path import os import tempfile @@ -20,7 +20,7 @@ def __virtual__(): ''' Make sure hponcfg tool is accessible ''' - if salt.utils.which('hponcfg'): + if salt.utils.path.which('hponcfg'): return True return (False, 'ilo execution module not loaded: the hponcfg binary is not in the path.') diff --git a/salt/modules/infoblox.py b/salt/modules/infoblox.py index f51fccd774..939cdf3ca0 100644 --- a/salt/modules/infoblox.py +++ b/salt/modules/infoblox.py @@ -1,508 +1,611 @@ # -*- coding: utf-8 -*- ''' -Module for managing Infoblox +This module have been tested on infoblox API v1.2.1, +other versions of the API are likly workable. -Will look for pillar data infoblox:server, infoblox:user, infoblox:password if not passed to functions +:depends: libinfoblox, https://github.com/steverweber/libinfoblox -.. versionadded:: 2016.3.0 + libinfoblox can be installed using `pip install libinfoblox` + +API documents can be found on your infoblox server at: + + https://INFOBLOX/wapidoc + +:configuration: The following configuration defaults can be + defined (pillar or config files '/etc/salt/master.d/infoblox.conf'): + + .. 
code-block:: python + + infoblox.config: + api_sslverify: True + api_url: 'https://INFOBLOX/wapi/v1.2.1' + api_user: 'username' + api_key: 'password' + + Many of the functions accept `api_opts` to override the API config. + + .. code-block:: bash + + salt-call infoblox.get_host name=my.host.com \ + api_url: 'https://INFOBLOX/wapi/v1.2.1' \ + api_user=admin \ + api_key=passs -:depends: - - requests ''' from __future__ import absolute_import -# Import salt libs -from salt.exceptions import CommandExecutionError -from salt.exceptions import SaltInvocationError -import logging - -log = logging.getLogger(__name__) +import time +IMPORT_ERR = None try: - import json - import requests - HAS_IMPORTS = True -except ImportError: - HAS_IMPORTS = False + import libinfoblox +except Exception as ex: + IMPORT_ERR = str(ex) +__virtualname__ = 'infoblox' def __virtual__(): - if HAS_IMPORTS: - return True - return (False, 'The infoblox execution module cannot be loaded: ' - 'python requests and/or json libraries are not available.') + return (IMPORT_ERR is None, IMPORT_ERR) -def _conn_info_check(infoblox_server=None, - infoblox_user=None, - infoblox_password=None): +cache = {} + + +def _get_config(**api_opts): ''' - get infoblox stuff from pillar if not passed + Return configuration + user passed api_opts override salt config.get vars ''' - - if infoblox_server is None: - infoblox_server = __salt__['pillar.get']('infoblox:server', None) - if infoblox_user is None: - infoblox_user = __salt__['pillar.get']('infoblox:user', None) - log.debug('Infoblox username is "{0}"'.format(infoblox_user)) - if infoblox_password is None: - infoblox_password = __salt__['pillar.get']('infoblox:password', None) - - return infoblox_server, infoblox_user, infoblox_password + config = { + 'api_sslverify': True, + 'api_url': 'https://INFOBLOX/wapi/v1.2.1', + 'api_user': '', + 'api_key': '', + } + if '__salt__' in globals(): + config_key = '{0}.config'.format(__virtualname__) + config.update(__salt__['config.get'](config_key, {})) + # pylint: disable=C0201 + for k in set(config.keys()) & set(api_opts.keys()): + config[k] = api_opts[k] + return config -def _process_return_data(retData): +def _get_infoblox(**api_opts): + config = _get_config(**api_opts) + # TODO: perhaps cache in __opts__ + cache_key = 'infoblox_session_{0},{1},{2}'.format( + config['api_url'], config['api_user'], config['api_key']) + if cache_key in cache: + timedelta = int(time.time()) - cache[cache_key]['time'] + if cache[cache_key]['obj'] and timedelta < 60: + return cache[cache_key]['obj'] + c = {} + c['time'] = int(time.time()) + c['obj'] = libinfoblox.Session(api_sslverify=config['api_sslverify'], api_url=config['api_url'], + api_user=config['api_user'], api_key=config['api_key']) + cache[cache_key] = c + return c['obj'] + + +def diff_objects(obja, objb): ''' - generic return processing + Diff two complex infoblox objects. + This is used from salt states to detect changes in objects. 
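The new `_get_config()` above layers three sources: hard-coded defaults, the `infoblox.config` value from Salt config, and per-call `api_opts`. A small self-contained illustration of that precedence (placeholder values only, nothing here is a real credential):

.. code-block:: python

    defaults = {
        'api_sslverify': True,
        'api_url': 'https://INFOBLOX/wapi/v1.2.1',
        'api_user': '',
        'api_key': '',
    }
    salt_config = {'api_user': 'svc-infoblox', 'api_key': 'from-pillar'}  # config.get('infoblox.config')
    api_opts = {'api_key': 'per-call-override'}                           # keyword args to a function

    config = dict(defaults)
    config.update(salt_config)
    config.update({k: v for k, v in api_opts.items() if k in config})

    assert config['api_user'] == 'svc-infoblox'
    assert config['api_key'] == 'per-call-override'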
+ + Using `func:nextavailableip` will not cause a diff if the ipaddres is in range ''' - if retData.status_code == 200: - if retData.json(): - return retData + return libinfoblox.diff_obj(obja, objb) + + +def is_ipaddr_in_ipfunc_range(ipaddr, ipfunc): + ''' + Return true if the ipaddress is in the range of the nextavailableip function + + CLI Example: + + salt-call infoblox.is_ipaddr_in_ipfunc_range \ + ipaddr="10.0.2.2" ipfunc="func:nextavailableip:10.0.0.0/8" + ''' + return libinfoblox.is_ipaddr_in_ipfunc_range(ipaddr, ipfunc) + + +def update_host(name, data, **api_opts): + ''' + Update host record. This is a helper call to update_object. + + Find a hosts `_ref` then call update_object with the record data. + + CLI Example: + + salt-call infoblox.update_host name=fqdn data={} + ''' + o = get_host(name=name, **api_opts) + return update_object(objref=o['_ref'], data=data, **api_opts) + + +def update_object(objref, data, **api_opts): + ''' + Update raw infoblox object. + This is a low level api call. + + CLI Example: + + .. code-block:: bash + + salt-call infoblox.update_object objref=[ref_of_object] data={} + ''' + if '__opts__' in globals() and __opts__['test']: + return {'Test': 'Would attempt to update object: {0}'.format(objref)} + infoblox = _get_infoblox(**api_opts) + return infoblox.update_object(objref, data) + + +def delete_object(objref, **api_opts): + ''' + Delete infoblox object. + This is a low level api call. + + CLI Example: + + salt-call infoblox.delete_object objref=[ref_of_object] + ''' + if '__opts__' in globals() and __opts__['test']: + return {'Test': 'Would attempt to delete object: {0}'.format(objref)} + infoblox = _get_infoblox(**api_opts) + return infoblox.delete_object(objref) + + +def create_object(object_type, data, **api_opts): + ''' + Create raw infoblox object + This is a low level api call. + + CLI Example: + + salt-call infoblox.update_object object_type=record:host data={} + ''' + if '__opts__' in globals() and __opts__['test']: + return {'Test': 'Would attempt to create object: {0}'.format(object_type)} + infoblox = _get_infoblox(**api_opts) + return infoblox.create_object(object_type, data) + + +def get_object(objref, data=None, return_fields=None, max_results=None, + ensure_none_or_one_result=False, **api_opts): + ''' + Get raw infoblox object. + This is a low level api call. + + CLI Example: + + salt-call infoblox.get_object objref=[_ref of object] + ''' + if not data: + data = {} + infoblox = _get_infoblox(**api_opts) + return infoblox.get_object(objref, data, return_fields, + max_results, ensure_none_or_one_result) + + +def create_cname(data, **api_opts): + ''' + Create a cname record. + + CLI Example: + + salt-call infoblox.create_cname data={ \ + "comment": "cname to example server", \ + "name": "example.example.com", \ + "zone": "example.com", \ + "view": "Internal", \ + "canonical": "example-ha-0.example.com" \ + } + ''' + infoblox = _get_infoblox(**api_opts) + host = infoblox.create_cname(data=data) + return host + + +def get_cname(name=None, canonical=None, return_fields=None, **api_opts): + ''' + Get CNAME information. + + CLI Example: + + salt-call infoblox.get_cname name=example.example.com + salt-call infoblox.get_cname canonical=example-ha-0.example.com + ''' + infoblox = _get_infoblox(**api_opts) + o = infoblox.get_cname(name=name, canonical=canonical, return_fields=return_fields) + return o + + +def update_cname(name, data, **api_opts): + ''' + Update CNAME. This is a helper call to update_object. 
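`update_host()` above and `update_cname()` just below follow the same helper shape: resolve the record, take its `_ref`, and hand the new data to `update_object()`. A sketch of that shape with stand-in callables (the `get_record`/`update_object` parameters here are placeholders, not the module's API):

.. code-block:: python

    def update_by_ref(get_record, update_object, name, data, **api_opts):
        record = get_record(name=name, **api_opts)
        if not record:
            raise Exception('record not found')  # update_cname() raises the same way
        return update_object(objref=record['_ref'], data=data, **api_opts)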
+ + Find a CNAME `_ref` then call update_object with the record data. + + CLI Example: + + salt-call infoblox.update_cname name=example.example.com data="{ + 'canonical':'example-ha-0.example.com', + 'use_ttl':true, + 'ttl':200, + 'comment':'Salt managed CNAME'}" + ''' + o = get_cname(name=name, **api_opts) + if not o: + raise Exception('CNAME record not found') + return update_object(objref=o['_ref'], data=data, **api_opts) + + +def delete_cname(name=None, canonical=None, **api_opts): + ''' + Delete CNAME. This is a helper call to delete_object. + + If record is not found, return True + + CLI Example: + + salt-call infoblox.delete_cname name=example.example.com + salt-call infoblox.delete_cname canonical=example-ha-0.example.com + ''' + cname = get_cname(name=name, canonical=canonical, **api_opts) + if cname: + return delete_object(cname['_ref'], **api_opts) + return True + + +def get_host(name=None, ipv4addr=None, mac=None, return_fields=None, **api_opts): + ''' + Get host information + + CLI Example: + + salt-call infoblox.get_host hostname.domain.ca + salt-call infoblox.get_host ipv4addr=123.123.122.12 + salt-call infoblox.get_host mac=00:50:56:84:6e:ae + + return_fields= + https://INFOBLOX/wapidoc/objects/record.host.html#fields-list + + return_fields='ipv4addrs,aliases,name,configure_for_dns,extattrs,disable,view,comment,zone' + ''' + infoblox = _get_infoblox(**api_opts) + host = infoblox.get_host(name=name, mac=mac, ipv4addr=ipv4addr, return_fields=return_fields) + return host + + +def get_host_advanced(name=None, ipv4addr=None, mac=None, **api_opts): + ''' + Get all host information + + CLI Example: + + salt-call infoblox.get_host_advanced hostname.domain.ca + ''' + infoblox = _get_infoblox(**api_opts) + host = infoblox.get_host_advanced(name=name, mac=mac, ipv4addr=ipv4addr) + return host + + +def get_host_domainname(name, domains=None, **api_opts): + ''' + Get host domain name + + If no domains are passed, the hostname is checked for a zone in infoblox, if no zone split on first dot. + + If domains are provided, the best match out of the list is returned. + + If none are found the return is None + + dots at end of names are ignored. + + CLI Example: + + salt-call uwl.get_host_domainname name=localhost.t.domain.com \ + domains=['domain.com', 't.domain.com.'] + + # returns: t.domain.com + ''' + name = name.lower().rstrip('.') + if not domains: + data = get_host(name=name, **api_opts) + if data and 'zone' in data: + return data['zone'].lower() else: - log.debug('no data returned from infoblox') - return None + if name.count('.') > 1: + return name[name.find('.')+1:] + return name + match = '' + for d in domains: + d = d.lower().rstrip('.') + if name.endswith(d) and len(d) > len(match): + match = d + if len(match) > 0: + return match + return None + + +def get_host_hostname(name, domains=None, **api_opts): + ''' + Get hostname + + If no domains are passed, the hostname is checked for a zone in infoblox, if no zone split on first dot. + + If domains are provided, the best match out of the list is truncated from the fqdn leaving the hostname. + + If no matching domains are found the fqdn is returned. + + dots at end of names are ignored. 
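When a `domains` list is supplied, `get_host_domainname()` above picks the longest suffix match after lowercasing and stripping trailing dots. A self-contained restatement of that rule:

.. code-block:: python

    def best_domain(name, domains):
        name = name.lower().rstrip('.')
        match = ''
        for candidate in domains:
            candidate = candidate.lower().rstrip('.')
            # keep the longest domain the name ends with
            if name.endswith(candidate) and len(candidate) > len(match):
                match = candidate
        return match or None

    assert best_domain('localhost.t.domain.com', ['domain.com', 't.domain.com.']) == 't.domain.com'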
+ + CLI Example: + + salt-call infoblox.get_host_hostname fqdn=localhost.xxx.t.domain.com \ + domains="['domain.com', 't.domain.com']" + #returns: localhost.xxx + + salt-call infoblox.get_host_hostname fqdn=localhost.xxx.t.domain.com + #returns: localhost + ''' + name = name.lower().rstrip('.') + if not domains: + return name.split('.')[0] + domain = get_host_domainname(name, domains, **api_opts) + if domain and domain in name: + return name.rsplit('.' + domain)[0] + return name + + +def get_host_mac(name=None, allow_array=False, **api_opts): + ''' + Get mac address from host record. + + Use `allow_array` to return possible mutiple values. + + CLI Example: + + salt-call infoblox.get_host_mac host=localhost.domain.com + ''' + data = get_host(name=name, **api_opts) + if data and 'ipv4addrs' in data: + l = [] + for a in data['ipv4addrs']: + if 'mac' in a: + l.append(a['mac']) + if allow_array: + return l + if l: + return l[0] + return None + + +def get_host_ipv4(name=None, mac=None, allow_array=False, **api_opts): + ''' + Get ipv4 address from host record. + + Use `allow_array` to return possible mutiple values. + + CLI Example: + + salt-call infoblox.get_host_ipv4 host=localhost.domain.com + salt-call infoblox.get_host_ipv4 mac=00:50:56:84:6e:ae + ''' + data = get_host(name=name, mac=mac, **api_opts) + if data and 'ipv4addrs' in data: + l = [] + for a in data['ipv4addrs']: + if 'ipv4addr' in a: + l.append(a['ipv4addr']) + if allow_array: + return l + if l: + return l[0] + return None + + +def get_host_ipv4addr_info(ipv4addr=None, mac=None, + discovered_data=None, + return_fields=None, **api_opts): + ''' + Get host ipv4addr information + + return_fields='mac,host,configure_for_dhcp,ipv4addr' + + CLI Example: + + salt-call infoblox.get_ipv4addr ipv4addr=123.123.122.12 + salt-call infoblox.get_ipv4addr mac=00:50:56:84:6e:ae + salt-call infoblox.get_ipv4addr mac=00:50:56:84:6e:ae return_fields=host + return_fields='mac,host,configure_for_dhcp,ipv4addr' + ''' + infoblox = _get_infoblox(**api_opts) + return infoblox.get_host_ipv4addr_object(ipv4addr, mac, discovered_data, return_fields) + + +def get_host_ipv6addr_info(ipv6addr=None, mac=None, + discovered_data=None, + return_fields=None, **api_opts): + ''' + Get host ipv6addr information + + CLI Example: + + salt-call infoblox.get_host_ipv6addr_info ipv6addr=2001:db8:85a3:8d3:1349:8a2e:370:7348 + ''' + infoblox = _get_infoblox(**api_opts) + return infoblox.get_host_ipv6addr_object(ipv6addr, mac, discovered_data, return_fields) + + +def get_network(ipv4addr=None, network=None, return_fields=None, **api_opts): + ''' + Get list of all networks. + This is helpfull when looking up subnets to + use with func:nextavailableip + + This call is offen slow and not cached! 
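`get_host_mac()` and `get_host_ipv4()` above share the same extraction loop over the host record's `ipv4addrs` list, returning either the full list or the first hit depending on `allow_array`. A compact sketch with a made-up record of the shape the WAPI returns:

.. code-block:: python

    def extract(host, field, allow_array=False):
        found = [addr[field] for addr in host.get('ipv4addrs', []) if field in addr]
        if allow_array:
            return found
        return found[0] if found else None

    host = {'ipv4addrs': [{'ipv4addr': '10.0.2.2', 'mac': '00:50:56:84:6e:ae'}]}
    assert extract(host, 'mac') == '00:50:56:84:6e:ae'
    assert extract(host, 'ipv4addr', allow_array=True) == ['10.0.2.2']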
+ + some return_fields + comment,network,network_view,ddns_domainname,disable,enable_ddns + + CLI Example: + + salt-call infoblox.get_network + ''' + infoblox = _get_infoblox(**api_opts) + return infoblox.get_network(ipv4addr=ipv4addr, network=network, return_fields=return_fields) + + +def delete_host(name=None, mac=None, ipv4addr=None, **api_opts): + ''' + Delete host + + CLI Example: + + salt-call infoblox.delete_host name=example.domain.com + salt-call infoblox.delete_host ipv4addr=123.123.122.12 + salt-call infoblox.delete_host ipv4addr=123.123.122.12 mac=00:50:56:84:6e:ae + ''' + if '__opts__' in globals() and __opts__['test']: + return {'Test': 'Would attempt to delete host'} + infoblox = _get_infoblox(**api_opts) + return infoblox.delete_host(name, mac, ipv4addr) + + +def create_host(data, **api_opts): + ''' + Add host record + + Avoid race conditions, use func:nextavailableip for ipv[4,6]addrs: + - func:nextavailableip:network/ZG54dfgsrDFEFfsfsLzA:10.0.0.0/8/default + - func:nextavailableip:10.0.0.0/8 + - func:nextavailableip:10.0.0.0/8,external + - func:nextavailableip:10.0.0.3-10.0.0.10 + + See your infoblox API for full `data` format. + + CLI Example: + + salt-call infoblox.create_host \ + data = + {'name': 'hostname.example.ca', + 'aliases': ['hostname.math.example.ca'], + 'extattrs': [{'Business Contact': {'value': 'example@example.ca'}}, + {'Pol8 Classification': {'value': 'Restricted'}}, + {'Primary OU': {'value': 'CS'}}, + {'Technical Contact': {'value': 'example@example.ca'}}], + 'ipv4addrs': [{'configure_for_dhcp': True, + 'ipv4addr': 'func:nextavailableip:129.97.139.0/24', + 'mac': '00:50:56:84:6e:ae'}], + 'ipv6addrs': [], } + ''' + return create_object('record:host', data, **api_opts) + + +def get_ipv4_range(start_addr=None, end_addr=None, return_fields=None, **api_opts): + ''' + Get ip range + + CLI Example: + + salt-call infoblox.get_ipv4_range start_addr=123.123.122.12 + ''' + infoblox = _get_infoblox(**api_opts) + return infoblox.get_range(start_addr, end_addr, return_fields) + + +def delete_ipv4_range(start_addr=None, end_addr=None, **api_opts): + ''' + Delete ip range. + + CLI Example: + + salt-call infoblox.delete_ipv4_range start_addr=123.123.122.12 + ''' + r = get_ipv4_range(start_addr, end_addr, **api_opts) + if r: + return delete_object(r['_ref'], **api_opts) else: - msg = 'Unsuccessful error code {0} returned'.format(retData.status_code) - raise CommandExecutionError(msg) - - -def delete_record(name, - dns_view, - record_type, - infoblox_server=None, - infoblox_user=None, - infoblox_password=None, - infoblox_api_version='v1.4.2', - sslVerify=True): - ''' - delete a record - - name - name of the record - - dns_view - the DNS view to remove the record from - - record_type - the record type (a, cname, host, etc) - - infoblox_server - the infoblox server hostname (can also use the infoblox:server pillar) - - infoblox_user - the infoblox user to connect with (can also use the infoblox:user pillar) - - infoblox_password - the infoblox user's password (can also use the infolblox:password pillar) - - infoblox_api_version - the infoblox api version to use - - sslVerify - should ssl verification be done on the connection to the Infoblox REST API - - CLI Example: - - .. 
code-block:: bash - - salt my-minion infoblox.delete_record some.dns.record MyInfobloxView A sslVerify=False - ''' - infoblox_server, infoblox_user, infoblox_password = _conn_info_check(infoblox_server, - infoblox_user, - infoblox_password) - if infoblox_server is None and infoblox_user is None and infoblox_password is None: - _throw_no_creds() - return None - - record_type = record_type.lower() - currentRecords = get_record(name, - record_type, - infoblox_server, - infoblox_user, - infoblox_password, - dns_view, - infoblox_api_version, - sslVerify) - if currentRecords: - for currentRecord in currentRecords: - url = 'https://{0}/wapi/{1}/{2}'.format(infoblox_server, - infoblox_api_version, - currentRecord['Record ID']) - ret = requests.delete(url, - auth=(infoblox_user, infoblox_password), - headers={'Content-Type': 'application/json'}, - verify=sslVerify) - if ret.status_code == 200: - return True - else: - msg = 'Unsuccessful error code {0} returned -- full json dump {1}'.format(ret.status_code, ret.json()) - raise CommandExecutionError(msg) - return False - - -def update_record(name, - value, - dns_view, - record_type, - infoblox_server=None, - infoblox_user=None, - infoblox_password=None, - infoblox_api_version='v1.4.2', - sslVerify=True): - ''' - update an entry to an infoblox dns view - - name - the dns name - - value - the value for the record - - record_type - the record type (a, cname, etc) - - dns_view - the DNS view to add the record to - - infoblox_server - the infoblox server hostname (can also use the infoblox:server pillar) - - infoblox_user - the infoblox user to connect with (can also use the infoblox:user pillar) - - infoblox_password - the infoblox user's password (can also use the infolblox:password pillar) - - infoblox_api_version - the infoblox api version to use - - sslVerify - should ssl verification be done on the connection to the Infoblox REST API - - CLI Example: - - .. 
code-block:: bash - - salt '*' infoblox.update_record alias.network.name canonical.network.name MyInfobloxView cname sslVerify=False - ''' - - infoblox_server, infoblox_user, infoblox_password = _conn_info_check(infoblox_server, - infoblox_user, - infoblox_password) - if infoblox_server is None and infoblox_user is None and infoblox_password is None: - _throw_no_creds() - return None - - record_type = record_type.lower() - currentRecords = get_record(name, - record_type, - infoblox_server, - infoblox_user, - infoblox_password, - dns_view, - infoblox_api_version, - sslVerify) - if currentRecords: - for currentRecord in currentRecords: - url = 'https://{0}/wapi/{1}/{2}'.format( - infoblox_server, - infoblox_api_version, - currentRecord['Record ID']) - data = None - if record_type == 'cname': - data = json.dumps({'canonical': value}) - elif record_type == 'a': - data = json.dumps({'ipv4addr': value}) - elif record_type == 'host': - data = {'ipv4addrs': []} - for i in value: - data['ipv4addrs'].append({'ipv4addr': i}) - data = json.dumps(data) - ret = requests.put(url, - data, - auth=(infoblox_user, infoblox_password), - headers={'Content-Type': 'application/json'}, - verify=sslVerify) - if ret.status_code == 200: - return True - else: - msg = 'Unsuccessful status code {0} returned.'.format(ret.status_code) - raise CommandExecutionError(msg) - else: - msg = 'Record {0} of type {1} was not found'.format(name, record_type) - log.error(msg) - return False - - -def add_record(name, - value, - record_type, - dns_view, - infoblox_server=None, - infoblox_user=None, - infoblox_password=None, - infoblox_api_version='v1.4.2', - sslVerify=True): - ''' - add a record to an infoblox dns view - - name - the record name - - value - the value for the entry - can make use of infoblox functions for next available IP, like 'func:nextavailableip:10.1.0.0/24' - - record_type - the record type (cname, a, host, etc) - - dns_view - the DNS view to add the record to - - infoblox_server - the infoblox server hostname (can also use the infoblox:server pillar) - - infoblox_user - the infoblox user to connect with (can also use the infoblox:user pillar) - - infoblox_password - the infoblox user's password (can also use the infolblox:password pillar) - - infoblox_api_version - the infoblox api version to use - - sslVerify - should ssl verification be done on the connection to the Infoblox REST API - - CLI Example: - - .. 
code-block:: bash - - salt 'myminion' infoblox.add_record alias.network.name canonical.network.name MyView - ''' - - infoblox_server, infoblox_user, infoblox_password = _conn_info_check(infoblox_server, - infoblox_user, - infoblox_password) - if infoblox_server is None and infoblox_user is None and infoblox_password is None: - _throw_no_creds() - return None - - record_type = record_type.lower() - - data = None - url = None - if record_type == 'cname': - data = json.dumps({'name': name, 'canonical': value, 'view': dns_view}) - log.debug('cname data {0}'.format(data)) - elif record_type == 'host': - data = json.dumps({'name': name, 'ipv4addrs': [{'ipv4addr': value}], 'view': dns_view}) - log.debug('host record data {0}'.format(data)) - elif record_type == 'a': - data = json.dumps({'name': name, 'ipv4addr': value, 'view': dns_view}) - log.debug('a record data {0}'.format(data)) - - url = 'https://{0}/wapi/{1}/record:{2}'.format(infoblox_server, - infoblox_api_version, - record_type) - - ret = requests.post(url, - data, - auth=(infoblox_user, infoblox_password), - headers={'Content-Type': 'application/json'}, - verify=sslVerify) - if ret.status_code == 201: return True - else: - msg = 'Unsuccessful error code {0} returned -- full json dump {1}'.format(ret.status_code, ret.json()) - raise CommandExecutionError(msg) -def _throw_no_creds(): +def create_ipv4_range(data, **api_opts): ''' - helper function to log no credentials found error - ''' - msg = 'An infoblox server, username, and password must be specified or configured via pillar' - raise SaltInvocationError(msg) + Create a ipv4 range - -def get_network(network_name, - network_view=None, - infoblox_server=None, - infoblox_user=None, - infoblox_password=None, - infoblox_api_version='v1.4.2', - sslVerify=True): - ''' - get a network from infoblox - - network_name - The name of the network in IPAM - - network_view - The name of the network view the network belongs to - - infoblox_server - the infoblox server hostname (can also use the infoblox:server pillar) - - infoblox_user - the infoblox user to connect with (can also use the infoblox:user pillar) - - infoblox_password - the infoblox user's password (can also use the infolblox:password pillar) - - infoblox_api_version - the infoblox api version to use - - sslVerify - should ssl verification be done on the connection to the Infoblox REST API + This is a helper function to `create_object` + See your infoblox API for full `data` format. CLI Example: - .. 
code-block:: bash - - salt myminion infoblox.get_network '10.0.0.0/8' + salt-call infoblox.create_ipv4_range data={ + start_addr: '129.97.150.160', + end_addr: '129.97.150.170'} ''' - - records = [] - infoblox_server, infoblox_user, infoblox_password = _conn_info_check(infoblox_server, - infoblox_user, - infoblox_password) - if infoblox_server is None and infoblox_user is None and infoblox_password is None: - _throw_no_creds() - return None - - url = 'https://{0}/wapi/{1}/network?network={2}{3}'.format( - infoblox_server, - infoblox_api_version, - network_name, - ('' if network_view is None else '&network_view=' + network_view)) - log.debug('Requst url is "{0}"'.format(url)) - ret = _process_return_data(requests.get(url, - auth=(infoblox_user, infoblox_password), - verify=sslVerify)) - if ret: - for entry in ret.json(): - log.debug('Infoblox record returned: {0}'.format(entry)) - tEntry = {} - data = _parse_record_data(entry) - for key in data: - tEntry[key] = data[key] - records.append(tEntry) - return records - else: - return False + return create_object('range', data, **api_opts) -def get_record(record_name, - record_type='host', - infoblox_server=None, - infoblox_user=None, - infoblox_password=None, - dns_view=None, - infoblox_api_version='v1.4.2', - sslVerify=True): +def create_a(data, **api_opts): ''' - get a record from infoblox + Create A record. - record_name - name of the record to search for - - record_type - type of reacord to search for (host, cname, a, etc...defaults to host) - - infoblox_server - the infoblox server hostname (can also use the infoblox:server pillar) - - infoblox_user - the infoblox user to connect with (can also use the infoblox:user pillar) - - infoblox_password - the infoblox user's password (can also use the infolblox:password pillar) - - dns_view - the infoblox DNS view to search, if not specified all views are searched - - infoblox_api_version - the infoblox api version to use - - sslVerify - should ssl verification be done on the connection to the Infoblox REST API + This is a helper function to `create_object`. + See your infoblox API for full `data` format. CLI Example: - .. 
code-block:: bash - - salt myminion infoblox.get_record some.host.com A sslVerify=False + salt-call infoblox.create_a \ + data = + name: 'fastlinux.math.example.ca' + ipv4addr: '127.0.0.1' + view: External ''' - - # TODO - verify record type (A, AAAA, CNAME< HOST, MX, PTR, SVR, TXT, host_ipv4addr, host_ipv6addr, naptr) - records = [] - - infoblox_server, infoblox_user, infoblox_password = _conn_info_check(infoblox_server, - infoblox_user, - infoblox_password) - - record_type = record_type.lower() - if infoblox_server is None and infoblox_user is None and infoblox_password is None: - _throw_no_creds() - return None - - url = 'https://{0}/wapi/{1}/record:{3}?name:={2}{4}{5}'.format( - infoblox_server, - infoblox_api_version, - record_name, - record_type, - ('' if dns_view is None else '&view=' + dns_view), - ('&_return_fields%2B=aliases' if record_type == 'host' else '') - ) - log.debug('Requst url is "{0}"'.format(url)) - ret = _process_return_data(requests.get(url, - auth=(infoblox_user, infoblox_password), - verify=sslVerify)) - if ret: - for entry in ret.json(): - log.debug('Infoblox record returned: {0}'.format(entry)) - tEntry = {} - data = _parse_record_data(entry) - for key in data: - tEntry[key] = data[key] - records.append(tEntry) - return records - else: - return False + return create_object('record:a', data, **api_opts) -def _parse_record_data(entry_data): - ''' - returns the right value data we'd be interested in for the specified record type +def get_a(name=None, ipv4addr=None, allow_array=True, **api_opts): ''' + Get A record - ret = {} - ipv4addrs = [] - aliases = [] - if 'canonical' in entry_data: - ret['Canonical Name'] = entry_data['canonical'] - if 'ipv4addrs' in entry_data: - for ipaddrs in entry_data['ipv4addrs']: - ipv4addrs.append(ipaddrs['ipv4addr']) - ret['IP Addresses'] = ipv4addrs - if 'ipv4addr' in entry_data: - ret['IP Address'] = entry_data['ipv4addr'] - if 'aliases' in entry_data: - for alias in entry_data['aliases']: - aliases.append(alias) - ret['Aliases'] = aliases - if 'name' in entry_data: - ret['Name'] = entry_data['name'] - if 'view' in entry_data: - ret['DNS View'] = entry_data['view'] - if 'network_view' in entry_data: - ret['Network View'] = entry_data['network_view'] - if 'comment' in entry_data: - ret['Comment'] = entry_data['comment'] - if 'network' in entry_data: - ret['Network'] = entry_data['network'] - if '_ref' in entry_data: - ret['Record ID'] = entry_data['_ref'] + CLI Example: + + salt-call infoblox.get_a name=abc.example.com + salt-call infoblox.get_a ipv4addr=192.168.3.5 + ''' + data = {} + if name: + data['name'] = name + if ipv4addr: + data['ipv4addr'] = ipv4addr + r = get_object('record:a', data=data, **api_opts) + if r and len(r) > 1 and not allow_array: + raise Exception('More than one result, use allow_array to return the data') + return r + + +def delete_a(name=None, ipv4addr=None, allow_array=False, **api_opts): + ''' + Delete A record + + If the A record is used as a round robin you can set + `allow_array=true to delete all records for the hostname. 
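`get_a()` above and `delete_a()` just below share the `allow_array` guard: a single match is handled silently, multiple matches require an explicit opt-in. A sketch of the deletion side, with `delete_object` passed in as a stand-in for the module call:

.. code-block:: python

    def delete_matches(records, delete_object, allow_array=False):
        if not records:
            return True  # nothing to delete counts as success
        if len(records) > 1 and not allow_array:
            raise Exception('More than one result, use allow_array to override')
        return [delete_object(rec['_ref']) for rec in records]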
+ + CLI Example: + + salt-call infoblox.delete_a name=abc.example.com + salt-call infoblox.delete_a ipv4addr=192.168.3.5 + salt-call infoblox.delete_a name=acname.example.com allow_array=true + ''' + r = get_a(name, ipv4addr, allow_array=False, **api_opts) + if not r: + return True + if len(r) == 0: + return True + if len(r) > 1 and not allow_array: + raise Exception('More than one result, use allow_array to override') + ret = [] + for ri in r: + ret.append(delete_object(ri['_ref'], **api_opts)) return ret diff --git a/salt/modules/ini_manage.py b/salt/modules/ini_manage.py index b8fd0f9a81..5b74e5a1b4 100644 --- a/salt/modules/ini_manage.py +++ b/salt/modules/ini_manage.py @@ -20,7 +20,7 @@ import re import json # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.files from salt.exceptions import CommandExecutionError from salt.utils.odict import OrderedDict diff --git a/salt/modules/inspectlib/collector.py b/salt/modules/inspectlib/collector.py index e54982a1fa..e8c1123daf 100644 --- a/salt/modules/inspectlib/collector.py +++ b/salt/modules/inspectlib/collector.py @@ -28,10 +28,10 @@ from salt.modules.inspectlib import kiwiproc from salt.modules.inspectlib.entities import (AllowedDir, IgnoredDir, Package, PayloadFile, PackageCfgFile) -import salt.utils +import salt.utils # Can be removed when reinit_crypto is moved import salt.utils.files -from salt.utils import fsutils -from salt.utils import reinit_crypto +import salt.utils.fsutils +import salt.utils.stringutils from salt.exceptions import CommandExecutionError try: @@ -101,13 +101,13 @@ class Inspector(EnvLoader): # Get list of all available packages data = dict() - for pkg_name in salt.utils.to_str(self._syscall('dpkg-query', None, None, + for pkg_name in salt.utils.stringutils.to_str(self._syscall('dpkg-query', None, None, '-Wf', "${binary:Package}\\n")[0]).split(os.linesep): pkg_name = pkg_name.strip() if not pkg_name: continue data[pkg_name] = list() - for pkg_cfg_item in salt.utils.to_str(self._syscall('dpkg-query', None, None, '-Wf', "${Conffiles}\\n", + for pkg_cfg_item in salt.utils.stringutils.to_str(self._syscall('dpkg-query', None, None, '-Wf', "${Conffiles}\\n", pkg_name)[0]).split(os.linesep): pkg_cfg_item = pkg_cfg_item.strip() if not pkg_cfg_item: @@ -132,7 +132,7 @@ class Inspector(EnvLoader): pkg_name = None pkg_configs = [] - out = salt.utils.to_str(out) + out = salt.utils.stringutils.to_str(out) for line in out.split(os.linesep): line = line.strip() if not line: @@ -162,10 +162,10 @@ class Inspector(EnvLoader): cfgs = list() cfg_data = list() if self.grains_core.os_data().get('os_family') == 'Debian': - cfg_data = salt.utils.to_str(self._syscall("dpkg", None, None, '--verify', + cfg_data = salt.utils.stringutils.to_str(self._syscall("dpkg", None, None, '--verify', pkg_name)[0]).split(os.linesep) elif self.grains_core.os_data().get('os_family') in ['Suse', 'redhat']: - cfg_data = salt.utils.to_str(self._syscall("rpm", None, None, '-V', '--nodeps', '--nodigest', + cfg_data = salt.utils.stringutils.to_str(self._syscall("rpm", None, None, '-V', '--nodeps', '--nodigest', '--nosignature', '--nomtime', '--nolinkto', pkg_name)[0]).split(os.linesep) for line in cfg_data: @@ -254,12 +254,12 @@ class Inspector(EnvLoader): links = set() files = set() - for pkg_name in salt.utils.to_str(self._syscall("dpkg-query", None, None, + for pkg_name in salt.utils.stringutils.to_str(self._syscall("dpkg-query", None, None, '-Wf', '${binary:Package}\\n')[0]).split(os.linesep): pkg_name = 
pkg_name.strip() if not pkg_name: continue - for resource in salt.utils.to_str(self._syscall("dpkg", None, None, '-L', pkg_name)[0]).split(os.linesep): + for resource in salt.utils.stringutils.to_str(self._syscall("dpkg", None, None, '-L', pkg_name)[0]).split(os.linesep): resource = resource.strip() if not resource or resource in ['/', './', '.']: continue @@ -280,7 +280,7 @@ class Inspector(EnvLoader): links = set() files = set() - for line in salt.utils.to_str(self._syscall("rpm", None, None, '-qlav')[0]).split(os.linesep): + for line in salt.utils.stringutils.to_str(self._syscall("rpm", None, None, '-qlav')[0]).split(os.linesep): line = line.strip() if not line: continue @@ -375,7 +375,7 @@ class Inspector(EnvLoader): # Add ignored filesystems ignored_fs = set() ignored_fs |= set(self.IGNORE_PATHS) - mounts = fsutils._get_mounts() + mounts = salt.utils.fsutils._get_mounts() for device, data in mounts.items(): if device in self.IGNORE_MOUNTS: for mpt in data: @@ -504,10 +504,10 @@ if __name__ == '__main__': # Double-fork stuff try: if os.fork() > 0: - reinit_crypto() + salt.utils.reinit_crypto() sys.exit(0) else: - reinit_crypto() + salt.utils.reinit_crypto() except OSError as ex: sys.exit(1) @@ -517,12 +517,12 @@ if __name__ == '__main__': try: pid = os.fork() if pid > 0: - reinit_crypto() + salt.utils.reinit_crypto() with salt.utils.files.fopen(os.path.join(pidfile, EnvLoader.PID_FILE), 'w') as fp_: fp_.write('{0}\n'.format(pid)) sys.exit(0) except OSError as ex: sys.exit(1) - reinit_crypto() + salt.utils.reinit_crypto() main(dbfile, pidfile, mode) diff --git a/salt/modules/inspector.py b/salt/modules/inspector.py index df0004ed2e..708ad14450 100644 --- a/salt/modules/inspector.py +++ b/salt/modules/inspector.py @@ -26,8 +26,8 @@ from salt.modules.inspectlib.exceptions import (InspectorQueryException, InspectorKiwiProcessorException) # Import Salt libs -import salt.utils import salt.utils.fsutils +import salt.utils.platform from salt.exceptions import CommandExecutionError from salt.exceptions import get_error_message as _get_error_message @@ -38,7 +38,7 @@ def __virtual__(): ''' Only work on POSIX-like systems ''' - return not salt.utils.is_windows() and 'inspector' + return not salt.utils.platform.is_windows() and 'inspector' def _(module): diff --git a/salt/modules/introspect.py b/salt/modules/introspect.py index 0d6a0d9ad9..2e434ad643 100644 --- a/salt/modules/introspect.py +++ b/salt/modules/introspect.py @@ -9,7 +9,7 @@ from __future__ import absolute_import import os # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def running_service_owners( diff --git a/salt/modules/ipset.py b/salt/modules/ipset.py index e202fb3ddc..366d966552 100644 --- a/salt/modules/ipset.py +++ b/salt/modules/ipset.py @@ -8,9 +8,9 @@ from __future__ import absolute_import import logging # Import Salt libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import map, range -import salt.utils +import salt.utils.path # Import third-party libs if six.PY3: @@ -108,7 +108,7 @@ def __virtual__(): ''' Only load the module if ipset is installed ''' - if salt.utils.which('ipset'): + if salt.utils.path.which('ipset'): return True return (False, 'The ipset execution modules cannot be loaded: ipset binary not in path.') @@ -117,7 +117,7 @@ def _ipset_cmd(): ''' Return correct command ''' - return salt.utils.which('ipset') + return salt.utils.path.which('ipset') def version(): diff --git a/salt/modules/iptables.py b/salt/modules/iptables.py index 
b45fbd2e50..36382a694d 100644 --- a/salt/modules/iptables.py +++ b/salt/modules/iptables.py @@ -36,8 +36,9 @@ import uuid import string # Import salt libs -import salt.utils +import salt.utils.args import salt.utils.files +import salt.utils.path from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS from salt.exceptions import SaltException from salt.ext import six @@ -50,7 +51,7 @@ def __virtual__(): ''' Only load the module if iptables is installed ''' - if not salt.utils.which('iptables'): + if not salt.utils.path.which('iptables'): return (False, 'The iptables execution module cannot be loaded: iptables not installed.') return True @@ -61,9 +62,9 @@ def _iptables_cmd(family='ipv4'): Return correct command based on the family, e.g. ipv4 or ipv6 ''' if family == 'ipv6': - return salt.utils.which('ip6tables') + return salt.utils.path.which('ip6tables') else: - return salt.utils.which('iptables') + return salt.utils.path.which('iptables') def _has_option(option, family='ipv4'): @@ -998,7 +999,7 @@ def _parse_conf(conf_file=None, in_mem=False, family='ipv4'): ret[table][chain]['rules'] = [] ret[table][chain]['rules_comment'] = {} elif line.startswith('-A'): - args = salt.utils.shlex_split(line) + args = salt.utils.args.shlex_split(line) index = 0 while index + 1 < len(args): swap = args[index] == '!' and args[index + 1].startswith('-') diff --git a/salt/modules/iwtools.py b/salt/modules/iwtools.py index 2b782a4f84..d766fabe4b 100644 --- a/salt/modules/iwtools.py +++ b/salt/modules/iwtools.py @@ -5,7 +5,7 @@ Support for Wireless Tools for Linux from __future__ import absolute_import import logging -import salt.utils +import salt.utils.path from salt.exceptions import SaltInvocationError @@ -16,7 +16,7 @@ def __virtual__(): ''' Only load the module if iwconfig is installed ''' - if salt.utils.which('iwconfig'): + if salt.utils.path.which('iwconfig'): return True return (False, 'The iwtools execution module cannot be loaded: ' 'iwconfig is not installed.') diff --git a/salt/modules/jboss7.py b/salt/modules/jboss7.py index 45991628d8..a771057f70 100644 --- a/salt/modules/jboss7.py +++ b/salt/modules/jboss7.py @@ -28,11 +28,11 @@ import re import logging # Import Salt libs +import salt.utils.dictdiffer as dictdiffer from salt.exceptions import SaltInvocationError -from salt.utils import dictdiffer # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/modules/jboss7_cli.py b/salt/modules/jboss7_cli.py index 89c7d203c4..df5b05d0de 100644 --- a/salt/modules/jboss7_cli.py +++ b/salt/modules/jboss7_cli.py @@ -47,7 +47,7 @@ import time from salt.exceptions import CommandExecutionError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/modules/k8s.py b/salt/modules/k8s.py index 973ba1b211..f47b9f1cd1 100644 --- a/salt/modules/k8s.py +++ b/salt/modules/k8s.py @@ -21,7 +21,7 @@ import re import json import logging as logger import base64 -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module # TODO Remove requests dependency diff --git a/salt/modules/kapacitor.py b/salt/modules/kapacitor.py index edfd424834..aa09153608 100644 --- a/salt/modules/kapacitor.py +++ b/salt/modules/kapacitor.py @@ -20,15 +20,15 @@ from __future__ import absolute_import import json import logging -import salt.utils import salt.utils.http +import 
salt.utils.path from salt.utils.decorators import memoize LOG = logging.getLogger(__name__) def __virtual__(): - return 'kapacitor' if salt.utils.which('kapacitor') else False + return 'kapacitor' if salt.utils.path.which('kapacitor') else False @memoize diff --git a/salt/modules/kerberos.py b/salt/modules/kerberos.py index a5a77a5891..1293e60c69 100644 --- a/salt/modules/kerberos.py +++ b/salt/modules/kerberos.py @@ -25,13 +25,13 @@ from __future__ import absolute_import import logging # Import Salt libs -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) def __virtual__(): - if salt.utils.which('kadmin'): + if salt.utils.path.which('kadmin'): return True return (False, 'The kerberos execution module not loaded: kadmin not in path') @@ -262,7 +262,7 @@ def create_keytab(name, keytab, enctypes=None): .. code-block:: bash - salt 'kdc.example.com' host/host1.example.com host1.example.com.keytab + salt 'kdc.example.com' kerberos.create_keytab host/host1.example.com host1.example.com.keytab ''' ret = {} diff --git a/salt/modules/kernelpkg_linux_apt.py b/salt/modules/kernelpkg_linux_apt.py index e3a19a8c45..50182232f7 100644 --- a/salt/modules/kernelpkg_linux_apt.py +++ b/salt/modules/kernelpkg_linux_apt.py @@ -8,7 +8,7 @@ import logging import re # Import Salt libs -import salt.ext.six as six +from salt.ext import six try: from salt.utils.versions import LooseVersion as _LooseVersion diff --git a/salt/modules/kernelpkg_linux_yum.py b/salt/modules/kernelpkg_linux_yum.py index 4dc318f4c3..b0fabe516b 100644 --- a/salt/modules/kernelpkg_linux_yum.py +++ b/salt/modules/kernelpkg_linux_yum.py @@ -7,7 +7,7 @@ import functools import logging # Import Salt libs -import salt.ext.six as six +from salt.ext import six try: from salt.utils.versions import LooseVersion as _LooseVersion diff --git a/salt/modules/keyboard.py b/salt/modules/keyboard.py index c7168f56c4..2f5c504be9 100644 --- a/salt/modules/keyboard.py +++ b/salt/modules/keyboard.py @@ -9,7 +9,7 @@ from __future__ import absolute_import import logging # Import salt libs -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -18,7 +18,7 @@ def __virtual__(): ''' Only works with systemd or on supported POSIX-like systems ''' - if salt.utils.which('localectl') \ + if salt.utils.path.which('localectl') \ or __grains__['os_family'] in ('RedHat', 'Debian', 'Gentoo'): return True return (False, 'The keyboard exeuction module cannot be loaded: ' @@ -36,7 +36,7 @@ def get_sys(): salt '*' keyboard.get_sys ''' cmd = '' - if salt.utils.which('localectl'): + if salt.utils.path.which('localectl'): cmd = 'localectl | grep Keymap | sed -e"s/: /=/" -e"s/^[ \t]*//"' elif 'RedHat' in __grains__['os_family']: cmd = 'grep LAYOUT /etc/sysconfig/keyboard | grep -vE "^#"' @@ -59,7 +59,7 @@ def set_sys(layout): salt '*' keyboard.set_sys dvorak ''' - if salt.utils.which('localectl'): + if salt.utils.path.which('localectl'): __salt__['cmd.run']('localectl set-keymap {0}'.format(layout)) elif 'RedHat' in __grains__['os_family']: __salt__['file.sed']('/etc/sysconfig/keyboard', diff --git a/salt/modules/keystone.py b/salt/modules/keystone.py index 02765f0546..ecd061deaf 100644 --- a/salt/modules/keystone.py +++ b/salt/modules/keystone.py @@ -54,11 +54,10 @@ from __future__ import absolute_import import logging # Import Salt Libs -import salt.ext.six as six import salt.utils.http -from salt.ext import six -# Import third party libs +# Import 3rd-party libs +from salt.ext import six HAS_KEYSTONE = False try: # pylint: 
disable=import-error @@ -343,7 +342,7 @@ def endpoint_list(profile=None, **connection_args): for endpoint in kstone.endpoints.list(): ret[endpoint.id] = dict((value, getattr(endpoint, value)) for value in dir(endpoint) if not value.startswith('_') and - isinstance(getattr(endpoint, value), (six.text_type, dict, bool, str))) + isinstance(getattr(endpoint, value), (six.string_types, dict, bool))) return ret @@ -496,7 +495,7 @@ def role_list(profile=None, **connection_args): for role in kstone.roles.list(): ret[role.name] = dict((value, getattr(role, value)) for value in dir(role) if not value.startswith('_') and - isinstance(getattr(role, value), (six.text_type, dict, bool, str))) + isinstance(getattr(role, value), (six.string_types, dict, bool))) return ret @@ -560,7 +559,7 @@ def service_get(service_id=None, name=None, profile=None, **connection_args): service = kstone.services.get(service_id) ret[service.name] = dict((value, getattr(service, value)) for value in dir(service) if not value.startswith('_') and - isinstance(getattr(service, value), (six.text_type, dict, bool, str))) + isinstance(getattr(service, value), (six.string_types, dict, bool))) return ret @@ -579,7 +578,7 @@ def service_list(profile=None, **connection_args): for service in kstone.services.list(): ret[service.name] = dict((value, getattr(service, value)) for value in dir(service) if not value.startswith('_') and - isinstance(getattr(service, value), (six.text_type, dict, bool, str))) + isinstance(getattr(service, value), (six.string_types, dict, bool))) return ret @@ -722,7 +721,7 @@ def tenant_get(tenant_id=None, name=None, profile=None, tenant = getattr(kstone, _TENANTS, None).get(tenant_id) ret[tenant.name] = dict((value, getattr(tenant, value)) for value in dir(tenant) if not value.startswith('_') and - isinstance(getattr(tenant, value), (six.text_type, dict, bool, str))) + isinstance(getattr(tenant, value), (six.string_types, dict, bool))) return ret @@ -775,7 +774,7 @@ def tenant_list(profile=None, **connection_args): for tenant in getattr(kstone, _TENANTS, None).list(): ret[tenant.name] = dict((value, getattr(tenant, value)) for value in dir(tenant) if not value.startswith('_') and - isinstance(getattr(tenant, value), (six.text_type, dict, bool, str))) + isinstance(getattr(tenant, value), (six.string_types, dict, bool))) return ret @@ -839,7 +838,7 @@ def tenant_update(tenant_id=None, name=None, description=None, return dict((value, getattr(updated, value)) for value in dir(updated) if not value.startswith('_') and - isinstance(getattr(updated, value), (six.text_type, dict, bool, str))) + isinstance(getattr(updated, value), (six.string_types, dict, bool))) def project_update(project_id=None, name=None, description=None, @@ -918,7 +917,7 @@ def user_list(profile=None, **connection_args): for user in kstone.users.list(): ret[user.name] = dict((value, getattr(user, value, None)) for value in dir(user) if not value.startswith('_') and - isinstance(getattr(user, value, None), (six.text_type, dict, bool, str))) + isinstance(getattr(user, value, None), (six.string_types, dict, bool))) tenant_id = getattr(user, 'tenantId', None) if tenant_id: ret[user.name]['tenant_id'] = tenant_id @@ -955,7 +954,7 @@ def user_get(user_id=None, name=None, profile=None, **connection_args): ret[user.name] = dict((value, getattr(user, value, None)) for value in dir(user) if not value.startswith('_') and - isinstance(getattr(user, value, None), (six.text_type, dict, bool, str))) + isinstance(getattr(user, value, None), (six.string_types, 
dict, bool))) tenant_id = getattr(user, 'tenantId', None) if tenant_id: @@ -1317,7 +1316,7 @@ tenant_id=7167a092ece84bae8cead4bf9d15bb3b for role in kstone.roles.list(user=user_id, project=tenant_id): ret[role.name] = dict((value, getattr(role, value)) for value in dir(role) if not value.startswith('_') and - isinstance(getattr(role, value), (six.text_type, dict, bool, str))) + isinstance(getattr(role, value), (six.string_types, dict, bool))) else: for role in kstone.roles.roles_for_user(user=user_id, tenant=tenant_id): ret[role.name] = {'id': role.id, diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py index 15531c36db..fb6c5020a4 100644 --- a/salt/modules/kubernetes.py +++ b/salt/modules/kubernetes.py @@ -9,20 +9,37 @@ Module for handling kubernetes calls. kubernetes.user: admin kubernetes.password: verybadpass kubernetes.api_url: 'http://127.0.0.1:8080' + kubernetes.certificate-authority-data: '...' + kubernetes.client-certificate-data: '....n + kubernetes.client-key-data: '...' + kubernetes.certificate-authority-file: '/path/to/ca.crt' + kubernetes.client-certificate-file: '/path/to/client.crt' + kubernetes.client-key-file: '/path/to/client.key' + These settings can be also overrided by adding `api_url`, `api_user`, -or `api_password` parameters when calling a function: +`api_password`, `api_certificate_authority_file`, `api_client_certificate_file` +or `api_client_key_file` parameters when calling a function: + +The data format for `kubernetes.*-data` values is the same as provided in `kubeconfig`. +It's base64 encoded certificates/keys in one line. + +For an item only one field should be provided. Either a `data` or a `file` entry. +In case both are provided the `file` entry is prefered. .. code-block:: bash salt '*' kubernetes.nodes api_url=http://k8s-api-server:port api_user=myuser api_password=pass +.. versionadded: 2017.7.0 ''' # Import Python Futures from __future__ import absolute_import +import os.path import base64 import logging import yaml +import tempfile from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems @@ -39,6 +56,14 @@ try: except ImportError: HAS_LIBS = False +try: + # There is an API change in Kubernetes >= 2.0.0. 
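# Note: the import fallback below appears to handle the rename in the python
# kubernetes client 2.0.0, where V1beta1Deployment / V1beta1DeploymentSpec
# became AppsV1beta1Deployment / AppsV1beta1DeploymentSpec; whichever pair
# imports cleanly is aliased to the Apps* names that create_deployment() and
# replace_deployment() use later in this module.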
+ from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment + from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec +except ImportError: + from kubernetes.client import AppsV1beta1Deployment + from kubernetes.client import AppsV1beta1DeploymentSpec + log = logging.getLogger(__name__) @@ -64,16 +89,31 @@ def _setup_conn(**kwargs): 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') + ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') + client_cert = __salt__['config.option']('kubernetes.client-certificate-data') + client_key = __salt__['config.option']('kubernetes.client-key-data') + ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') + client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') + client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided - if kwargs.get('api_url'): - host = kwargs['api_url'] + if 'api_url' in kwargs: + host = kwargs.get('api_url') - if kwargs.get('api_user'): - username = kwargs['api_user'] + if 'api_user' in kwargs: + username = kwargs.get('api_user') - if kwargs.get('api_password'): - password = kwargs['api_password'] + if 'api_password' in kwargs: + password = kwargs.get('api_password') + + if 'api_certificate_authority_file' in kwargs: + ca_cert_file = kwargs.get('api_certificate_authority_file') + + if 'api_client_certificate_file' in kwargs: + client_cert_file = kwargs.get('api_client_certificate_file') + + if 'api_client_key_file' in kwargs: + client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or @@ -86,6 +126,45 @@ def _setup_conn(**kwargs): kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password + if ca_cert_file: + kubernetes.client.configuration.ssl_ca_cert = ca_cert_file + elif ca_cert: + with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: + ca.write(base64.b64decode(ca_cert)) + kubernetes.client.configuration.ssl_ca_cert = ca.name + else: + kubernetes.client.configuration.ssl_ca_cert = None + + if client_cert_file: + kubernetes.client.configuration.cert_file = client_cert_file + elif client_cert: + with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: + c.write(base64.b64decode(client_cert)) + kubernetes.client.configuration.cert_file = c.name + else: + kubernetes.client.configuration.cert_file = None + + if client_key_file: + kubernetes.client.configuration.key_file = client_key_file + if client_key: + with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: + k.write(base64.b64decode(client_key)) + kubernetes.client.configuration.key_file = k.name + else: + kubernetes.client.configuration.key_file = None + + +def _cleanup(**kwargs): + ca = kubernetes.client.configuration.ssl_ca_cert + cert = kubernetes.client.configuration.cert_file + key = kubernetes.client.configuration.key_file + if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): + salt.utils.safe_rm(cert) + if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): + salt.utils.safe_rm(key) + if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): + salt.utils.safe_rm(ca) + def ping(**kwargs): ''' @@ -127,6 +206,8 @@ def nodes(**kwargs): 'Exception when calling 
CoreV1Api->list_node: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def node(name, **kwargs): @@ -149,6 +230,8 @@ def node(name, **kwargs): 'Exception when calling CoreV1Api->list_node: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() for k8s_node in api_response.items: if k8s_node.metadata.name == name: @@ -203,6 +286,8 @@ def node_add_label(node_name, label_name, label_value, **kwargs): 'Exception when calling CoreV1Api->patch_node: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() return None @@ -236,6 +321,8 @@ def node_remove_label(node_name, label_name, **kwargs): 'Exception when calling CoreV1Api->patch_node: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() return None @@ -264,6 +351,8 @@ def namespaces(**kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def deployments(namespace='default', **kwargs): @@ -291,6 +380,8 @@ def deployments(namespace='default', **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def services(namespace='default', **kwargs): @@ -317,6 +408,8 @@ def services(namespace='default', **kwargs): 'CoreV1Api->list_namespaced_service: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def pods(namespace='default', **kwargs): @@ -343,6 +436,8 @@ def pods(namespace='default', **kwargs): 'CoreV1Api->list_namespaced_pod: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def secrets(namespace='default', **kwargs): @@ -369,6 +464,8 @@ def secrets(namespace='default', **kwargs): 'CoreV1Api->list_namespaced_secret: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def configmaps(namespace='default', **kwargs): @@ -395,6 +492,8 @@ def configmaps(namespace='default', **kwargs): 'CoreV1Api->list_namespaced_config_map: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def show_deployment(name, namespace='default', **kwargs): @@ -422,6 +521,8 @@ def show_deployment(name, namespace='default', **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def show_service(name, namespace='default', **kwargs): @@ -448,6 +549,8 @@ def show_service(name, namespace='default', **kwargs): 'CoreV1Api->read_namespaced_service: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def show_pod(name, namespace='default', **kwargs): @@ -474,6 +577,8 @@ def show_pod(name, namespace='default', **kwargs): 'CoreV1Api->read_namespaced_pod: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def show_namespace(name, **kwargs): @@ -499,6 +604,8 @@ def show_namespace(name, **kwargs): 'CoreV1Api->read_namespace: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def show_secret(name, namespace='default', decode=False, **kwargs): @@ -534,6 +641,8 @@ def show_secret(name, namespace='default', decode=False, **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def show_configmap(name, namespace='default', **kwargs): @@ -563,6 +672,8 @@ def show_configmap(name, namespace='default', **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def delete_deployment(name, namespace='default', **kwargs): @@ -594,6 +705,8 @@ def delete_deployment(name, namespace='default', **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def 
delete_service(name, namespace='default', **kwargs): @@ -623,6 +736,8 @@ def delete_service(name, namespace='default', **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def delete_pod(name, namespace='default', **kwargs): @@ -654,6 +769,8 @@ def delete_pod(name, namespace='default', **kwargs): 'CoreV1Api->delete_namespaced_pod: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def delete_namespace(name, **kwargs): @@ -682,6 +799,8 @@ def delete_namespace(name, **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def delete_secret(name, namespace='default', **kwargs): @@ -713,6 +832,8 @@ def delete_secret(name, namespace='default', **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def delete_configmap(name, namespace='default', **kwargs): @@ -745,6 +866,8 @@ def delete_configmap(name, namespace='default', **kwargs): '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def create_deployment( @@ -761,7 +884,7 @@ def create_deployment( ''' body = __create_object_body( kind='Deployment', - obj_class=kubernetes.client.V1beta1Deployment, + obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, @@ -789,6 +912,8 @@ def create_deployment( '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def create_pod( @@ -833,6 +958,8 @@ def create_pod( '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def create_service( @@ -876,6 +1003,8 @@ def create_service( 'CoreV1Api->create_namespaced_service: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def create_secret( @@ -921,6 +1050,8 @@ def create_secret( 'CoreV1Api->create_namespaced_secret: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def create_configmap( @@ -962,6 +1093,8 @@ def create_configmap( 'CoreV1Api->create_namespaced_config_map: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def create_namespace( @@ -996,6 +1129,8 @@ def create_namespace( '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def replace_deployment(name, @@ -1012,7 +1147,7 @@ def replace_deployment(name, ''' body = __create_object_body( kind='Deployment', - obj_class=kubernetes.client.V1beta1Deployment, + obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, @@ -1040,6 +1175,8 @@ def replace_deployment(name, '{0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def replace_service(name, @@ -1089,6 +1226,8 @@ def replace_service(name, 'CoreV1Api->replace_namespaced_service: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def replace_secret(name, @@ -1134,6 +1273,8 @@ def replace_secret(name, 'CoreV1Api->replace_namespaced_secret: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def replace_configmap(name, @@ -1173,6 +1314,8 @@ def replace_configmap(name, 'CoreV1Api->replace_namespaced_configmap: {0}'.format(exc) ) raise CommandExecutionError(exc) + finally: + _cleanup() def __create_object_body(kind, @@ -1275,9 +1418,9 @@ def __dict_to_object_meta(name, namespace, metadata): def __dict_to_deployment_spec(spec): ''' - Converts a dictionary into kubernetes V1beta1DeploymentSpec instance. + Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' - spec_obj = kubernetes.client.V1beta1DeploymentSpec() + spec_obj = AppsV1beta1DeploymentSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) diff --git a/salt/modules/launchctl.py b/salt/modules/launchctl.py index 8991dc3463..fa951b2bee 100644 --- a/salt/modules/launchctl.py +++ b/salt/modules/launchctl.py @@ -21,10 +21,12 @@ import re # Import salt libs import salt.utils +import salt.utils.platform +import salt.utils.stringutils import salt.utils.decorators as decorators import salt.utils.files from salt.utils.versions import LooseVersion as _LooseVersion -import salt.ext.six as six +from salt.ext import six # Set up logging log = logging.getLogger(__name__) @@ -39,7 +41,7 @@ def __virtual__(): ''' Only work on MacOS ''' - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): return (False, 'Failed to load the mac_service module:\n' 'Only available on macOS systems.') @@ -103,7 +105,7 @@ def _available_services(): plist = plistlib.readPlistFromString(plist_xml) else: plist = plistlib.readPlistFromBytes( - salt.utils.to_bytes(plist_xml)) + salt.utils.stringutils.to_bytes(plist_xml)) try: available_services[plist.Label.lower()] = { diff --git a/salt/modules/layman.py b/salt/modules/layman.py index 3e201ba4ce..0cb322557f 100644 --- a/salt/modules/layman.py +++ b/salt/modules/layman.py @@ -4,7 +4,7 @@ Support for Layman ''' from __future__ import absolute_import -import salt.utils +import salt.utils.path import salt.exceptions @@ -12,7 +12,7 @@ def __virtual__(): ''' Only work on Gentoo systems with layman installed ''' - if __grains__['os'] == 'Gentoo' and salt.utils.which('layman'): + if __grains__['os'] == 'Gentoo' and salt.utils.path.which('layman'): return 'layman' return (False, 'layman execution module cannot be loaded: only available on Gentoo with layman installed.') diff --git a/salt/modules/ldap3.py b/salt/modules/ldap3.py index 8b29a77af8..d4c9488ed3 100644 --- a/salt/modules/ldap3.py +++ b/salt/modules/ldap3.py @@ -23,7 +23,7 @@ try: except ImportError: pass import logging -import salt.ext.six as six +from salt.ext import six import sys log = logging.getLogger(__name__) diff --git a/salt/modules/libcloud_compute.py b/salt/modules/libcloud_compute.py index 3e226072d7..8bab210a30 100644 --- a/salt/modules/libcloud_compute.py +++ b/salt/modules/libcloud_compute.py @@ -37,8 +37,8 @@ import logging import os.path # Import salt libs +import salt.utils.args import salt.utils.compat -from salt.utils import clean_kwargs from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) @@ -106,7 +106,7 @@ def list_nodes(profile, **libcloud_kwargs): salt myminion libcloud_compute.list_nodes profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) nodes = conn.list_nodes(**libcloud_kwargs) ret = [] for node in nodes: @@ -135,7 +135,7 @@ def list_sizes(profile, location_id=None, **libcloud_kwargs): salt myminion libcloud_compute.list_sizes profile1 us-east1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) if location_id is not None: locations = [loc for loc in conn.list_locations() if loc.id == location_id] if len(locations) == 0: @@ -168,7 +168,7 @@ def list_locations(profile, **libcloud_kwargs): salt myminion libcloud_compute.list_locations profile1 ''' conn = 
_get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) locations = conn.list_locations(**libcloud_kwargs) ret = [] @@ -242,7 +242,7 @@ def list_volumes(profile, **libcloud_kwargs): salt myminion libcloud_compute.list_volumes profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) volumes = conn.list_volumes(**libcloud_kwargs) ret = [] @@ -271,7 +271,7 @@ def list_volume_snapshots(volume_id, profile, **libcloud_kwargs): salt myminion libcloud_compute.list_volume_snapshots vol1 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) volume = _get_by_id(conn.list_volumes(), volume_id) snapshots = conn.list_volume_snapshots(volume, **libcloud_kwargs) @@ -309,7 +309,7 @@ def create_volume(size, name, profile, location_id=None, **libcloud_kwargs): salt myminion libcloud_compute.create_volume 1000 vol1 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) if location_id is not None: location = _get_by_id(conn.list_locations(), location_id) else: @@ -344,7 +344,7 @@ def create_volume_snapshot(volume_id, profile, name=None, **libcloud_kwargs): salt myminion libcloud_compute.create_volume_snapshot vol1 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) volume = _get_by_id(conn.list_volumes(), volume_id) snapshot = conn.create_volume_snapshot(volume, name=name, **libcloud_kwargs) @@ -377,7 +377,7 @@ def attach_volume(node_id, volume_id, profile, device=None, **libcloud_kwargs): salt myminion libcloud_compute.detach_volume vol1 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) volume = _get_by_id(conn.list_volumes(), volume_id) node = _get_by_id(conn.list_nodes(), node_id) return conn.attach_volume(node, volume, device=device, **libcloud_kwargs) @@ -403,7 +403,7 @@ def detach_volume(volume_id, profile, **libcloud_kwargs): salt myminion libcloud_compute.detach_volume vol1 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) volume = _get_by_id(conn.list_volumes(), volume_id) return conn.detach_volume(volume, **libcloud_kwargs) @@ -428,7 +428,7 @@ def destroy_volume(volume_id, profile, **libcloud_kwargs): salt myminion libcloud_compute.destroy_volume vol1 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) volume = _get_by_id(conn.list_volumes(), volume_id) return conn.destroy_volume(volume, **libcloud_kwargs) @@ -456,7 +456,7 @@ def destroy_volume_snapshot(volume_id, snapshot_id, profile, **libcloud_kwargs): salt myminion libcloud_compute.destroy_volume_snapshot snap1 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) volume = _get_by_id(conn.list_volumes(), volume_id) snapshot = 
_get_by_id(conn.list_volume_snapshots(volume), snapshot_id) return conn.destroy_volume_snapshot(snapshot, **libcloud_kwargs) @@ -482,7 +482,7 @@ def list_images(profile, location_id=None, **libcloud_kwargs): salt myminion libcloud_compute.list_images profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) if location_id is not None: location = _get_by_id(conn.list_locations(), location_id) else: @@ -522,7 +522,7 @@ def create_image(node_id, name, profile, description=None, **libcloud_kwargs): salt myminion libcloud_compute.create_image server1 my_image profile1 description='test image' ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) node = _get_by_id(conn.list_nodes(), node_id) return _simple_image(conn.create_image(node, name, description=description, **libcloud_kwargs)) @@ -547,7 +547,7 @@ def delete_image(image_id, profile, **libcloud_kwargs): salt myminion libcloud_compute.delete_image image1 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) image = _get_by_id(conn.list_images(), image_id) return conn.delete_image(image, **libcloud_kwargs) @@ -572,7 +572,7 @@ def get_image(image_id, profile, **libcloud_kwargs): salt myminion libcloud_compute.get_image image1 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) image = conn.get_image(image_id, **libcloud_kwargs) return _simple_image(image) @@ -606,7 +606,7 @@ def copy_image(source_region, image_id, name, profile, description=None, **libcl salt myminion libcloud_compute.copy_image us-east1 image1 'new image' profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) image = conn.get_image(image_id, **libcloud_kwargs) new_image = conn.copy_image(source_region, image, name, description=description, **libcloud_kwargs) @@ -630,7 +630,7 @@ def list_key_pairs(profile, **libcloud_kwargs): salt myminion libcloud_compute.list_key_pairs profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) keys = conn.list_key_pairs(**libcloud_kwargs) ret = [] @@ -659,7 +659,7 @@ def get_key_pair(name, profile, **libcloud_kwargs): salt myminion libcloud_compute.get_key_pair pair1 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) return _simple_key_pair(conn.get_key_pair(name, **libcloud_kwargs)) @@ -683,7 +683,7 @@ def create_key_pair(name, profile, **libcloud_kwargs): salt myminion libcloud_compute.create_key_pair pair1 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) return _simple_key_pair(conn.create_key_pair(name, **libcloud_kwargs)) @@ -715,7 +715,7 @@ def import_key_pair(name, key, profile, key_type=None, **libcloud_kwargs): salt myminion libcloud_compute.import_key_pair pair1 /path/to/key profile1 ''' conn = _get_driver(profile=profile) - 
libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) if os.path.exists(key) or key_type == 'FILE': return _simple_key_pair(conn.import_key_pair_from_file(name, key, @@ -746,7 +746,7 @@ def delete_key_pair(name, profile, **libcloud_kwargs): salt myminion libcloud_compute.delete_key_pair pair1 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) key = conn.get_key_pair(name) return conn.delete_key_pair(key, **libcloud_kwargs) @@ -770,7 +770,7 @@ def extra(method, profile, **libcloud_kwargs): salt myminion libcloud_compute.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) conn = _get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs) diff --git a/salt/modules/libcloud_loadbalancer.py b/salt/modules/libcloud_loadbalancer.py index 239ec05602..a181d13738 100644 --- a/salt/modules/libcloud_loadbalancer.py +++ b/salt/modules/libcloud_loadbalancer.py @@ -36,9 +36,9 @@ from __future__ import absolute_import import logging # Import salt libs +import salt.utils.args import salt.utils.compat -import salt.ext.six as six -from salt.utils import clean_kwargs +from salt.ext import six from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) @@ -118,7 +118,7 @@ def list_balancers(profile, **libcloud_kwargs): salt myminion libcloud_storage.list_balancers profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) balancers = conn.list_balancers(**libcloud_kwargs) ret = [] for balancer in balancers: @@ -146,7 +146,7 @@ def list_protocols(profile, **libcloud_kwargs): salt myminion libcloud_storage.list_protocols profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) return conn.list_protocols(**libcloud_kwargs) @@ -194,7 +194,7 @@ def create_balancer(name, port, protocol, profile, algorithm=None, members=None, else: raise ValueError("members must be of type list") - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) conn = _get_driver(profile=profile) balancer = conn.create_balancer(name, port, protocol, algorithm, starting_members, **libcloud_kwargs) return _simple_balancer(balancer) @@ -223,7 +223,7 @@ def destroy_balancer(balancer_id, profile, **libcloud_kwargs): salt myminion libcloud_storage.destroy_balancer balancer_1 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) balancer = conn.get_balancer(balancer_id) return conn.destroy_balancer(balancer, **libcloud_kwargs) @@ -250,7 +250,7 @@ def get_balancer_by_name(name, profile, **libcloud_kwargs): salt myminion libcloud_storage.get_balancer_by_name my_balancer profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) balancers = conn.list_balancers(**libcloud_kwargs) match = [b for b in balancers if b.name == 
name] if len(match) == 1: @@ -283,7 +283,7 @@ def get_balancer(balancer_id, profile, **libcloud_kwargs): salt myminion libcloud_storage.get_balancer balancer123 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) balancer = conn.get_balancer(balancer_id, **libcloud_kwargs) return _simple_balancer(balancer) @@ -307,7 +307,7 @@ def list_supported_algorithms(profile, **libcloud_kwargs): salt myminion libcloud_storage.list_supported_algorithms profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) return conn.list_supported_algorithms(**libcloud_kwargs) @@ -337,7 +337,7 @@ def balancer_attach_member(balancer_id, ip, port, profile, extra=None, **libclou salt myminion libcloud_storage.balancer_attach_member balancer123 1.2.3.4 80 profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) member = Member(id=None, ip=ip, port=port, balancer=None, extra=extra) balancer = conn.get_balancer(balancer_id) member_saved = conn.balancer_attach_member(balancer, member, **libcloud_kwargs) @@ -379,7 +379,7 @@ def balancer_detach_member(balancer_id, member_id, profile, **libcloud_kwargs): raise ValueError("Bad argument, found no records") else: member = match[0] - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) return conn.balancer_detach_member(balancer=balancer, member=member, **libcloud_kwargs) @@ -404,7 +404,7 @@ def list_balancer_members(balancer_id, profile, **libcloud_kwargs): ''' conn = _get_driver(profile=profile) balancer = conn.get_balancer(balancer_id) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) members = conn.balancer_list_members(balancer=balancer, **libcloud_kwargs) return [_simple_member(member) for member in members] @@ -428,7 +428,7 @@ def extra(method, profile, **libcloud_kwargs): salt myminion libcloud_loadbalancer.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) conn = _get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs) diff --git a/salt/modules/libcloud_storage.py b/salt/modules/libcloud_storage.py index b769621642..9099c26991 100644 --- a/salt/modules/libcloud_storage.py +++ b/salt/modules/libcloud_storage.py @@ -36,8 +36,8 @@ from __future__ import absolute_import import logging # Import salt libs +import salt.utils.args import salt.utils.compat -from salt.utils import clean_kwargs from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) @@ -102,7 +102,7 @@ def list_containers(profile, **libcloud_kwargs): salt myminion libcloud_storage.list_containers profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) containers = conn.list_containers(**libcloud_kwargs) ret = [] for container in containers: @@ -134,7 +134,7 @@ def list_container_objects(container_name, profile, **libcloud_kwargs): ''' conn = _get_driver(profile=profile) container 
= conn.get_container(container_name) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) objects = conn.list_container_objects(container, **libcloud_kwargs) ret = [] for obj in objects: @@ -169,7 +169,7 @@ def create_container(container_name, profile, **libcloud_kwargs): salt myminion libcloud_storage.create_container MyFolder profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) container = conn.create_container(container_name, **libcloud_kwargs) return { 'name': container.name, @@ -197,7 +197,7 @@ def get_container(container_name, profile, **libcloud_kwargs): salt myminion libcloud_storage.get_container MyFolder profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) container = conn.get_container(container_name, **libcloud_kwargs) return { 'name': container.name, @@ -228,7 +228,7 @@ def get_container_object(container_name, object_name, profile, **libcloud_kwargs salt myminion libcloud_storage.get_container_object MyFolder MyFile.xyz profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) obj = conn.get_container_object(container_name, object_name, **libcloud_kwargs) return { 'name': obj.name, @@ -282,7 +282,7 @@ def download_object(container_name, object_name, destination_path, profile, ''' conn = _get_driver(profile=profile) obj = conn.get_object(container_name, object_name) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) return conn.download_object(obj, destination_path, overwrite_existing, delete_on_failure, **libcloud_kwargs) @@ -328,7 +328,7 @@ def upload_object(file_path, container_name, object_name, profile, extra=None, ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) container = conn.get_container(container_name) obj = conn.upload_object(file_path, container, object_name, extra, verify_hash, headers, **libcloud_kwargs) return obj.name @@ -361,7 +361,7 @@ def delete_object(container_name, object_name, profile, **libcloud_kwargs): salt myminion libcloud_storage.delete_object MyFolder me.jpg profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) obj = conn.get_object(container_name, object_name, **libcloud_kwargs) return conn.delete_object(obj) @@ -390,7 +390,7 @@ def delete_container(container_name, profile, **libcloud_kwargs): salt myminion libcloud_storage.delete_container MyFolder profile1 ''' conn = _get_driver(profile=profile) - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) container = conn.get_container(container_name) return conn.delete_container(container, **libcloud_kwargs) @@ -414,7 +414,7 @@ def extra(method, profile, **libcloud_kwargs): salt myminion libcloud_storage.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' - libcloud_kwargs = clean_kwargs(**libcloud_kwargs) + libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs) conn = 
_get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs) diff --git a/salt/modules/linux_acl.py b/salt/modules/linux_acl.py index a7fa3cbd1c..d9959b7cc1 100644 --- a/salt/modules/linux_acl.py +++ b/salt/modules/linux_acl.py @@ -5,7 +5,7 @@ Support for Linux File Access Control Lists from __future__ import absolute_import # Import salt libs -import salt.utils +import salt.utils.path from salt.exceptions import CommandExecutionError # Define the module's virtual name @@ -16,7 +16,7 @@ def __virtual__(): ''' Only load the module if getfacl is installed ''' - if salt.utils.which('getfacl'): + if salt.utils.path.which('getfacl'): return __virtualname__ return (False, 'The linux_acl execution module cannot be loaded: the getfacl binary is not in the path.') @@ -213,8 +213,10 @@ def modfacl(acl_type, acl_name='', perms='', *args, **kwargs): salt '*' acl.modfacl d:u myuser 7 /tmp/house/kitchen salt '*' acl.modfacl g mygroup 0 /tmp/house/kitchen /tmp/house/livingroom salt '*' acl.modfacl user myuser rwx /tmp/house/kitchen recursive=True + salt '*' acl.modfacl user myuser rwx /tmp/house/kitchen raise_err=True ''' recursive = kwargs.pop('recursive', False) + raise_err = kwargs.pop('raise_err', False) _raise_on_no_files(*args) @@ -228,7 +230,7 @@ def modfacl(acl_type, acl_name='', perms='', *args, **kwargs): for dentry in args: cmd += ' "{0}"'.format(dentry) - __salt__['cmd.run'](cmd, python_shell=False) + __salt__['cmd.run'](cmd, python_shell=False, raise_err=raise_err) return True diff --git a/salt/modules/linux_ip.py b/salt/modules/linux_ip.py index 3ae4be13f9..d848898f25 100644 --- a/salt/modules/linux_ip.py +++ b/salt/modules/linux_ip.py @@ -2,9 +2,14 @@ ''' The networking module for Non-RH/Deb Linux distros ''' +# Import Python libs from __future__ import absolute_import -import salt.utils + +# Import Salt libs import salt.utils.files +import salt.utils.path +import salt.utils.platform + from salt.ext.six.moves import zip __virtualname__ = 'ip' @@ -14,7 +19,7 @@ def __virtual__(): ''' Confine this module to Non-RH/Deb Linux distros ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return (False, 'Module linux_ip: Windows systems are not supported.') if __grains__['os_family'] == 'RedHat': return (False, 'Module linux_ip: RedHat systems are not supported.') @@ -22,7 +27,7 @@ def __virtual__(): return (False, 'Module linux_ip: Debian systems are not supported.') if __grains__['os_family'] == 'NILinuxRT': return (False, 'Module linux_ip: NILinuxRT systems are not supported.') - if not salt.utils.which('ip'): + if not salt.utils.path.which('ip'): return (False, 'The linux_ip execution module cannot be loaded: ' 'the ip binary is not in the path.') return __virtualname__ diff --git a/salt/modules/linux_lvm.py b/salt/modules/linux_lvm.py index be77285e25..ed6411d899 100644 --- a/salt/modules/linux_lvm.py +++ b/salt/modules/linux_lvm.py @@ -8,8 +8,8 @@ from __future__ import absolute_import import os.path # Import salt libs -import salt.utils -import salt.ext.six as six +import salt.utils.path +from salt.ext import six from salt.exceptions import CommandExecutionError # Define the module's virtual name @@ -20,7 +20,7 @@ def __virtual__(): ''' Only load the module if lvm is installed ''' - if salt.utils.which('lvm'): + if salt.utils.path.which('lvm'): return __virtualname__ return (False, 'The linux_lvm execution module cannot be loaded: the lvm binary is not in the path.') @@ -399,7 +399,7 @@ def lvcreate(lvname, elif k in 
valid: extra_arguments.extend(['--{0}'.format(k), '{0}'.format(v)]) - cmd = [salt.utils.which('lvcreate')] + cmd = [salt.utils.path.which('lvcreate')] if thinvolume: cmd.extend(['--thin', '-n', lvname]) @@ -430,7 +430,7 @@ def lvcreate(lvname, cmd.extend(extra_arguments) if force: - cmd.append('-yes') + cmd.append('--yes') out = __salt__['cmd.run'](cmd, python_shell=False).splitlines() lvdev = '/dev/{0}/{1}'.format(vgname, lvname) diff --git a/salt/modules/linux_sysctl.py b/salt/modules/linux_sysctl.py index 7fcaf3f354..11ef359125 100644 --- a/salt/modules/linux_sysctl.py +++ b/salt/modules/linux_sysctl.py @@ -10,7 +10,7 @@ import os import re # Import salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.files from salt.ext.six import string_types from salt.exceptions import CommandExecutionError diff --git a/salt/modules/localemod.py b/salt/modules/localemod.py index 3a9d0ec36a..e998df1b83 100644 --- a/salt/modules/localemod.py +++ b/salt/modules/localemod.py @@ -15,11 +15,12 @@ try: except ImportError: pass -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.locales +import salt.utils.path +import salt.utils.platform import salt.utils.systemd -import salt.ext.six as six +from salt.ext import six from salt.exceptions import CommandExecutionError log = logging.getLogger(__name__) @@ -32,7 +33,7 @@ def __virtual__(): ''' Only work on POSIX-like systems ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return (False, 'Cannot load locale module: windows platforms are unsupported') return __virtualname__ @@ -185,7 +186,7 @@ def set_locale(locale): ) elif 'Debian' in __grains__['os_family']: # this block only applies to Debian without systemd - update_locale = salt.utils.which('update-locale') + update_locale = salt.utils.path.which('update-locale') if update_locale is None: raise CommandExecutionError( 'Cannot set locale: "update-locale" was not found.') @@ -318,7 +319,7 @@ def gen_locale(locale, **kwargs): append_if_not_found=True ) - if salt.utils.which('locale-gen'): + if salt.utils.path.which('locale-gen'): cmd = ['locale-gen'] if on_gentoo: cmd.append('--generate') @@ -326,7 +327,7 @@ def gen_locale(locale, **kwargs): cmd.append(salt.utils.locales.normalize_locale(locale)) else: cmd.append(locale) - elif salt.utils.which('localedef'): + elif salt.utils.path.which('localedef'): cmd = ['localedef', '--force', '-i', locale_search_str, '-f', locale_info['codeset'], '{0}.{1}'.format(locale_search_str, locale_info['codeset']), diff --git a/salt/modules/locate.py b/salt/modules/locate.py index 7dcfc08ee2..28474eb344 100644 --- a/salt/modules/locate.py +++ b/salt/modules/locate.py @@ -8,7 +8,7 @@ from __future__ import absolute_import import logging # Import salt libs -import salt.utils +import salt.utils.platform log = logging.getLogger(__name__) @@ -17,8 +17,12 @@ def __virtual__(): ''' Only work on POSIX-like systems ''' - if salt.utils.is_windows(): - return (False, 'The locate execution module cannot be loaded: only available on non-Windows systems.') + if salt.utils.platform.is_windows(): + return ( + False, + 'The locate execution module cannot be loaded: only available on ' + 'non-Windows systems.' 
+ ) return True diff --git a/salt/modules/logadm.py b/salt/modules/logadm.py index ccde169100..9d1d498818 100644 --- a/salt/modules/logadm.py +++ b/salt/modules/logadm.py @@ -14,6 +14,7 @@ except ImportError: # Import salt libs import salt.utils +import salt.utils.args import salt.utils.decorators as decorators import salt.utils.files @@ -268,7 +269,7 @@ def rotate(name, pattern=None, conf_file=default_conf, **kwargs): ''' ## cleanup kwargs - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) ## inject name into kwargs if 'entryname' not in kwargs and name and not name.startswith('/'): diff --git a/salt/modules/logrotate.py b/salt/modules/logrotate.py index 1863e2b48f..f0e71dc25b 100644 --- a/salt/modules/logrotate.py +++ b/salt/modules/logrotate.py @@ -9,9 +9,9 @@ import os import logging # Import salt libs -from salt.exceptions import SaltInvocationError -import salt.utils import salt.utils.files +import salt.utils.platform +from salt.exceptions import SaltInvocationError _LOG = logging.getLogger(__name__) _DEFAULT_CONF = '/etc/logrotate.conf' @@ -27,8 +27,12 @@ def __virtual__(): ''' Only work on POSIX-like systems ''' - if salt.utils.is_windows(): - return (False, 'The logrotate execution module cannot be loaded: only available on non-Windows systems.') + if salt.utils.platform.is_windows(): + return ( + False, + 'The logrotate execution module cannot be loaded: only available ' + 'on non-Windows systems.' + ) return True diff --git a/salt/modules/lvs.py b/salt/modules/lvs.py index 11ad739627..001280236c 100644 --- a/salt/modules/lvs.py +++ b/salt/modules/lvs.py @@ -7,7 +7,7 @@ from __future__ import absolute_import # Import python libs # Import salt libs -import salt.utils +import salt.utils.path import salt.utils.decorators as decorators from salt.exceptions import SaltException @@ -20,7 +20,7 @@ __func_alias__ = { # Cache the output of running which('ipvsadm') @decorators.memoize def __detect_os(): - return salt.utils.which('ipvsadm') + return salt.utils.path.which('ipvsadm') def __virtual__(): diff --git a/salt/modules/lxc.py b/salt/modules/lxc.py index 568a6e0311..3a9b4bc9a2 100644 --- a/salt/modules/lxc.py +++ b/salt/modules/lxc.py @@ -25,19 +25,20 @@ import re import random # Import salt libs -import salt -import salt.utils.odict import salt.utils +import salt.utils.args +import salt.utils.cloud import salt.utils.dictupdate import salt.utils.files import salt.utils.network +import salt.utils.odict +import salt.utils.path from salt.exceptions import CommandExecutionError, SaltInvocationError -import salt.utils.cloud import salt.config from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error,no-name-in-module from salt.ext.six.moves import range # pylint: disable=redefined-builtin from salt.ext.six.moves.urllib.parse import urlparse as _urlparse @@ -62,7 +63,7 @@ _marker = object() def __virtual__(): - if salt.utils.which('lxc-start'): + if salt.utils.path.which('lxc-start'): return __virtualname__ # To speed up the whole thing, we decided to not use the # subshell way and assume things are in place for lxc @@ -71,7 +72,7 @@ def __virtual__(): # lxc-version presence is not sufficient, in lxc1.0 alpha # (precise backports), we have it and it is sufficient # for the module to execute. 
- # elif salt.utils.which('lxc-version'): + # elif salt.utils.path.which('lxc-version'): # passed = False # try: # passed = subprocess.check_output( @@ -570,7 +571,7 @@ def _get_profile(key, name, **kwargs): raise CommandExecutionError('lxc.{0} must be a dictionary'.format(key)) # Overlay the kwargs to override matched profile data - overrides = salt.utils.clean_kwargs(**copy.deepcopy(kwargs)) + overrides = salt.utils.args.clean_kwargs(**copy.deepcopy(kwargs)) profile_match = salt.utils.dictupdate.update( copy.deepcopy(profile_match), overrides @@ -894,7 +895,8 @@ def _get_lxc_default_data(**kwargs): ret['lxc.start.auto'] = '0' memory = kwargs.get('memory') if memory is not None: - ret['lxc.cgroup.memory.limit_in_bytes'] = memory * 1024 + # converting the config value from MB to bytes + ret['lxc.cgroup.memory.limit_in_bytes'] = memory * 1024 * 1024 cpuset = kwargs.get('cpuset') if cpuset: ret['lxc.cgroup.cpuset.cpus'] = cpuset @@ -3098,7 +3100,7 @@ def set_dns(name, dnsservers=None, searchdomains=None, path=None): # operation. # - We also teach resolvconf to use the aforementioned dns. # - We finally also set /etc/resolv.conf in all cases - rstr = __salt__['test.rand_str']() + rstr = __salt__['test.random_hash']() # no tmp here, apparmor won't let us execute ! script = '/sbin/{0}_dns.sh'.format(rstr) DNS_SCRIPT = "\n".join([ @@ -3159,7 +3161,7 @@ def running_systemd(name, cache=True, path=None): k = 'lxc.systemd.test.{0}{1}'.format(name, path) ret = __context__.get(k, None) if ret is None or not cache: - rstr = __salt__['test.rand_str']() + rstr = __salt__['test.random_hash']() # no tmp here, apparmor won't let us execute ! script = '/sbin/{0}_testsystemd.sh'.format(rstr) # ubuntu already had since trusty some bits of systemd but was @@ -3496,7 +3498,7 @@ def bootstrap(name, pub_key=pub_key, priv_key=priv_key) if needs_install or force_install or unconditional_install: if install: - rstr = __salt__['test.rand_str']() + rstr = __salt__['test.random_hash']() configdir = '/var/tmp/.c_{0}'.format(rstr) cmd = 'install -m 0700 -d {0}'.format(configdir) diff --git a/salt/modules/mac_assistive.py b/salt/modules/mac_assistive.py index 2ca69fd113..f894587e51 100644 --- a/salt/modules/mac_assistive.py +++ b/salt/modules/mac_assistive.py @@ -15,7 +15,7 @@ import re import logging # Import salt libs -import salt.utils +import salt.utils.platform from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion @@ -28,10 +28,14 @@ def __virtual__(): ''' Only work on Mac OS ''' - if salt.utils.is_darwin() and _LooseVersion(__grains__['osrelease']) >= '10.9': + if salt.utils.platform.is_darwin() \ + and _LooseVersion(__grains__['osrelease']) >= '10.9': return True - return False, 'The assistive module cannot be loaded: must be run on ' \ - 'macOS 10.9 or newer.' + return ( + False, + 'The assistive module cannot be loaded: must be run on ' + 'macOS 10.9 or newer.' + ) def install(app_id, enable=True): diff --git a/salt/modules/mac_brew.py b/salt/modules/mac_brew.py index 541d538b92..27252ebd89 100644 --- a/salt/modules/mac_brew.py +++ b/salt/modules/mac_brew.py @@ -18,9 +18,11 @@ import logging # Import salt libs import salt.utils +import salt.utils.path import salt.utils.pkg +import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import zip # Import third party libs @@ -37,7 +39,7 @@ def __virtual__(): Confine this module to Mac OS with Homebrew. 
''' - if salt.utils.which('brew') and __grains__['os'] == 'MacOS': + if salt.utils.path.which('brew') and __grains__['os'] == 'MacOS': return __virtualname__ return (False, 'The brew module could not be loaded: brew not found or grain os != MacOS') @@ -127,7 +129,7 @@ def list_pkgs(versions_as_list=False, **kwargs): name_and_versions = line.split(' ') name = name_and_versions[0] installed_versions = name_and_versions[1:] - key_func = functools.cmp_to_key(salt.utils.version_cmp) + key_func = functools.cmp_to_key(salt.utils.versions.version_cmp) newest_version = sorted(installed_versions, key=key_func).pop() except ValueError: continue diff --git a/salt/modules/mac_defaults.py b/salt/modules/mac_defaults.py index cda1f1b243..b43ae6f48c 100644 --- a/salt/modules/mac_defaults.py +++ b/salt/modules/mac_defaults.py @@ -8,8 +8,8 @@ Set defaults on Mac OS from __future__ import absolute_import import logging -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = 'macdefaults' @@ -19,7 +19,7 @@ def __virtual__(): ''' Only work on Mac OS ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return __virtualname__ return False diff --git a/salt/modules/mac_desktop.py b/salt/modules/mac_desktop.py index d9c77845c6..8bd2f40930 100644 --- a/salt/modules/mac_desktop.py +++ b/salt/modules/mac_desktop.py @@ -4,8 +4,8 @@ macOS implementations of various commands in the "desktop" interface ''' from __future__ import absolute_import -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform from salt.exceptions import CommandExecutionError # Define the module's virtual name @@ -16,7 +16,7 @@ def __virtual__(): ''' Only load on Mac systems ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return __virtualname__ return False, 'Cannot load macOS desktop module: This is not a macOS host.' 
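Most of the macOS modules touched in this changeset share the same loader guard: `__virtual__` now calls the relocated `salt.utils.platform` / `salt.utils.path` helpers and returns a `(False, reason)` tuple when the module cannot load. A minimal sketch of that pattern, using a hypothetical module name and binary:

.. code-block:: python

    import salt.utils.path
    import salt.utils.platform

    __virtualname__ = 'example'

    def __virtual__():
        # Gate on the platform first, then on any required binary, returning a
        # (False, reason) tuple so the loader can report why the module was skipped.
        if not salt.utils.platform.is_darwin():
            return False, 'The example module only works on macOS systems.'
        if not salt.utils.path.which('launchctl'):
            return False, 'The example module requires the "launchctl" binary.'
        return __virtualname__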
diff --git a/salt/modules/mac_group.py b/salt/modules/mac_group.py index 9a2f02ca85..971aa74ed1 100644 --- a/salt/modules/mac_group.py +++ b/salt/modules/mac_group.py @@ -13,6 +13,7 @@ except ImportError: # Import Salt Libs import salt.utils import salt.utils.itertools +import salt.utils.stringutils from salt.exceptions import CommandExecutionError, SaltInvocationError from salt.modules.mac_user import _dscl, _flush_dscl_cache @@ -48,7 +49,7 @@ def add(name, gid=None, **kwargs): raise CommandExecutionError( 'Group \'{0}\' already exists'.format(name) ) - if salt.utils.contains_whitespace(name): + if salt.utils.stringutils.contains_whitespace(name): raise SaltInvocationError('Group name cannot contain whitespace') if name.startswith('_'): raise SaltInvocationError( @@ -96,7 +97,7 @@ def delete(name): salt '*' group.delete foo ''' - if salt.utils.contains_whitespace(name): + if salt.utils.stringutils.contains_whitespace(name): raise SaltInvocationError('Group name cannot contain whitespace') if name.startswith('_'): raise SaltInvocationError( @@ -183,7 +184,7 @@ def info(name): salt '*' group.info foo ''' - if salt.utils.contains_whitespace(name): + if salt.utils.stringutils.contains_whitespace(name): raise SaltInvocationError('Group name cannot contain whitespace') try: # getgrnam seems to cache weirdly, so don't use it diff --git a/salt/modules/mac_keychain.py b/salt/modules/mac_keychain.py index 652e593ace..9e3977cf4e 100644 --- a/salt/modules/mac_keychain.py +++ b/salt/modules/mac_keychain.py @@ -18,8 +18,8 @@ try: except ImportError: HAS_DEPS = False -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = 'keychain' @@ -36,7 +36,7 @@ def __virtual__(): ''' Only work on Mac OS ''' - if salt.utils.is_darwin() and _quote is not None: + if salt.utils.platform.is_darwin() and _quote is not None: return __virtualname__ return False diff --git a/salt/modules/mac_package.py b/salt/modules/mac_package.py index 287689187f..a18b71a4b6 100644 --- a/salt/modules/mac_package.py +++ b/salt/modules/mac_package.py @@ -16,8 +16,8 @@ try: except ImportError: HAS_DEPS = False -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = 'macpackage' @@ -35,7 +35,7 @@ def __virtual__(): ''' Only work on Mac OS ''' - if salt.utils.is_darwin() and _quote is not None: + if salt.utils.platform.is_darwin() and _quote is not None: return __virtualname__ return False diff --git a/salt/modules/mac_pkgutil.py b/salt/modules/mac_pkgutil.py index 1626e73644..01adfd4231 100644 --- a/salt/modules/mac_pkgutil.py +++ b/salt/modules/mac_pkgutil.py @@ -12,8 +12,9 @@ import os.path # Import 3rd-party libs from salt.ext.six.moves import urllib # pylint: disable=import-error -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.path +import salt.utils.platform import salt.utils.itertools import salt.utils.mac_utils from salt.exceptions import SaltInvocationError @@ -26,10 +27,10 @@ __virtualname__ = 'pkgutil' def __virtual__(): - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): return (False, 'Only available on Mac OS systems') - if not salt.utils.which('pkgutil'): + if not salt.utils.path.which('pkgutil'): return (False, 'Missing pkgutil binary') return __virtualname__ diff --git a/salt/modules/mac_ports.py b/salt/modules/mac_ports.py index 8a842cdb8e..4d720e5a03 100644 --- a/salt/modules/mac_ports.py +++ 
b/salt/modules/mac_ports.py @@ -38,12 +38,15 @@ import re # Import salt libs import salt.utils +import salt.utils.path import salt.utils.pkg +import salt.utils.platform import salt.utils.mac_utils +import salt.utils.versions from salt.exceptions import CommandExecutionError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -55,10 +58,10 @@ def __virtual__(): ''' Confine this module to Mac OS with MacPorts. ''' - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): return False, 'mac_ports only available on MacOS' - if not salt.utils.which('port'): + if not salt.utils.path.which('port'): return False, 'mac_ports requires the "port" binary' return __virtualname__ @@ -168,7 +171,7 @@ def latest_version(*names, **kwargs): ret = {} for key, val in six.iteritems(available): - if key not in installed or salt.utils.compare_versions(ver1=installed[key], oper='<', ver2=val): + if key not in installed or salt.utils.versions.compare(ver1=installed[key], oper='<', ver2=val): ret[key] = val else: ret[key] = '{0} (installed)'.format(version(key)) diff --git a/salt/modules/mac_power.py b/salt/modules/mac_power.py index d7e09dce19..705899ad32 100644 --- a/salt/modules/mac_power.py +++ b/salt/modules/mac_power.py @@ -7,10 +7,13 @@ Module for editing power settings on macOS # Import python libs from __future__ import absolute_import -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.mac_utils +import salt.utils.platform from salt.exceptions import SaltInvocationError + +# Import 3rd-party libs +from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' @@ -20,7 +23,7 @@ def __virtual__(): ''' Only for macOS ''' - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') @@ -38,7 +41,7 @@ def _validate_sleep(minutes): Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off - if isinstance(minutes, str): + if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: diff --git a/salt/modules/mac_service.py b/salt/modules/mac_service.py index 9f795edd0f..5493910279 100644 --- a/salt/modules/mac_service.py +++ b/salt/modules/mac_service.py @@ -12,13 +12,16 @@ import plistlib # Import salt libs import salt.utils +import salt.utils.path +import salt.utils.platform +import salt.utils.stringutils import salt.utils.decorators as decorators import salt.utils.files from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd party libs -import salt.ext.six as six +from salt.ext import six # Define the module's virtual name __virtualname__ = 'service' @@ -32,15 +35,15 @@ def __virtual__(): ''' Only for macOS with launchctl ''' - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): return (False, 'Failed to load the mac_service module:\n' 'Only available on macOS systems.') - if not salt.utils.which('launchctl'): + if not salt.utils.path.which('launchctl'): return (False, 'Failed to load the mac_service module:\n' 'Required binary not found: "launchctl"') - if not salt.utils.which('plutil'): + if not salt.utils.path.which('plutil'): return (False, 'Failed to load the mac_service module:\n' 'Required binary not found: "plutil"') @@ -101,7 +104,7 @@ def _available_services(): plist = 
plistlib.readPlistFromString(plist_xml) else: plist = plistlib.readPlistFromBytes( - salt.utils.to_bytes(plist_xml)) + salt.utils.stringutils.to_bytes(plist_xml)) try: available_services[plist.Label.lower()] = { diff --git a/salt/modules/mac_shadow.py b/salt/modules/mac_shadow.py index d434f87e09..6daaa14360 100644 --- a/salt/modules/mac_shadow.py +++ b/salt/modules/mac_shadow.py @@ -21,9 +21,9 @@ except ImportError: HAS_PWD = False # Import salt libs -import salt.utils import logging import salt.utils.mac_utils +import salt.utils.platform from salt.exceptions import CommandExecutionError log = logging.getLogger(__name__) # Start logging @@ -33,7 +33,7 @@ __virtualname__ = 'shadow' def __virtual__(): # Is this macOS? - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): return False, 'Not macOS' if HAS_PWD: diff --git a/salt/modules/mac_softwareupdate.py b/salt/modules/mac_softwareupdate.py index 449874679c..0f598ed0bf 100644 --- a/salt/modules/mac_softwareupdate.py +++ b/salt/modules/mac_softwareupdate.py @@ -13,6 +13,7 @@ import os import salt.utils import salt.utils.files import salt.utils.mac_utils +import salt.utils.platform from salt.exceptions import CommandExecutionError, SaltInvocationError __virtualname__ = 'softwareupdate' @@ -22,7 +23,7 @@ def __virtual__(): ''' Only for MacOS ''' - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): return (False, 'The softwareupdate module could not be loaded: ' 'module only works on MacOS systems.') diff --git a/salt/modules/mac_system.py b/salt/modules/mac_system.py index 889c668486..9d92502573 100644 --- a/salt/modules/mac_system.py +++ b/salt/modules/mac_system.py @@ -19,8 +19,8 @@ except ImportError: # python 2 import getpass # Import salt libs -import salt.utils import salt.utils.mac_utils +import salt.utils.platform from salt.exceptions import SaltInvocationError __virtualname__ = 'system' @@ -30,7 +30,7 @@ def __virtual__(): ''' Only for MacOS with atrun enabled ''' - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): return (False, 'The mac_system module could not be loaded: ' 'module only works on MacOS systems.') diff --git a/salt/modules/mac_timezone.py b/salt/modules/mac_timezone.py index ab5528a120..1534a7cc4f 100644 --- a/salt/modules/mac_timezone.py +++ b/salt/modules/mac_timezone.py @@ -9,9 +9,9 @@ from __future__ import absolute_import # Import python libs from datetime import datetime -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.mac_utils +import salt.utils.platform from salt.exceptions import SaltInvocationError __virtualname__ = 'timezone' @@ -21,7 +21,7 @@ def __virtual__(): ''' Only for macOS ''' - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): return (False, 'The mac_timezone module could not be loaded: ' 'module only works on macOS systems.') diff --git a/salt/modules/mac_user.py b/salt/modules/mac_user.py index 18194aada6..9fe8d443bb 100644 --- a/salt/modules/mac_user.py +++ b/salt/modules/mac_user.py @@ -24,7 +24,9 @@ from salt.ext.six import string_types # Import salt libs import salt.utils -import salt.utils.decorators as decorators +import salt.utils.args +import salt.utils.decorators.path +import salt.utils.stringutils from salt.utils.locales import sdecode as _sdecode from salt.exceptions import CommandExecutionError, SaltInvocationError @@ -95,7 +97,7 @@ def add(name, if info(name): raise CommandExecutionError('User \'{0}\' already exists'.format(name)) - if 
salt.utils.contains_whitespace(name): + if salt.utils.stringutils.contains_whitespace(name): raise SaltInvocationError('Username cannot contain whitespace') if uid is None: @@ -142,7 +144,7 @@ def delete(name, remove=False, force=False): salt '*' user.delete name remove=True force=True ''' - if salt.utils.contains_whitespace(name): + if salt.utils.stringutils.contains_whitespace(name): raise SaltInvocationError('Username cannot contain whitespace') if not info(name): return True @@ -271,10 +273,10 @@ def chhome(name, home, **kwargs): salt '*' user.chhome foo /Users/foo ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) persist = kwargs.pop('persist', False) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) if persist: log.info('Ignoring unsupported \'persist\' argument to user.chhome') @@ -358,7 +360,7 @@ def chgroups(name, groups, append=False): if isinstance(groups, string_types): groups = groups.split(',') - bad_groups = [x for x in groups if salt.utils.contains_whitespace(x)] + bad_groups = [x for x in groups if salt.utils.stringutils.contains_whitespace(x)] if bad_groups: raise SaltInvocationError( 'Invalid group name(s): {0}'.format(', '.join(bad_groups)) @@ -418,7 +420,7 @@ def _format_info(data): 'fullname': data.pw_gecos} -@decorators.which('id') +@salt.utils.decorators.path.which('id') def primary_group(name): ''' Return the primary group of the named user diff --git a/salt/modules/mac_xattr.py b/salt/modules/mac_xattr.py index c6d3e88ade..88663c2e7e 100644 --- a/salt/modules/mac_xattr.py +++ b/salt/modules/mac_xattr.py @@ -12,6 +12,7 @@ from __future__ import absolute_import import logging # Import salt libs +import salt.utils.args import salt.utils.mac_utils from salt.exceptions import CommandExecutionError @@ -53,10 +54,10 @@ def list_(path, **kwargs): salt '*' xattr.list /path/to/file salt '*' xattr.list /path/to/file hex=True ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) hex_ = kwargs.pop('hex', False) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) cmd = ['xattr', path] try: @@ -101,10 +102,10 @@ def read(path, attribute, **kwargs): salt '*' xattr.read /path/to/file com.test.attr salt '*' xattr.read /path/to/file com.test.attr hex=True ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) hex_ = kwargs.pop('hex', False) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) cmd = ['xattr', '-p'] if hex_: @@ -147,10 +148,10 @@ def write(path, attribute, value, **kwargs): salt '*' xattr.write /path/to/file "com.test.attr" "value" ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) hex_ = kwargs.pop('hex', False) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) cmd = ['xattr', '-w'] if hex_: diff --git a/salt/modules/marathon.py b/salt/modules/marathon.py index 0d405b32d6..ad53c07747 100644 --- a/salt/modules/marathon.py +++ b/salt/modules/marathon.py @@ -10,8 +10,8 @@ from __future__ import absolute_import import json import logging -import salt.utils import salt.utils.http +import salt.utils.platform from salt.exceptions import get_error_message @@ -21,9 +21,13 @@ log = logging.getLogger(__file__) def __virtual__(): # only valid in proxy minions for now - if salt.utils.is_proxy() and 'proxy' in __opts__: + if 
salt.utils.platform.is_proxy() and 'proxy' in __opts__: return True - return (False, 'The marathon execution module cannot be loaded: this only works in proxy minions.') + return ( + False, + 'The marathon execution module cannot be loaded: this only works on ' + 'proxy minions.' + ) def _base_url(): diff --git a/salt/modules/match.py b/salt/modules/match.py index 20b50df14b..4f87f313a0 100644 --- a/salt/modules/match.py +++ b/salt/modules/match.py @@ -11,7 +11,7 @@ import sys # Import salt libs import salt.minion -import salt.utils +import salt.utils.versions from salt.defaults import DEFAULT_TARGET_DELIM from salt.ext.six import string_types @@ -338,7 +338,7 @@ def filter_by(lookup, # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' diff --git a/salt/modules/mdadm.py b/salt/modules/mdadm.py index 0b4b5115fa..0b453a2689 100644 --- a/salt/modules/mdadm.py +++ b/salt/modules/mdadm.py @@ -10,9 +10,12 @@ import logging import re # Import salt libs -import salt.utils +import salt.utils.path from salt.exceptions import CommandExecutionError, SaltInvocationError +# Import 3rd-party libs +from salt.ext import six + # Set up logger log = logging.getLogger(__name__) @@ -32,7 +35,7 @@ def __virtual__(): ''' if __grains__['kernel'] != 'Linux': return (False, 'The mdadm execution module cannot be loaded: only available on Linux.') - if not salt.utils.which('mdadm'): + if not salt.utils.path.which('mdadm'): return (False, 'The mdadm execution module cannot be loaded: the mdadm binary is not in the path.') return __virtualname__ @@ -344,7 +347,7 @@ def assemble(name, opts.append(kwargs[key]) # Devices may have been written with a blob: - if isinstance(devices, str): + if isinstance(devices, six.string_types): devices = devices.split(',') cmd = ['mdadm', '-A', name, '-v'] + opts + devices diff --git a/salt/modules/mdata.py b/salt/modules/mdata.py index a38a28653a..cc9ff1ece6 100644 --- a/salt/modules/mdata.py +++ b/salt/modules/mdata.py @@ -14,7 +14,8 @@ from __future__ import absolute_import import logging # Import Salt libs -import salt.utils +import salt.utils.path +import salt.utils.platform import salt.utils.decorators as decorators log = logging.getLogger(__name__) @@ -36,7 +37,7 @@ def _check_mdata_list(): ''' looks to see if mdata-list is present on the system ''' - return salt.utils.which('mdata-list') + return salt.utils.path.which('mdata-list') @decorators.memoize @@ -44,7 +45,7 @@ def _check_mdata_get(): ''' looks to see if mdata-get is present on the system ''' - return salt.utils.which('mdata-get') + return salt.utils.path.which('mdata-get') @decorators.memoize @@ -52,7 +53,7 @@ def _check_mdata_put(): ''' looks to see if mdata-put is present on the system ''' - return salt.utils.which('mdata-put') + return salt.utils.path.which('mdata-put') @decorators.memoize @@ -60,14 +61,14 @@ def _check_mdata_delete(): ''' looks to see if mdata-delete is present on the system ''' - return salt.utils.which('mdata-delete') + return salt.utils.path.which('mdata-delete') def __virtual__(): ''' Provides mdata only on SmartOS ''' - if _check_mdata_list() and not salt.utils.is_smartos_globalzone(): + if _check_mdata_list() and not salt.utils.platform.is_smartos_globalzone(): return __virtualname__ return ( False, diff --git 
a/salt/modules/mine.py b/salt/modules/mine.py index fce8a7120c..063f577a44 100644 --- a/salt/modules/mine.py +++ b/salt/modules/mine.py @@ -14,12 +14,14 @@ import traceback import salt.crypt import salt.payload import salt.utils -import salt.utils.network +import salt.utils.args import salt.utils.event +import salt.utils.network +import salt.utils.versions from salt.exceptions import SaltClientError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six MINE_INTERNAL_KEYWORDS = frozenset([ '__pub_user', @@ -197,7 +199,7 @@ def send(func, *args, **kwargs): salt '*' mine.send network.ip_addrs eth0 salt '*' mine.send eth0_ip_addrs mine_function=network.ip_addrs eth0 ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) mine_func = kwargs.pop('mine_function', func) if mine_func not in __salt__: return False @@ -291,7 +293,7 @@ def get(tgt, # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' diff --git a/salt/modules/minion.py b/salt/modules/minion.py index 6b46832392..94bf948b0b 100644 --- a/salt/modules/minion.py +++ b/salt/modules/minion.py @@ -14,7 +14,7 @@ import salt.utils import salt.key # Import third party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # Don't shadow built-ins. diff --git a/salt/modules/mod_random.py b/salt/modules/mod_random.py index 0482d3ba7d..a4c53ee826 100644 --- a/salt/modules/mod_random.py +++ b/salt/modules/mod_random.py @@ -17,7 +17,7 @@ import salt.utils.pycrypto from salt.exceptions import SaltInvocationError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six if six.PY2: ALGORITHMS_ATTR_NAME = 'algorithms' diff --git a/salt/modules/modjk.py b/salt/modules/modjk.py index 3e7c753198..88a1100774 100644 --- a/salt/modules/modjk.py +++ b/salt/modules/modjk.py @@ -34,6 +34,7 @@ from __future__ import absolute_import # Import 3rd-party libs # pylint: disable=import-error,no-name-in-module +from salt.ext import six from salt.ext.six.moves.urllib.parse import urlencode as _urlencode from salt.ext.six.moves.urllib.request import ( HTTPBasicAuthHandler as _HTTPBasicAuthHandler, @@ -330,7 +331,7 @@ def bulk_stop(workers, lbn, profile='default'): ret = {} - if isinstance(workers, str): + if isinstance(workers, six.string_types): workers = workers.split(',') for worker in workers: @@ -359,7 +360,7 @@ def bulk_activate(workers, lbn, profile='default'): ret = {} - if isinstance(workers, str): + if isinstance(workers, six.string_types): workers = workers.split(',') for worker in workers: @@ -388,7 +389,7 @@ def bulk_disable(workers, lbn, profile='default'): ret = {} - if isinstance(workers, str): + if isinstance(workers, six.string_types): workers = workers.split(',') for worker in workers: @@ -417,7 +418,7 @@ def bulk_recover(workers, lbn, profile='default'): ret = {} - if isinstance(workers, str): + if isinstance(workers, six.string_types): workers = workers.split(',') for worker in workers: diff --git a/salt/modules/mongodb.py b/salt/modules/mongodb.py index 7aeef8180d..83f5430b3b 100644 --- a/salt/modules/mongodb.py +++ b/salt/modules/mongodb.py @@ -26,7 +26,7 @@ from salt.exceptions import get_error_message as _get_error_message # Import third party libs 
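The modjk hunks above (like the mdadm.assemble change earlier) swap `isinstance(x, str)` for `isinstance(x, six.string_types)`. A minimal sketch of why that check matters under Python 2 follows; it is illustrative only and not part of the patch, and it assumes the bundled `salt.ext.six`:

.. code-block:: python

    from salt.ext import six  # bundled copy of six; a plain `import six` behaves the same

    workers = u'node1,node2'                      # YAML/Jinja often hands back unicode on Python 2
    print(isinstance(workers, str))               # False on Python 2, True on Python 3
    print(isinstance(workers, six.string_types))  # True on both Python 2 and 3
    if isinstance(workers, six.string_types):
        workers = workers.split(',')              # normalise a comma-separated string to a list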
-import salt.ext.six as six +from salt.ext import six try: import pymongo HAS_MONGODB = True diff --git a/salt/modules/monit.py b/salt/modules/monit.py index 4588ee299e..cfb18cd063 100644 --- a/salt/modules/monit.py +++ b/salt/modules/monit.py @@ -9,7 +9,7 @@ from __future__ import absolute_import import re # Import salt libs -import salt.utils +import salt.utils.path # Function alias to make sure not to shadow built-in's __func_alias__ = { @@ -19,7 +19,7 @@ __func_alias__ = { def __virtual__(): - if salt.utils.which('monit') is not None: + if salt.utils.path.which('monit') is not None: # The monit binary exists, let the module load return True return (False, 'The monit execution module cannot be loaded: the monit binary is not in the path.') diff --git a/salt/modules/moosefs.py b/salt/modules/moosefs.py index 29be8e86fb..dcf8114cd7 100644 --- a/salt/modules/moosefs.py +++ b/salt/modules/moosefs.py @@ -5,14 +5,14 @@ Module for gathering and managing information about MooseFS from __future__ import absolute_import # Import salt libs -import salt.utils +import salt.utils.path def __virtual__(): ''' Only load if the mfs commands are installed ''' - if salt.utils.which('mfsgetgoal'): + if salt.utils.path.which('mfsgetgoal'): return 'moosefs' return (False, 'The moosefs execution module cannot be loaded: the mfsgetgoal binary is not in the path.') diff --git a/salt/modules/mount.py b/salt/modules/mount.py index d4466a00b5..ee263c4d77 100644 --- a/salt/modules/mount.py +++ b/salt/modules/mount.py @@ -10,13 +10,14 @@ import re import logging # Import salt libs -import salt.utils +import salt.utils # Can be removed once test_mode is moved import salt.utils.files -from salt.utils import which as _which +import salt.utils.path +import salt.utils.platform from salt.exceptions import CommandNotFoundError, CommandExecutionError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import filter, zip # pylint: disable=import-error,redefined-builtin # Set up logger @@ -31,7 +32,7 @@ def __virtual__(): Only load on POSIX-like systems ''' # Disable on Windows, a specific file module exists: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return (False, 'The mount module cannot be loaded: not a POSIX-like system.') return True @@ -1115,12 +1116,12 @@ def is_fuse_exec(cmd): salt '*' mount.is_fuse_exec sshfs ''' - cmd_path = _which(cmd) + cmd_path = salt.utils.path.which(cmd) # No point in running ldd on a command that doesn't exist if not cmd_path: return False - elif not _which('ldd'): + elif not salt.utils.path.which('ldd'): raise CommandNotFoundError('ldd') out = __salt__['cmd.run']('ldd {0}'.format(cmd_path), python_shell=False) diff --git a/salt/modules/mysql.py b/salt/modules/mysql.py index 04ae110953..26e4150ce4 100644 --- a/salt/modules/mysql.py +++ b/salt/modules/mysql.py @@ -47,7 +47,7 @@ import salt.utils import salt.utils.files # Import third party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error from salt.ext.six.moves import range, zip # pylint: disable=no-name-in-module,redefined-builtin try: diff --git a/salt/modules/nacl.py b/salt/modules/nacl.py index 9622531cd3..228d72974d 100644 --- a/salt/modules/nacl.py +++ b/salt/modules/nacl.py @@ -11,112 +11,160 @@ regardless if they are encrypted or not. When generating keys and encrypting passwords use --local when using salt-call for extra security. Also consider using just the salt runner nacl when encrypting pillar passwords. 
+:configuration: The following configuration defaults can be + defined (pillar or config files). Avoid storing private keys in pillars! Ensure master does not have `pillar_opts=True`: + + .. code-block:: python + + # cat /etc/salt/master.d/nacl.conf + nacl.config: + # NOTE: `key` and `key_file` have been renamed to `sk`, `sk_file` + # also `box_type` default changed from secretbox to sealedbox. + box_type: sealedbox (default) + sk_file: /etc/salt/pki/master/nacl (default) + pk_file: /etc/salt/pki/master/nacl.pub (default) + sk: None + pk: None + + Usage can override the config defaults: + + .. code-block:: bash + + salt-call nacl.enc sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub + + The nacl lib uses 32byte keys, these keys are base64 encoded to make your life more simple. -To generate your `key` or `keyfile` you can use: +To generate your `sk_file` and `pk_file` use: .. code-block:: bash - salt-call --local nacl.keygen keyfile=/root/.nacl + salt-call --local nacl.keygen sk_file=/etc/salt/pki/master/nacl + # or if you want to work without files. + salt-call --local nacl.keygen + local: + ---------- + pk: + /kfGX7PbWeu099702PBbKWLpG/9p06IQRswkdWHCDk0= + sk: + SVWut5SqNpuPeNzb1b9y6b2eXg2PLIog43GBzp48Sow= -Now with your key, you can encrypt some data: +Now with your keypair, you can encrypt data: + +You have two options, `sealedbox` or `secretbox`. + +SecretBox encrypts data using the secret key `sk`. SealedBox encrypts using the public key `pk`. + +SealedBox is recommended because its one-way encryption lets developers encrypt data for source control without being able to decrypt it. +SecretBox uses a single key for both encryption and decryption. .. code-block:: bash - salt-call --local nacl.enc mypass keyfile=/root/.nacl - DRB7Q6/X5gGSRCTpZyxS6hXO5LnlJIIJ4ivbmUlbWj0llUA+uaVyvou3vJ4= + salt-call --local nacl.enc asecretpass pk=/kfGX7PbWeu099702PBbKWLpG/9p06IQRswkdWHCDk0= + tqXzeIJnTAM9Xf0mdLcpEdklMbfBGPj2oTKmlgrm3S1DTVVHNnh9h8mU1GKllGq/+cYsk6m5WhGdk58= To decrypt the data: .. code-block:: bash - salt-call --local nacl.dec data='DRB7Q6/X5gGSRCTpZyxS6hXO5LnlJIIJ4ivbmUlbWj0llUA+uaVyvou3vJ4=' keyfile=/root/.nacl - mypass + salt-call --local nacl.dec data='tqXzeIJnTAM9Xf0mdLcpEdklMbfBGPj2oTKmlgrm3S1DTVVHNnh9h8mU1GKllGq/+cYsk6m5WhGdk58=' \ + sk='SVWut5SqNpuPeNzb1b9y6b2eXg2PLIog43GBzp48Sow=' -The following optional configurations can be defined in the -minion or master config. Avoid storing the config in pillars! +When the keys are defined in the master config you can use them from the nacl runner +without extra parameters: -.. code-block:: yaml +.. code-block:: python - cat /etc/salt/master.d/nacl.conf + # cat /etc/salt/master.d/nacl.conf nacl.config: - key: 'cKEzd4kXsbeCE7/nLTIqXwnUiD1ulg4NoeeYcCFpd9k=' - keyfile: /root/.nacl - -When the key is defined in the master config you can use it from the nacl runner: + sk_file: /etc/salt/pki/master/nacl + pk: 'cTIqXwnUiD1ulg4kXsbeCE7/NoeKEzd4nLeYcCFpd9k=' .. code-block:: bash - salt-run nacl.enc 'myotherpass' + salt-run nacl.enc 'asecretpass' + salt-run nacl.dec 'tqXzeIJnTAM9Xf0mdLcpEdklMbfBGPj2oTKmlgrm3S1DTVVHNnh9h8mU1GKllGq/+cYsk6m5WhGdk58=' -Now you can create a pillar with protected data like: +.. code-block:: yaml + # a salt developer's minion could have pillar data that includes a nacl public key + nacl.config: + pk: '/kfGX7PbWeu099702PBbKWLpG/9p06IQRswkdWHCDk0=' + +The developer can then use a less-secure system to encrypt data. + +.. 
code-block:: bash + + salt-call --local nacl.enc apassword + + +Pillar files can include protected data that the salt master decrypts: .. code-block:: jinja pillarexample: user: root - password: {{ salt.nacl.dec('DRB7Q6/X5gGSRCTpZyxS6hXO5LnlJIIJ4ivbmUlbWj0llUA+uaVyvou3vJ4=') }} + password1: {{salt.nacl.dec('DRB7Q6/X5gGSRCTpZyxS6hlbWj0llUA+uaVyvou3vJ4=')|json}} + cert_key: {{salt.nacl.dec_file('/srv/salt/certs/example.com/key.nacl')|json}} + cert_key2: {{salt.nacl.dec_file('salt:///certs/example.com/key.nacl')|json}} -Or do something interesting with grains like: - -.. code-block:: jinja - - salt-call nacl.enc minionname:dbrole - AL24Z2C5OlkReer3DuQTFdrNLchLuz3NGIhGjZkLtKRYry/b/CksWM8O9yskLwH2AGVLoEXI5jAa - - salt minionname grains.setval role 'AL24Z2C5OlkReer3DuQTFdrNLchLuz3NGIhGjZkLtKRYry/b/CksWM8O9yskLwH2AGVLoEXI5jAa' - - {%- set r = grains.get('role') %} - {%- set role = None %} - {%- if r and 'nacl.dec' in salt %} - {%- set r = salt['nacl.dec'](r,keyfile='/root/.nacl').split(':') %} - {%- if opts['id'] == r[0] %} - {%- set role = r[1] %} - {%- endif %} - {%- endif %} - base: - {%- if role %} - '{{ opts['id'] }}': - - {{ role }} - {%- endif %} - -Multi-line text items like certificates require a bit of extra work. You have to strip the new lines -and replace them with '/n' characters. Certificates specifically require some leading white space when -calling nacl.enc so that the '--' in the first line (commonly -----BEGIN CERTIFICATE-----) doesn't get -interpreted as an argument to nacl.enc. For instance if you have a certificate file that lives in cert.crt: +Larger files like certificates can be encrypted with: .. code-block:: bash - cert=$(cat cert.crt |awk '{printf "%s\\n",$0} END {print ""}'); salt-run nacl.enc " $cert" + salt-call nacl.enc_file /tmp/cert.crt out=/tmp/cert.nacl + # or more advanced + cert=$(cat /tmp/cert.crt) + salt-call --out=newline_values_only nacl.sealedbox_encrypt data="$cert" > /tmp/cert.nacl -Pillar data should look the same, even though the secret will be quite long. However, when calling -multiline encrypted secrets from pillar in a state, use the following format to avoid issues with /n -creating extra whitespace at the beginning of each line in the cert file: +In pillars rendered with jinja be sure to include `|json` so line breaks are encoded: .. code-block:: jinja - secret.txt: - file.managed: - - template: jinja - - user: user - - group: group - - mode: 700 - - contents: "{{- salt['pillar.get']('secret') }}" + cert: "{{salt.nacl.dec('S2uogToXkgENz9...085KYt')|json}}" + +In states rendered with jinja it is also good practice to include `|json`: + +.. code-block:: jinja + + {{sls}} private key: + file.managed: + - name: /etc/ssl/private/cert.key + - mode: 700 + - contents: "{{pillar['pillarexample']['cert_key']|json}}" + + +Optional small program to encrypt data without needing salt modules. + +.. code-block:: python + + #!/bin/python3 + import sys, base64, libnacl.sealed + pk = base64.b64decode('YOURPUBKEY') + b = libnacl.sealed.SealedBox(pk) + data = sys.stdin.buffer.read() + print(base64.b64encode(b.encrypt(data)).decode()) + +.. code-block:: bash + + echo 'apassword' | nacl_enc.py -The '{{-' will tell jinja to strip the whitespace from the beginning of each of the new lines. 
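A matching standalone decryption helper is not shown above; the following is a minimal sketch of one, mirroring the `sealedbox_decrypt` implementation added below (the `nacl_dec.py` filename and the `YOURSECKEY` placeholder are illustrative, not part of the patch):

.. code-block:: python

    #!/bin/python3
    # SealedBox decryption counterpart to the small encryption helper above (sketch)
    import sys, base64, libnacl.public, libnacl.sealed
    sk = base64.b64decode('YOURSECKEY')            # base64 sk as produced by nacl.keygen
    keypair = libnacl.public.SecretKey(sk)         # rebuild the keypair from the secret key
    b = libnacl.sealed.SealedBox(keypair)          # decryption needs the keypair, not just pk
    data = sys.stdin.buffer.read()                 # base64 ciphertext on stdin
    print(b.decrypt(base64.b64decode(data)).decode())

.. code-block:: bash

    echo 'pEXHQM6cuaF7A=' | nacl_dec.py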
''' from __future__ import absolute_import import base64 import os import salt.utils.files +import salt.utils.platform +import salt.utils.win_functions +import salt.utils.win_dacl import salt.syspaths REQ_ERROR = None try: import libnacl.secret + import libnacl.sealed except (ImportError, OSError) as e: - REQ_ERROR = 'libnacl import error, perhaps missing python libnacl package' + REQ_ERROR = 'libnacl import error, perhaps the python libnacl package is missing or needs to be updated.' __virtualname__ = 'nacl' @@ -130,91 +178,294 @@ def _get_config(**kwargs): Return configuration ''' config = { - 'key': None, - 'keyfile': None, + 'box_type': 'sealedbox', + 'sk': None, + 'sk_file': '/etc/salt/pki/master/nacl', + 'pk': None, + 'pk_file': '/etc/salt/pki/master/nacl.pub', } config_key = '{0}.config'.format(__virtualname__) - config.update(__salt__['config.get'](config_key, {})) - for k in set(config) & set(kwargs): + try: + config.update(__salt__['config.get'](config_key, {})) + except (NameError, KeyError) as e: + # likely using salt-run, so fall back to __opts__ + config.update(__opts__.get(config_key, {})) + # pylint: disable=C0201 + for k in set(config.keys()) & set(kwargs.keys()): config[k] = kwargs[k] return config -def _get_key(rstrip_newline=True, **kwargs): +def _get_sk(**kwargs): ''' - Return key + Return sk ''' config = _get_config(**kwargs) - key = config['key'] - keyfile = config['keyfile'] - if not key and keyfile: - if not os.path.isfile(keyfile): - raise Exception('file not found: {0}'.format(keyfile)) - with salt.utils.files.fopen(keyfile, 'rb') as keyf: - key = keyf.read() + key = config['sk'] + sk_file = config['sk_file'] + if not key and sk_file: + with salt.utils.files.fopen(sk_file, 'rb') as keyf: + key = str(keyf.read()).rstrip('\n') if key is None: - raise Exception('no key found') - key = str(key) - if rstrip_newline: - key = key.rstrip('\n') - return key + raise Exception('no key or sk_file found') + return base64.b64decode(key) -def keygen(keyfile=None): +def _get_pk(**kwargs): ''' - Use libnacl to generate a private key + Return pk + ''' + config = _get_config(**kwargs) + pubkey = config['pk'] + pk_file = config['pk_file'] + if not pubkey and pk_file: + with salt.utils.files.fopen(pk_file, 'rb') as keyf: + pubkey = str(keyf.read()).rstrip('\n') + if pubkey is None: + raise Exception('no pubkey or pk_file found') + pubkey = str(pubkey) + return base64.b64decode(pubkey) + + +def keygen(sk_file=None, pk_file=None): + ''' + Use libnacl to generate a keypair. + + If no `sk_file` is defined, return a keypair. + + If only `sk_file` is defined, `pk_file` will use the same name with a `.pub` suffix. + + When the `sk_file` already exists but `pk_file` does not, the `pk_file` will be generated + using the `sk_file`. CLI Examples: .. 
code-block:: bash + salt-call nacl.keygen + salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl + salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub salt-call --local nacl.keygen - salt-call --local nacl.keygen keyfile=/root/.nacl - salt-call --local --out=newline_values_only nacl.keygen > /root/.nacl ''' - b = libnacl.secret.SecretBox() - key = b.sk - key = base64.b64encode(key) - if keyfile: - if os.path.isfile(keyfile): - raise Exception('file already found: {0}'.format(keyfile)) - with salt.utils.files.fopen(keyfile, 'w') as keyf: - keyf.write(key) - return 'saved: {0}'.format(keyfile) - return key + if sk_file is None: + kp = libnacl.public.SecretKey() + return {'sk': base64.b64encode(kp.sk), 'pk': base64.b64encode(kp.pk)} + + if pk_file is None: + pk_file = '{0}.pub'.format(sk_file) + + if sk_file and pk_file is None: + if not os.path.isfile(sk_file): + kp = libnacl.public.SecretKey() + with salt.utils.files.fopen(sk_file, 'w') as keyf: + keyf.write(base64.b64encode(kp.sk)) + if salt.utils.platform.is_windows(): + cur_user = salt.utils.win_functions.get_current_user() + salt.utils.win_dacl.set_owner(sk_file, cur_user) + salt.utils.win_dacl.set_permissions(sk_file, cur_user, 'full_control', 'grant', reset_perms=True, protected=True) + else: + # chmod 0600 file + os.chmod(sk_file, 0o600) + return 'saved sk_file: {0}'.format(sk_file) + else: + raise Exception('sk_file:{0} already exists.'.format(sk_file)) + + if sk_file is None and pk_file: + raise Exception('sk_file: Must be set in order to generate a public key.') + + if os.path.isfile(sk_file) and os.path.isfile(pk_file): + raise Exception('sk_file:{0} and pk_file:{1} already exist.'.format(sk_file, pk_file)) + + if os.path.isfile(sk_file) and not os.path.isfile(pk_file): + # generate pk using the sk + with salt.utils.files.fopen(sk_file, 'rb') as keyf: + sk = str(keyf.read()).rstrip('\n') + sk = base64.b64decode(sk) + kp = libnacl.public.SecretKey(sk) + with salt.utils.files.fopen(pk_file, 'w') as keyf: + keyf.write(base64.b64encode(kp.pk)) + return 'saved pk_file: {0}'.format(pk_file) + + kp = libnacl.public.SecretKey() + with salt.utils.files.fopen(sk_file, 'w') as keyf: + keyf.write(base64.b64encode(kp.sk)) + if salt.utils.platform.is_windows(): + cur_user = salt.utils.win_functions.get_current_user() + salt.utils.win_dacl.set_owner(sk_file, cur_user) + salt.utils.win_dacl.set_permissions(sk_file, cur_user, 'full_control', 'grant', reset_perms=True, protected=True) + else: + # chmod 0600 file + os.chmod(sk_file, 0o600) + with salt.utils.files.fopen(pk_file, 'w') as keyf: + keyf.write(base64.b64encode(kp.pk)) + return 'saved sk_file:{0} pk_file: {1}'.format(sk_file, pk_file) def enc(data, **kwargs): ''' - Takes a key generated from `nacl.keygen` and encrypt some data. + Alias to `{box_type}_encrypt` + + box_type: secretbox, sealedbox (default) + ''' + box_type = _get_config(**kwargs)['box_type'] + if box_type == 'sealedbox': + return sealedbox_encrypt(data, **kwargs) + if box_type == 'secretbox': + return secretbox_encrypt(data, **kwargs) + return sealedbox_encrypt(data, **kwargs) + + +def enc_file(name, out=None, **kwargs): + ''' + This is a helper function to encrypt a file and return the encrypted contents. + + You can provide an optional output file using `out`. + + `name` can be a local file or, when not using `salt-run`, a URL like `salt://`, `https://` etc. CLI Examples: .. 
code-block:: bash - salt-call --local nacl.enc datatoenc - salt-call --local nacl.enc datatoenc keyfile=/root/.nacl - salt-call --local nacl.enc datatoenc key='cKEzd4kXsbeCE7/nLTIqXwnUiD1ulg4NoeeYcCFpd9k=' + salt-run nacl.enc_file name=/tmp/id_rsa + salt-call nacl.enc_file name=salt://crt/mycert out=/tmp/cert + salt-run nacl.enc_file name=/tmp/id_rsa box_type=secretbox \ + sk_file=/etc/salt/pki/master/nacl ''' - key = _get_key(**kwargs) - sk = base64.b64decode(key) - b = libnacl.secret.SecretBox(sk) - return base64.b64encode(b.encrypt(data)) + try: + data = __salt__['cp.get_file_str'](name) + except Exception as e: + # likely using salt-run, so fall back to the local filesystem + with salt.utils.files.fopen(name, 'rb') as f: + data = f.read() + d = enc(data, **kwargs) + if out: + if os.path.isfile(out): + raise Exception('file:{0} already exists.'.format(out)) + with salt.utils.files.fopen(out, 'wb') as f: + f.write(d) + return 'Wrote: {0}'.format(out) + return d def dec(data, **kwargs): ''' - Takes a key generated from `nacl.keygen` and decrypt some data. + Alias to `{box_type}_decrypt` + + box_type: secretbox, sealedbox (default) + ''' + box_type = _get_config(**kwargs)['box_type'] + if box_type == 'sealedbox': + return sealedbox_decrypt(data, **kwargs) + if box_type == 'secretbox': + return secretbox_decrypt(data, **kwargs) + return sealedbox_decrypt(data, **kwargs) + + +def dec_file(name, out=None, **kwargs): + ''' + This is a helper function to decrypt a file and return the decrypted contents. + + You can provide an optional output file using `out`. + + `name` can be a local file or, when not using `salt-run`, a URL like `salt://`, `https://` etc. CLI Examples: .. code-block:: bash - salt-call --local nacl.dec pEXHQM6cuaF7A= - salt-call --local nacl.dec data='pEXHQM6cuaF7A=' keyfile=/root/.nacl - salt-call --local nacl.dec data='pEXHQM6cuaF7A=' key='cKEzd4kXsbeCE7/nLTIqXwnUiD1ulg4NoeeYcCFpd9k=' + salt-run nacl.dec_file name=/tmp/id_rsa.nacl + salt-call nacl.dec_file name=salt://crt/mycert.nacl out=/tmp/id_rsa + salt-run nacl.dec_file name=/tmp/id_rsa.nacl box_type=secretbox \ + sk_file=/etc/salt/pki/master/nacl ''' + try: + data = __salt__['cp.get_file_str'](name) + except Exception as e: + # likely using salt-run, so fall back to the local filesystem + with salt.utils.files.fopen(name, 'rb') as f: + data = f.read() + d = dec(data, **kwargs) + if out: + if os.path.isfile(out): + raise Exception('file:{0} already exists.'.format(out)) + with salt.utils.files.fopen(out, 'wb') as f: + f.write(d) + return 'Wrote: {0}'.format(out) + return d + + +def sealedbox_encrypt(data, **kwargs): + ''' + Encrypt data using a public key generated from `nacl.keygen`. + The encrypted data can only be decrypted with the corresponding secret key, using `nacl.sealedbox_decrypt`. + + CLI Examples: + + .. code-block:: bash + + salt-run nacl.sealedbox_encrypt datatoenc + salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub + salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ=' + ''' + pk = _get_pk(**kwargs) + b = libnacl.sealed.SealedBox(pk) + return base64.b64encode(b.encrypt(data)) + + +def sealedbox_decrypt(data, **kwargs): + ''' + Decrypt data that was encrypted with a public key via `nacl.sealedbox_encrypt`, using the matching secret key. + + CLI Examples: + + .. 
code-block:: bash + + salt-call nacl.sealedbox_decrypt pEXHQM6cuaF7A= + salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl + salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo=' + ''' + if data is None: + return None + sk = _get_sk(**kwargs) + keypair = libnacl.public.SecretKey(sk) + b = libnacl.sealed.SealedBox(keypair) + return b.decrypt(base64.b64decode(data)) + + +def secretbox_encrypt(data, **kwargs): + ''' + Encrypt data using a secret key generated from `nacl.keygen`. + The same secret key can be used to decrypt the data using `nacl.secretbox_decrypt`. + + CLI Examples: + + .. code-block:: bash + + salt-run nacl.secretbox_encrypt datatoenc + salt-call --local nacl.secretbox_encrypt datatoenc sk_file=/etc/salt/pki/master/nacl + salt-call --local nacl.secretbox_encrypt datatoenc sk='YmFkcGFzcwo=' + ''' + sk = _get_sk(**kwargs) + b = libnacl.secret.SecretBox(sk) + return base64.b64encode(b.encrypt(data)) + + +def secretbox_decrypt(data, **kwargs): + ''' + Decrypt data that was encrypted using `nacl.secretbox_encrypt` using the secret key + that was generated from `nacl.keygen`. + + CLI Examples: + + .. code-block:: bash + + salt-call nacl.secretbox_decrypt pEXHQM6cuaF7A= + salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl + salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo=' + ''' + if data is None: + return None + key = _get_sk(**kwargs) + b = libnacl.secret.SecretBox(key=key) return b.decrypt(base64.b64decode(data)) diff --git a/salt/modules/nagios.py b/salt/modules/nagios.py index f7e77d4770..0f4aa00823 100644 --- a/salt/modules/nagios.py +++ b/salt/modules/nagios.py @@ -10,7 +10,7 @@ import stat import logging # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/modules/namecheap_domains.py b/salt/modules/namecheap_domains.py index 4f89ccdba0..55707179ec 100644 --- a/salt/modules/namecheap_domains.py +++ b/salt/modules/namecheap_domains.py @@ -49,7 +49,7 @@ except ImportError: CAN_USE_NAMECHEAP = False # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def __virtual__(): diff --git a/salt/modules/namecheap_ssl.py b/salt/modules/namecheap_ssl.py index 6a267d1069..a0171cedf4 100644 --- a/salt/modules/namecheap_ssl.py +++ b/salt/modules/namecheap_ssl.py @@ -52,7 +52,7 @@ except ImportError: CAN_USE_NAMECHEAP = False # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def __virtual__(): diff --git a/salt/modules/napalm_network.py b/salt/modules/napalm_network.py index 35c507e6b4..545332607c 100644 --- a/salt/modules/napalm_network.py +++ b/salt/modules/napalm_network.py @@ -21,16 +21,17 @@ Dependencies from __future__ import absolute_import -# Import python lib +# Import Python lib import logging log = logging.getLogger(__name__) -# salt libs -from salt.ext import six +# Import Salt libs import salt.utils.files -import salt.utils.templates import salt.utils.napalm -from salt.utils.napalm import proxy_napalm_wrap +import salt.utils.templates + +# Import 3rd-party libs +from salt.ext import six # ---------------------------------------------------------------------------------------------------------------------- # module properties @@ -221,7 +222,7 @@ def _config_logic(napalm_device, # ---------------------------------------------------------------------------------------------------------------------- 
-@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def connected(**kwarvs): # pylint: disable=unused-argument ''' Specifies if the connection to the device succeeded. @@ -238,7 +239,7 @@ def connected(**kwarvs): # pylint: disable=unused-argument } -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def facts(**kwargs): # pylint: disable=unused-argument ''' Returns characteristics of the network device. @@ -293,7 +294,7 @@ def facts(**kwargs): # pylint: disable=unused-argument ) -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def environment(**kwargs): # pylint: disable=unused-argument ''' Returns the environment of the device. @@ -360,14 +361,129 @@ def environment(**kwargs): # pylint: disable=unused-argument ) -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def cli(*commands, **kwargs): # pylint: disable=unused-argument - ''' Returns a dictionary with the raw output of all commands passed as arguments. - :param commands: list of commands to be executed on the device - :return: a dictionary with the mapping between each command and its raw output + commands + List of commands to be executed on the device. + + textfsm_parse: ``False`` + Try parsing the outputs using the TextFSM templates. + + .. versionadded:: Oxygen + + .. note:: + This option can be also specified in the minion configuration + file or pillar as ``napalm_cli_textfsm_parse``. + + textfsm_path + The path where the TextFSM templates can be found. This option implies + the usage of the TextFSM index file. + ``textfsm_path`` can be either absolute path on the server, + either specified using the following URL mschemes: ``file://``, + ``salt://``, ``http://``, ``https://``, ``ftp://``, + ``s3://``, ``swift://``. + + .. versionadded:: Oxygen + + .. note:: + This needs to be a directory with a flat structure, having an + index file (whose name can be specified using the ``index_file`` option) + and a number of TextFSM templates. + + .. note:: + This option can be also specified in the minion configuration + file or pillar as ``textfsm_path``. + + textfsm_template + The path to a certain the TextFSM template. + This can be specified using the absolute path + to the file, or using one of the following URL schemes: + + - ``salt://``, to fetch the template from the Salt fileserver. + - ``http://`` or ``https://`` + - ``ftp://`` + - ``s3://`` + - ``swift://`` + + .. versionadded:: Oxygen + + textfsm_template_dict + A dictionary with the mapping between a command + and the corresponding TextFSM path to use to extract the data. + The TextFSM paths can be specified as in ``textfsm_template``. + + .. versionadded:: Oxygen + + .. note:: + This option can be also specified in the minion configuration + file or pillar as ``napalm_cli_textfsm_template_dict``. + + platform_grain_name: ``os`` + The name of the grain used to identify the platform name + in the TextFSM index file. Default: ``os``. + + .. versionadded:: Oxygen + + .. note:: + This option can be also specified in the minion configuration + file or pillar as ``textfsm_platform_grain``. + + platform_column_name: ``Platform`` + The column name used to identify the platform, + exactly as specified in the TextFSM index file. + Default: ``Platform``. + + .. versionadded:: Oxygen + + .. note:: + This is field is case sensitive, make sure + to assign the correct value to this option, + exactly as defined in the index file. + + .. note:: + This option can be also specified in the minion configuration + file or pillar as ``textfsm_platform_column_name``. 
+ + index_file: ``index`` + The name of the TextFSM index file, under the ``textfsm_path``. Default: ``index``. + + .. versionadded:: Oxygen + + .. note:: + This option can be also specified in the minion configuration + file or pillar as ``textfsm_index_file``. + + saltenv: ``base`` + Salt fileserver envrionment from which to retrieve the file. + Ignored if ``textfsm_path`` is not a ``salt://`` URL. + + .. versionadded:: Oxygen + + include_empty: ``False`` + Include empty files under the ``textfsm_path``. + + .. versionadded:: Oxygen + + include_pat + Glob or regex to narrow down the files cached from the given path. + If matching with a regex, the regex must be prefixed with ``E@``, + otherwise the expression will be interpreted as a glob. + + .. versionadded:: Oxygen + + exclude_pat + Glob or regex to exclude certain files from being cached from the given path. + If matching with a regex, the regex must be prefixed with ``E@``, + otherwise the expression will be interpreted as a glob. + + .. versionadded:: Oxygen + + .. note:: + If used with ``include_pat``, files matching this pattern will be + excluded from the subset of files defined by ``include_pat``. CLI Example: @@ -375,6 +491,12 @@ def cli(*commands, **kwargs): # pylint: disable=unused-argument salt '*' net.cli "show version" "show chassis fan" + CLI Example with TextFSM template: + + .. code-block:: + + salt '*' net.cli textfsm_parse=True textfsm_path=salt://textfsm/ + Example output: .. code-block:: python @@ -396,9 +518,31 @@ def cli(*commands, **kwargs): # pylint: disable=unused-argument Bottom Front Fan OK 3840 Spinning at intermediate-speed ' } - ''' - return salt.utils.napalm.call( + Example output with TextFSM parsing: + + .. code-block:: json + + { + "comment": "", + "result": true, + "out": { + "sh ver": [ + { + "kernel": "9.1S3.5", + "documentation": "9.1S3.5", + "boot": "9.1S3.5", + "crypto": "9.1S3.5", + "chassis": "", + "routing": "9.1S3.5", + "base": "9.1S3.5", + "model": "mx960" + } + ] + } + } + ''' + raw_cli_outputs = salt.utils.napalm.call( napalm_device, # pylint: disable=undefined-variable 'cli', **{ @@ -407,9 +551,110 @@ def cli(*commands, **kwargs): # pylint: disable=unused-argument ) # thus we can display the output as is # in case of errors, they'll be catched in the proxy + if not raw_cli_outputs['result']: + # Error -> dispaly the output as-is. + return raw_cli_outputs + textfsm_parse = kwargs.get('textfsm_parse') or __opts__.get('napalm_cli_textfsm_parse') or\ + __pillar__.get('napalm_cli_textfsm_parse', False) + if not textfsm_parse: + # No TextFSM parsing required, return raw commands. + log.debug('No TextFSM parsing requested.') + return raw_cli_outputs + if 'textfsm.extract' not in __salt__ or 'textfsm.index' not in __salt__: + raw_cli_outputs['comment'] += 'Unable to process: is TextFSM installed?' 
+ log.error(raw_cli_outputs['comment']) + return raw_cli_outputs + textfsm_template = kwargs.get('textfsm_template') + log.debug('textfsm_template: {}'.format(textfsm_template)) + textfsm_path = kwargs.get('textfsm_path') or __opts__.get('textfsm_path') or\ + __pillar__.get('textfsm_path') + log.debug('textfsm_path: {}'.format(textfsm_path)) + textfsm_template_dict = kwargs.get('textfsm_template_dict') or __opts__.get('napalm_cli_textfsm_template_dict') or\ + __pillar__.get('napalm_cli_textfsm_template_dict', {}) + log.debug('TextFSM command-template mapping: {}'.format(textfsm_template_dict)) + index_file = kwargs.get('index_file') or __opts__.get('textfsm_index_file') or\ + __pillar__.get('textfsm_index_file') + log.debug('index_file: {}'.format(index_file)) + platform_grain_name = kwargs.get('platform_grain_name') or __opts__.get('textfsm_platform_grain') or\ + __pillar__.get('textfsm_platform_grain', 'os') + log.debug('platform_grain_name: {}'.format(platform_grain_name)) + platform_column_name = kwargs.get('platform_column_name') or __opts__.get('textfsm_platform_column_name') or\ + __pillar__.get('textfsm_platform_column_name', 'Platform') + log.debug('platform_column_name: {}'.format(platform_column_name)) + saltenv = kwargs.get('saltenv', 'base') + include_empty = kwargs.get('include_empty', False) + include_pat = kwargs.get('include_pat') + exclude_pat = kwargs.get('exclude_pat') + processed_cli_outputs = { + 'comment': raw_cli_outputs.get('comment', ''), + 'result': raw_cli_outputs['result'], + 'out': {} + } + log.debug('Starting to analyse the raw outputs') + for command in list(commands): + command_output = raw_cli_outputs['out'][command] + log.debug('Output from command: {}'.format(command)) + log.debug(command_output) + processed_command_output = None + if textfsm_path: + log.debug('Using the templates under {}'.format(textfsm_path)) + processed_cli_output = __salt__['textfsm.index'](command, + platform_grain_name=platform_grain_name, + platform_column_name=platform_column_name, + output=command_output.strip(), + textfsm_path=textfsm_path, + saltenv=saltenv, + include_empty=include_empty, + include_pat=include_pat, + exclude_pat=exclude_pat) + log.debug('Processed CLI output:') + log.debug(processed_cli_output) + if not processed_cli_output['result']: + log.debug('Apparently this didnt work, returnin the raw output') + processed_command_output = command_output + processed_cli_outputs['comment'] += '\nUnable to process the output from {0}: {1}.'.format(command, + processed_cli_output['comment']) + log.error(processed_cli_outputs['comment']) + elif processed_cli_output['out']: + log.debug('All good, {} has a nice output!'.format(command)) + processed_command_output = processed_cli_output['out'] + else: + comment = '''\nProcessing "{}" didn't fail, but didn't return anything either. 
Dumping raw.'''.format( + command) + processed_cli_outputs['comment'] += comment + log.error(comment) + processed_command_output = command_output + elif textfsm_template or command in textfsm_template_dict: + if command in textfsm_template_dict: + textfsm_template = textfsm_template_dict[command] + log.debug('Using {0} to process the command: {1}'.format(textfsm_template, command)) + processed_cli_output = __salt__['textfsm.extract'](textfsm_template, + raw_text=command_output, + saltenv=saltenv) + log.debug('Processed CLI output:') + log.debug(processed_cli_output) + if not processed_cli_output['result']: + log.debug('Apparently this didnt work, returnin the raw output') + processed_command_output = command_output + processed_cli_outputs['comment'] += '\nUnable to process the output from {0}: {1}'.format(command, + processed_cli_output['comment']) + log.error(processed_cli_outputs['comment']) + elif processed_cli_output['out']: + log.debug('All good, {} has a nice output!'.format(command)) + processed_command_output = processed_cli_output['out'] + else: + log.debug('Processing {} didnt fail, but didnt return anything either. Dumping raw.'.format(command)) + processed_command_output = command_output + else: + log.error('No TextFSM template specified, or no TextFSM path defined') + processed_command_output = command_output + processed_cli_outputs['comment'] += '\nUnable to process the output from {}.'.format(command) + processed_cli_outputs['out'][command] = processed_command_output + processed_cli_outputs['comment'] = processed_cli_outputs['comment'].strip() + return processed_cli_outputs -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def traceroute(destination, source=None, ttl=None, timeout=None, vrf=None, **kwargs): # pylint: disable=unused-argument ''' @@ -454,7 +699,7 @@ def traceroute(destination, source=None, ttl=None, timeout=None, vrf=None, **kwa ) -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def ping(destination, source=None, ttl=None, timeout=None, size=None, count=None, vrf=None, **kwargs): # pylint: disable=unused-argument ''' @@ -507,7 +752,7 @@ def ping(destination, source=None, ttl=None, timeout=None, size=None, count=None ) -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def arp(interface='', ipaddr='', macaddr='', **kwargs): # pylint: disable=unused-argument ''' @@ -573,7 +818,7 @@ def arp(interface='', ipaddr='', macaddr='', **kwargs): # pylint: disable=unuse return proxy_output -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def ipaddrs(**kwargs): # pylint: disable=unused-argument ''' @@ -633,7 +878,7 @@ def ipaddrs(**kwargs): # pylint: disable=unused-argument ) -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def interfaces(**kwargs): # pylint: disable=unused-argument ''' @@ -680,7 +925,7 @@ def interfaces(**kwargs): # pylint: disable=unused-argument ) -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def lldp(interface='', **kwargs): # pylint: disable=unused-argument ''' @@ -742,7 +987,7 @@ def lldp(interface='', **kwargs): # pylint: disable=unused-argument return proxy_output -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def mac(address='', interface='', vlan=0, **kwargs): # pylint: disable=unused-argument ''' @@ -815,7 +1060,7 @@ def mac(address='', interface='', vlan=0, **kwargs): # pylint: disable=unused-a return proxy_output -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def config(source=None, **kwargs): # pylint: disable=unused-argument ''' .. 
versionadded:: 2017.7.0 @@ -868,7 +1113,7 @@ def config(source=None, **kwargs): # pylint: disable=unused-argument ) -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def optics(**kwargs): # pylint: disable=unused-argument ''' .. versionadded:: 2017.7.0 @@ -918,7 +1163,7 @@ def optics(**kwargs): # pylint: disable=unused-argument # ----- Configuration specific functions ------------------------------------------------------------------------------> -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def load_config(filename=None, text=None, test=False, @@ -1028,7 +1273,7 @@ def load_config(filename=None, loaded_config=loaded_config) -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def load_template(template_name, template_source=None, template_path=None, @@ -1427,7 +1672,7 @@ def load_template(template_name, loaded_config=loaded_config) -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def commit(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument ''' @@ -1447,7 +1692,7 @@ def commit(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argu ) -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def discard_config(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument """ @@ -1467,7 +1712,7 @@ def discard_config(inherit_napalm_device=None, **kwargs): # pylint: disable=unu ) -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def compare_config(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument ''' @@ -1487,7 +1732,7 @@ def compare_config(inherit_napalm_device=None, **kwargs): # pylint: disable=unu ) -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def rollback(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument ''' @@ -1507,7 +1752,7 @@ def rollback(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-ar ) -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def config_changed(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument ''' @@ -1538,7 +1783,7 @@ def config_changed(inherit_napalm_device=None, **kwargs): # pylint: disable=unu return is_config_changed, reason -@proxy_napalm_wrap +@salt.utils.napalm.proxy_napalm_wrap def config_control(inherit_napalm_device=None, **kwargs): # pylint: disable=unused-argument ''' diff --git a/salt/modules/netscaler.py b/salt/modules/netscaler.py index 409ef19ab7..ab91db5126 100644 --- a/salt/modules/netscaler.py +++ b/salt/modules/netscaler.py @@ -43,9 +43,12 @@ Module to provide Citrix Netscaler compatibility to Salt (compatible with netsca salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' +# Import Python libs from __future__ import absolute_import import logging -import salt.utils + +# Import Salt libs +import salt.utils.platform try: from nsnitro.nsnitro import NSNitro @@ -68,11 +71,19 @@ def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' - if salt.utils.is_windows(): - return (False, 'The netscaler execution module failed to load: not available on Windows.') + if salt.utils.platform.is_windows(): + return ( + False, + 'The netscaler execution module failed to load: not available ' + 'on Windows.' + ) if HAS_NSNITRO: return 'netscaler' - return (False, 'The netscaler execution module failed to load: the nsnitro python library is not available.') + return ( + False, + 'The netscaler execution module failed to load: the nsnitro python ' + 'library is not available.' 
+ ) def _connect(**kwargs): diff --git a/salt/modules/network.py b/salt/modules/network.py index 324fb92bd8..a023d6ff7b 100644 --- a/salt/modules/network.py +++ b/salt/modules/network.py @@ -13,15 +13,18 @@ import os import socket # Import salt libs -import salt.utils -import salt.utils.decorators as decorators +import salt.utils # Can be removed when alias_function mac_str_to_bytes are moved +import salt.utils.decorators.path import salt.utils.files import salt.utils.network +import salt.utils.path +import salt.utils.platform +import salt.utils.stringutils import salt.utils.validate.net from salt.exceptions import CommandExecutionError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin if six.PY3: import ipaddress @@ -37,7 +40,7 @@ def __virtual__(): Only work on POSIX-like systems ''' # Disable on Windows, a specific file module exists: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return (False, 'The network execution module cannot be loaded on Windows: use win_network instead.') return True @@ -761,7 +764,7 @@ def netstat(): salt '*' network.netstat ''' if __grains__['kernel'] == 'Linux': - if not salt.utils.which('netstat'): + if not salt.utils.path.which('netstat'): return _ss_linux() else: return _netstat_linux() @@ -841,7 +844,7 @@ def traceroute(host): salt '*' network.traceroute archlinux.org ''' ret = [] - if not salt.utils.which('traceroute'): + if not salt.utils.path.which('traceroute'): log.info('This minion does not have traceroute installed') return ret @@ -850,7 +853,7 @@ def traceroute(host): out = __salt__['cmd.run'](cmd) # Parse version of traceroute - if salt.utils.is_sunos() or salt.utils.is_aix(): + if salt.utils.platform.is_sunos() or salt.utils.platform.is_aix(): traceroute_version = [0, 0, 0] else: cmd2 = 'traceroute --version' @@ -883,7 +886,7 @@ def traceroute(host): if line.startswith('traceroute'): continue - if salt.utils.is_aix(): + if salt.utils.platform.is_aix(): if line.startswith('trying to get source for'): continue @@ -956,7 +959,7 @@ def traceroute(host): return ret -@decorators.which('dig') +@salt.utils.decorators.path.which('dig') def dig(host): ''' Performs a DNS lookup with dig @@ -971,7 +974,7 @@ def dig(host): return __salt__['cmd.run'](cmd) -@decorators.which('arp') +@salt.utils.decorators.path.which('arp') def arp(): ''' Return the arp table from the minion @@ -1268,10 +1271,10 @@ def mod_hostname(hostname): if hostname is None: return False - hostname_cmd = salt.utils.which('hostnamectl') or salt.utils.which('hostname') - if salt.utils.is_sunos(): - uname_cmd = '/usr/bin/uname' if salt.utils.is_smartos() else salt.utils.which('uname') - check_hostname_cmd = salt.utils.which('check-hostname') + hostname_cmd = salt.utils.path.which('hostnamectl') or salt.utils.path.which('hostname') + if salt.utils.platform.is_sunos(): + uname_cmd = '/usr/bin/uname' if salt.utils.platform.is_smartos() else salt.utils.path.which('uname') + check_hostname_cmd = salt.utils.path.which('check-hostname') # Grab the old hostname so we know which hostname to change and then # change the hostname using the hostname command @@ -1281,7 +1284,7 @@ def mod_hostname(hostname): line = line.split(':') if 'Static hostname' in line[0]: o_hostname = line[1].strip() - elif not salt.utils.is_sunos(): + elif not salt.utils.platform.is_sunos(): # don't run hostname -f because -f is not supported on all platforms o_hostname = 
socket.getfqdn() else: @@ -1290,7 +1293,7 @@ def mod_hostname(hostname): if hostname_cmd.endswith('hostnamectl'): __salt__['cmd.run']('{0} set-hostname {1}'.format(hostname_cmd, hostname)) - elif not salt.utils.is_sunos(): + elif not salt.utils.platform.is_sunos(): __salt__['cmd.run']('{0} {1}'.format(hostname_cmd, hostname)) else: __salt__['cmd.run']('{0} -S {1}'.format(uname_cmd, hostname.split('.')[0])) @@ -1306,7 +1309,7 @@ def mod_hostname(hostname): try: host[host.index(o_hostname)] = hostname - if salt.utils.is_sunos(): + if salt.utils.platform.is_sunos(): # also set a copy of the hostname host[host.index(o_hostname.split('.')[0])] = hostname.split('.')[0] except ValueError: @@ -1324,9 +1327,9 @@ def mod_hostname(hostname): for net in network_c: if net.startswith('HOSTNAME'): old_hostname = net.split('=', 1)[1].rstrip() - quote_type = salt.utils.is_quoted(old_hostname) + quote_type = salt.utils.stringutils.is_quoted(old_hostname) fh_.write('HOSTNAME={1}{0}{1}\n'.format( - salt.utils.dequote(hostname), quote_type)) + salt.utils.stringutils.dequote(hostname), quote_type)) else: fh_.write(net) elif __grains__['os_family'] in ('Debian', 'NILinuxRT'): @@ -1337,7 +1340,7 @@ def mod_hostname(hostname): fh_.write(hostname + '\n') # Update /etc/nodename and /etc/defaultdomain on SunOS - if salt.utils.is_sunos(): + if salt.utils.platform.is_sunos(): with salt.utils.files.fopen('/etc/nodename', 'w') as fh_: fh_.write(hostname.split('.')[0] + '\n') with salt.utils.files.fopen('/etc/defaultdomain', 'w') as fh_: @@ -1602,7 +1605,7 @@ def routes(family=None): raise CommandExecutionError('Invalid address family {0}'.format(family)) if __grains__['kernel'] == 'Linux': - if not salt.utils.which('netstat'): + if not salt.utils.path.which('netstat'): routes_ = _ip_route_linux() else: routes_ = _netstat_route_linux() diff --git a/salt/modules/nfs3.py b/salt/modules/nfs3.py index 7114983f43..ecbb77e53b 100644 --- a/salt/modules/nfs3.py +++ b/salt/modules/nfs3.py @@ -8,8 +8,8 @@ from __future__ import absolute_import import logging # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path log = logging.getLogger(__name__) @@ -18,7 +18,7 @@ def __virtual__(): ''' Only work on POSIX-like systems ''' - if not salt.utils.which('showmount'): + if not salt.utils.path.which('showmount'): return (False, 'The nfs3 execution module failed to load: the showmount binary is not in the path.') return True diff --git a/salt/modules/nftables.py b/salt/modules/nftables.py index 5c206104e1..57ab63ca3f 100644 --- a/salt/modules/nftables.py +++ b/salt/modules/nftables.py @@ -9,8 +9,8 @@ import logging import re # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS from salt.exceptions import ( CommandExecutionError @@ -37,7 +37,7 @@ def __virtual__(): ''' Only load the module if nftables is installed ''' - if salt.utils.which('nft'): + if salt.utils.path.which('nft'): return 'nftables' return (False, 'The nftables execution module failed to load: nftables is not installed.') diff --git a/salt/modules/nginx.py b/salt/modules/nginx.py index d2841b122d..dbaea65e46 100644 --- a/salt/modules/nginx.py +++ b/salt/modules/nginx.py @@ -8,7 +8,7 @@ from __future__ import absolute_import from salt.ext.six.moves.urllib.request import urlopen as _urlopen # pylint: disable=no-name-in-module,import-error # Import salt libs -import salt.utils +import salt.utils.path import salt.utils.decorators as decorators 
import re @@ -19,7 +19,7 @@ import re # for nginx over and over and over for each function herein @decorators.memoize def __detect_os(): - return salt.utils.which('nginx') + return salt.utils.path.which('nginx') def __virtual__(): diff --git a/salt/modules/nilrt_ip.py b/salt/modules/nilrt_ip.py index a215fb26f3..18b4470e73 100644 --- a/salt/modules/nilrt_ip.py +++ b/salt/modules/nilrt_ip.py @@ -15,7 +15,7 @@ import salt.utils.validate.net import salt.exceptions # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six try: import pyconnman HAS_PYCONNMAN = True diff --git a/salt/modules/nix.py b/salt/modules/nix.py index 862bd9f3c0..af2884751f 100644 --- a/salt/modules/nix.py +++ b/salt/modules/nix.py @@ -23,7 +23,8 @@ import logging import itertools import os -import salt.utils +import salt.utils.itertools +import salt.utils.path from salt.ext.six.moves import zip @@ -35,7 +36,7 @@ def __virtual__(): This only works if we have access to nix-env ''' nixhome = os.path.join(os.path.expanduser('~{0}'.format(__opts__['user'])), '.nix-profile/bin/') - if salt.utils.which(os.path.join(nixhome, 'nix-env')) and salt.utils.which(os.path.join(nixhome, 'nix-collect-garbage')): + if salt.utils.path.which(os.path.join(nixhome, 'nix-env')) and salt.utils.path.which(os.path.join(nixhome, 'nix-collect-garbage')): return True else: return (False, "The `nix` binaries required cannot be found or are not installed. (`nix-store` and `nix-env`)") @@ -222,8 +223,7 @@ def list_pkgs(installed=True, out = _run(cmd) - # TODO: This could be a tad long when using `installed=False`, maybe use salt.utils.itertools.split? - return [s.split() for s in out['stdout'].splitlines()] + return [s.split() for s in salt.utils.itertools.split(out['stdout'], '\n')] def uninstall(*pkgs): diff --git a/salt/modules/npm.py b/salt/modules/npm.py index e794f8fdd5..4a0bbf9c91 100644 --- a/salt/modules/npm.py +++ b/salt/modules/npm.py @@ -14,6 +14,7 @@ import logging # Import salt libs import salt.utils +import salt.utils.path import salt.modules.cmdmod from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion @@ -32,7 +33,7 @@ def __virtual__(): Only work when npm is installed. ''' try: - if salt.utils.which('npm') is not None: + if salt.utils.path.which('npm') is not None: _check_valid_version() return True else: diff --git a/salt/modules/nspawn.py b/salt/modules/nspawn.py index 9fcca78d42..eb3d94bc1b 100644 --- a/salt/modules/nspawn.py +++ b/salt/modules/nspawn.py @@ -35,6 +35,8 @@ import tempfile # Import Salt libs import salt.defaults.exitcodes import salt.utils +import salt.utils.args +import salt.utils.path import salt.utils.systemd from salt.exceptions import CommandExecutionError, SaltInvocationError from salt.ext import six @@ -84,7 +86,7 @@ def _ensure_exists(wrapped): raise CommandExecutionError( 'Container \'{0}\' does not exist'.format(name) ) - return wrapped(name, *args, **salt.utils.clean_kwargs(**kwargs)) + return wrapped(name, *args, **salt.utils.args.clean_kwargs(**kwargs)) return check_exists @@ -146,7 +148,7 @@ def _bootstrap_arch(name, **kwargs): ''' Bootstrap an Arch Linux container ''' - if not salt.utils.which('pacstrap'): + if not salt.utils.path.which('pacstrap'): raise CommandExecutionError( 'pacstrap not found, is the arch-install-scripts package ' 'installed?' 
@@ -774,7 +776,7 @@ def bootstrap_salt(name, pub_key=pub_key, priv_key=priv_key) if needs_install or force_install or unconditional_install: if install: - rstr = __salt__['test.rand_str']() + rstr = __salt__['test.random_hash']() configdir = '/tmp/.c_{0}'.format(rstr) run(name, 'install -m 0700 -d {0}'.format(configdir), @@ -961,10 +963,10 @@ def info(name, **kwargs): salt myminion nspawn.info arch1 salt myminion nspawn.info arch1 force_start=False ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) start_ = kwargs.pop('start', False) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) if not start_: _ensure_running(name) @@ -1341,14 +1343,14 @@ def _pull_image(pull_type, image, name, **kwargs): 'Unsupported image type \'{0}\''.format(pull_type) ) - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) bad_kwargs = dict( - [(x, y) for x, y in six.iteritems(salt.utils.clean_kwargs(**kwargs)) + [(x, y) for x, y in six.iteritems(salt.utils.args.clean_kwargs(**kwargs)) if x not in valid_kwargs] ) if bad_kwargs: - salt.utils.invalid_kwargs(bad_kwargs) + salt.utils.args.invalid_kwargs(bad_kwargs) pull_opts = [] diff --git a/salt/modules/nxos.py b/salt/modules/nxos.py index 45b39bc09b..5b1d8f1440 100644 --- a/salt/modules/nxos.py +++ b/salt/modules/nxos.py @@ -7,16 +7,18 @@ Execution module for Cisco NX OS Switches Proxy minions For documentation on setting up the nxos proxy minion look in the documentation for :mod:`salt.proxy.nxos `. ''' +# Import Python libs from __future__ import absolute_import -import salt.utils +# Import Salt libs +import salt.utils.platform __proxyenabled__ = ['nxos'] __virtualname__ = 'nxos' def __virtual__(): - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return __virtualname__ return (False, 'The nxos execution module failed to load: ' 'only available on proxy minions.') diff --git a/salt/modules/openbsdpkg.py b/salt/modules/openbsdpkg.py index f500a34355..238d879ba2 100644 --- a/salt/modules/openbsdpkg.py +++ b/salt/modules/openbsdpkg.py @@ -30,6 +30,7 @@ import logging # Import Salt libs import salt.utils +import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError log = logging.getLogger(__name__) @@ -123,7 +124,7 @@ def latest_version(*names, **kwargs): continue pkgname += '--{0}'.format(flavor) if flavor else '' cur = pkgs.get(pkgname, '') - if not cur or salt.utils.compare_versions(ver1=cur, + if not cur or salt.utils.versions.compare(ver1=cur, oper='<', ver2=pkgver): ret[pkgname] = pkgver diff --git a/salt/modules/openbsdrcctl.py b/salt/modules/openbsdrcctl.py index 54545ceb82..16c31bdfc7 100644 --- a/salt/modules/openbsdrcctl.py +++ b/salt/modules/openbsdrcctl.py @@ -8,7 +8,7 @@ from __future__ import absolute_import import os # Import salt libs -import salt.utils +import salt.utils.path import salt.utils.decorators as decorators from salt.exceptions import CommandNotFoundError @@ -35,7 +35,7 @@ def _cmd(): ''' Return the full path to the rcctl(8) command. 
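[Editor's note] The openbsdpkg hunk above replaces ``salt.utils.compare_versions`` with ``salt.utils.versions.compare``, and the later opkg hunks drop the ``_LooseVersion`` alias in favour of the fully qualified class. A small sketch with made-up version strings:

.. code-block:: python

    import salt.utils.versions

    # Operator-style comparison, as used in latest_version() above.
    is_older = salt.utils.versions.compare(ver1='1.2.0', oper='<', ver2='1.10.0')

    # LooseVersion is now referenced through salt.utils.versions directly.
    new_enough = (salt.utils.versions.LooseVersion('0.3.5') >=
                  salt.utils.versions.LooseVersion('0.3.4'))
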
''' - rcctl = salt.utils.which('rcctl') + rcctl = salt.utils.path.which('rcctl') if not rcctl: raise CommandNotFoundError return rcctl diff --git a/salt/modules/openbsdservice.py b/salt/modules/openbsdservice.py index f09002f6b0..27332ee24c 100644 --- a/salt/modules/openbsdservice.py +++ b/salt/modules/openbsdservice.py @@ -16,7 +16,7 @@ import fnmatch import logging # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import map # pylint: disable=import-error,redefined-builtin # Import Salt libs diff --git a/salt/modules/openstack_config.py b/salt/modules/openstack_config.py index 97dbc5ce91..1033f4a68d 100644 --- a/salt/modules/openstack_config.py +++ b/salt/modules/openstack_config.py @@ -13,7 +13,7 @@ from __future__ import absolute_import import salt.utils import salt.exceptions -from salt.utils.decorators import which as _which +import salt.utils.decorators.path import shlex try: @@ -45,7 +45,7 @@ def _fallback(*args, **kw): return 'The "openstack-config" command needs to be installed for this function to work. Typically this is included in the "openstack-utils" package.' -@_which('openstack-config') +@salt.utils.decorators.path.which('openstack-config') def set_(filename, section, parameter, value): ''' Set a value in an OpenStack configuration file. @@ -87,7 +87,7 @@ def set_(filename, section, parameter, value): raise salt.exceptions.CommandExecutionError(result['stderr']) -@_which('openstack-config') +@salt.utils.decorators.path.which('openstack-config') def get(filename, section, parameter): ''' Get a value from an OpenStack configuration file. @@ -126,7 +126,7 @@ def get(filename, section, parameter): raise salt.exceptions.CommandExecutionError(result['stderr']) -@_which('openstack-config') +@salt.utils.decorators.path.which('openstack-config') def delete(filename, section, parameter): ''' Delete a value from an OpenStack configuration file. 
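[Editor's note] In openstack_config.py the locally aliased ``_which`` decorator is replaced by its fully qualified home, ``salt.utils.decorators.path.which``. A minimal sketch of the decorator usage, with a placeholder ``mytool`` binary name:

.. code-block:: python

    import salt.utils.decorators.path


    @salt.utils.decorators.path.which('mytool')
    def run_query(args):
        '''
        The decorator raises CommandNotFoundError before the body runs if the
        'mytool' binary is not on the PATH.
        '''
        return 'mytool {0}'.format(args)
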
diff --git a/salt/modules/openvswitch.py b/salt/modules/openvswitch.py index 0113859a68..bd287b8983 100644 --- a/salt/modules/openvswitch.py +++ b/salt/modules/openvswitch.py @@ -12,7 +12,7 @@ from __future__ import absolute_import import logging # Import salt libs -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -21,7 +21,7 @@ def __virtual__(): ''' Only load the module if Open vSwitch is installed ''' - if salt.utils.which('ovs-vsctl'): + if salt.utils.path.which('ovs-vsctl'): return 'openvswitch' return False diff --git a/salt/modules/opkg.py b/salt/modules/opkg.py index 316c7cc5fd..36284dfab8 100644 --- a/salt/modules/opkg.py +++ b/salt/modules/opkg.py @@ -24,16 +24,18 @@ import re import logging # Import salt libs -import salt.utils +import salt.utils # Can be removed when is_true, compare_dicts are moved +import salt.utils.args import salt.utils.files -import salt.utils.pkg import salt.utils.itertools -from salt.utils.versions import LooseVersion as _LooseVersion +import salt.utils.path +import salt.utils.pkg +import salt.utils.versions from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import shlex_quote as _cmd_quote # pylint: disable=import-error REPO_REGEXP = r'^#?\s*(src|src/gz)\s+([^\s<>]+|"[^<>]+")\s+[^\s<>]+' @@ -325,13 +327,13 @@ def install(name=None, else: pkgstr = '{0}={1}'.format(pkgname, version_num) cver = old.get(pkgname, '') - if reinstall and cver and salt.utils.compare_versions( + if reinstall and cver and salt.utils.versions.compare( ver1=version_num, oper='==', ver2=cver, cmp_func=version_cmp): to_reinstall.append(pkgstr) - elif not cver or salt.utils.compare_versions( + elif not cver or salt.utils.versions.compare( ver1=version_num, oper='>=', ver2=cver, @@ -929,7 +931,7 @@ def info_installed(*names, **kwargs): attr = kwargs.pop('attr', None) if attr is None: filter_attrs = None - elif isinstance(attr, str): + elif isinstance(attr, six.string_types): filter_attrs = set(attr.split(',')) else: filter_attrs = set(attr) @@ -1012,9 +1014,10 @@ def version_cmp(pkg1, pkg2, ignore_epoch=False): output_loglevel='trace', python_shell=False) opkg_version = output.split(' ')[2].strip() - if _LooseVersion(opkg_version) >= _LooseVersion('0.3.4'): + if salt.utils.versions.LooseVersion(opkg_version) >= \ + salt.utils.versions.LooseVersion('0.3.4'): cmd_compare = ['opkg', 'compare-versions'] - elif salt.utils.which('opkg-compare-versions'): + elif salt.utils.path.which('opkg-compare-versions'): cmd_compare = ['opkg-compare-versions'] else: log.warning('Unable to find a compare-versions utility installed. 
Either upgrade opkg to ' @@ -1058,7 +1061,7 @@ def list_repos(): line = line[1:] else: repo['enabled'] = True - cols = salt.utils.shlex_split(line.strip()) + cols = salt.utils.args.shlex_split(line.strip()) if cols[0] in 'src': repo['compressed'] = False else: @@ -1103,7 +1106,7 @@ def _del_repo_from_file(alias, filepath): if regex.search(line): if line.startswith('#'): line = line[1:] - cols = salt.utils.shlex_split(line.strip()) + cols = salt.utils.args.shlex_split(line.strip()) if alias != cols[1]: output.append(line) with salt.utils.files.fopen(filepath, 'w') as fhandle: @@ -1134,7 +1137,7 @@ def _mod_repo_in_file(alias, repostr, filepath): with salt.utils.files.fopen(filepath) as fhandle: output = [] for line in fhandle: - cols = salt.utils.shlex_split(line.strip()) + cols = salt.utils.args.shlex_split(line.strip()) if alias not in cols: output.append(line) else: diff --git a/salt/modules/oracle.py b/salt/modules/oracle.py index 1069061c9b..8e527fc176 100644 --- a/salt/modules/oracle.py +++ b/salt/modules/oracle.py @@ -32,7 +32,7 @@ from __future__ import absolute_import import os import logging from salt.utils.decorators import depends -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/modules/osquery.py b/salt/modules/osquery.py index 137125f070..b1a3de51ba 100644 --- a/salt/modules/osquery.py +++ b/salt/modules/osquery.py @@ -10,7 +10,8 @@ from __future__ import absolute_import import json # Import Salt libs -import salt.utils +import salt.utils.path +import salt.utils.platform import logging log = logging.getLogger(__name__) @@ -26,7 +27,7 @@ __virtualname__ = 'osquery' def __virtual__(): - if salt.utils.which('osqueryi'): + if salt.utils.path.which('osqueryi'): return __virtualname__ return (False, 'The osquery execution module cannot be loaded: ' 'osqueryi binary is not in the path.') @@ -57,11 +58,11 @@ def _osquery(sql, format='json'): cmd = 'osqueryi --json "{0}"'.format(sql) res = __salt__['cmd.run_all'](cmd) - if res['retcode'] == 0: - ret['data'] = json.loads(res['stdout']) - else: + if res['stderr']: ret['result'] = False ret['error'] = res['stderr'] + else: + ret['data'] = json.loads(res['stdout']) return ret @@ -657,7 +658,7 @@ def alf(attrs=None, where=None): salt '*' osquery.alf ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='alf', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -672,7 +673,7 @@ def alf_exceptions(attrs=None, where=None): salt '*' osquery.alf_exceptions ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='alf_exceptions', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -687,7 +688,7 @@ def alf_explicit_auths(attrs=None, where=None): salt '*' osquery.alf_explicit_auths ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='alf_explicit_auths', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -702,7 +703,7 @@ def alf_services(attrs=None, where=None): salt '*' osquery.alf_services ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='alf_services', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -717,7 +718,7 @@ def apps(attrs=None, where=None): salt '*' osquery.apps ''' - if salt.utils.is_darwin(): + if 
salt.utils.platform.is_darwin(): return _osquery_cmd(table='apps', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -732,7 +733,7 @@ def certificates(attrs=None, where=None): salt '*' osquery.certificates ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='certificates', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -747,7 +748,7 @@ def chrome_extensions(attrs=None, where=None): salt '*' osquery.chrome_extensions ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='chrome_extensions', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -762,7 +763,7 @@ def firefox_addons(attrs=None, where=None): salt '*' osquery.firefox_addons ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='firefox_addons', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -777,7 +778,7 @@ def homebrew_packages(attrs=None, where=None): salt '*' osquery.homebrew_packages ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='homebrew_packages', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -792,7 +793,7 @@ def iokit_devicetree(attrs=None, where=None): salt '*' osquery.iokit_devicetree ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='iokit_devicetree', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -807,7 +808,7 @@ def iokit_registry(attrs=None, where=None): salt '*' osquery.iokit_registry ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='iokit_registry', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -822,7 +823,7 @@ def kernel_extensions(attrs=None, where=None): salt '*' osquery.kernel_extensions ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='kernel_extensions', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -837,7 +838,7 @@ def keychain_items(attrs=None, where=None): salt '*' osquery.keychain_items ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='keychain_items', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -852,7 +853,7 @@ def launchd(attrs=None, where=None): salt '*' osquery.launchd ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='launchd', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -867,7 +868,7 @@ def nfs_shares(attrs=None, where=None): salt '*' osquery.nfs_shares ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='nfs_shares', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -882,7 +883,7 @@ def nvram(attrs=None, where=None): salt '*' osquery.nvram ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='nvram', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -897,7 +898,7 @@ def 
preferences(attrs=None, where=None): salt '*' osquery.preferences ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='preferences', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -912,7 +913,7 @@ def quarantine(attrs=None, where=None): salt '*' osquery.quarantine ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='quarantine', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -927,7 +928,7 @@ def safari_extensions(attrs=None, where=None): salt '*' osquery.safari_extensions ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='safari_extensions', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -942,7 +943,7 @@ def startup_items(attrs=None, where=None): salt '*' osquery.startup_items ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='startup_items', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -957,7 +958,7 @@ def xattr_where_from(attrs=None, where=None): salt '*' osquery.xattr_where_from ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='xattr_where_from', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -972,7 +973,7 @@ def xprotect_entries(attrs=None, where=None): salt '*' osquery.xprotect_entries ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='xprotect_entries', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} @@ -987,7 +988,7 @@ def xprotect_reports(attrs=None, where=None): salt '*' osquery.xprotect_reports ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return _osquery_cmd(table='xprotect_reports', attrs=attrs, where=where) return {'result': False, 'comment': 'Only available on macOS systems.'} diff --git a/salt/modules/pacman.py b/salt/modules/pacman.py index 7984f808c3..ac50e7ebfe 100644 --- a/salt/modules/pacman.py +++ b/salt/modules/pacman.py @@ -19,6 +19,7 @@ import os.path # Import salt libs import salt.utils +import salt.utils.args import salt.utils.pkg import salt.utils.itertools import salt.utils.systemd @@ -26,7 +27,7 @@ from salt.exceptions import CommandExecutionError, MinionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -569,33 +570,33 @@ def install(name=None, continue targets.append('{0}{1}{2}'.format(param, prefix, verstr)) - if wildcards: - # Resolve wildcard matches - _available = list_repo_pkgs(*[x[0] for x in wildcards], refresh=refresh) - for pkgname, verstr in wildcards: - candidates = _available.get(pkgname, []) - match = salt.utils.fnmatch_multiple(candidates, verstr) - if match is not None: - targets.append('='.join((pkgname, match))) - else: - errors.append( - 'No version matching \'{0}\' found for package \'{1}\' ' - '(available: {2})'.format( - verstr, - pkgname, - ', '.join(candidates) if candidates else 'none' + if wildcards: + # Resolve wildcard matches + _available = list_repo_pkgs(*[x[0] for x in wildcards], refresh=refresh) + for pkgname, verstr in wildcards: + candidates = _available.get(pkgname, []) + match = 
salt.utils.fnmatch_multiple(candidates, verstr) + if match is not None: + targets.append('='.join((pkgname, match))) + else: + errors.append( + 'No version matching \'{0}\' found for package \'{1}\' ' + '(available: {2})'.format( + verstr, + pkgname, + ', '.join(candidates) if candidates else 'none' + ) ) - ) - if refresh: - try: - # Prevent a second refresh when we run the install command - cmd.remove('-y') - except ValueError: - # Shouldn't happen since we only add -y when refresh is True, - # but just in case that code above is inadvertently changed, - # don't let this result in a traceback. - pass + if refresh: + try: + # Prevent a second refresh when we run the install command + cmd.remove('-y') + except ValueError: + # Shouldn't happen since we only add -y when refresh is True, + # but just in case that code above is inadvertently changed, + # don't let this result in a traceback. + pass if not errors: cmd.extend(targets) @@ -991,12 +992,12 @@ def list_repo_pkgs(*args, **kwargs): salt '*' pkg.list_repo_pkgs 'samba4*' fromrepo=base,updates salt '*' pkg.list_repo_pkgs 'python2-*' byrepo=True ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) fromrepo = kwargs.pop('fromrepo', '') or '' byrepo = kwargs.pop('byrepo', False) refresh = kwargs.pop('refresh', False) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) if fromrepo: try: diff --git a/salt/modules/panos.py b/salt/modules/panos.py new file mode 100644 index 0000000000..6e8ffd556a --- /dev/null +++ b/salt/modules/panos.py @@ -0,0 +1,1857 @@ +# -*- coding: utf-8 -*- +''' +Module to provide Palo Alto compatibility to Salt. + +:codeauthor: :email:`Spencer Ervin ` +:maturity: new +:depends: none +:platform: unix + + +Configuration +============= +This module accepts connection configuration details either as +parameters, or as configuration settings in pillar as a Salt proxy. +Options passed into opts will be ignored if options are passed into pillar. + +.. seealso:: + :prox:`Palo Alto Proxy Module ` + +About +===== +This execution module was designed to handle connections to a Palo Alto based +firewall. This module adds support to send connections directly to the device +through the XML API or through a brokered connection to Panorama. + +''' + +# Import Python Libs +from __future__ import absolute_import +import logging +import time + +# Import Salt Libs +import salt.utils.platform +import salt.proxy.panos + +log = logging.getLogger(__name__) + +__virtualname__ = 'panos' + + +def __virtual__(): + ''' + Will load for the panos proxy minions. + ''' + try: + if salt.utils.platform.is_proxy() and \ + __opts__['proxy']['proxytype'] == 'panos': + return __virtualname__ + except KeyError: + pass + + return False, 'The panos execution module can only be loaded for panos proxy minions.' + + +def _get_job_results(query=None): + ''' + Executes a query that requires a job for completion. This funciton will wait for the job to complete + and return the results. + ''' + if not query: + raise salt.exception.CommandExecutionError("Query parameters cannot be empty.") + + response = __proxy__['panos.call'](query) + + # If the response contains a job, we will wait for the results + if 'job' in response: + jid = response['job'] + + while get_job(jid)['job']['status'] != 'FIN': + time.sleep(5) + + return get_job(jid) + else: + return response + + +def add_config_lock(): + ''' + Prevent other users from changing configuration until the lock is released. 
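[Editor's note] The new panos execution module (started above) follows one pattern throughout: build a ``query`` dict, hand it to the proxy via ``__proxy__['panos.call']``, and poll ``get_job`` for operations that return a job id. The sketch below restates the ``_get_job_results`` flow from this patch; the real ``cmd`` payloads are PAN-OS XML API documents whose tags were stripped in this rendering, so the string here is only a placeholder, and ``__proxy__``/``get_job`` are supplied by the module and the Salt loader:

.. code-block:: python

    import time


    def _example_op(poll_interval=5):
        # 'cmd' is a placeholder; the real module sends a PAN-OS XML API document.
        query = {'type': 'op', 'cmd': '<placeholder/>'}
        response = __proxy__['panos.call'](query)

        # Long-running operations hand back a job id; wait until it reports FIN.
        if 'job' in response:
            jid = response['job']
            while get_job(jid)['job']['status'] != 'FIN':
                time.sleep(poll_interval)
            return get_job(jid)
        return response
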
+ + CLI Example: + + .. code-block:: bash + + salt '*' panos.add_config_lock + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def check_antivirus(): + ''' + Get anti-virus information from PaloAlto Networks server + + CLI Example: + + .. code-block:: bash + + salt '*' panos.check_antivirus + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def check_software(): + ''' + Get software information from PaloAlto Networks server. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.check_software + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def clear_commit_tasks(): + ''' + Clear all commit tasks. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.clear_commit_tasks + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def commit(): + ''' + Commits the candidate configuration to the running configuration. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.commit + + ''' + query = {'type': 'commit', 'cmd': ''} + + return _get_job_results(query) + + +def deactivate_license(key_name=None): + ''' + Deactivates an installed license. + Required version 7.0.0 or greater. + + key_name(str): The file name of the license key installed. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.deactivate_license key_name=License_File_Name.key + + ''' + + _required_version = '7.0.0' + if not __proxy__['panos.is_required_version'](_required_version): + return False, 'The panos device requires version {0} or greater for this command.'.format(_required_version) + + if not key_name: + return False, 'You must specify a key_name.' + else: + query = {'type': 'op', 'cmd': '{0}' + ''.format(key_name)} + + return __proxy__['panos.call'](query) + + +def delete_license(key_name=None): + ''' + Remove license keys on disk. + + key_name(str): The file name of the license key to be deleted. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.delete_license key_name=License_File_Name.key + + ''' + + if not key_name: + return False, 'You must specify a key_name.' + else: + query = {'type': 'op', 'cmd': '{0}'.format(key_name)} + + return __proxy__['panos.call'](query) + + +def download_antivirus(): + ''' + Download the most recent anti-virus package. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.download_antivirus + + ''' + query = {'type': 'op', + 'cmd': '' + ''} + + return _get_job_results(query) + + +def download_software_file(filename=None, synch=False): + ''' + Download software packages by filename. + + Args: + filename(str): The filename of the PANOS file to download. + + synch (bool): If true then the file will synch to the peer unit. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.download_software_file PanOS_5000-8.0.0 + salt '*' panos.download_software_file PanOS_5000-8.0.0 True + + ''' + if not filename: + raise salt.exception.CommandExecutionError("Filename option must not be none.") + + if not isinstance(synch, bool): + raise salt.exception.CommandExecutionError("Synch option must be boolean..") + + if synch is True: + query = {'type': 'op', + 'cmd': '' + '{0}'.format(filename)} + else: + query = {'type': 'op', + 'cmd': 'yes' + '{0}'.format(filename)} + + return _get_job_results(query) + + +def download_software_version(version=None, synch=False): + ''' + Download software packages by version number. + + Args: + version(str): The version of the PANOS file to download. 
+ + synch (bool): If true then the file will synch to the peer unit. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.download_software_version 8.0.0 + salt '*' panos.download_software_version 8.0.0 True + + ''' + if not version: + raise salt.exception.CommandExecutionError("Version option must not be none.") + + if not isinstance(synch, bool): + raise salt.exception.CommandExecutionError("Synch option must be boolean..") + + if synch is True: + query = {'type': 'op', + 'cmd': '' + '{0}'.format(version)} + else: + query = {'type': 'op', + 'cmd': 'yes' + '{0}'.format(version)} + + return _get_job_results(query) + + +def fetch_license(auth_code=None): + ''' + Get new license(s) using from the Palo Alto Network Server. + + auth_code + The license authorization code. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.fetch_license + salt '*' panos.fetch_license auth_code=foobar + + ''' + if not auth_code: + query = {'type': 'op', 'cmd': ''} + else: + query = {'type': 'op', 'cmd': '{0}' + ''.format(auth_code)} + + return __proxy__['panos.call'](query) + + +def get_admins_active(): + ''' + Show active administrators. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_admins_active + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_admins_all(): + ''' + Show all administrators. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_admins_all + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_antivirus_info(): + ''' + Show information about available anti-virus packages. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_antivirus_info + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_arp(): + ''' + Show ARP information. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_arp + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_cli_idle_timeout(): + ''' + Show timeout information for this administrative session. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_cli_idle_timeout + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_cli_permissions(): + ''' + Show cli administrative permissions. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_cli_permissions + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_disk_usage(): + ''' + Report filesystem disk space usage. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_disk_usage + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_dns_server_config(): + ''' + Get the DNS server configuration from the candidate configuration. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_dns_server_config + + ''' + query = {'type': 'config', + 'action': 'get', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/dns-setting/servers'} + + return __proxy__['panos.call'](query) + + +def get_domain_config(): + ''' + Get the domain name configuration from the candidate configuration. + + CLI Example: + + .. 
code-block:: bash + + salt '*' panos.get_domain_config + + ''' + query = {'type': 'config', + 'action': 'get', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/domain'} + + return __proxy__['panos.call'](query) + + +def get_fqdn_cache(): + ''' + Print FQDNs used in rules and their IPs. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_fqdn_cache + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_ha_config(): + ''' + Get the high availability configuration. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_ha_config + + ''' + query = {'type': 'config', + 'action': 'get', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/high-availability'} + + return __proxy__['panos.call'](query) + + +def get_hostname(): + ''' + Get the hostname of the device. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_hostname + + ''' + query = {'type': 'config', + 'action': 'get', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/hostname'} + + return __proxy__['panos.call'](query)['hostname'] + + +def get_interface_counters(name='all'): + ''' + Get the counter statistics for interfaces. + + Args: + name (str): The name of the interface to view. By default, all interface statistics are viewed. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_interface_counters + salt '*' panos.get_interface_counters ethernet1/1 + + ''' + query = {'type': 'op', + 'cmd': '{0}'.format(name)} + + return __proxy__['panos.call'](query) + + +def get_interfaces(name='all'): + ''' + Show interface information. + + Args: + name (str): The name of the interface to view. By default, all interface statistics are viewed. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_interfaces + salt '*' panos.get_interfaces ethernet1/1 + + ''' + query = {'type': 'op', + 'cmd': '{0}'.format(name)} + + return __proxy__['panos.call'](query) + + +def get_job(jid=None): + ''' + List all a single job by ID. + + jid + The ID of the job to retrieve. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_job jid=15 + + ''' + if not jid: + raise salt.exception.CommandExecutionError("ID option must not be none.") + + query = {'type': 'op', 'cmd': '{0}'.format(jid)} + + return __proxy__['panos.call'](query) + + +def get_jobs(state='all'): + ''' + List all jobs on the device. + + state + The state of the jobs to display. Valid options are all, pending, or processed. Pending jobs are jobs + that are currently in a running or waiting state. Processed jobs are jobs that have completed + execution. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_jobs + salt '*' panos.get_jobs state=pending + + ''' + if state.lower() == 'all': + query = {'type': 'op', 'cmd': ''} + elif state.lower() == 'pending': + query = {'type': 'op', 'cmd': ''} + elif state.lower() == 'processed': + query = {'type': 'op', 'cmd': ''} + else: + raise salt.exception.CommandExecutionError("The state parameter must be all, pending, or processed.") + + return __proxy__['panos.call'](query) + + +def get_lacp(): + ''' + Show LACP state. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_lacp + + ''' + query = {'type': 'op', 'cmd': 'all'} + + return __proxy__['panos.call'](query) + + +def get_license_info(): + ''' + Show information about owned license(s). + + CLI Example: + + .. 
code-block:: bash + + salt '*' panos.get_license_info + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_license_tokens(): + ''' + Show license token files for manual license deactivation. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_license_tokens + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_lldp_config(): + ''' + Show lldp config for interfaces. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_lldp_config + + ''' + query = {'type': 'op', 'cmd': 'all'} + + return __proxy__['panos.call'](query) + + +def get_lldp_counters(): + ''' + Show lldp counters for interfaces. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_lldp_counters + + ''' + query = {'type': 'op', 'cmd': 'all'} + + return __proxy__['panos.call'](query) + + +def get_lldp_local(): + ''' + Show lldp local info for interfaces. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_lldp_local + + ''' + query = {'type': 'op', 'cmd': 'all'} + + return __proxy__['panos.call'](query) + + +def get_lldp_neighbors(): + ''' + Show lldp neighbors info for interfaces. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_lldp_neighbors + + ''' + query = {'type': 'op', 'cmd': 'all'} + + return __proxy__['panos.call'](query) + + +def get_logdb_quota(): + ''' + Report the logdb quotas. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_logdb_quota + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_master_key(): + ''' + Get the master key properties. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_master_key + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_ntp_config(): + ''' + Get the NTP configuration from the candidate configuration. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_ntp_config + + ''' + query = {'type': 'config', + 'action': 'get', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/ntp-servers'} + + return __proxy__['panos.call'](query) + + +def get_ntp_servers(): + ''' + Get list of configured NTP servers. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_ntp_servers + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_operational_mode(): + ''' + Show device operational mode setting. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_operational_mode + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_panorama_status(): + ''' + Show panorama connection status. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_panorama_status + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_permitted_ips(): + ''' + Get the IP addresses that are permitted to establish management connections to the device. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_permitted_ips + + ''' + query = {'type': 'config', + 'action': 'get', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/permitted-ip'} + + return __proxy__['panos.call'](query) + + +def get_platform(): + ''' + Get the platform model information and limitations. + + CLI Example: + + .. 
code-block:: bash + + salt '*' panos.get_platform + + ''' + query = {'type': 'config', + 'action': 'get', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/platform'} + + return __proxy__['panos.call'](query) + + +def get_snmp_config(): + ''' + Get the SNMP configuration from the device. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_snmp_config + + ''' + query = {'type': 'config', + 'action': 'get', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/snmp-setting'} + + return __proxy__['panos.call'](query) + + +def get_software_info(): + ''' + Show information about available software packages. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_software_info + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_system_date_time(): + ''' + Get the system date/time. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_system_date_time + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_system_files(): + ''' + List important files in the system. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_system_files + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_system_info(): + ''' + Get the system information. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_system_info + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_system_services(): + ''' + Show system services. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_system_services + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_system_state(filter=None): + ''' + Show the system state variables. + + filter + Filters by a subtree or a wildcard. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_system_state + salt '*' panos.get_system_state filter=cfg.ha.config.enabled + salt '*' panos.get_system_state filter=cfg.ha.* + + ''' + if filter: + query = {'type': 'op', + 'cmd': '{0}'.format(filter)} + else: + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_users_config(): + ''' + Get the local administrative user account configuration. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_users_config + + ''' + query = {'type': 'config', + 'action': 'get', + 'xpath': '/config/mgt-config/users'} + + return __proxy__['panos.call'](query) + + +def get_vlans(): + ''' + Show all VLAN information. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_vlans + + ''' + query = {'type': 'op', 'cmd': 'all'} + + return __proxy__['panos.call'](query) + + +def install_antivirus(version=None, latest=False, synch=False, skip_commit=False,): + ''' + Install anti-virus packages. + + Args: + version(str): The version of the PANOS file to install. + + latest(bool): If true, the latest anti-virus file will be installed. + The specified version option will be ignored. + + synch(bool): If true, the anti-virus will synch to the peer unit. + + skip_commit(bool): If true, the install will skip committing to the device. + + CLI Example: + + .. 
code-block:: bash + + salt '*' panos.install_antivirus 8.0.0 + + ''' + if not version and latest is False: + raise salt.exception.CommandExecutionError("Version option must not be none.") + + if synch is True: + s = "yes" + else: + s = "no" + + if skip_commit is True: + c = "yes" + else: + c = "no" + + if latest is True: + query = {'type': 'op', + 'cmd': '' + '{0}{1}' + 'latest'.format(c, s)} + else: + query = {'type': 'op', + 'cmd': '' + '{0}{1}' + '{2}'.format(c, s, version)} + + return _get_job_results(query) + + +def install_license(): + ''' + Install the license key(s). + + CLI Example: + + .. code-block:: bash + + salt '*' panos.install_license + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def install_software(version=None): + ''' + Upgrade to a software package by version. + + Args: + version(str): The version of the PANOS file to install. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.install_license 8.0.0 + + ''' + if not version: + raise salt.exception.CommandExecutionError("Version option must not be none.") + + query = {'type': 'op', + 'cmd': '' + '{0}'.format(version)} + + return _get_job_results(query) + + +def reboot(): + ''' + Reboot a running system. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.reboot + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def refresh_fqdn_cache(force=False): + ''' + Force refreshes all FQDNs used in rules. + + force + Forces all fqdn refresh + + CLI Example: + + .. code-block:: bash + + salt '*' panos.refresh_fqdn_cache + salt '*' panos.refresh_fqdn_cache force=True + + ''' + if not isinstance(force, bool): + raise salt.exception.CommandExecutionError("Force option must be boolean.") + + if force: + query = {'type': 'op', + 'cmd': 'yes'} + else: + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def remove_config_lock(): + ''' + Release config lock previously held. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.remove_config_lock + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def resolve_address(address=None, vsys=None): + ''' + Resolve address to ip address. + Required version 7.0.0 or greater. + + address + Address name you want to resolve. + + vsys + The vsys name. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.resolve_address foo.bar.com + salt '*' panos.resolve_address foo.bar.com vsys=2 + + ''' + _required_version = '7.0.0' + if not __proxy__['panos.is_required_version'](_required_version): + return False, 'The panos device requires version {0} or greater for this command.'.format(_required_version) + + if not address: + raise salt.exception.CommandExecutionError("FQDN to resolve must be provided as address.") + + if not vsys: + query = {'type': 'op', + 'cmd': '
{0}
'.format(address)} + else: + query = {'type': 'op', + 'cmd': '{0}
{1}
' + '
'.format(vsys, address)} + + return __proxy__['panos.call'](query) + + +def save_device_config(filename=None): + ''' + Save device configuration to a named file. + + filename + The filename to save the configuration to. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.save_device_config foo.xml + + ''' + if not filename: + raise salt.exception.CommandExecutionError("Filename must not be empty.") + + query = {'type': 'op', 'cmd': '{0}'.format(filename)} + + return __proxy__['panos.call'](query) + + +def save_device_state(): + ''' + Save files needed to restore device to local disk. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.save_device_state + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def set_authentication_profile(profile=None, deploy=False): + ''' + Set the authentication profile of the Palo Alto proxy minion. A commit will be required before this is processed. + + CLI Example: + + Args: + profile (str): The name of the authentication profile to set. + + deploy (bool): If true then commit the full candidate configuration, if false only set pending change. + + .. code-block:: bash + + salt '*' panos.set_authentication_profile foo + salt '*' panos.set_authentication_profile foo deploy=True + + ''' + + if not profile: + salt.exception.CommandExecutionError("Profile name option must not be none.") + + ret = {} + + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/' + 'authentication-profile/', + 'element': '{0}'.format(profile)} + + ret.update(__proxy__['panos.call'](query)) + + if deploy is True: + ret.update(commit()) + + return ret + + +def set_hostname(hostname=None, deploy=False): + ''' + Set the hostname of the Palo Alto proxy minion. A commit will be required before this is processed. + + CLI Example: + + Args: + hostname (str): The hostname to set + + deploy (bool): If true then commit the full candidate configuration, if false only set pending change. + + .. code-block:: bash + + salt '*' panos.set_hostname newhostname + salt '*' panos.set_hostname newhostname deploy=True + + ''' + + if not hostname: + salt.exception.CommandExecutionError("Hostname option must not be none.") + + ret = {} + + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/', + 'element': '{0}'.format(hostname)} + + ret.update(__proxy__['panos.call'](query)) + + if deploy is True: + ret.update(commit()) + + return ret + + +def set_management_icmp(enabled=True, deploy=False): + ''' + Enables or disables the ICMP management service on the device. + + CLI Example: + + Args: + enabled (bool): If true the service will be enabled. If false the service will be disabled. + + deploy (bool): If true then commit the full candidate configuration, if false only set pending change. + + .. 
code-block:: bash + + salt '*' panos.set_management_icmp + salt '*' panos.set_management_icmp enabled=False deploy=True + + ''' + + if enabled is True: + value = "no" + elif enabled is False: + value = "yes" + else: + salt.exception.CommandExecutionError("Invalid option provided for service enabled option.") + + ret = {} + + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/', + 'element': '{0}'.format(value)} + + ret.update(__proxy__['panos.call'](query)) + + if deploy is True: + ret.update(commit()) + + return ret + + +def set_management_http(enabled=True, deploy=False): + ''' + Enables or disables the HTTP management service on the device. + + CLI Example: + + Args: + enabled (bool): If true the service will be enabled. If false the service will be disabled. + + deploy (bool): If true then commit the full candidate configuration, if false only set pending change. + + .. code-block:: bash + + salt '*' panos.set_management_http + salt '*' panos.set_management_http enabled=False deploy=True + + ''' + + if enabled is True: + value = "no" + elif enabled is False: + value = "yes" + else: + salt.exception.CommandExecutionError("Invalid option provided for service enabled option.") + + ret = {} + + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/', + 'element': '{0}'.format(value)} + + ret.update(__proxy__['panos.call'](query)) + + if deploy is True: + ret.update(commit()) + + return ret + + +def set_management_https(enabled=True, deploy=False): + ''' + Enables or disables the HTTPS management service on the device. + + CLI Example: + + Args: + enabled (bool): If true the service will be enabled. If false the service will be disabled. + + deploy (bool): If true then commit the full candidate configuration, if false only set pending change. + + .. code-block:: bash + + salt '*' panos.set_management_https + salt '*' panos.set_management_https enabled=False deploy=True + + ''' + + if enabled is True: + value = "no" + elif enabled is False: + value = "yes" + else: + salt.exception.CommandExecutionError("Invalid option provided for service enabled option.") + + ret = {} + + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/', + 'element': '{0}'.format(value)} + + ret.update(__proxy__['panos.call'](query)) + + if deploy is True: + ret.update(commit()) + + return ret + + +def set_management_ocsp(enabled=True, deploy=False): + ''' + Enables or disables the HTTP OCSP management service on the device. + + CLI Example: + + Args: + enabled (bool): If true the service will be enabled. If false the service will be disabled. + + deploy (bool): If true then commit the full candidate configuration, if false only set pending change. + + .. 
code-block:: bash + + salt '*' panos.set_management_ocsp + salt '*' panos.set_management_ocsp enabled=False deploy=True + + ''' + + if enabled is True: + value = "no" + elif enabled is False: + value = "yes" + else: + salt.exception.CommandExecutionError("Invalid option provided for service enabled option.") + + ret = {} + + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/', + 'element': '{0}'.format(value)} + + ret.update(__proxy__['panos.call'](query)) + + if deploy is True: + ret.update(commit()) + + return ret + + +def set_management_snmp(enabled=True, deploy=False): + ''' + Enables or disables the SNMP management service on the device. + + CLI Example: + + Args: + enabled (bool): If true the service will be enabled. If false the service will be disabled. + + deploy (bool): If true then commit the full candidate configuration, if false only set pending change. + + .. code-block:: bash + + salt '*' panos.set_management_snmp + salt '*' panos.set_management_snmp enabled=False deploy=True + + ''' + + if enabled is True: + value = "no" + elif enabled is False: + value = "yes" + else: + salt.exception.CommandExecutionError("Invalid option provided for service enabled option.") + + ret = {} + + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/', + 'element': '{0}'.format(value)} + + ret.update(__proxy__['panos.call'](query)) + + if deploy is True: + ret.update(commit()) + + return ret + + +def set_management_ssh(enabled=True, deploy=False): + ''' + Enables or disables the SSH management service on the device. + + CLI Example: + + Args: + enabled (bool): If true the service will be enabled. If false the service will be disabled. + + deploy (bool): If true then commit the full candidate configuration, if false only set pending change. + + .. code-block:: bash + + salt '*' panos.set_management_ssh + salt '*' panos.set_management_ssh enabled=False deploy=True + + ''' + + if enabled is True: + value = "no" + elif enabled is False: + value = "yes" + else: + salt.exception.CommandExecutionError("Invalid option provided for service enabled option.") + + ret = {} + + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/', + 'element': '{0}'.format(value)} + + ret.update(__proxy__['panos.call'](query)) + + if deploy is True: + ret.update(commit()) + + return ret + + +def set_management_telnet(enabled=True, deploy=False): + ''' + Enables or disables the Telnet management service on the device. + + CLI Example: + + Args: + enabled (bool): If true the service will be enabled. If false the service will be disabled. + + deploy (bool): If true then commit the full candidate configuration, if false only set pending change. + + .. 
code-block:: bash + + salt '*' panos.set_management_telnet + salt '*' panos.set_management_telnet enabled=False deploy=True + + ''' + + if enabled is True: + value = "no" + elif enabled is False: + value = "yes" + else: + salt.exception.CommandExecutionError("Invalid option provided for service enabled option.") + + ret = {} + + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/', + 'element': '{0}'.format(value)} + + ret.update(__proxy__['panos.call'](query)) + + if deploy is True: + ret.update(commit()) + + return ret + + +def set_ntp_authentication(target=None, + authentication_type=None, + key_id=None, + authentication_key=None, + algorithm=None, + deploy=False): + ''' + Set the NTP authentication of the Palo Alto proxy minion. A commit will be required before this is processed. + + CLI Example: + + Args: + target(str): Determines the target of the authentication. Valid options are primary, secondary, or both. + + authentication_type(str): The authentication type to be used. Valid options are symmetric, autokey, and none. + + key_id(int): The NTP authentication key ID. + + authentication_key(str): The authentication key. + + algorithm(str): The algorithm type to be used for a symmetric key. Valid options are md5 and sha1. + + deploy (bool): If true then commit the full candidate configuration, if false only set pending change. + + .. code-block:: bash + + salt '*' ntp.set_authentication target=both authentication_type=autokey + salt '*' ntp.set_authentication target=primary authentication_type=none + salt '*' ntp.set_authentication target=both authentication_type=symmetric key_id=15 authentication_key=mykey algorithm=md5 + salt '*' ntp.set_authentication target=both authentication_type=symmetric key_id=15 authentication_key=mykey algorithm=md5 deploy=True + + ''' + ret = {} + + if target not in ['primary', 'secondary', 'both']: + raise salt.exceptions.CommandExecutionError("Target option must be primary, secondary, or both.") + + if authentication_type not in ['symmetric', 'autokey', 'none']: + raise salt.exceptions.CommandExecutionError("Type option must be symmetric, autokey, or both.") + + if authentication_type == "symmetric" and not authentication_key: + raise salt.exceptions.CommandExecutionError("When using symmetric authentication, authentication_key must be " + "provided.") + + if authentication_type == "symmetric" and not key_id: + raise salt.exceptions.CommandExecutionError("When using symmetric authentication, key_id must be provided.") + + if authentication_type == "symmetric" and algorithm not in ['md5', 'sha1']: + raise salt.exceptions.CommandExecutionError("When using symmetric authentication, algorithm must be md5 or " + "sha1.") + + if authentication_type == 'symmetric': + if target == 'primary' or target == 'both': + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/ntp-servers/' + 'primary-ntp-server/authentication-type', + 'element': '<{0}>{1}' + '{2}'.format(algorithm, + authentication_key, + key_id)} + ret.update({'primary_server': __proxy__['panos.call'](query)}) + + if target == 'secondary' or target == 'both': + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/ntp-servers/' + 'secondary-ntp-server/authentication-type', + 'element': '<{0}>{1}' + '{2}'.format(algorithm, + authentication_key, + key_id)} + 
ret.update({'secondary_server': __proxy__['panos.call'](query)}) + elif authentication_type == 'autokey': + if target == 'primary' or target == 'both': + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/ntp-servers/' + 'primary-ntp-server/authentication-type', + 'element': ''} + ret.update({'primary_server': __proxy__['panos.call'](query)}) + + if target == 'secondary' or target == 'both': + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/ntp-servers/' + 'secondary-ntp-server/authentication-type', + 'element': ''} + ret.update({'secondary_server': __proxy__['panos.call'](query)}) + elif authentication_type == 'none': + if target == 'primary' or target == 'both': + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/ntp-servers/' + 'primary-ntp-server/authentication-type', + 'element': ''} + ret.update({'primary_server': __proxy__['panos.call'](query)}) + + if target == 'secondary' or target == 'both': + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/ntp-servers/' + 'secondary-ntp-server/authentication-type', + 'element': ''} + ret.update({'secondary_server': __proxy__['panos.call'](query)}) + + if deploy is True: + ret.update(commit()) + + return ret + + +def set_ntp_servers(primary_server=None, secondary_server=None, deploy=False): + ''' + Set the NTP servers of the Palo Alto proxy minion. A commit will be required before this is processed. + + CLI Example: + + Args: + primary_server(str): The primary NTP server IP address or FQDN. + + secondary_server(str): The secondary NTP server IP address or FQDN. + + deploy (bool): If true then commit the full candidate configuration, if false only set pending change. + + .. code-block:: bash + + salt '*' ntp.set_servers 0.pool.ntp.org 1.pool.ntp.org + salt '*' ntp.set_servers primary_server=0.pool.ntp.org secondary_server=1.pool.ntp.org + salt '*' ntp.ser_servers 0.pool.ntp.org 1.pool.ntp.org deploy=True + + ''' + ret = {} + + if primary_server: + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/ntp-servers/' + 'primary-ntp-server', + 'element': '{0}'.format(primary_server)} + ret.update({'primary_server': __proxy__['panos.call'](query)}) + + if secondary_server: + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/ntp-servers/' + 'secondary-ntp-server', + 'element': '{0}'.format(secondary_server)} + ret.update({'secondary_server': __proxy__['panos.call'](query)}) + + if deploy is True: + ret.update(commit()) + + return ret + + +def set_permitted_ip(address=None, deploy=False): + ''' + Add an IPv4 address or network to the permitted IP list. + + CLI Example: + + Args: + address (str): The IPv4 address or network to allow access to add to the Palo Alto device. + + deploy (bool): If true then commit the full candidate configuration, if false only set pending change. + + .. 
code-block:: bash + + salt '*' panos.set_permitted_ip 10.0.0.1 + salt '*' panos.set_permitted_ip 10.0.0.0/24 + salt '*' panos.set_permitted_ip 10.0.0.1 deploy=True + + ''' + + if not address: + raise salt.exceptions.CommandExecutionError("Address option must not be empty.") + + ret = {} + + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/permitted-ip/', + 'element': ''.format(address)} + + ret.update(__proxy__['panos.call'](query)) + + if deploy is True: + ret.update(commit()) + + return ret + + +def set_timezone(tz=None, deploy=False): + ''' + Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed. + + CLI Example: + + Args: + tz (str): The name of the timezone to set. + + deploy (bool): If true then commit the full candidate configuration, if false only set pending change. + + .. code-block:: bash + + salt '*' panos.set_timezone UTC + salt '*' panos.set_timezone UTC deploy=True + + ''' + + if not tz: + raise salt.exceptions.CommandExecutionError("Timezone name option must not be none.") + + ret = {} + + query = {'type': 'config', + 'action': 'set', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/timezone/', + 'element': '{0}'.format(tz)} + + ret.update(__proxy__['panos.call'](query)) + + if deploy is True: + ret.update(commit()) + + return ret + + +def shutdown(): + ''' + Shutdown a running system. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.shutdown + + ''' + query = {'type': 'op', 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def unlock_admin(username=None): + ''' + Unlocks a locked administrator account. + + username + Username of the administrator. + + CLI Example: + + ..
code-block:: bash + + salt '*' panos.unlock_admin username=bob + + ''' + if not username: + raise salt.exception.CommandExecutionError("Username option must not be none.") + + query = {'type': 'op', + 'cmd': '{0}' + ''.format(username)} + + return __proxy__['panos.call'](query) diff --git a/salt/modules/parallels.py b/salt/modules/parallels.py index c72b61ceef..d361350cc4 100644 --- a/salt/modules/parallels.py +++ b/salt/modules/parallels.py @@ -27,12 +27,12 @@ import shlex import yaml # Import salt libs -import salt.utils -from salt.utils.locales import sdecode as _sdecode +import salt.utils.locales +import salt.utils.path from salt.exceptions import SaltInvocationError # Import 3rd party libs -import salt.ext.six as six +from salt.ext import six __virtualname__ = 'parallels' __func_alias__ = { @@ -47,9 +47,9 @@ def __virtual__(): ''' Load this module if prlctl is available ''' - if not salt.utils.which('prlctl'): + if not salt.utils.path.which('prlctl'): return (False, 'prlctl utility not available') - if not salt.utils.which('prlsrvctl'): + if not salt.utils.path.which('prlsrvctl'): return (False, 'prlsrvctl utility not available') return __virtualname__ @@ -237,7 +237,7 @@ def clone(name, new_name, linked=False, template=False, runas=None): salt '*' parallels.clone macvm macvm_new runas=macdev salt '*' parallels.clone macvm macvm_templ template=True runas=macdev ''' - args = [_sdecode(name), '--name', _sdecode(new_name)] + args = [salt.utils.locales.sdecode(name), '--name', salt.utils.locales.sdecode(new_name)] if linked: args.append('--linked') if template: @@ -263,7 +263,7 @@ def delete(name, runas=None): salt '*' parallels.exec macvm 'find /etc/paths.d' runas=macdev ''' - return prlctl('delete', _sdecode(name), runas=runas) + return prlctl('delete', salt.utils.locales.sdecode(name), runas=runas) def exists(name, runas=None): @@ -307,7 +307,7 @@ def start(name, runas=None): salt '*' parallels.start macvm runas=macdev ''' - return prlctl('start', _sdecode(name), runas=runas) + return prlctl('start', salt.utils.locales.sdecode(name), runas=runas) def stop(name, kill=False, runas=None): @@ -331,7 +331,7 @@ def stop(name, kill=False, runas=None): salt '*' parallels.stop macvm kill=True runas=macdev ''' # Construct argument list - args = [_sdecode(name)] + args = [salt.utils.locales.sdecode(name)] if kill: args.append('--kill') @@ -356,7 +356,7 @@ def restart(name, runas=None): salt '*' parallels.restart macvm runas=macdev ''' - return prlctl('restart', _sdecode(name), runas=runas) + return prlctl('restart', salt.utils.locales.sdecode(name), runas=runas) def reset(name, runas=None): @@ -375,7 +375,7 @@ def reset(name, runas=None): salt '*' parallels.reset macvm runas=macdev ''' - return prlctl('reset', _sdecode(name), runas=runas) + return prlctl('reset', salt.utils.locales.sdecode(name), runas=runas) def status(name, runas=None): @@ -394,7 +394,7 @@ def status(name, runas=None): salt '*' parallels.status macvm runas=macdev ''' - return prlctl('status', _sdecode(name), runas=runas) + return prlctl('status', salt.utils.locales.sdecode(name), runas=runas) def exec_(name, command, runas=None): @@ -417,7 +417,7 @@ def exec_(name, command, runas=None): salt '*' parallels.exec macvm 'find /etc/paths.d' runas=macdev ''' # Construct argument list - args = [_sdecode(name)] + args = [salt.utils.locales.sdecode(name)] args.extend(_normalize_args(command)) # Execute command and return output @@ -459,10 +459,10 @@ def snapshot_id_to_name(name, snap_id, strict=False, runas=None): salt '*' 
parallels.snapshot_id_to_name macvm a5b8999f-5d95-4aff-82de-e515b0101b66 runas=macdev ''' # Validate VM name and snapshot ID - name = _sdecode(name) + name = salt.utils.locales.sdecode(name) if not re.match(GUID_REGEX, snap_id): raise SaltInvocationError( - u'Snapshot ID "{0}" is not a GUID'.format(_sdecode(snap_id)) + u'Snapshot ID "{0}" is not a GUID'.format(salt.utils.locales.sdecode(snap_id)) ) # Get the snapshot information of the snapshot having the requested ID @@ -503,7 +503,7 @@ def snapshot_id_to_name(name, snap_id, strict=False, runas=None): u'Could not find a snapshot name for snapshot ID "{0}" of VM ' u'"{1}"'.format(snap_id, name) ) - return _sdecode(snap_name) + return salt.utils.locales.sdecode(snap_name) def snapshot_name_to_id(name, snap_name, strict=False, runas=None): @@ -531,8 +531,8 @@ def snapshot_name_to_id(name, snap_name, strict=False, runas=None): salt '*' parallels.snapshot_id_to_name macvm original runas=macdev ''' # Validate VM and snapshot names - name = _sdecode(name) - snap_name = _sdecode(snap_name) + name = salt.utils.locales.sdecode(name) + snap_name = salt.utils.locales.sdecode(snap_name) # Get a multiline string containing all the snapshot GUIDs info = prlctl('snapshot-list', name, runas=runas) @@ -580,7 +580,7 @@ def _validate_snap_name(name, snap_name, strict=True, runas=None): :param str runas: The user that the prlctl command will be run as ''' - snap_name = _sdecode(snap_name) + snap_name = salt.utils.locales.sdecode(snap_name) # Try to convert snapshot name to an ID without {} if re.match(GUID_REGEX, snap_name): @@ -620,7 +620,7 @@ def list_snapshots(name, snap_name=None, tree=False, names=False, runas=None): salt '*' parallels.list_snapshots macvm names=True runas=macdev ''' # Validate VM and snapshot names - name = _sdecode(name) + name = salt.utils.locales.sdecode(name) if snap_name: snap_name = _validate_snap_name(name, snap_name, runas=runas) @@ -643,7 +643,7 @@ def list_snapshots(name, snap_name=None, tree=False, names=False, runas=None): ret = '{0:<38} {1}\n'.format('Snapshot ID', 'Snapshot Name') for snap_id in snap_ids: snap_name = snapshot_id_to_name(name, snap_id, runas=runas) - ret += (u'{{{0}}} {1}\n'.format(snap_id, _sdecode(snap_name))) + ret += (u'{{{0}}} {1}\n'.format(snap_id, salt.utils.locales.sdecode(snap_name))) return ret # Return information directly from parallels desktop @@ -675,9 +675,9 @@ def snapshot(name, snap_name=None, desc=None, runas=None): salt '*' parallels.create_snapshot macvm snap_name=macvm-updates desc='clean install with updates' runas=macdev ''' # Validate VM and snapshot names - name = _sdecode(name) + name = salt.utils.locales.sdecode(name) if snap_name: - snap_name = _sdecode(snap_name) + snap_name = salt.utils.locales.sdecode(snap_name) # Construct argument list args = [name] @@ -724,7 +724,7 @@ def delete_snapshot(name, snap_name, runas=None, all=False): strict = not all # Validate VM and snapshot names - name = _sdecode(name) + name = salt.utils.locales.sdecode(name) snap_ids = _validate_snap_name(name, snap_name, strict=strict, runas=runas) if isinstance(snap_ids, six.string_types): snap_ids = [snap_ids] @@ -767,7 +767,7 @@ def revert_snapshot(name, snap_name, runas=None): salt '*' parallels.revert_snapshot macvm base-with-updates runas=macdev ''' # Validate VM and snapshot names - name = _sdecode(name) + name = salt.utils.locales.sdecode(name) snap_name = _validate_snap_name(name, snap_name, runas=runas) # Construct argument list diff --git a/salt/modules/parted.py b/salt/modules/parted.py index 
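For orientation, the panos functions added above all share one shape: validate the argument, build a 'config'/'set' query dict, hand it to the proxy via __proxy__['panos.call'], and optionally commit(). The sketch below mirrors that shape outside of Salt; the _proxy_call stub, the setting name, and the element payload are invented placeholders, not the module's real PAN-OS XML.

.. code-block:: python

    # Minimal sketch of the query pattern used by the panos functions above.
    # ``_proxy_call`` stands in for ``__proxy__['panos.call']`` and the
    # element/setting names are placeholders for illustration only.
    def _proxy_call(query):
        print('would send: {0}'.format(query))
        return {'result': True}

    def set_example_setting(value, deploy=False):
        ret = {}
        query = {'type': 'config',
                 'action': 'set',
                 'xpath': "/config/devices/entry[@name='localhost.localdomain']"
                          "/deviceconfig/system/example-setting",
                 'element': '<example-setting>{0}</example-setting>'.format(value)}
        ret.update(_proxy_call(query))
        if deploy is True:
            # the real module calls commit() here to push the candidate config
            ret.update({'commit': 'queued'})
        return ret

    set_example_setting('yes', deploy=True)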
04a7d37d47..72a6d3ce65 100644 --- a/salt/modules/parted.py +++ b/salt/modules/parted.py @@ -23,8 +23,9 @@ import stat import string import logging -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.path +import salt.utils.platform from salt.exceptions import CommandExecutionError log = logging.getLogger(__name__) @@ -48,16 +49,16 @@ def __virtual__(): Only work on POSIX-like systems, which have parted and lsblk installed. These are usually provided by the ``parted`` and ``util-linux`` packages. ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return (False, 'The parted execution module failed to load ' 'Windows systems are not supported.') - if not salt.utils.which('parted'): + if not salt.utils.path.which('parted'): return (False, 'The parted execution module failed to load ' 'parted binary is not in the path.') - if not salt.utils.which('lsblk'): + if not salt.utils.path.which('lsblk'): return (False, 'The parted execution module failed to load ' 'lsblk binary is not in the path.') - if not salt.utils.which('partprobe'): + if not salt.utils.path.which('partprobe'): return (False, 'The parted execution module failed to load ' 'partprobe binary is not in the path.') return __virtualname__ @@ -216,7 +217,7 @@ def align_check(device, part_type, partition): 'Invalid partition passed to partition.align_check' ) - cmd = 'parted -m -s {0} align-check {1} {2}'.format( + cmd = 'parted -m {0} align-check {1} {2}'.format( device, part_type, partition ) out = __salt__['cmd.run'](cmd).splitlines() @@ -388,7 +389,7 @@ def mkfs(device, fs_type): else: mkfs_cmd = 'mkfs.{0}'.format(fs_type) - if not salt.utils.which(mkfs_cmd): + if not salt.utils.path.which(mkfs_cmd): return 'Error: {0} is unavailable.'.format(mkfs_cmd) cmd = '{0} {1}'.format(mkfs_cmd, device) out = __salt__['cmd.run'](cmd).splitlines() diff --git a/salt/modules/pcs.py b/salt/modules/pcs.py index 02153c12e0..d99bc29ea5 100644 --- a/salt/modules/pcs.py +++ b/salt/modules/pcs.py @@ -14,14 +14,15 @@ from __future__ import absolute_import # Import salt libs import salt.utils -import salt.ext.six as six +import salt.utils.path +from salt.ext import six def __virtual__(): ''' Only load if pcs package is installed ''' - if salt.utils.which('pcs'): + if salt.utils.path.which('pcs'): return 'pcs' return False diff --git a/salt/modules/pdbedit.py b/salt/modules/pdbedit.py index d8cb4a75a6..a226398719 100644 --- a/salt/modules/pdbedit.py +++ b/salt/modules/pdbedit.py @@ -21,6 +21,7 @@ except ImportError: # Import Salt libs import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -38,7 +39,7 @@ def __virtual__(): ''' Provides pdbedit if available ''' - if salt.utils.which('pdbedit'): + if salt.utils.path.which('pdbedit'): return __virtualname__ return ( False, diff --git a/salt/modules/pecl.py b/salt/modules/pecl.py index edca3bcd56..284ce74e49 100644 --- a/salt/modules/pecl.py +++ b/salt/modules/pecl.py @@ -15,9 +15,10 @@ except ImportError: # Import salt libs import salt.utils +import salt.utils.path # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six __func_alias__ = { 'list_': 'list' @@ -30,7 +31,7 @@ __virtualname__ = 'pecl' def __virtual__(): - if salt.utils.which('pecl'): + if salt.utils.path.which('pecl'): return __virtualname__ return (False, 'The pecl execution module not loaded: ' 'pecl binary is not in the path.') diff --git a/salt/modules/pillar.py b/salt/modules/pillar.py index d0beb92a06..a81892c069 100644 --- a/salt/modules/pillar.py +++ 
b/salt/modules/pillar.py @@ -13,7 +13,7 @@ import os import copy import logging import yaml -import salt.ext.six as six +from salt.ext import six # Import salt libs import salt.pillar @@ -257,8 +257,8 @@ def items(*args, **kwargs): __opts__, __grains__, __opts__['id'], - pillar_override=kwargs.get('pillar'), - pillarenv=kwargs.get('pillarenv') or __opts__['pillarenv']) + pillar_override=pillar_override, + pillarenv=pillarenv) return pillar.compile_pillar() diff --git a/salt/modules/pip.py b/salt/modules/pip.py index 218ec4c379..31a59458ee 100644 --- a/salt/modules/pip.py +++ b/salt/modules/pip.py @@ -82,13 +82,15 @@ import re import shutil import logging import sys - -# Import salt libs -import salt.utils -import salt.utils.files import tempfile + +# Import Salt libs +import salt.utils # Can be removed once compare_dicts is moved +import salt.utils.files import salt.utils.locales +import salt.utils.platform import salt.utils.url +import salt.utils.versions from salt.ext import six from salt.exceptions import CommandExecutionError, CommandNotFoundError @@ -125,7 +127,7 @@ def _get_pip_bin(bin_env): 'pip{0}'.format(sys.version_info[0]), 'pip', 'pip-python'] ) - if salt.utils.is_windows() and six.PY2: + if salt.utils.platform.is_windows() and six.PY2: which_result.encode('string-escape') if which_result is None: raise CommandNotFoundError('Could not find a `pip` binary') @@ -133,7 +135,7 @@ def _get_pip_bin(bin_env): # try to get pip bin from virtualenv, bin_env if os.path.isdir(bin_env): - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): if six.PY2: pip_bin = os.path.join( bin_env, 'Scripts', 'pip.exe').encode('string-escape') @@ -192,7 +194,7 @@ def _get_env_activate(bin_env): raise CommandNotFoundError('Could not find a `activate` binary') if os.path.isdir(bin_env): - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): activate_bin = os.path.join(bin_env, 'Scripts', 'activate.bat') else: activate_bin = os.path.join(bin_env, 'bin', 'activate') @@ -576,7 +578,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914 if use_wheel: min_version = '1.4' cur_version = __salt__['pip.version'](bin_env) - if not salt.utils.compare_versions(ver1=cur_version, oper='>=', + if not salt.utils.versions.compare(ver1=cur_version, oper='>=', ver2=min_version): logger.error( ('The --use-wheel option is only supported in pip {0} and ' @@ -589,7 +591,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914 if no_use_wheel: min_version = '1.4' cur_version = __salt__['pip.version'](bin_env) - if not salt.utils.compare_versions(ver1=cur_version, oper='>=', + if not salt.utils.versions.compare(ver1=cur_version, oper='>=', ver2=min_version): logger.error( ('The --no-use-wheel option is only supported in pip {0} and ' @@ -662,7 +664,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914 if mirrors: # https://github.com/pypa/pip/pull/2641/files#diff-3ef137fb9ffdd400f117a565cd94c188L216 pip_version = version(pip_bin) - if salt.utils.compare_versions(ver1=pip_version, oper='>=', ver2='7.0.0'): + if salt.utils.versions.compare(ver1=pip_version, oper='>=', ver2='7.0.0'): raise CommandExecutionError( 'pip >= 7.0.0 does not support mirror argument:' ' use index_url and/or extra_index_url instead' @@ -689,7 +691,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914 cmd.extend(['--download', download]) if download_cache or cache_dir: - cmd.extend(['--cache-dir' if salt.utils.compare_versions( + cmd.extend(['--cache-dir' if salt.utils.versions.compare( 
ver1=version(bin_env), oper='>=', ver2='6.0' ) else '--download-cache', download_cache or cache_dir]) @@ -730,7 +732,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914 pip_version = version(pip_bin) # From pip v1.4 the --pre flag is available - if salt.utils.compare_versions(ver1=pip_version, oper='>=', ver2='1.4'): + if salt.utils.versions.compare(ver1=pip_version, oper='>=', ver2='1.4'): cmd.append('--pre') if cert: @@ -1006,7 +1008,7 @@ def freeze(bin_env=None, # Include pip, setuptools, distribute, wheel min_version = '8.0.3' cur_version = version(bin_env) - if not salt.utils.compare_versions(ver1=cur_version, oper='>=', + if not salt.utils.versions.compare(ver1=cur_version, oper='>=', ver2=min_version): logger.warning( ('The version of pip installed is {0}, which is older than {1}. ' diff --git a/salt/modules/pkg_resource.py b/salt/modules/pkg_resource.py index 928dccae7d..1e156f7e42 100644 --- a/salt/modules/pkg_resource.py +++ b/salt/modules/pkg_resource.py @@ -5,6 +5,7 @@ Resources needed by pkg providers # Import python libs from __future__ import absolute_import +import copy import fnmatch import logging import os @@ -12,10 +13,11 @@ import pprint # Import third party libs import yaml -import salt.ext.six as six +from salt.ext import six # Import salt libs import salt.utils +import salt.utils.versions from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) @@ -104,7 +106,7 @@ def parse_targets(name=None, salt '*' pkg_resource.parse_targets ''' if '__env__' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'__env__\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -306,3 +308,31 @@ def check_extra_requirements(pkgname, pkgver): return __salt__['pkg.check_extra_requirements'](pkgname, pkgver) return True + + +def format_pkg_list(packages, versions_as_list, attr): + ''' + Formats packages according to parameters for list_pkgs. 
+ ''' + ret = copy.deepcopy(packages) + if attr: + requested_attr = set(['version', 'arch', 'install_date', 'install_date_time_t']) + + if attr != 'all': + requested_attr &= set(attr + ['version']) + + for name in ret: + versions = [] + for all_attr in ret[name]: + filtered_attr = {} + for key in requested_attr: + filtered_attr[key] = all_attr[key] + versions.append(filtered_attr) + ret[name] = versions + return ret + + for name in ret: + ret[name] = [d['version'] for d in ret[name]] + if not versions_as_list: + stringify(ret) + return ret diff --git a/salt/modules/pkgin.py b/salt/modules/pkgin.py index fa702834e7..a5f253cc16 100644 --- a/salt/modules/pkgin.py +++ b/salt/modules/pkgin.py @@ -18,12 +18,13 @@ import re # Import salt libs import salt.utils +import salt.utils.path import salt.utils.pkg import salt.utils.decorators as decorators from salt.exceptions import CommandExecutionError, MinionError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six VERSION_MATCH = re.compile(r'pkgin(?:[\s]+)([\d.]+)(?:[\s]+)(?:.*)') log = logging.getLogger(__name__) @@ -37,7 +38,7 @@ def _check_pkgin(): ''' Looks to see if pkgin is present on the system, return full path ''' - ppath = salt.utils.which('pkgin') + ppath = salt.utils.path.which('pkgin') if ppath is None: # pkgin was not found in $PATH, try to find it via LOCALBASE try: diff --git a/salt/modules/pkgng.py b/salt/modules/pkgng.py index 606e80f654..dc20541f94 100644 --- a/salt/modules/pkgng.py +++ b/salt/modules/pkgng.py @@ -48,8 +48,9 @@ import salt.utils import salt.utils.files import salt.utils.itertools import salt.utils.pkg +import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -297,7 +298,7 @@ def latest_version(*names, **kwargs): root = kwargs.get('root') pkgs = list_pkgs(versions_as_list=True, jail=jail, chroot=chroot, root=root) - if salt.utils.compare_versions(_get_pkgng_version(jail, chroot, root), '>=', '1.6.0'): + if salt.utils.versions.compare(_get_pkgng_version(jail, chroot, root), '>=', '1.6.0'): quiet = True else: quiet = False @@ -329,7 +330,7 @@ def latest_version(*names, **kwargs): ret[name] = pkgver else: if not any( - (salt.utils.compare_versions(ver1=x, + (salt.utils.versions.compare(ver1=x, oper='>=', ver2=pkgver) for x in installed) diff --git a/salt/modules/pkgutil.py b/salt/modules/pkgutil.py index 9dd294b7e0..9b16ea56fa 100644 --- a/salt/modules/pkgutil.py +++ b/salt/modules/pkgutil.py @@ -16,8 +16,9 @@ import copy # Import salt libs import salt.utils import salt.utils.pkg +import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError -import salt.ext.six as six +from salt.ext import six # Define the module's virtual name __virtualname__ = 'pkgutil' @@ -27,7 +28,7 @@ def __virtual__(): ''' Set the virtual pkg module if the os is Solaris ''' - if __grains__['os'] == 'Solaris': + if __grains__['os_family'] == 'Solaris': return __virtualname__ return (False, 'The pkgutil execution module cannot be loaded: ' 'only available on Solaris systems.') @@ -227,7 +228,7 @@ def latest_version(*names, **kwargs): if name in names: cver = pkgs.get(name, '') nver = version_rev.split(',')[0] - if not cver or salt.utils.compare_versions(ver1=cver, + if not cver or salt.utils.versions.compare(ver1=cver, oper='<', ver2=nver): # Remove revision for version comparison diff --git a/salt/modules/portage_config.py b/salt/modules/portage_config.py index 
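To make the behaviour of the new pkg_resource.format_pkg_list helper concrete, here is a small self-contained approximation of its shaping logic on invented sample data; the stringify/versions_as_list=False branch is omitted and the attribute filtering is slightly simplified.

.. code-block:: python

    # Standalone illustration of the shaping done by the new
    # pkg_resource.format_pkg_list helper; the package data is invented.
    import copy

    def format_pkg_list(packages, versions_as_list, attr):
        ret = copy.deepcopy(packages)
        if attr:
            requested_attr = set(['version', 'arch', 'install_date', 'install_date_time_t'])
            if attr != 'all':
                requested_attr &= set(attr + ['version'])
            for name in ret:
                ret[name] = [{key: info[key] for key in requested_attr if key in info}
                             for info in ret[name]]
            return ret
        for name in ret:
            ret[name] = [d['version'] for d in ret[name]]
        return ret

    pkgs = {'bash': [{'version': '4.4', 'arch': 'x86_64',
                      'install_date': '2017-08-01',
                      'install_date_time_t': 1501545600}]}

    # Only the requested attributes (plus version) are kept per entry
    print(format_pkg_list(pkgs, True, ['arch']))
    # -> roughly {'bash': [{'version': '4.4', 'arch': 'x86_64'}]} (key order may vary)

    # With no attr requested, each package collapses to a list of version strings
    print(format_pkg_list(pkgs, True, None))
    # -> {'bash': ['4.4']}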
4028a7e4c0..2a05ebb451 100644 --- a/salt/modules/portage_config.py +++ b/salt/modules/portage_config.py @@ -13,7 +13,7 @@ import shutil import salt.utils.files # Import third party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error try: import portage diff --git a/salt/modules/postfix.py b/salt/modules/postfix.py index 0f1590590a..10c57722a5 100644 --- a/salt/modules/postfix.py +++ b/salt/modules/postfix.py @@ -18,8 +18,11 @@ import re import logging # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path + +# Import 3rd-party libs +from salt.ext import six SWWS = re.compile(r'^\s') @@ -33,7 +36,7 @@ def __virtual__(): ''' Only load the module if Postfix is installed ''' - if salt.utils.which('postfix'): + if salt.utils.path.which('postfix'): return True return (False, 'postfix execution module not loaded: postfix not installed.') @@ -239,7 +242,7 @@ def _parse_main(path=MAIN_CF): # This should only happen at the top of the file conf_list.append(line) continue - if not isinstance(conf_list[-1], str): + if not isinstance(conf_list[-1], six.string_types): conf_list[-1] = '' # This line is a continuation of the previous line conf_list[-1] = '\n'.join([conf_list[-1], line]) diff --git a/salt/modules/postgres.py b/salt/modules/postgres.py index 4a58c5d988..e0fd6d203c 100644 --- a/salt/modules/postgres.py +++ b/salt/modules/postgres.py @@ -47,15 +47,16 @@ except ImportError: HAS_CSV = False # Import salt libs -import salt.utils import salt.utils.files import salt.utils.itertools import salt.utils.odict +import salt.utils.path +import salt.utils.stringutils from salt.exceptions import CommandExecutionError, SaltInvocationError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import StringIO @@ -121,7 +122,7 @@ def __virtual__(): if not HAS_CSV: return False for util in utils: - if not salt.utils.which(util): + if not salt.utils.path.which(util): if not _find_pg_binary(util): return (False, '{0} was not found'.format(util)) return True @@ -134,10 +135,10 @@ def _find_pg_binary(util): Helper function to locate various psql related binaries ''' pg_bin_dir = __salt__['config.option']('postgres.bins_dir') - util_bin = salt.utils.which(util) + util_bin = salt.utils.path.which(util) if not util_bin: if pg_bin_dir: - return salt.utils.which(os.path.join(pg_bin_dir, util)) + return salt.utils.path.which(os.path.join(pg_bin_dir, util)) else: return util_bin @@ -1006,7 +1007,7 @@ def _maybe_encrypt_password(role, password = str(password) if encrypted and password and not password.startswith('md5'): password = "md5{0}".format( - hashlib.md5(salt.utils.to_bytes('{0}{1}'.format(password, role))).hexdigest()) + hashlib.md5(salt.utils.stringutils.to_bytes('{0}{1}'.format(password, role))).hexdigest()) return password @@ -1955,7 +1956,7 @@ def schema_create(dbname, name, owner=None, ''' # check if schema exists - if schema_exists(dbname, name, + if schema_exists(dbname, name, user=user, db_user=db_user, db_password=db_password, db_host=db_host, db_port=db_port): log.info('\'{0}\' already exists in \'{1}\''.format(name, dbname)) @@ -2010,7 +2011,7 @@ def schema_remove(dbname, name, ''' # check if schema exists - if not schema_exists(dbname, name, + if not schema_exists(dbname, name, user=None, db_user=db_user, db_password=db_password, db_host=db_host, 
db_port=db_port): log.info('Schema \'{0}\' does not exist in \'{1}\''.format(name, dbname)) @@ -2024,7 +2025,7 @@ def schema_remove(dbname, name, maintenance_db=dbname, host=db_host, user=db_user, port=db_port, password=db_password) - if not schema_exists(dbname, name, + if not schema_exists(dbname, name, user, db_user=db_user, db_password=db_password, db_host=db_host, db_port=db_port): return True @@ -2033,7 +2034,7 @@ def schema_remove(dbname, name, return False -def schema_exists(dbname, name, +def schema_exists(dbname, name, user=None, db_user=None, db_password=None, db_host=None, db_port=None): ''' @@ -2051,6 +2052,9 @@ def schema_exists(dbname, name, name Schema name we look for + user + The system user the operation should be performed on behalf of + db_user database username if different from config or default @@ -2065,14 +2069,14 @@ def schema_exists(dbname, name, ''' return bool( - schema_get(dbname, name, + schema_get(dbname, name, user=user, db_user=db_user, db_host=db_host, db_port=db_port, db_password=db_password)) -def schema_get(dbname, name, +def schema_get(dbname, name, user=None, db_user=None, db_password=None, db_host=None, db_port=None): ''' @@ -2090,6 +2094,9 @@ def schema_get(dbname, name, name Schema name we look for + user + The system user the operation should be performed on behalf of + db_user database username if different from config or default @@ -2102,7 +2109,7 @@ def schema_get(dbname, name, db_port Database port if different from config or default ''' - all_schemas = schema_list(dbname, + all_schemas = schema_list(dbname, user=user, db_user=db_user, db_host=db_host, db_port=db_port, @@ -2114,7 +2121,7 @@ def schema_get(dbname, name, return False -def schema_list(dbname, +def schema_list(dbname, user=None, db_user=None, db_password=None, db_host=None, db_port=None): ''' @@ -2129,6 +2136,9 @@ def schema_list(dbname, dbname Database name we query on + user + The system user the operation should be performed on behalf of + db_user database username if different from config or default @@ -2153,7 +2163,7 @@ def schema_list(dbname, 'LEFT JOIN pg_roles ON pg_roles.oid = pg_namespace.nspowner ' ])) - rows = psql_query(query, + rows = psql_query(query, runas=user, host=db_host, user=db_user, port=db_port, diff --git a/salt/modules/poudriere.py b/salt/modules/poudriere.py index faa3468c20..ac4215c3b9 100644 --- a/salt/modules/poudriere.py +++ b/salt/modules/poudriere.py @@ -9,8 +9,8 @@ import os import logging # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path log = logging.getLogger(__name__) @@ -19,7 +19,7 @@ def __virtual__(): ''' Module load on freebsd only and if poudriere installed ''' - if __grains__['os'] == 'FreeBSD' and salt.utils.which('poudriere'): + if __grains__['os'] == 'FreeBSD' and salt.utils.path.which('poudriere'): return 'poudriere' else: return (False, 'The poudriere execution module failed to load: only available on FreeBSD with the poudriere binary in the path.') diff --git a/salt/modules/proxy.py b/salt/modules/proxy.py index d5e88634e4..2bdd0c9283 100644 --- a/salt/modules/proxy.py +++ b/salt/modules/proxy.py @@ -12,8 +12,8 @@ from __future__ import absolute_import import logging import re -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = 'proxy' @@ -23,7 +23,7 @@ def __virtual__(): ''' Only work on Mac OS and Windows ''' - if salt.utils.is_darwin() or salt.utils.is_windows(): + if salt.utils.platform.is_darwin() or 
salt.utils.platform.is_windows(): return True return (False, 'Module proxy: module only works on Windows or MacOS systems') diff --git a/salt/modules/ps.py b/salt/modules/ps.py index cd5a99b6d3..b2625c4811 100644 --- a/salt/modules/ps.py +++ b/salt/modules/ps.py @@ -17,8 +17,8 @@ import re from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs -import salt.utils.decorators as decorators -import salt.ext.six as six +import salt.utils.decorators.path +from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil @@ -656,7 +656,7 @@ def lsof(name): return ret -@decorators.which('netstat') +@salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. @@ -678,7 +678,7 @@ def netstat(name): return ret -@decorators.which('ss') +@salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. diff --git a/salt/modules/publish.py b/salt/modules/publish.py index cd43ead5f5..680e23dcd2 100644 --- a/salt/modules/publish.py +++ b/salt/modules/publish.py @@ -13,6 +13,7 @@ import salt.crypt import salt.payload import salt.transport import salt.utils.args +import salt.utils.versions from salt.exceptions import SaltReqTimeoutError, SaltInvocationError log = logging.getLogger(__name__) @@ -252,7 +253,7 @@ def publish(tgt, # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' @@ -302,7 +303,7 @@ def full_data(tgt, # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' diff --git a/salt/modules/puppet.py b/salt/modules/puppet.py index fb0eae0c34..58b3963c8c 100644 --- a/salt/modules/puppet.py +++ b/salt/modules/puppet.py @@ -11,13 +11,15 @@ import os import datetime # Import salt libs -import salt.utils +import salt.utils.args import salt.utils.files +import salt.utils.path +import salt.utils.platform from salt.exceptions import CommandExecutionError # Import 3rd-party libs import yaml -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range log = logging.getLogger(__name__) @@ -27,7 +29,7 @@ def __virtual__(): Only load if puppet is installed ''' unavailable_exes = ', '.join(exe for exe in ('facter', 'puppet') - if salt.utils.which(exe) is None) + if salt.utils.path.which(exe) is None) if unavailable_exes: return (False, ('The puppet execution module cannot be loaded: ' @@ -62,7 +64,7 @@ class _Puppet(object): self.kwargs = {'color': 'false'} # e.g. --tags=apache::server self.args = [] # e.g. 
--noop - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): self.vardir = 'C:\\ProgramData\\PuppetLabs\\puppet\\var' self.rundir = 'C:\\ProgramData\\PuppetLabs\\puppet\\run' self.confdir = 'C:\\ProgramData\\PuppetLabs\\puppet\\etc' @@ -165,7 +167,7 @@ def run(*args, **kwargs): # args will exist as an empty list even if none have been provided puppet.arguments(buildargs) - puppet.kwargs.update(salt.utils.clean_kwargs(**kwargs)) + puppet.kwargs.update(salt.utils.args.clean_kwargs(**kwargs)) ret = __salt__['cmd.run_all'](repr(puppet), python_shell=puppet.useshell) if ret['retcode'] in [0, 2]: @@ -346,9 +348,10 @@ def summary(): def plugin_sync(): ''' - Runs a plugin synch between the puppet master and agent + Runs a plugin sync between the puppet master and agent CLI Example: + .. code-block:: bash salt '*' puppet.plugin_sync diff --git a/salt/modules/pw_group.py b/salt/modules/pw_group.py index 182abb5470..143b3a1495 100644 --- a/salt/modules/pw_group.py +++ b/salt/modules/pw_group.py @@ -15,6 +15,7 @@ import logging # Import salt libs import salt.utils +import salt.utils.args log = logging.getLogger(__name__) @@ -49,7 +50,7 @@ def add(name, gid=None, **kwargs): salt '*' group.add foo 3456 ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) if salt.utils.is_true(kwargs.pop('system', False)): log.warning('pw_group module does not support the \'system\' argument') if kwargs: diff --git a/salt/modules/pw_user.py b/salt/modules/pw_user.py index 41fbd181cf..c63b97b7b1 100644 --- a/salt/modules/pw_user.py +++ b/salt/modules/pw_user.py @@ -44,12 +44,13 @@ except ImportError: HAS_PWD = False # Import 3rd party libs -import salt.ext.six as six +from salt.ext import six # Import salt libs import salt.utils +import salt.utils.args +import salt.utils.locales from salt.exceptions import CommandExecutionError -from salt.utils import locales log = logging.getLogger(__name__) @@ -82,10 +83,10 @@ def _get_gecos(name): # Assign empty strings for any unspecified trailing GECOS fields while len(gecos_field) < 4: gecos_field.append('') - return {'fullname': locales.sdecode(gecos_field[0]), - 'roomnumber': locales.sdecode(gecos_field[1]), - 'workphone': locales.sdecode(gecos_field[2]), - 'homephone': locales.sdecode(gecos_field[3])} + return {'fullname': salt.utils.locales.sdecode(gecos_field[0]), + 'roomnumber': salt.utils.locales.sdecode(gecos_field[1]), + 'workphone': salt.utils.locales.sdecode(gecos_field[2]), + 'homephone': salt.utils.locales.sdecode(gecos_field[3])} def _build_gecos(gecos_dict): @@ -141,7 +142,7 @@ def add(name, salt '*' user.add name ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) if salt.utils.is_true(kwargs.pop('system', False)): log.warning('pw_user module does not support the \'system\' argument') if kwargs: diff --git a/salt/modules/qemu_img.py b/salt/modules/qemu_img.py index 9e37646309..8c947d78c9 100644 --- a/salt/modules/qemu_img.py +++ b/salt/modules/qemu_img.py @@ -14,13 +14,14 @@ import os # Import salt libs import salt.utils +import salt.utils.path def __virtual__(): ''' Only load if qemu-img is installed ''' - if salt.utils.which('qemu-img'): + if salt.utils.path.which('qemu-img'): return 'qemu_img' return (False, 'The qemu_img execution module cannot be loaded: the qemu-img binary is not in the path.') diff --git a/salt/modules/qemu_nbd.py b/salt/modules/qemu_nbd.py index b30135ce45..f5ffa3b36b 100644 --- a/salt/modules/qemu_nbd.py +++ b/salt/modules/qemu_nbd.py 
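Most of the edits in these modules are the same mechanical move from the old top-level helpers to their new homes (salt.utils.path.which, salt.utils.platform.is_windows, salt.utils.args.clean_kwargs, and so on). A typical post-refactor __virtual__ ends up looking roughly like the sketch below; the module and binary names are invented for illustration.

.. code-block:: python

    # Sketch of the post-refactor __virtual__ pattern seen across these modules.
    # 'example' and 'examplectl' are invented names, not a real Salt module.
    import salt.utils.path
    import salt.utils.platform

    __virtualname__ = 'example'

    def __virtual__():
        '''
        Only load on POSIX-like systems where the examplectl binary is present
        '''
        if salt.utils.platform.is_windows():
            return (False, 'The example execution module only works on POSIX-like systems.')
        if not salt.utils.path.which('examplectl'):
            return (False, 'The example execution module failed to load: '
                           'examplectl binary is not in the path.')
        return __virtualname__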
@@ -16,10 +16,11 @@ import logging # Import salt libs import salt.utils +import salt.utils.path import salt.crypt # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Set up logging log = logging.getLogger(__name__) @@ -29,7 +30,7 @@ def __virtual__(): ''' Only load if qemu-img and qemu-nbd are installed ''' - if salt.utils.which('qemu-nbd'): + if salt.utils.path.which('qemu-nbd'): return 'qemu_nbd' return (False, 'The qemu_nbd execution module cannot be loaded: the qemu-nbd binary is not in the path.') @@ -49,7 +50,7 @@ def connect(image): '{0} does not exist'.format(image)) return '' - if salt.utils.which('sfdisk'): + if salt.utils.path.which('sfdisk'): fdisk = 'sfdisk -d' else: fdisk = 'fdisk -l' diff --git a/salt/modules/quota.py b/salt/modules/quota.py index 610e5cdf16..e7de0b5e14 100644 --- a/salt/modules/quota.py +++ b/salt/modules/quota.py @@ -8,7 +8,8 @@ from __future__ import absolute_import import logging # Import salt libs -import salt.utils +import salt.utils.path +import salt.utils.platform from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) @@ -24,9 +25,14 @@ def __virtual__(): ''' Only work on POSIX-like systems with setquota binary available ''' - if not salt.utils.is_windows() and salt.utils.which('setquota'): + if not salt.utils.platform.is_windows() \ + and salt.utils.path.which('setquota'): return 'quota' - return (False, 'The quota execution module cannot be loaded: the module is only available on POSIX-like systems with the setquota binary available.') + return ( + False, + 'The quota execution module cannot be loaded: the module is only ' + 'available on POSIX-like systems with the setquota binary available.' + ) def report(mount): diff --git a/salt/modules/rabbitmq.py b/salt/modules/rabbitmq.py index b55f2dd173..e6338e7a55 100644 --- a/salt/modules/rabbitmq.py +++ b/salt/modules/rabbitmq.py @@ -18,7 +18,9 @@ import string # Import salt libs import salt.utils import salt.utils.itertools -import salt.ext.six as six +import salt.utils.path +import salt.utils.platform +from salt.ext import six from salt.exceptions import SaltInvocationError from salt.ext.six.moves import range from salt.exceptions import CommandExecutionError @@ -36,7 +38,7 @@ def __virtual__(): global RABBITMQCTL global RABBITMQ_PLUGINS - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): from salt.ext.six.moves import winreg key = None try: @@ -71,8 +73,8 @@ def __virtual__(): if key is not None: winreg.CloseKey(key) else: - RABBITMQCTL = salt.utils.which('rabbitmqctl') - RABBITMQ_PLUGINS = salt.utils.which('rabbitmq-plugins') + RABBITMQCTL = salt.utils.path.which('rabbitmqctl') + RABBITMQ_PLUGINS = salt.utils.path.which('rabbitmq-plugins') if not RABBITMQCTL: return (False, 'Module rabbitmq: module only works when RabbitMQ is installed') @@ -135,6 +137,7 @@ def _safe_output(line): ''' return not any([ line.startswith('Listing') and line.endswith('...'), + line.startswith('Listing') and '\t' not in line, '...done' in line, line.startswith('WARNING:') ]) @@ -218,7 +221,7 @@ def list_users(runas=None): # Windows runas currently requires a password. # Due to this, don't use a default value for # runas in Windows. 
- if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'list_users', '-q'], @@ -241,7 +244,7 @@ def list_vhosts(runas=None): salt '*' rabbitmq.list_vhosts ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'list_vhosts', '-q'], @@ -261,7 +264,7 @@ def user_exists(name, runas=None): salt '*' rabbitmq.user_exists rabbit_user ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() return name in list_users(runas=runas) @@ -276,7 +279,7 @@ def vhost_exists(name, runas=None): salt '*' rabbitmq.vhost_exists rabbit_host ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() return name in list_vhosts(runas=runas) @@ -299,10 +302,10 @@ def add_user(name, password=None, runas=None): password = ''.join(random.SystemRandom().choice( string.ascii_uppercase + string.digits) for x in range(15)) - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # On Windows, if the password contains a special character # such as '|', normal execution will fail. For example: # cmd: rabbitmq.add_user abc "asdf|def" @@ -347,7 +350,7 @@ def delete_user(name, runas=None): salt '*' rabbitmq.delete_user rabbit_user ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'delete_user', name], @@ -368,9 +371,9 @@ def change_password(name, password, runas=None): salt '*' rabbitmq.change_password rabbit_user password ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # On Windows, if the password contains a special character # such as '|', normal execution will fail. For example: # cmd: rabbitmq.add_user abc "asdf|def" @@ -404,7 +407,7 @@ def clear_password(name, runas=None): salt '*' rabbitmq.clear_password rabbit_user ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'clear_password', name], @@ -429,7 +432,7 @@ def check_password(name, password, runas=None): ''' # try to get the rabbitmq-version - adapted from _get_rabbitmq_plugin - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() try: @@ -448,7 +451,7 @@ def check_password(name, password, runas=None): # rabbitmq introduced a native api to check a username and password in version 3.5.7. if tuple(version) >= (3, 5, 7): - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # On Windows, if the password contains a special character # such as '|', normal execution will fail. 
For example: # cmd: rabbitmq.add_user abc "asdf|def" @@ -504,7 +507,7 @@ def add_vhost(vhost, runas=None): salt '*' rabbitmq add_vhost '' ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'add_vhost', vhost], @@ -525,7 +528,7 @@ def delete_vhost(vhost, runas=None): salt '*' rabbitmq.delete_vhost '' ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'delete_vhost', vhost], @@ -545,7 +548,7 @@ def set_permissions(vhost, user, conf='.*', write='.*', read='.*', runas=None): salt '*' rabbitmq.set_permissions 'myvhost' 'myuser' ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'set_permissions', '-p', @@ -566,7 +569,7 @@ def list_permissions(vhost, runas=None): salt '*' rabbitmq.list_permissions '/myvhost' ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'list_permissions', '-q', '-p', vhost], @@ -586,7 +589,7 @@ def list_user_permissions(name, runas=None): salt '*' rabbitmq.list_user_permissions 'user'. ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'list_user_permissions', name, '-q'], @@ -605,14 +608,14 @@ def set_user_tags(name, tags, runas=None): salt '*' rabbitmq.set_user_tags 'myadmin' 'administrator' ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() - if tags and isinstance(tags, (list, tuple)): - tags = ' '.join(tags) + if not isinstance(tags, (list, tuple)): + tags = [tags] res = __salt__['cmd.run_all']( - [RABBITMQCTL, 'set_user_tags', name, tags], + [RABBITMQCTL, 'set_user_tags', name] + list(tags), runas=runas, python_shell=False) msg = "Tag(s) set" @@ -629,7 +632,7 @@ def status(runas=None): salt '*' rabbitmq.status ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'status'], @@ -649,7 +652,7 @@ def cluster_status(runas=None): salt '*' rabbitmq.cluster_status ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'cluster_status'], @@ -674,7 +677,7 @@ def join_cluster(host, user='rabbit', ram_node=None, runas=None): cmd.append('--ram') cmd.append('{0}@{1}'.format(user, host)) - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() stop_app(runas) res = __salt__['cmd.run_all'](cmd, runas=runas, python_shell=False) @@ -693,7 +696,7 @@ def stop_app(runas=None): salt '*' rabbitmq.stop_app ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'stop_app'], @@ -713,7 +716,7 @@ def 
start_app(runas=None): salt '*' rabbitmq.start_app ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'start_app'], @@ -733,7 +736,7 @@ def reset(runas=None): salt '*' rabbitmq.reset ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'reset'], @@ -753,7 +756,7 @@ def force_reset(runas=None): salt '*' rabbitmq.force_reset ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'force_reset'], @@ -773,7 +776,7 @@ def list_queues(runas=None, *args): salt '*' rabbitmq.list_queues messages consumers ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() cmd = [RABBITMQCTL, 'list_queues', '-q'] cmd.extend(args) @@ -795,7 +798,7 @@ def list_queues_vhost(vhost, runas=None, *args): salt '*' rabbitmq.list_queues messages consumers ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() cmd = [RABBITMQCTL, 'list_queues', '-q', '-p', vhost] cmd.extend(args) @@ -818,7 +821,7 @@ def list_policies(vhost="/", runas=None): salt '*' rabbitmq.list_policies' ''' ret = {} - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'list_policies', '-q', '-p', vhost], @@ -860,7 +863,7 @@ def set_policy(vhost, name, pattern, definition, priority=None, runas=None): salt '*' rabbitmq.set_policy / HA '.*' '{"ha-mode":"all"}' ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() if isinstance(definition, dict): definition = json.dumps(definition) @@ -889,7 +892,7 @@ def delete_policy(vhost, name, runas=None): salt '*' rabbitmq.delete_policy / HA' ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() res = __salt__['cmd.run_all']( [RABBITMQCTL, 'clear_policy', '-p', vhost, name], @@ -911,7 +914,7 @@ def policy_exists(vhost, name, runas=None): salt '*' rabbitmq.policy_exists / HA ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() policies = list_policies(runas=runas) return bool(vhost in policies and name in policies[vhost]) @@ -927,7 +930,7 @@ def list_available_plugins(runas=None): salt '*' rabbitmq.list_available_plugins ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() cmd = [_get_rabbitmq_plugin(), 'list', '-m'] ret = __salt__['cmd.run_all'](cmd, python_shell=False, runas=runas) @@ -945,7 +948,7 @@ def list_enabled_plugins(runas=None): salt '*' rabbitmq.list_enabled_plugins ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() cmd = [_get_rabbitmq_plugin(), 'list', '-m', '-e'] ret = __salt__['cmd.run_all'](cmd, python_shell=False, 
runas=runas) @@ -963,7 +966,7 @@ def plugin_is_enabled(name, runas=None): salt '*' rabbitmq.plugin_is_enabled rabbitmq_plugin_name ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() return name in list_enabled_plugins(runas) @@ -978,7 +981,7 @@ def enable_plugin(name, runas=None): salt '*' rabbitmq.enable_plugin foo ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() cmd = [_get_rabbitmq_plugin(), 'enable', name] ret = __salt__['cmd.run_all'](cmd, runas=runas, python_shell=False) @@ -995,7 +998,7 @@ def disable_plugin(name, runas=None): salt '*' rabbitmq.disable_plugin foo ''' - if runas is None and not salt.utils.is_windows(): + if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.get_user() cmd = [_get_rabbitmq_plugin(), 'disable', name] ret = __salt__['cmd.run_all'](cmd, runas=runas, python_shell=False) diff --git a/salt/modules/raet_publish.py b/salt/modules/raet_publish.py index 99aa062926..294a1d84c3 100644 --- a/salt/modules/raet_publish.py +++ b/salt/modules/raet_publish.py @@ -12,6 +12,7 @@ import logging import salt.payload import salt.transport import salt.utils.args +import salt.utils.versions from salt.exceptions import SaltReqTimeoutError log = logging.getLogger(__name__) @@ -166,7 +167,7 @@ def publish(tgt, # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' @@ -214,7 +215,7 @@ def full_data(tgt, # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. 
Support for using ' diff --git a/salt/modules/rbac_solaris.py b/salt/modules/rbac_solaris.py index 881a7b4b65..5027d4e2c8 100644 --- a/salt/modules/rbac_solaris.py +++ b/salt/modules/rbac_solaris.py @@ -8,8 +8,8 @@ from __future__ import absolute_import import logging # Import Salt libs -import salt.utils import salt.utils.files +import salt.utils.path log = logging.getLogger(__name__) @@ -21,7 +21,7 @@ def __virtual__(): ''' Provides rbac if we are running on a solaris like system ''' - if __grains__['kernel'] == 'SunOS' and salt.utils.which('profiles'): + if __grains__['kernel'] == 'SunOS' and salt.utils.path.which('profiles'): return __virtualname__ return ( False, diff --git a/salt/modules/rbenv.py b/salt/modules/rbenv.py index cdae6e9d9a..bcbefa1582 100644 --- a/salt/modules/rbenv.py +++ b/salt/modules/rbenv.py @@ -18,10 +18,12 @@ import logging # Import Salt libs import salt.utils +import salt.utils.args +import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Set up logger log = logging.getLogger(__name__) @@ -40,18 +42,18 @@ def __virtual__(): ''' Only work on POSIX-like systems ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return (False, 'The rbenv execution module failed to load: only available on non-Windows systems.') return True def _shlex_split(s): - # from python:salt.utils.shlex_split: passing None for s will read + # from python:salt.utils.args.shlex_split: passing None for s will read # the string to split from standard input. if s is None: - ret = salt.utils.shlex_split('') + ret = salt.utils.args.shlex_split('') else: - ret = salt.utils.shlex_split(s) + ret = salt.utils.args.shlex_split(s) return ret @@ -381,9 +383,9 @@ def do(cmdline, runas=None, env=None): env['PATH'] = '{0}/shims:{1}'.format(path, os.environ['PATH']) try: - cmdline = salt.utils.shlex_split(cmdline) + cmdline = salt.utils.args.shlex_split(cmdline) except AttributeError: - cmdline = salt.utils.shlex_split(str(cmdline)) + cmdline = salt.utils.args.shlex_split(str(cmdline)) result = __salt__['cmd.run_all']( cmdline, @@ -417,9 +419,9 @@ def do_with_ruby(ruby, cmdline, runas=None): raise SaltInvocationError('Command must be specified') try: - cmdline = salt.utils.shlex_split(cmdline) + cmdline = salt.utils.args.shlex_split(cmdline) except AttributeError: - cmdline = salt.utils.shlex_split(str(cmdline)) + cmdline = salt.utils.args.shlex_split(str(cmdline)) env = {} if ruby: diff --git a/salt/modules/rdp.py b/salt/modules/rdp.py index 800fa2d387..3840570729 100644 --- a/salt/modules/rdp.py +++ b/salt/modules/rdp.py @@ -8,9 +8,9 @@ from __future__ import absolute_import import logging import re -# Import salt libs +# Import Salt libs +import salt.utils.platform from salt.utils.decorators import depends -import salt.utils try: from pywintypes import error as PyWinError @@ -26,7 +26,7 @@ def __virtual__(): ''' Only works on Windows systems ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return 'rdp' return (False, 'Module only works on Windows.') diff --git a/salt/modules/reg.py b/salt/modules/reg.py index 5990ea804b..04f1cdbba5 100644 --- a/salt/modules/reg.py +++ b/salt/modules/reg.py @@ -47,8 +47,8 @@ try: except ImportError: HAS_WINDOWS_MODULES = False -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform from salt.exceptions import CommandExecutionError PY2 = sys.version_info[0] == 2 @@ -62,7 +62,7 @@ def __virtual__(): ''' 
Only works on Windows systems with the _winreg python module ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return (False, 'reg execution module failed to load: ' 'The module will only run on Windows systems') @@ -150,7 +150,9 @@ class Registry(object): # pylint: disable=R0903 _winreg.REG_DWORD: 'REG_DWORD', _winreg.REG_EXPAND_SZ: 'REG_EXPAND_SZ', _winreg.REG_MULTI_SZ: 'REG_MULTI_SZ', - _winreg.REG_SZ: 'REG_SZ' + _winreg.REG_SZ: 'REG_SZ', + # REG_QWORD isn't in the winreg library + 11: 'REG_QWORD' } self.opttype_reverse = { _winreg.REG_OPTION_NON_VOLATILE: 'REG_OPTION_NON_VOLATILE', diff --git a/salt/modules/rest_package.py b/salt/modules/rest_package.py index 8baeabab08..a0bb1abed8 100644 --- a/salt/modules/rest_package.py +++ b/salt/modules/rest_package.py @@ -7,6 +7,7 @@ from __future__ import absolute_import # Import python libs import logging import salt.utils +import salt.utils.platform log = logging.getLogger(__name__) @@ -20,12 +21,21 @@ def __virtual__(): Only work on systems that are a proxy minion ''' try: - if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample': + if salt.utils.platform.is_proxy() \ + and __opts__['proxy']['proxytype'] == 'rest_sample': return __virtualname__ except KeyError: - return (False, 'The rest_package execution module failed to load. Check the proxy key in pillar.') + return ( + False, + 'The rest_package execution module failed to load. Check the ' + 'proxy key in pillar.' + ) - return (False, 'The rest_package execution module failed to load: only works on a rest_sample proxy minion.') + return ( + False, + 'The rest_package execution module failed to load: only works on a ' + 'rest_sample proxy minion.' + ) def list_pkgs(versions_as_list=False, **kwargs): diff --git a/salt/modules/rest_service.py b/salt/modules/rest_service.py index fd06a7ef51..78260596ca 100644 --- a/salt/modules/rest_service.py +++ b/salt/modules/rest_service.py @@ -2,14 +2,15 @@ ''' Provide the service module for the proxy-minion REST sample ''' -# Import python libs +# Import Python libs from __future__ import absolute_import -import salt.utils - import logging import fnmatch import re +# Import Salt libs +import salt.utils.platform + log = logging.getLogger(__name__) __func_alias__ = { @@ -26,12 +27,21 @@ def __virtual__(): Only work on systems that are a proxy minion ''' try: - if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample': + if salt.utils.platform.is_proxy() \ + and __opts__['proxy']['proxytype'] == 'rest_sample': return __virtualname__ except KeyError: - return (False, 'The rest_service execution module failed to load. Check the proxy key in pillar.') + return ( + False, + 'The rest_service execution module failed to load. Check the ' + 'proxy key in pillar.' + ) - return (False, 'The rest_service execution module failed to load: only works on a rest_sample proxy minion.') + return ( + False, + 'The rest_service execution module failed to load: only works on a ' + 'rest_sample proxy minion.' 
+ ) def get_all(): diff --git a/salt/modules/rh_ip.py b/salt/modules/rh_ip.py index 4502b7479b..33d0955041 100644 --- a/salt/modules/rh_ip.py +++ b/salt/modules/rh_ip.py @@ -14,11 +14,12 @@ import jinja2 import jinja2.exceptions # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.stringutils import salt.utils.templates import salt.utils.validate.net -import salt.ext.six as six +from salt.exceptions import CommandExecutionError +from salt.ext import six # Set up logging log = logging.getLogger(__name__) @@ -667,14 +668,24 @@ def _parse_settings_eth(opts, iface_type, enabled, iface): bypassfirewall = False else: _raise_error_iface(iface, opts[opt], valid) + + bridgectls = [ + 'net.bridge.bridge-nf-call-ip6tables', + 'net.bridge.bridge-nf-call-iptables', + 'net.bridge.bridge-nf-call-arptables', + ] + if bypassfirewall: - __salt__['sysctl.persist']('net.bridge.bridge-nf-call-ip6tables', '0') - __salt__['sysctl.persist']('net.bridge.bridge-nf-call-iptables', '0') - __salt__['sysctl.persist']('net.bridge.bridge-nf-call-arptables', '0') + sysctl_value = 0 else: - __salt__['sysctl.persist']('net.bridge.bridge-nf-call-ip6tables', '1') - __salt__['sysctl.persist']('net.bridge.bridge-nf-call-iptables', '1') - __salt__['sysctl.persist']('net.bridge.bridge-nf-call-arptables', '1') + sysctl_value = 1 + + for sysctl in bridgectls: + try: + __salt__['sysctl.persist'](sysctl, sysctl_value) + except CommandExecutionError: + log.warning('Failed to set sysctl: {0}'.format(sysctl)) + else: if 'bridge' in opts: result['bridge'] = opts['bridge'] @@ -824,7 +835,7 @@ def _parse_network_settings(opts, current): try: opts['networking'] = current['networking'] # If networking option is quoted, use its quote type - quote_type = salt.utils.is_quoted(opts['networking']) + quote_type = salt.utils.stringutils.is_quoted(opts['networking']) _log_default_network('networking', current['networking']) except ValueError: _raise_error_network('networking', valid) @@ -834,7 +845,7 @@ def _parse_network_settings(opts, current): true_val = '{0}yes{0}'.format(quote_type) false_val = '{0}no{0}'.format(quote_type) - networking = salt.utils.dequote(opts['networking']) + networking = salt.utils.stringutils.dequote(opts['networking']) if networking in valid: if networking in _CONFIG_TRUE: result['networking'] = true_val @@ -852,12 +863,12 @@ def _parse_network_settings(opts, current): if opts['hostname']: result['hostname'] = '{1}{0}{1}'.format( - salt.utils.dequote(opts['hostname']), quote_type) + salt.utils.stringutils.dequote(opts['hostname']), quote_type) else: _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: - nozeroconf = salt.utils.dequote(opts['nozeroconf']) + nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val @@ -869,7 +880,7 @@ def _parse_network_settings(opts, current): for opt in opts: if opt not in ['networking', 'hostname', 'nozeroconf']: result[opt] = '{1}{0}{1}'.format( - salt.utils.dequote(opts[opt]), quote_type) + salt.utils.stringutils.dequote(opts[opt]), quote_type) return result diff --git a/salt/modules/rh_service.py b/salt/modules/rh_service.py index cd66952900..95d5048791 100644 --- a/salt/modules/rh_service.py +++ b/salt/modules/rh_service.py @@ -19,7 +19,7 @@ import fnmatch import re # Import salt libs -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -32,7 +32,7 @@ __virtualname__ = 'service' # Import upstart module if 
needed HAS_UPSTART = False -if salt.utils.which('initctl'): +if salt.utils.path.which('initctl'): try: # Don't re-invent the wheel, import the helper functions from the # upstart module. diff --git a/salt/modules/riak.py b/salt/modules/riak.py index 5d61fb150c..cb4b3d24f0 100644 --- a/salt/modules/riak.py +++ b/salt/modules/riak.py @@ -5,14 +5,14 @@ Riak Salt Module from __future__ import absolute_import # Import salt libs -import salt.utils +import salt.utils.path def __virtual__(): ''' Only available on systems with Riak installed. ''' - if salt.utils.which('riak'): + if salt.utils.path.which('riak'): return True return (False, 'The riak execution module failed to load: the riak binary is not in the path.') @@ -22,7 +22,7 @@ def __execute_cmd(name, cmd): Execute Riak commands ''' return __salt__['cmd.run_all']( - '{0} {1}'.format(salt.utils.which(name), cmd) + '{0} {1}'.format(salt.utils.path.which(name), cmd) ) diff --git a/salt/modules/rpm.py b/salt/modules/rpm.py index 0e4d889304..0419092251 100644 --- a/salt/modules/rpm.py +++ b/salt/modules/rpm.py @@ -11,10 +11,11 @@ import re import datetime # Import Salt libs -import salt.utils +import salt.utils.decorators.path import salt.utils.itertools -import salt.utils.decorators as decorators +import salt.utils.path import salt.utils.pkg.rpm +import salt.utils.versions # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import zip from salt.ext import six @@ -44,7 +45,7 @@ def __virtual__(): ''' Confine this module to rpm based systems ''' - if not salt.utils.which('rpm'): + if not salt.utils.path.which('rpm'): return (False, 'The rpm execution module failed to load: rpm binary is not in the path.') try: os_grain = __grains__['os'].lower() @@ -420,9 +421,9 @@ def owner(*paths): return ret -@decorators.which('rpm2cpio') -@decorators.which('cpio') -@decorators.which('diff') +@salt.utils.decorators.path.which('rpm2cpio') +@salt.utils.decorators.path.which('cpio') +@salt.utils.decorators.path.which('diff') def diff(package, path): ''' Return a formatted diff between current file and original in a package. @@ -658,7 +659,7 @@ def version_cmp(ver1, ver2, ignore_epoch=False): log.debug('rpmUtils.miscutils.compareEVR is not available') if cmp_func is None: - if salt.utils.which('rpmdev-vercmp'): + if salt.utils.path.which('rpmdev-vercmp'): # rpmdev-vercmp always uses epochs, even when zero def _ensure_epoch(ver): def _prepend(ver): @@ -687,7 +688,7 @@ def version_cmp(ver1, ver2, ignore_epoch=False): elif result['retcode'] == 12: return -1 else: - # We'll need to fall back to salt.utils.version_cmp() + # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'Failed to interpret results of rpmdev-vercmp output. ' 'This is probably a bug, and should be reported. ' @@ -695,7 +696,7 @@ def version_cmp(ver1, ver2, ignore_epoch=False): result['retcode'], result['stdout'] ) else: - # We'll need to fall back to salt.utils.version_cmp() + # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'rpmdevtools is not installed, please install it for ' 'more accurate version comparisons' @@ -705,8 +706,8 @@ def version_cmp(ver1, ver2, ignore_epoch=False): # otherwise would be equal, ignore the release. This can happen if # e.g. you are checking if a package version 3.2 is satisfied by # 3.2-1. 
- (ver1_e, ver1_v, ver1_r) = salt.utils.str_version_to_evr(ver1) - (ver2_e, ver2_v, ver2_r) = salt.utils.str_version_to_evr(ver2) + (ver1_e, ver1_v, ver1_r) = salt.utils.pkg.rpm.version_to_evr(ver1) + (ver2_e, ver2_v, ver2_r) = salt.utils.pkg.rpm.version_to_evr(ver2) if not ver1_r or not ver2_r: ver1_r = ver2_r = '' @@ -727,7 +728,7 @@ def version_cmp(ver1, ver2, ignore_epoch=False): # We would already have normalized the versions at the beginning of this # function if ignore_epoch=True, so avoid unnecessary work and just pass # False for this value. - return salt.utils.version_cmp(ver1, ver2, ignore_epoch=False) + return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False) def checksum(*paths): diff --git a/salt/modules/rpmbuild.py b/salt/modules/rpmbuild.py index 47b75e1b20..a6687db524 100644 --- a/salt/modules/rpmbuild.py +++ b/salt/modules/rpmbuild.py @@ -23,12 +23,16 @@ import traceback import functools # Import salt libs -from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module,import-error from salt.exceptions import SaltInvocationError -import salt.utils +import salt.utils # Can be removed when chugid_and_umask is moved import salt.utils.files +import salt.utils.path import salt.utils.vt +# Import 3rd-party libs +from salt.ext import six +from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module,import-error + HAS_LIBS = False try: @@ -50,7 +54,7 @@ def __virtual__(): missing_util = False utils_reqd = ['gpg', 'rpm', 'rpmbuild', 'mock', 'createrepo'] for named_util in utils_reqd: - if not salt.utils.which(named_util): + if not salt.utils.path.which(named_util): missing_util = True break @@ -188,7 +192,7 @@ def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base _create_rpmmacros() tree_base = _mk_tree() spec_path = _get_spec(tree_base, spec, template, saltenv) - if isinstance(sources, str): + if isinstance(sources, six.string_types): sources = sources.split(',') for src in sources: _get_src(tree_base, src, saltenv) diff --git a/salt/modules/rsync.py b/salt/modules/rsync.py index 16d9fc4e04..e536b063e4 100644 --- a/salt/modules/rsync.py +++ b/salt/modules/rsync.py @@ -16,8 +16,8 @@ import re import tempfile # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) @@ -29,7 +29,7 @@ def __virtual__(): ''' Only load module if rsync binary is present ''' - if salt.utils.which('rsync'): + if salt.utils.path.which('rsync'): return __virtualname__ return (False, 'The rsync execution module cannot be loaded: ' 'the rsync binary is not in the path.') diff --git a/salt/modules/runit.py b/salt/modules/runit.py index 9a11ba43d2..8fcf19d48a 100644 --- a/salt/modules/runit.py +++ b/salt/modules/runit.py @@ -80,7 +80,8 @@ for service_dir in VALID_SERVICE_DIRS: AVAIL_SVR_DIRS = [] # Define the module's virtual name -__virtualname__ = 'service' +__virtualname__ = 'runit' +__virtual_aliases__ = ('runit',) def __virtual__(): @@ -91,8 +92,12 @@ def __virtual__(): if __grains__.get('init') == 'runit': if __grains__['os'] == 'Void': add_svc_avail_path('/etc/sv') + global __virtualname__ + __virtualname__ = 'service' return __virtualname__ - return False + if salt.utils.which('sv'): + return __virtualname__ + return (False, 'Runit not available. 
Please install sv') def _service_path(name): diff --git a/salt/modules/rvm.py b/salt/modules/rvm.py index 4a42ed498b..a23f8e050f 100644 --- a/salt/modules/rvm.py +++ b/salt/modules/rvm.py @@ -10,7 +10,7 @@ import os import logging # Import salt libs -import salt.utils +import salt.utils.args from salt.exceptions import CommandExecutionError log = logging.getLogger(__name__) @@ -462,7 +462,7 @@ def do(ruby, command, runas=None, cwd=None, env=None): # pylint: disable=C0103 salt '*' rvm.do 2.0.0 ''' try: - command = salt.utils.shlex_split(command) + command = salt.utils.args.shlex_split(command) except AttributeError: - command = salt.utils.shlex_split(str(command)) + command = salt.utils.args.shlex_split(str(command)) return _rvm_do(ruby, command, runas=runas, cwd=cwd, env=env) diff --git a/salt/modules/salt_proxy.py b/salt/modules/salt_proxy.py index ad3c866792..4c7c1a4e36 100644 --- a/salt/modules/salt_proxy.py +++ b/salt/modules/salt_proxy.py @@ -7,15 +7,17 @@ Module to deploy and manage salt-proxy processes on a minion. ''' +# Import Python libs from __future__ import absolute_import - -import salt.ext.six.moves - import os import logging +# Import Salt libs import salt.utils.files +# Import 3rd-party libs +import salt.ext.six.moves + log = logging.getLogger(__name__) diff --git a/salt/modules/saltutil.py b/salt/modules/saltutil.py index 4ce5da85a7..869fb1ac39 100644 --- a/salt/modules/saltutil.py +++ b/salt/modules/saltutil.py @@ -54,6 +54,7 @@ import salt.utils.files import salt.utils.minion import salt.utils.process import salt.utils.url +import salt.utils.versions import salt.wheel HAS_PSUTIL = True @@ -375,10 +376,10 @@ def refresh_grains(**kwargs): salt '*' saltutil.refresh_grains ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) _refresh_pillar = kwargs.pop('refresh_pillar', True) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) # Modules and pillar need to be refreshed in case grains changes affected # them, and the module refresh process reloads the grains and assigns the # newly-reloaded grains to each execution module's __grains__ dunder. @@ -1285,7 +1286,7 @@ def _get_ssh_or_api_client(cfgfile, ssh=False): def _exec(client, tgt, fun, arg, timeout, tgt_type, ret, kwarg, **kwargs): if 'expr_form' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'The target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' @@ -1445,7 +1446,7 @@ def runner(name, arg=None, kwarg=None, full_return=False, saltenv='base', jid=No kwarg = {} jid = kwargs.pop('__orchestration_jid__', jid) saltenv = kwargs.pop('__env__', saltenv) - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: kwarg.update(kwargs) diff --git a/salt/modules/schedule.py b/salt/modules/schedule.py index ca1a9bb226..da18688853 100644 --- a/salt/modules/schedule.py +++ b/salt/modules/schedule.py @@ -19,7 +19,7 @@ import salt.utils.files import salt.utils.odict # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six __proxyenabled__ = ['*'] diff --git a/salt/modules/scsi.py b/salt/modules/scsi.py index 5bbcd00ec2..eb0deccbbb 100644 --- a/salt/modules/scsi.py +++ b/salt/modules/scsi.py @@ -7,6 +7,7 @@ from __future__ import absolute_import import os.path import logging import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -34,7 +35,7 @@ def ls_(get_size=True): .. 
versionadded:: 2015.5.10 ''' - if not salt.utils.which('lsscsi'): + if not salt.utils.path.which('lsscsi'): __context__['retcode'] = 1 return 'scsi.ls not available - lsscsi command not found' diff --git a/salt/modules/selinux.py b/salt/modules/selinux.py index 247f89d320..13dc390f12 100644 --- a/salt/modules/selinux.py +++ b/salt/modules/selinux.py @@ -17,13 +17,13 @@ import os import re # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path import salt.utils.decorators as decorators from salt.exceptions import CommandExecutionError, SaltInvocationError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six _SELINUX_FILETYPES = { @@ -47,7 +47,7 @@ def __virtual__(): # Iterate over all of the commands this module uses and make sure # each of them are available in the standard PATH to prevent breakage for cmd in required_cmds: - if not salt.utils.which(cmd): + if not salt.utils.path.which(cmd): return (False, cmd + ' is not in the path') # SELinux only makes sense on Linux *obviously* if __grains__['kernel'] == 'Linux': diff --git a/salt/modules/sensors.py b/salt/modules/sensors.py index 77b1907149..2da4dcee70 100644 --- a/salt/modules/sensors.py +++ b/salt/modules/sensors.py @@ -6,18 +6,18 @@ Read lm-sensors ''' from __future__ import absolute_import -#Import python libs +# Import python libs import logging -#import salt libs -import salt.utils +# import Salt libs +import salt.utils.path log = logging.getLogger(__name__) def __virtual__(): - if salt.utils.which('sensors'): + if salt.utils.path.which('sensors'): return True return (False, 'sensors does not exist in the path') diff --git a/salt/modules/serverdensity_device.py b/salt/modules/serverdensity_device.py index ae08d1da4e..fcbabcdfb3 100644 --- a/salt/modules/serverdensity_device.py +++ b/salt/modules/serverdensity_device.py @@ -14,7 +14,7 @@ import os import tempfile # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import map # pylint: disable=import-error,no-name-in-module,redefined-builtin from salt.exceptions import CommandExecutionError diff --git a/salt/modules/servicenow.py b/salt/modules/servicenow.py index 18422c8031..d4c5c460dc 100644 --- a/salt/modules/servicenow.py +++ b/salt/modules/servicenow.py @@ -131,7 +131,7 @@ def non_structured_query(table, query=None, **kwargs): client = _get_client() client.table = table # underlying lib doesn't use six or past.basestring, - # does isinstance(x,str) + # does isinstance(x, str) # http://bit.ly/1VkMmpE if query is None: # try and assemble a query by keyword diff --git a/salt/modules/shadow.py b/salt/modules/shadow.py index 68f90e38ae..785af50dd3 100644 --- a/salt/modules/shadow.py +++ b/salt/modules/shadow.py @@ -19,8 +19,9 @@ except ImportError: pass # Import salt libs -import salt.utils +import salt.utils # Can be removed when is_true is moved import salt.utils.files +import salt.utils.stringutils from salt.exceptions import CommandExecutionError try: import salt.utils.pycrypto @@ -291,7 +292,7 @@ def set_password(name, password, use_usermod=False): lines = [] with salt.utils.files.fopen(s_file, 'rb') as fp_: for line in fp_: - line = salt.utils.to_str(line) + line = salt.utils.stringutils.to_str(line) comps = line.strip().split(':') if comps[0] != name: lines.append(line) diff --git a/salt/modules/smartos_imgadm.py b/salt/modules/smartos_imgadm.py index 25bae318b8..5b13ce3c00 100644 --- a/salt/modules/smartos_imgadm.py +++ b/salt/modules/smartos_imgadm.py @@ -9,7 +9,8 @@ 
import logging import json # Import Salt libs -import salt.utils +import salt.utils.path +import salt.utils.platform import salt.utils.decorators as decorators log = logging.getLogger(__name__) @@ -30,7 +31,7 @@ def _check_imgadm(): ''' Looks to see if imgadm is present on the system ''' - return salt.utils.which('imgadm') + return salt.utils.path.which('imgadm') def _exit_status(retcode): @@ -69,7 +70,7 @@ def __virtual__(): ''' Provides imgadm only on SmartOS ''' - if salt.utils.is_smartos_globalzone() and _check_imgadm(): + if salt.utils.platform.is_smartos_globalzone() and _check_imgadm(): return __virtualname__ return ( False, diff --git a/salt/modules/smartos_nictagadm.py b/salt/modules/smartos_nictagadm.py index d0fad1cf19..a2ad7a0769 100644 --- a/salt/modules/smartos_nictagadm.py +++ b/salt/modules/smartos_nictagadm.py @@ -15,7 +15,8 @@ from __future__ import absolute_import import logging # Import Salt libs -import salt.utils +import salt.utils.path +import salt.utils.platform import salt.utils.decorators as decorators log = logging.getLogger(__name__) @@ -34,21 +35,22 @@ def _check_nictagadm(): ''' Looks to see if nictagadm is present on the system ''' - return salt.utils.which('nictagadm') + return salt.utils.path.which('nictagadm') def _check_dladm(): ''' Looks to see if dladm is present on the system ''' - return salt.utils.which('dladm') + return salt.utils.path.which('dladm') def __virtual__(): ''' Provides nictagadm on SmartOS ''' - if salt.utils.is_smartos_globalzone() and _check_nictagadm() and _check_dladm(): + if salt.utils.platform.is_smartos_globalzone() \ + and _check_nictagadm() and _check_dladm(): return __virtualname__ return ( False, diff --git a/salt/modules/smartos_virt.py b/salt/modules/smartos_virt.py index e762da9c23..6c4c964e95 100644 --- a/salt/modules/smartos_virt.py +++ b/salt/modules/smartos_virt.py @@ -8,8 +8,9 @@ from __future__ import absolute_import import logging # Import Salt libs +import salt.utils.path +import salt.utils.platform from salt.exceptions import CommandExecutionError -import salt.utils log = logging.getLogger(__name__) @@ -21,7 +22,8 @@ def __virtual__(): ''' Provides virt on SmartOS ''' - if salt.utils.is_smartos_globalzone() and salt.utils.which('vmadm'): + if salt.utils.platform.is_smartos_globalzone() \ + and salt.utils.path.which('vmadm'): return __virtualname__ return ( False, diff --git a/salt/modules/smartos_vmadm.py b/salt/modules/smartos_vmadm.py index 3fefce60aa..7b173c9fbb 100644 --- a/salt/modules/smartos_vmadm.py +++ b/salt/modules/smartos_vmadm.py @@ -14,12 +14,17 @@ except ImportError: from pipes import quote as _quote_args # Import Salt libs -import salt.ext.six as six import salt.utils +import salt.utils.args +import salt.utils.path +import salt.utils.platform import salt.utils.decorators as decorators import salt.utils.files from salt.utils.odict import OrderedDict +# Import 3rd-party libs +from salt.ext import six + log = logging.getLogger(__name__) # Function aliases @@ -36,21 +41,21 @@ def _check_vmadm(): ''' Looks to see if vmadm is present on the system ''' - return salt.utils.which('vmadm') + return salt.utils.path.which('vmadm') def _check_zfs(): ''' Looks to see if zfs is present on the system ''' - return salt.utils.which('zfs') + return salt.utils.path.which('zfs') def __virtual__(): ''' Provides vmadm on SmartOS ''' - if salt.utils.is_smartos_globalzone() and _check_vmadm(): + if salt.utils.platform.is_smartos_globalzone() and _check_vmadm(): return __virtualname__ return ( False, @@ -784,7 +789,7 @@ def 
create(from_file=None, **kwargs): ret = {} # prepare vmcfg vmcfg = {} - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) for k, v in six.iteritems(kwargs): vmcfg[k] = v @@ -819,7 +824,7 @@ def update(vm, from_file=None, key='uuid', **kwargs): vmadm = _check_vmadm() # prepare vmcfg vmcfg = {} - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) for k, v in six.iteritems(kwargs): vmcfg[k] = v diff --git a/salt/modules/smbios.py b/salt/modules/smbios.py index a7023bed57..4b4156962a 100644 --- a/salt/modules/smbios.py +++ b/salt/modules/smbios.py @@ -18,8 +18,7 @@ import uuid import re # Import salt libs -# import salt.log -import salt.utils +import salt.utils.path # Solve the Chicken and egg problem where grains need to run before any # of the modules are loaded and are generally available for any usage. @@ -30,7 +29,7 @@ from salt.ext.six.moves import zip # pylint: disable=import-error,redefined-bui log = logging.getLogger(__name__) -DMIDECODER = salt.utils.which_bin(['dmidecode', 'smbios']) +DMIDECODER = salt.utils.path.which_bin(['dmidecode', 'smbios']) def __virtual__(): diff --git a/salt/modules/solaris_fmadm.py b/salt/modules/solaris_fmadm.py index 71cc3fd1b8..585bb01976 100644 --- a/salt/modules/solaris_fmadm.py +++ b/salt/modules/solaris_fmadm.py @@ -14,7 +14,8 @@ from __future__ import absolute_import import logging # Import Salt libs -import salt.utils +import salt.utils.path +import salt.utils.platform import salt.utils.decorators as decorators from salt.utils.odict import OrderedDict @@ -34,21 +35,21 @@ def _check_fmadm(): ''' Looks to see if fmadm is present on the system ''' - return salt.utils.which('fmadm') + return salt.utils.path.which('fmadm') def _check_fmdump(): ''' Looks to see if fmdump is present on the system ''' - return salt.utils.which('fmdump') + return salt.utils.path.which('fmdump') def __virtual__(): ''' Provides fmadm only on Solaris ''' - if salt.utils.is_sunos() and \ + if salt.utils.platform.is_sunos() and \ _check_fmadm() and _check_fmdump(): return __virtualname__ return ( diff --git a/salt/modules/solaris_system.py b/salt/modules/solaris_system.py index 5335146e46..0638b94fa4 100644 --- a/salt/modules/solaris_system.py +++ b/salt/modules/solaris_system.py @@ -6,9 +6,12 @@ This module is assumes we are using solaris-like shutdown .. versionadded:: 2016.3.0 ''' +# Import Python libs from __future__ import absolute_import -import salt.utils +# Import Salt libs +import salt.utils.path +import salt.utils.platform # Define the module's virtual name __virtualname__ = 'system' @@ -18,9 +21,13 @@ def __virtual__(): ''' Only supported on Solaris-like systems ''' - if not salt.utils.is_sunos() or not salt.utils.which('shutdown'): - return (False, 'The system execution module failed to load: ' - 'only available on Solaris-like ystems with shutdown command.') + if not salt.utils.platform.is_sunos() \ + or not salt.utils.path.which('shutdown'): + return ( + False, + 'The system execution module failed to load: only available on ' + 'Solaris-like systems with shutdown command.'
+ ) return __virtualname__ diff --git a/salt/modules/solaris_user.py b/salt/modules/solaris_user.py index f135d1dcf4..e52c82c137 100644 --- a/salt/modules/solaris_user.py +++ b/salt/modules/solaris_user.py @@ -23,7 +23,7 @@ import logging # Import salt libs import salt.utils -import salt.ext.six as six +from salt.ext import six from salt.exceptions import CommandExecutionError log = logging.getLogger(__name__) diff --git a/salt/modules/solarisips.py b/salt/modules/solarisips.py index b3f48a61cd..1e87a46144 100644 --- a/salt/modules/solarisips.py +++ b/salt/modules/solarisips.py @@ -44,6 +44,7 @@ import logging # Import salt libs import salt.utils +import salt.utils.path import salt.utils.pkg from salt.exceptions import CommandExecutionError @@ -56,9 +57,9 @@ def __virtual__(): ''' Set the virtual pkg module if the os is Solaris 11 ''' - if __grains__['os'] == 'Solaris' \ + if __grains__['os_family'] == 'Solaris' \ and float(__grains__['kernelrelease']) > 5.10 \ - and salt.utils.which('pkg'): + and salt.utils.path.which('pkg'): return __virtualname__ return (False, 'The solarisips execution module failed to load: only available ' diff --git a/salt/modules/solarispkg.py b/salt/modules/solarispkg.py index f51667b6eb..84588976d7 100644 --- a/salt/modules/solarispkg.py +++ b/salt/modules/solarispkg.py @@ -30,7 +30,7 @@ def __virtual__(): ''' Set the virtual pkg module if the os is Solaris ''' - if __grains__['os'] == 'Solaris' and float(__grains__['kernelrelease']) <= 5.10: + if __grains__['os_family'] == 'Solaris' and float(__grains__['kernelrelease']) <= 5.10: return __virtualname__ return (False, 'The solarispkg execution module failed to load: only available ' diff --git a/salt/modules/solr.py b/salt/modules/solr.py index 46f8dfd1ef..4306e77af8 100644 --- a/salt/modules/solr.py +++ b/salt/modules/solr.py @@ -66,7 +66,7 @@ import os # Import 3rd-party libs # pylint: disable=no-name-in-module,import-error -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves.urllib.request import ( urlopen as _urlopen, HTTPBasicAuthHandler as _HTTPBasicAuthHandler, @@ -77,7 +77,7 @@ from salt.ext.six.moves.urllib.request import ( # pylint: enable=no-name-in-module,import-error # Import salt libs -import salt.utils +import salt.utils.path # ######################### PRIVATE METHODS ############################## @@ -89,9 +89,9 @@ def __virtual__(): Return: str/bool ''' - if salt.utils.which('solr'): + if salt.utils.path.which('solr'): return 'solr' - if salt.utils.which('apache-solr'): + if salt.utils.path.which('apache-solr'): return 'solr' return (False, 'The solr execution module failed to load: requires both the solr and apache-solr binaries in the path.') diff --git a/salt/modules/splunk.py b/salt/modules/splunk.py index 7fd589264d..0d64fb30b1 100644 --- a/salt/modules/splunk.py +++ b/salt/modules/splunk.py @@ -28,7 +28,8 @@ import hmac import base64 import subprocess -# Import third party libs +# Import 3rd-party libs +from salt.ext import six HAS_LIBS = False try: import splunklib.client @@ -280,7 +281,7 @@ def update_user(email, profile="splunk", **kwargs): if k.lower() == 'name': continue if k.lower() == 'roles': - if isinstance(v, str): + if isinstance(v, six.string_types): v = v.split(',') if set(roles) != set(v): kwargs['roles'] = list(set(v)) diff --git a/salt/modules/splunk_search.py b/salt/modules/splunk_search.py index 150dd4f708..c78192a055 100644 --- a/salt/modules/splunk_search.py +++ b/salt/modules/splunk_search.py @@ -27,7 +27,7 @@ import yaml import urllib # Import 
third party libs -import salt.ext.six as six +from salt.ext import six HAS_LIBS = False try: import splunklib.client diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py index 531dafb029..7ca5c00702 100644 --- a/salt/modules/ssh.py +++ b/salt/modules/ssh.py @@ -19,17 +19,19 @@ import re import subprocess # Import salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils +import salt.utils.decorators.path import salt.utils.files -import salt.utils.decorators as decorators +import salt.utils.path +import salt.utils.platform from salt.exceptions import ( SaltInvocationError, CommandExecutionError, ) # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range log = logging.getLogger(__name__) @@ -41,7 +43,7 @@ if six.PY3: def __virtual__(): # TODO: This could work on windows with some love - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return (False, 'The module cannot be loaded on windows.') return True @@ -734,7 +736,7 @@ def set_auth_key( os.chown(dpath, uinfo['uid'], uinfo['gid']) os.chmod(dpath, 448) # If SELINUX is available run a restorecon on the file - rcon = salt.utils.which('restorecon') + rcon = salt.utils.path.which('restorecon') if rcon: cmd = [rcon, dpath] subprocess.call(cmd) @@ -764,7 +766,7 @@ def set_auth_key( os.chown(fconfig, uinfo['uid'], uinfo['gid']) os.chmod(fconfig, 384) # If SELINUX is available run a restorecon on the file - rcon = salt.utils.which('restorecon') + rcon = salt.utils.path.which('restorecon') if rcon: cmd = [rcon, fconfig] subprocess.call(cmd) @@ -791,7 +793,7 @@ def _parse_openssh_output(lines, fingerprint_hash_type=None): 'fingerprint': fingerprint} -@decorators.which('ssh-keygen') +@salt.utils.decorators.path.which('ssh-keygen') def get_known_host(user, hostname, config=None, @@ -824,7 +826,7 @@ def get_known_host(user, return known_hosts[0] if known_hosts else None -@decorators.which('ssh-keyscan') +@salt.utils.decorators.path.which('ssh-keyscan') def recv_known_host(hostname, enc=None, port=None, @@ -1243,7 +1245,7 @@ def user_keys(user=None, pubfile=None, prvfile=None): return _keys -@decorators.which('ssh-keygen') +@salt.utils.decorators.path.which('ssh-keygen') def hash_known_hosts(user=None, config=None): ''' diff --git a/salt/modules/ssh_package.py b/salt/modules/ssh_package.py index b675fd497b..d71d479180 100644 --- a/salt/modules/ssh_package.py +++ b/salt/modules/ssh_package.py @@ -4,11 +4,11 @@ Service support for the REST example ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import logging -# Import Salt's libs -import salt.utils +# Import Salts libs +import salt.utils.platform log = logging.getLogger(__name__) @@ -22,12 +22,21 @@ def __virtual__(): Only work on proxy ''' try: - if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'ssh_sample': + if salt.utils.platform.is_proxy() \ + and __opts__['proxy']['proxytype'] == 'ssh_sample': return __virtualname__ except KeyError: - return (False, 'The ssh_package execution module failed to load. Check the proxy key in pillar.') + return ( + False, + 'The ssh_package execution module failed to load. Check the ' + 'proxy key in pillar.' + ) - return (False, 'The ssh_package execution module failed to load: only works on an ssh_sample proxy minion.') + return ( + False, + 'The ssh_package execution module failed to load: only works on an ' + 'ssh_sample proxy minion.' 
+ ) def list_pkgs(versions_as_list=False, **kwargs): diff --git a/salt/modules/ssh_service.py b/salt/modules/ssh_service.py index 04a1c92b96..731dd281f1 100644 --- a/salt/modules/ssh_service.py +++ b/salt/modules/ssh_service.py @@ -3,13 +3,15 @@ Provide the service module for the proxy-minion SSH sample .. versionadded:: 2015.8.2 ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import logging -import salt.utils import fnmatch import re +# Import Salt libs +import salt.utils.platform + log = logging.getLogger(__name__) __func_alias__ = { @@ -25,12 +27,21 @@ def __virtual__(): Only work on systems that are a proxy minion ''' try: - if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'ssh_sample': + if salt.utils.platform.is_proxy() \ + and __opts__['proxy']['proxytype'] == 'ssh_sample': return __virtualname__ except KeyError: - return (False, 'The ssh_service execution module failed to load. Check the proxy key in pillar.') + return ( + False, + 'The ssh_service execution module failed to load. Check the ' + 'proxy key in pillar.' + ) - return (False, 'The ssh_service execution module failed to load: only works on an ssh_sample proxy minion.') + return ( + False, + 'The ssh_service execution module failed to load: only works on an ' + 'ssh_sample proxy minion.' + ) def get_all(): diff --git a/salt/modules/state.py b/salt/modules/state.py index 35a7867ead..09f0f774a2 100644 --- a/salt/modules/state.py +++ b/salt/modules/state.py @@ -29,14 +29,17 @@ import salt.config import salt.payload import salt.state import salt.utils +import salt.utils.event import salt.utils.files import salt.utils.jid +import salt.utils.platform import salt.utils.url +import salt.utils.versions from salt.exceptions import CommandExecutionError, SaltInvocationError from salt.runners.state import orchestrate as _orchestrate # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six __proxyenabled__ = ['*'] @@ -265,14 +268,14 @@ def _get_opts(**kwargs): if 'saltenv' in kwargs: saltenv = kwargs['saltenv'] - if not isinstance(saltenv, six.string_types): + if saltenv is not None and not isinstance(saltenv, six.string_types): opts['environment'] = str(kwargs['saltenv']) else: opts['environment'] = kwargs['saltenv'] if 'pillarenv' in kwargs: pillarenv = kwargs['pillarenv'] - if not isinstance(pillarenv, six.string_types): + if pillarenv is not None and not isinstance(pillarenv, six.string_types): opts['pillarenv'] = str(kwargs['pillarenv']) else: opts['pillarenv'] = kwargs['pillarenv'] @@ -394,7 +397,7 @@ def template(tem, queue=False, **kwargs): salt '*' state.template '' ''' if 'env' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. 
This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -629,7 +632,7 @@ def request(mods=None, }) cumask = os.umask(0o77) try: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Make sure cache file isn't read-only __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path)) with salt.utils.files.fopen(notify_path, 'w+b') as fp_: @@ -693,7 +696,7 @@ def clear_request(name=None): return False cumask = os.umask(0o77) try: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Make sure cache file isn't read-only __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path)) with salt.utils.files.fopen(notify_path, 'w+b') as fp_: @@ -836,7 +839,7 @@ def highstate(test=None, queue=False, **kwargs): opts['test'] = _get_test_value(test, **kwargs) if 'env' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -1003,7 +1006,7 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs): ''' concurrent = kwargs.get('concurrent', False) if 'env' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -1105,7 +1108,7 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs): return errors if exclude: - if isinstance(exclude, str): + if isinstance(exclude, six.string_types): exclude = exclude.split(',') if '__exclude__' in high_: high_['__exclude__'].extend(exclude) @@ -1120,7 +1123,7 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs): cache_file = os.path.join(__opts__['cachedir'], 'sls.p') cumask = os.umask(0o77) try: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Make sure cache file isn't read-only __salt__['cmd.run'](['attrib', '-R', cache_file], python_shell=False) with salt.utils.files.fopen(cache_file, 'w+b') as fp_: @@ -1475,7 +1478,7 @@ def show_low_sls(mods, test=None, queue=False, **kwargs): salt '*' state.show_low_sls foo saltenv=dev ''' if 'env' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -1552,7 +1555,7 @@ def show_sls(mods, test=None, queue=False, **kwargs): salt '*' state.show_sls core,edit.vim dev ''' if 'env' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -1628,7 +1631,7 @@ def show_top(queue=False, **kwargs): salt '*' state.show_top ''' if 'env' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. 
This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -1723,7 +1726,7 @@ def single(fun, name, test=None, queue=False, **kwargs): st_._mod_init(kwargs) snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called localy')) - ret = {'{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(kwargs): + ret = {u'{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(kwargs): st_.call(kwargs)} _set_retcode(ret) # Work around Windows multiprocessing bug, set __opts__['test'] back to diff --git a/salt/modules/status.py b/salt/modules/status.py index 29c556f590..edb268267f 100644 --- a/salt/modules/status.py +++ b/salt/modules/status.py @@ -17,17 +17,17 @@ import time import logging # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin # Import salt libs import salt.config import salt.minion -import salt.utils import salt.utils.event import salt.utils.files -from salt.utils.network import host_to_ips as _host_to_ips -from salt.utils.network import remote_port_tcp as _remote_port_tcp +import salt.utils.network +import salt.utils.path +import salt.utils.platform from salt.ext.six.moves import zip from salt.exceptions import CommandExecutionError @@ -49,7 +49,7 @@ def __virtual__(): ''' Not all functions supported by Windows ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return False, 'Windows platform is not supported by this module' return __virtualname__ @@ -212,25 +212,25 @@ def uptime(): curr_seconds = time.time() # Get uptime in seconds - if salt.utils.is_linux(): + if salt.utils.platform.is_linux(): ut_path = "/proc/uptime" if not os.path.exists(ut_path): raise CommandExecutionError("File {ut_path} was not found.".format(ut_path=ut_path)) with salt.utils.files.fopen(ut_path) as rfh: seconds = int(float(rfh.read().split()[0])) - elif salt.utils.is_sunos(): - # note: some flavors/vesions report the host uptime inside a zone + elif salt.utils.platform.is_sunos(): + # note: some flavors/versions report the host uptime inside a zone # https://support.oracle.com/epmos/faces/BugDisplay?id=15611584 res = __salt__['cmd.run_all']('kstat -p unix:0:system_misc:boot_time') if res['retcode'] > 0: raise CommandExecutionError('The boot_time kstat was not found.') seconds = int(curr_seconds - int(res['stdout'].split()[-1])) - elif salt.utils.is_openbsd() or salt.utils.is_netbsd(): + elif salt.utils.platform.is_openbsd() or salt.utils.platform.is_netbsd(): bt_data = __salt__['sysctl.get']('kern.boottime') if not bt_data: raise CommandExecutionError('Cannot find kern.boottime system parameter') seconds = int(curr_seconds - int(bt_data)) - elif salt.utils.is_freebsd() or salt.utils.is_darwin(): + elif salt.utils.platform.is_freebsd() or salt.utils.platform.is_darwin(): # format: { sec = 1477761334, usec = 664698 } Sat Oct 29 17:15:34 2016 bt_data = __salt__['sysctl.get']('kern.boottime') if not bt_data: @@ -238,7 +238,7 @@ def uptime(): data = bt_data.split("{")[-1].split("}")[0].strip().replace(' ', '') uptime = dict([(k, int(v,)) for k, v in [p.strip().split('=') for p in data.split(',')]]) seconds = int(curr_seconds - uptime['sec']) - elif salt.utils.is_aix(): + elif salt.utils.platform.is_aix(): seconds = _get_boot_time_aix() else: return __salt__['cmd.run']('uptime') @@ -257,8 +257,8 @@ def uptime(): 'time': '{0}:{1}'.format(up_time.seconds // 3600, up_time.seconds % 3600 // 60), } - if salt.utils.which('who'): - who_cmd = 'who' 
if salt.utils.is_openbsd() else 'who -s' # OpenBSD does not support -s + if salt.utils.path.which('who'): + who_cmd = 'who' if salt.utils.platform.is_openbsd() else 'who -s' # OpenBSD does not support -s ut_ret['users'] = len(__salt__['cmd.run'](who_cmd).split(os.linesep)) return ut_ret @@ -1486,14 +1486,14 @@ def master(master=None, connected=True): master_ips = None if master: - master_ips = _host_to_ips(master) + master_ips = salt.utils.network.host_to_ips(master) if not master_ips: return master_connection_status = False port = __salt__['config.get']('publish_port', default=4505) - connected_ips = _remote_port_tcp(port) + connected_ips = salt.utils.network.remote_port_tcp(port) # Get connection status for master for master_ip in master_ips: diff --git a/salt/modules/stormpath.py b/salt/modules/stormpath.py deleted file mode 100644 index 020d70b44e..0000000000 --- a/salt/modules/stormpath.py +++ /dev/null @@ -1,242 +0,0 @@ -# -*- coding: utf-8 -*- -''' -Support for Stormpath - -.. versionadded:: 2015.8.0 -''' - -# Import python libs -from __future__ import absolute_import, print_function -import json -import logging - -# Import salt libs -import salt.utils.http - -log = logging.getLogger(__name__) - - -def __virtual__(): - ''' - Only load the module if apache is installed - ''' - if not __opts__.get('stormpath', {}).get('apiid', None): - return (False, 'The stormpath execution module failed to load: requires the stormpath:apiid config option to be set.') - if not __opts__.get('stormpath', {}).get('apikey', None): - return (False, 'The stormpath execution module failed to load: requires the stormpath:apikey config option to be set.') - return True - - -def create_account(directory_id, email, password, givenName, surname, **kwargs): - ''' - Create an account - - CLI Examples: - - salt myminion stormpath.create_account shemp@example.com letmein Shemp Howard - - ''' - items = { - 'email': email, - 'password': password, - 'givenName': givenName, - 'surname': surname, - } - items.update(**kwargs) - - status, result = _query( - action='directories', - command='{0}/accounts'.format(directory_id), - data=json.dumps(items), - header_dict={'Content-Type': 'application/json;charset=UTF-8'}, - method='POST', - ) - - comps = result['href'].split('/') - return show_account(comps[-1]) - - -def list_accounts(): - ''' - Show all accounts. - - CLI Example: - - salt myminion stormpath.list_accounts - - ''' - status, result = _query(action='accounts', command='current') - return result - - -def show_account(account_id=None, - email=None, - directory_id=None, - application_id=None, - group_id=None, - **kwargs): - ''' - Show a specific account. 
- - CLI Example: - - salt myminion stormpath.show_account - - ''' - if account_id: - status, result = _query( - action='accounts', - command=account_id, - ) - return result - - if email: - if not directory_id and not application_id and not group_id: - return {'Error': 'Either a directory_id, application_id, or ' - 'group_id must be specified with an email address'} - if directory_id: - status, result = _query( - action='directories', - command='{0}/accounts'.format(directory_id), - args={'email': email} - ) - elif application_id: - status, result = _query( - action='applications', - command='{0}/accounts'.format(application_id), - args={'email': email} - ) - elif group_id: - status, result = _query( - action='groups', - command='{0}/accounts'.format(group_id), - args={'email': email} - ) - return result - - -def update_account(account_id, key=None, value=None, items=None): - ''' - Update one or more items for this account. Specifying an empty value will - clear it for that account. - - CLI Examples: - - salt myminion stormpath.update_account givenName shemp - salt myminion stormpath.update_account middleName '' - salt myminion stormpath.update_account items='{"givenName": "Shemp"} - salt myminion stormpath.update_account items='{"middlename": ""} - - ''' - if items is None: - if key is None or value is None: - return {'Error': 'At least one key/value pair is required'} - items = {key: value} - - status, result = _query( - action='accounts', - command=account_id, - data=json.dumps(items), - header_dict={'Content-Type': 'application/json;charset=UTF-8'}, - method='POST', - ) - - return show_account(account_id) - - -def delete_account(account_id): - ''' - Delete an account. - - CLI Examples: - - salt myminion stormpath.delete_account - ''' - _query( - action='accounts', - command=account_id, - method='DELETE', - ) - - return True - - -def list_directories(): - ''' - Show all directories. - - CLI Example: - - salt myminion stormpath.list_directories - - ''' - tenant = show_tenant() - tenant_id = tenant.get('href', '').split('/')[-1] - status, result = _query(action='tenants', command='{0}/directories'.format(tenant_id)) - return result - - -def show_tenant(): - ''' - Get the tenant for the login being used. - ''' - status, result = _query(action='tenants', command='current') - return result - - -def _query(action=None, - command=None, - args=None, - method='GET', - header_dict=None, - data=None): - ''' - Make a web call to Stormpath. 
- ''' - apiid = __opts__.get('stormpath', {}).get('apiid', None) - apikey = __opts__.get('stormpath', {}).get('apikey', None) - path = 'https://api.stormpath.com/v1/' - - if action: - path += action - - if command: - path += '/{0}'.format(command) - - log.debug('Stormpath URL: {0}'.format(path)) - - if not isinstance(args, dict): - args = {} - - if header_dict is None: - header_dict = {} - - if method != 'POST': - header_dict['Accept'] = 'application/json' - - decode = True - if method == 'DELETE': - decode = False - - return_content = None - result = salt.utils.http.query( - path, - method, - username=apiid, - password=apikey, - params=args, - data=data, - header_dict=header_dict, - decode=decode, - decode_type='json', - text=True, - status=True, - opts=__opts__, - ) - log.debug( - 'Stormpath Response Status Code: {0}'.format( - result['status'] - ) - ) - - return [result['status'], result.get('dict', {})] diff --git a/salt/modules/supervisord.py b/salt/modules/supervisord.py index dd22987cff..648a65e2b9 100644 --- a/salt/modules/supervisord.py +++ b/salt/modules/supervisord.py @@ -15,6 +15,7 @@ from salt.ext.six.moves import configparser # pylint: disable=import-error # Import salt libs import salt.utils +import salt.utils.stringutils from salt.exceptions import CommandExecutionError, CommandNotFoundError @@ -408,7 +409,7 @@ def options(name, conf_file=None): raise CommandExecutionError('Process \'{0}\' not found'.format(name)) ret = {} for key, val in config.items(section_name): - val = salt.utils.str_to_num(val.split(';')[0].strip()) + val = salt.utils.stringutils.to_num(val.split(';')[0].strip()) # pylint: disable=maybe-no-member if isinstance(val, string_types): if val.lower() == 'true': diff --git a/salt/modules/suse_apache.py b/salt/modules/suse_apache.py index 00c2930ad3..ab4245abfd 100644 --- a/salt/modules/suse_apache.py +++ b/salt/modules/suse_apache.py @@ -12,7 +12,7 @@ from __future__ import absolute_import import logging # Import salt libs -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -23,7 +23,7 @@ def __virtual__(): ''' Only load the module if apache is installed. 
''' - if salt.utils.which('apache2ctl') and __grains__['os_family'] == 'SUSE': + if salt.utils.path.which('apache2ctl') and __grains__['os_family'] == 'SUSE': return __virtualname__ return (False, 'apache execution module not loaded: apache not installed.') diff --git a/salt/modules/svn.py b/salt/modules/svn.py index 4126214451..cdc76320ea 100644 --- a/salt/modules/svn.py +++ b/salt/modules/svn.py @@ -8,8 +8,9 @@ from __future__ import absolute_import import re # Import salt libs -import salt.utils -from salt import utils, exceptions +import salt.utils.args +import salt.utils.path +from salt.exceptions import CommandExecutionError _INI_RE = re.compile(r"^([^:]+):\s+(\S.*)$", re.M) @@ -18,7 +19,7 @@ def __virtual__(): ''' Only load if svn is installed ''' - if utils.which('svn') is None: + if salt.utils.path.which('svn') is None: return (False, 'The svn execution module cannot be loaded: svn unavailable.') else: @@ -68,7 +69,7 @@ def _run_svn(cmd, cwd, user, username, password, opts, **kwargs): if retcode == 0: return result['stdout'] - raise exceptions.CommandExecutionError(result['stderr'] + '\n\n' + ' '.join(cmd)) + raise CommandExecutionError(result['stderr'] + '\n\n' + ' '.join(cmd)) def info(cwd, @@ -112,7 +113,7 @@ def info(cwd, if fmt == 'xml': opts.append('--xml') if targets: - opts += salt.utils.shlex_split(targets) + opts += salt.utils.args.shlex_split(targets) infos = _run_svn('info', cwd, user, username, password, opts) if fmt in ('str', 'xml'): @@ -241,7 +242,7 @@ def update(cwd, targets=None, user=None, username=None, password=None, *opts): salt '*' svn.update /path/to/repo ''' if targets: - opts += tuple(salt.utils.shlex_split(targets)) + opts += tuple(salt.utils.args.shlex_split(targets)) return _run_svn('update', cwd, user, username, password, opts) @@ -275,7 +276,7 @@ def diff(cwd, targets=None, user=None, username=None, password=None, *opts): salt '*' svn.diff /path/to/repo ''' if targets: - opts += tuple(salt.utils.shlex_split(targets)) + opts += tuple(salt.utils.args.shlex_split(targets)) return _run_svn('diff', cwd, user, username, password, opts) @@ -320,7 +321,7 @@ def commit(cwd, if msg: opts += ('-m', msg) if targets: - opts += tuple(salt.utils.shlex_split(targets)) + opts += tuple(salt.utils.args.shlex_split(targets)) return _run_svn('commit', cwd, user, username, password, opts) @@ -352,7 +353,7 @@ def add(cwd, targets, user=None, username=None, password=None, *opts): salt '*' svn.add /path/to/repo /path/to/new/file ''' if targets: - opts += tuple(salt.utils.shlex_split(targets)) + opts += tuple(salt.utils.args.shlex_split(targets)) return _run_svn('add', cwd, user, username, password, opts) @@ -395,7 +396,7 @@ def remove(cwd, if msg: opts += ('-m', msg) if targets: - opts += tuple(salt.utils.shlex_split(targets)) + opts += tuple(salt.utils.args.shlex_split(targets)) return _run_svn('remove', cwd, user, username, password, opts) @@ -429,7 +430,7 @@ def status(cwd, targets=None, user=None, username=None, password=None, *opts): salt '*' svn.status /path/to/repo ''' if targets: - opts += tuple(salt.utils.shlex_split(targets)) + opts += tuple(salt.utils.args.shlex_split(targets)) return _run_svn('status', cwd, user, username, password, opts) diff --git a/salt/modules/sysbench.py b/salt/modules/sysbench.py index 439902d182..1ee86e02ca 100644 --- a/salt/modules/sysbench.py +++ b/salt/modules/sysbench.py @@ -8,7 +8,7 @@ CPU, Memory, File I/O, Threads and Mutex. 
from __future__ import absolute_import import re -import salt.utils +import salt.utils.path from salt.ext.six.moves import zip @@ -17,7 +17,7 @@ def __virtual__(): loads the module, if only sysbench is installed ''' # finding the path of the binary - if salt.utils.which('sysbench'): + if salt.utils.path.which('sysbench'): return 'sysbench' return (False, 'The sysbench execution module failed to load: the sysbench binary is not in the path.') diff --git a/salt/modules/sysfs.py b/salt/modules/sysfs.py index c80437140c..69f5325c87 100644 --- a/salt/modules/sysfs.py +++ b/salt/modules/sysfs.py @@ -5,18 +5,18 @@ Module for interfacing with SysFS .. seealso:: https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt .. versionadded:: 2016.3.0 ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import logging import os import stat -# Import external libs -import salt.ext.six as six - -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.files +import salt.utils.platform + +# Import 3rd-party libs +from salt.ext import six log = logging.getLogger(__name__) @@ -25,7 +25,7 @@ def __virtual__(): ''' Only work on Linux ''' - return salt.utils.is_linux() + return salt.utils.platform.is_linux() def attr(key, value=None): diff --git a/salt/modules/syslog_ng.py b/salt/modules/syslog_ng.py index f2c4a5f3da..8b03b2229f 100644 --- a/salt/modules/syslog_ng.py +++ b/salt/modules/syslog_ng.py @@ -34,12 +34,12 @@ import os import os.path # Import Salt libs -import salt.utils import salt.utils.files +import salt.utils.path from salt.exceptions import CommandExecutionError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin @@ -426,7 +426,7 @@ def _is_simple_type(value): Returns True, if the given parameter value is an instance of either int, str, float or bool. ''' - return isinstance(value, str) or isinstance(value, int) or isinstance(value, float) or isinstance(value, bool) + return isinstance(value, six.string_types) or isinstance(value, int) or isinstance(value, float) or isinstance(value, bool) def _get_type_id_options(name, configuration): @@ -815,7 +815,7 @@ def _run_command_in_extended_path(syslog_ng_sbin_dir, command, params): ''' orig_path = _add_to_path_envvar(syslog_ng_sbin_dir) - if not salt.utils.which(command): + if not salt.utils.path.which(command): error_message = ( 'Unable to execute the command \'{0}\'. It is not in the PATH.' 
.format(command) diff --git a/salt/modules/sysmod.py b/salt/modules/sysmod.py index a7c731e93e..93a0c99321 100644 --- a/salt/modules/sysmod.py +++ b/salt/modules/sysmod.py @@ -18,7 +18,7 @@ from salt.utils.doc import strip_rst as _strip_rst from salt.ext.six.moves import zip # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/modules/sysrc.py b/salt/modules/sysrc.py index 7438281c37..be0cec4416 100644 --- a/salt/modules/sysrc.py +++ b/salt/modules/sysrc.py @@ -7,7 +7,7 @@ sysrc module for FreeBSD from __future__ import absolute_import # Import Salt libs -import salt.utils +import salt.utils.path from salt.exceptions import CommandExecutionError @@ -22,7 +22,7 @@ def __virtual__(): ''' Only runs if sysrc exists ''' - if salt.utils.which('sysrc') is not None: + if salt.utils.path.which('sysrc') is not None: return True return (False, 'The sysrc execution module failed to load: the sysrc binary is not in the path.') diff --git a/salt/modules/system.py b/salt/modules/system.py index feada3bd24..54caa7c642 100644 --- a/salt/modules/system.py +++ b/salt/modules/system.py @@ -14,17 +14,19 @@ Support for reboot, shutdown, etc on POSIX-like systems. ''' from __future__ import absolute_import -# Import python libs +# Import Python libs from datetime import datetime, timedelta, tzinfo import re import os.path -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.files -import salt.ext.six as six +import salt.utils.path +import salt.utils.platform from salt.exceptions import CommandExecutionError, SaltInvocationError +# Import 3rd-party libs +from salt.ext import six __virtualname__ = 'system' @@ -34,13 +36,13 @@ def __virtual__(): Only supported on POSIX-like systems Windows, Solaris, and Mac have their own modules ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return (False, 'This module is not available on Windows') - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return (False, 'This module is not available on Mac OS') - if salt.utils.is_sunos(): + if salt.utils.platform.is_sunos(): return (False, 'This module is not available on SunOS') return __virtualname__ @@ -178,7 +180,7 @@ def has_settable_hwclock(): salt '*' system.has_settable_hwclock ''' - if salt.utils.which_bin(['hwclock']) is not None: + if salt.utils.path.which_bin(['hwclock']) is not None: res = __salt__['cmd.run_all'](['hwclock', '--test', '--systohc'], python_shell=False) return res['retcode'] == 0 return False @@ -505,7 +507,7 @@ def get_computer_desc(): salt '*' system.get_computer_desc ''' desc = None - hostname_cmd = salt.utils.which('hostnamectl') + hostname_cmd = salt.utils.path.which('hostnamectl') if hostname_cmd: desc = __salt__['cmd.run']( [hostname_cmd, 'status', '--pretty'], @@ -549,7 +551,7 @@ def set_computer_desc(desc): desc = desc.replace('"', '\\"') else: desc = desc.encode('string_escape').replace('"', '\\"') - hostname_cmd = salt.utils.which('hostnamectl') + hostname_cmd = salt.utils.path.which('hostnamectl') if hostname_cmd: result = __salt__['cmd.retcode']( [hostname_cmd, 'set-hostname', '--pretty', desc], diff --git a/salt/modules/system_profiler.py b/salt/modules/system_profiler.py index 31e0b8efe3..230a5f9548 100644 --- a/salt/modules/system_profiler.py +++ b/salt/modules/system_profiler.py @@ -13,7 +13,7 @@ from __future__ import absolute_import import plistlib import subprocess -import salt.utils +import salt.utils.path from salt.ext import six PROFILER_BINARY = 
'/usr/sbin/system_profiler' @@ -23,7 +23,7 @@ def __virtual__(): ''' Check to see if the system_profiler binary is available ''' - PROFILER_BINARY = salt.utils.which('system_profiler') + PROFILER_BINARY = salt.utils.path.which('system_profiler') if PROFILER_BINARY: return True diff --git a/salt/modules/systemd.py b/salt/modules/systemd.py index 1be51a4e6e..f2e6a6f1a3 100644 --- a/salt/modules/systemd.py +++ b/salt/modules/systemd.py @@ -10,7 +10,7 @@ Provides the service module for systemd *'service.start' is not available*), see :ref:`here `. ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import errno import glob @@ -20,11 +20,14 @@ import fnmatch import re import shlex -import salt.utils +# Import Salt libs import salt.utils.files import salt.utils.itertools +import salt.utils.path import salt.utils.systemd from salt.exceptions import CommandExecutionError + +# Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) @@ -254,7 +257,7 @@ def _get_service_exec(): if contextkey not in __context__: executables = ('update-rc.d', 'chkconfig') for executable in executables: - service_exec = salt.utils.which(executable) + service_exec = salt.utils.path.which(executable) if service_exec is not None: break else: diff --git a/salt/modules/test.py b/salt/modules/test.py index 7776aeedc2..30008285f8 100644 --- a/salt/modules/test.py +++ b/salt/modules/test.py @@ -15,9 +15,11 @@ import random # Import Salt libs import salt import salt.utils +import salt.utils.hashutils +import salt.utils.platform import salt.version import salt.loader -import salt.ext.six as six +from salt.ext import six from salt.utils.decorators import depends __proxyenabled__ = ['*'] @@ -115,7 +117,7 @@ def ping(): salt '*' test.ping ''' - if not salt.utils.is_proxy(): + if not salt.utils.platform.is_proxy(): log.debug('test.ping received for minion \'%s\'', __opts__.get('id')) return True else: @@ -312,6 +314,18 @@ def arg_repr(*args, **kwargs): return {"args": repr(args), "kwargs": repr(kwargs)} +def arg_clean(*args, **kwargs): + ''' + Like test.arg but cleans kwargs of the __pub* items + CLI Example: + + .. code-block:: bash + + salt '*' test.arg_clean 1 "two" 3.1 txt="hello" wow='{a: 1, b: "hello"}' + ''' + return dict(args=args, kwargs=salt.utils.clean_kwargs(**kwargs)) + + def fib(num): ''' Return the num-th Fibonacci number, and the time it took to compute in @@ -480,25 +494,34 @@ def opts_pkg(): def rand_str(size=9999999999, hash_type=None): + salt.utils.warn_until( + 'Neon', + 'test.rand_str has been renamed to test.random_hash' + ) + return random_hash(size=size, hash_type=hash_type) + + +def random_hash(size=9999999999, hash_type=None): ''' - Return a random string + .. versionadded:: 2015.5.2 + .. versionchanged:: Oxygen + Function has been renamed from ``test.rand_str`` to + ``test.random_hash`` - size - size of the string to generate - hash_type - hash type to use - - .. versionadded:: 2015.5.2 + Generates a random number between 1 and ``size``, then returns a hash of + that number. If no ``hash_type`` is passed, the hash_type specified by the + minion's :conf_minion:`hash_type` config option is used. CLI Example: .. 
code-block:: bash - salt '*' test.rand_str + salt '*' test.random_hash + salt '*' test.random_hash hash_type=sha512 ''' if not hash_type: hash_type = __opts__.get('hash_type', 'md5') - return salt.utils.rand_str(hash_type=hash_type, size=size) + return salt.utils.hashutils.random_hash(size=size, hash_type=hash_type) def exception(message='Test Exception'): diff --git a/salt/modules/testinframod.py b/salt/modules/testinframod.py index c4ad1df0ef..c6d440b27a 100644 --- a/salt/modules/testinframod.py +++ b/salt/modules/testinframod.py @@ -285,13 +285,16 @@ def _register_functions(): functions, and then register them in the module namespace so that they can be called via salt. """ - for module_ in modules.__all__: - mod_name = _to_snake_case(module_) + try: + modules_ = [_to_snake_case(module_) for module_ in modules.__all__] + except AttributeError: + modules_ = [module_ for module_ in modules.modules] + + for mod_name in modules_: mod_func = _copy_function(mod_name, str(mod_name)) - mod_func.__doc__ = _build_doc(module_) + mod_func.__doc__ = _build_doc(mod_name) __all__.append(mod_name) globals()[mod_name] = mod_func - if TESTINFRA_PRESENT: _register_functions() diff --git a/salt/modules/timezone.py b/salt/modules/timezone.py index f60bb376d8..d3f4b645b4 100644 --- a/salt/modules/timezone.py +++ b/salt/modules/timezone.py @@ -16,6 +16,8 @@ import string import salt.utils import salt.utils.files import salt.utils.itertools +import salt.utils.path +import salt.utils.platform from salt.exceptions import SaltInvocationError, CommandExecutionError log = logging.getLogger(__name__) @@ -27,12 +29,12 @@ def __virtual__(): ''' Only work on POSIX-like systems ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return (False, 'The timezone execution module failed to load: ' 'win_timezone.py should replace this module on Windows.' 'There was a problem loading win_timezone.py.') - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return (False, 'The timezone execution module failed to load: ' 'mac_timezone.py should replace this module on macOS.' 
'There was a problem loading mac_timezone.py.') @@ -169,7 +171,7 @@ def get_zone(): salt '*' timezone.get_zone ''' - if salt.utils.which('timedatectl'): + if salt.utils.path.which('timedatectl'): ret = _timedatectl() for line in (x.strip() for x in salt.utils.itertools.split(ret['stdout'], '\n')): @@ -261,7 +263,7 @@ def set_zone(timezone): salt '*' timezone.set_zone 'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00' ''' - if salt.utils.which('timedatectl'): + if salt.utils.path.which('timedatectl'): try: __salt__['cmd.run']('timedatectl set-timezone {0}'.format(timezone)) except CommandExecutionError: @@ -372,7 +374,7 @@ def get_hwclock(): salt '*' timezone.get_hwclock ''' - if salt.utils.which('timedatectl'): + if salt.utils.path.which('timedatectl'): ret = _timedatectl() for line in (x.strip() for x in ret['stdout'].splitlines()): if 'rtc in local tz' in line.lower(): diff --git a/salt/modules/tls.py b/salt/modules/tls.py index ac1df7e740..d065d4a95d 100644 --- a/salt/modules/tls.py +++ b/salt/modules/tls.py @@ -101,7 +101,7 @@ Create a server req + cert with non-CN filename for the cert from __future__ import absolute_import # pylint: disable=C0103 -# Import python libs +# Import Python libs import os import re import time @@ -109,15 +109,15 @@ import calendar import logging import math import binascii -import salt.utils -import salt.utils.files from datetime import datetime -# Import salt libs +# Import Salt libs +import salt.utils.files +import salt.utils.stringutils from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range as _range HAS_SSL = False @@ -264,7 +264,7 @@ def _get_basic_info(ca_name, cert, ca_dir=None): expire_date = _four_digit_year_to_two_digit( datetime.strptime( - salt.utils.to_str(cert.get_notAfter()), + salt.utils.stringutils.to_str(cert.get_notAfter()), four_digit_year_fmt) ) serial_number = format(cert.get_serial_number(), 'X') @@ -1010,9 +1010,9 @@ def create_csr(ca_name, for ext, value in extensions.items(): if six.PY3: - ext = salt.utils.to_bytes(ext) + ext = salt.utils.stringutils.to_bytes(ext) if isinstance(value, six.string_types): - value = salt.utils.to_bytes(value) + value = salt.utils.stringutils.to_bytes(value) extension_adds.append( OpenSSL.crypto.X509Extension( ext, False, value)) @@ -1022,7 +1022,7 @@ def create_csr(ca_name, if subjectAltName: if X509_EXT_ENABLED: - if isinstance(subjectAltName, str): + if isinstance(subjectAltName, six.string_types): subjectAltName = [subjectAltName] extension_adds.append( @@ -1566,21 +1566,21 @@ def cert_info(cert_path, digest='sha256'): issuer = {} for key, value in cert.get_issuer().get_components(): if isinstance(key, bytes): - key = salt.utils.to_str(key, __salt_system_encoding__) + key = salt.utils.stringutils.to_str(key, __salt_system_encoding__) if isinstance(value, bytes): - value = salt.utils.to_str(value, __salt_system_encoding__) + value = salt.utils.stringutils.to_str(value, __salt_system_encoding__) issuer[key] = value subject = {} for key, value in cert.get_subject().get_components(): if isinstance(key, bytes): - key = salt.utils.to_str(key, __salt_system_encoding__) + key = salt.utils.stringutils.to_str(key, __salt_system_encoding__) if isinstance(value, bytes): - value = salt.utils.to_str(value, __salt_system_encoding__) + value = salt.utils.stringutils.to_str(value, __salt_system_encoding__) subject[key] = value ret = { - 'fingerprint': salt.utils.to_str(cert.digest(digest)), + 
'fingerprint': salt.utils.stringutils.to_str(cert.digest(digest)), 'subject': subject, 'issuer': issuer, 'serial_number': cert.get_serial_number(), @@ -1616,7 +1616,7 @@ def cert_info(cert_path, digest='sha256'): try: value = cert.get_signature_algorithm() if isinstance(value, bytes): - value = salt.utils.to_str(value, __salt_system_encoding__) + value = salt.utils.stringutils.to_str(value, __salt_system_encoding__) ret['signature_algorithm'] = value except AttributeError: # On py3 at least diff --git a/salt/modules/trafficserver.py b/salt/modules/trafficserver.py index 19b49e732d..7f3bdaab0a 100644 --- a/salt/modules/trafficserver.py +++ b/salt/modules/trafficserver.py @@ -14,7 +14,9 @@ import logging import subprocess # Import salt libs -import salt.utils +import salt.utils.path +import salt.utils.stringutils +import salt.utils.versions __virtualname__ = 'trafficserver' @@ -22,14 +24,14 @@ log = logging.getLogger(__name__) def __virtual__(): - if salt.utils.which('traffic_ctl') or salt.utils.which('traffic_line'): + if salt.utils.path.which('traffic_ctl') or salt.utils.path.which('traffic_line'): return __virtualname__ return (False, 'trafficserver execution module not loaded: ' 'neither traffic_ctl nor traffic_line was found.') -_TRAFFICLINE = salt.utils.which('traffic_line') -_TRAFFICCTL = salt.utils.which('traffic_ctl') +_TRAFFICLINE = salt.utils.path.which('traffic_line') +_TRAFFICCTL = salt.utils.path.which('traffic_ctl') def _traffic_ctl(*args): @@ -57,7 +59,7 @@ def _subprocess(cmd): try: proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) - ret = salt.utils.to_str(proc.communicate()[0]).strip() + ret = salt.utils.stringutils.to_str(proc.communicate()[0]).strip() retcode = proc.wait() if ret: @@ -213,7 +215,7 @@ def match_var(regex): salt '*' trafficserver.match_var regex ''' - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'The \'match_var\' function has been deprecated and will be removed in Salt ' '{version}. Please use \'match_metric\' or \'match_config\' instead.' @@ -355,7 +357,7 @@ def read_var(*args): salt '*' trafficserver.read_var proxy.process.http.tcp_hit_count_stat ''' - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'The \'read_var\' function has been deprecated and will be removed in Salt ' '{version}. Please use \'read_metric\' or \'read_config\' instead.' @@ -384,7 +386,7 @@ def set_var(variable, value): salt '*' trafficserver.set_var proxy.config.http.server_ports ''' - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'The \'set_var\' function has been deprecated and will be removed in Salt ' '{version}. Please use \'set_config\' instead.' 
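The trafficserver and test hunks above all follow the same migration: flat ``salt.utils`` helpers move to their namespaced homes (``salt.utils.path.which``, ``salt.utils.stringutils.to_str``, ``salt.utils.versions.warn_until``). As a minimal, hypothetical sketch of the ``__virtual__`` guard these modules converge on — assuming only ``salt.utils.path.which()``, which returns the absolute path of an executable or ``None``:

.. code-block:: python

    # Hypothetical module illustrating the namespaced __virtual__ guard used above.
    import salt.utils.path

    __virtualname__ = 'example'


    def __virtual__():
        '''
        Only load this module if one of the required binaries is on the minion.
        '''
        if salt.utils.path.which('traffic_ctl') or salt.utils.path.which('traffic_line'):
            return __virtualname__
        return (False, 'example module not loaded: required binary not in path.')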
diff --git a/salt/modules/tuned.py b/salt/modules/tuned.py index 45d54b512b..1f4889cf50 100644 --- a/salt/modules/tuned.py +++ b/salt/modules/tuned.py @@ -13,7 +13,7 @@ from __future__ import absolute_import import re # Import Salt libs -import salt.utils +import salt.utils.path __func_alias__ = { 'list_': 'list', @@ -27,7 +27,7 @@ def __virtual__(): Check to see if tuned-adm binary is installed on the system ''' - tuned_adm = salt.utils.which('tuned-adm') + tuned_adm = salt.utils.path.which('tuned-adm') if not tuned_adm: return (False, 'The tuned execution module failed to load: the tuned-adm binary is not in the path.') return __virtualname__ diff --git a/salt/modules/twilio_notify.py b/salt/modules/twilio_notify.py index 520a7c0cc3..334ecd167e 100644 --- a/salt/modules/twilio_notify.py +++ b/salt/modules/twilio_notify.py @@ -21,8 +21,15 @@ import logging HAS_LIBS = False try: - from twilio.rest import TwilioRestClient - from twilio import TwilioRestException + import twilio + if twilio.__version__ > 5: + TWILIO_5 = False + from twilio.rest import Client as TwilioRestClient + from twilio.rest import TwilioException as TwilioRestException + else: + TWILIO_5 = True + from twilio.rest import TwilioRestClient + from twilio import TwilioRestException HAS_LIBS = True except ImportError: pass @@ -67,7 +74,10 @@ def send_sms(profile, body, to, from_): ret['message']['sid'] = None client = _get_twilio(profile) try: - message = client.sms.messages.create(body=body, to=to, from_=from_) + if TWILIO_5: + message = client.sms.messages.create(body=body, to=to, from_=from_) + else: + message = client.messages.create(body=body, to=to, from_=from_) except TwilioRestException as exc: ret['_error'] = {} ret['_error']['code'] = exc.code diff --git a/salt/modules/udev.py b/salt/modules/udev.py index 285f9f1fee..758c2bb32a 100644 --- a/salt/modules/udev.py +++ b/salt/modules/udev.py @@ -8,7 +8,7 @@ Manage and query udev info from __future__ import absolute_import import logging -import salt.utils +import salt.utils.path import salt.modules.cmdmod from salt.exceptions import CommandExecutionError @@ -23,7 +23,7 @@ def __virtual__(): ''' Only work when udevadm is installed. 
''' - return salt.utils.which_bin(['udevadm']) is not None + return salt.utils.path.which_bin(['udevadm']) is not None def _parse_udevadm_info(udev_info): diff --git a/salt/modules/useradd.py b/salt/modules/useradd.py index 2f16336fd4..24900f3480 100644 --- a/salt/modules/useradd.py +++ b/salt/modules/useradd.py @@ -19,12 +19,14 @@ import logging import copy # Import salt libs -import salt.utils +import salt.utils # Can be removed when get_group_list is moved import salt.utils.files -import salt.utils.decorators as decorators -from salt.ext import six +import salt.utils.decorators.path +import salt.utils.locales from salt.exceptions import CommandExecutionError -from salt.utils import locales + +# Import 3rd-party libs +from salt.ext import six log = logging.getLogger(__name__) @@ -60,10 +62,10 @@ def _get_gecos(name): # Assign empty strings for any unspecified trailing GECOS fields while len(gecos_field) < 4: gecos_field.append('') - return {'fullname': locales.sdecode(gecos_field[0]), - 'roomnumber': locales.sdecode(gecos_field[1]), - 'workphone': locales.sdecode(gecos_field[2]), - 'homephone': locales.sdecode(gecos_field[3])} + return {'fullname': salt.utils.locales.sdecode(gecos_field[0]), + 'roomnumber': salt.utils.locales.sdecode(gecos_field[1]), + 'workphone': salt.utils.locales.sdecode(gecos_field[2]), + 'homephone': salt.utils.locales.sdecode(gecos_field[3])} def _build_gecos(gecos_dict): @@ -595,7 +597,7 @@ def _format_info(data): 'homephone': gecos_field[3]} -@decorators.which('id') +@salt.utils.decorators.path.which('id') def primary_group(name): ''' Return the primary group of the named user diff --git a/salt/modules/uwsgi.py b/salt/modules/uwsgi.py index d5df5e5826..ec361cf69b 100644 --- a/salt/modules/uwsgi.py +++ b/salt/modules/uwsgi.py @@ -12,7 +12,7 @@ from __future__ import absolute_import import json # Import Salt libs -import salt.utils +import salt.utils.path def __virtual__(): @@ -20,7 +20,7 @@ def __virtual__(): Only load the module if uwsgi is installed ''' cmd = 'uwsgi' - if salt.utils.which(cmd): + if salt.utils.path.which(cmd): return cmd return (False, 'The uwsgi execution module failed to load: the uwsgi binary is not in the path.') diff --git a/salt/modules/varnish.py b/salt/modules/varnish.py index 75050657cc..cd325f2405 100644 --- a/salt/modules/varnish.py +++ b/salt/modules/varnish.py @@ -16,7 +16,7 @@ import logging import re # Import salt libs -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -28,7 +28,7 @@ def __virtual__(): ''' Only load the module if varnish is installed ''' - if salt.utils.which('varnishd') and salt.utils.which('varnishadm'): + if salt.utils.path.which('varnishd') and salt.utils.path.which('varnishadm'): return __virtualname__ return (False, 'The varnish execution module failed to load: either varnishd or varnishadm is not in the path.') diff --git a/salt/modules/vboxmanage.py b/salt/modules/vboxmanage.py index e84761e75c..642914f070 100644 --- a/salt/modules/vboxmanage.py +++ b/salt/modules/vboxmanage.py @@ -22,12 +22,14 @@ import os.path import logging # pylint: disable=import-error,no-name-in-module -import salt.utils import salt.utils.files -from salt.ext.six import string_types +import salt.utils.path from salt.exceptions import CommandExecutionError # pylint: enable=import-error,no-name-in-module +# Import 3rd-party libs +from salt.ext import six + LOG = logging.getLogger(__name__) UUID_RE = re.compile('[^{0}]'.format('a-zA-Z0-9._-')) @@ -56,7 +58,7 @@ def vboxcmd(): salt '*' vboxmanage.vboxcmd ''' 
- return salt.utils.which('VBoxManage') + return salt.utils.path.which('VBoxManage') def list_ostypes(): @@ -255,7 +257,7 @@ def create(name, params += ' --name {0}'.format(name) if groups: - if isinstance(groups, string_types): + if isinstance(groups, six.string_types): groups = [groups] if isinstance(groups, list): params += ' --groups {0}'.format(','.join(groups)) @@ -370,7 +372,7 @@ def clonevm(name=None, params += ' --name {0}'.format(new_name) if groups: - if isinstance(groups, string_types): + if isinstance(groups, six.string_types): groups = [groups] if isinstance(groups, list): params += ' --groups {0}'.format(','.join(groups)) diff --git a/salt/modules/virt.py b/salt/modules/virt.py index e0c841a847..3354493736 100644 --- a/salt/modules/virt.py +++ b/salt/modules/virt.py @@ -24,7 +24,7 @@ from xml.etree import ElementTree import yaml import jinja2 import jinja2.exceptions -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import StringIO as _StringIO # pylint: disable=import-error from xml.dom import minidom try: @@ -37,6 +37,8 @@ except ImportError: # Import salt libs import salt.utils import salt.utils.files +import salt.utils.path +import salt.utils.stringutils import salt.utils.templates import salt.utils.validate.net from salt.exceptions import CommandExecutionError, SaltInvocationError @@ -196,14 +198,14 @@ def _libvirt_creds(): stdout = subprocess.Popen(g_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0] - group = salt.utils.to_str(stdout).split('"')[1] + group = salt.utils.stringutils.to_str(stdout).split('"')[1] except IndexError: group = 'root' try: stdout = subprocess.Popen(u_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0] - user = salt.utils.to_str(stdout).split('"')[1] + user = salt.utils.stringutils.to_str(stdout).split('"')[1] except IndexError: user = 'root' return {'user': user, 'group': group} @@ -395,7 +397,7 @@ def _qemu_image_create(vm_name, sfn = __salt__['cp.cache_file'](disk_image, saltenv) qcow2 = False - if salt.utils.which('qemu-img'): + if salt.utils.path.which('qemu-img'): res = __salt__['cmd.run']('qemu-img info {}'.format(sfn)) imageinfo = yaml.load(res) qcow2 = imageinfo['file format'] == 'qcow2' @@ -1086,7 +1088,7 @@ def get_disks(vm_): ['qemu-img', 'info', disks[dev]['file']], shell=False, stdout=subprocess.PIPE).communicate()[0] - qemu_output = salt.utils.to_str(stdout) + qemu_output = salt.utils.stringutils.to_str(stdout) snapshots = False columns = None lines = qemu_output.strip().split('\n') @@ -1527,7 +1529,7 @@ def migrate_non_shared(vm_, target, ssh=False): stdout = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).communicate()[0] - return salt.utils.to_str(stdout) + return salt.utils.stringutils.to_str(stdout) def migrate_non_shared_inc(vm_, target, ssh=False): @@ -1546,7 +1548,7 @@ def migrate_non_shared_inc(vm_, target, ssh=False): stdout = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).communicate()[0] - return salt.utils.to_str(stdout) + return salt.utils.stringutils.to_str(stdout) def migrate(vm_, target, ssh=False): @@ -1565,7 +1567,7 @@ def migrate(vm_, target, ssh=False): stdout = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).communicate()[0] - return salt.utils.to_str(stdout) + return salt.utils.stringutils.to_str(stdout) def seed_non_shared_migrate(disks, force=False): diff --git a/salt/modules/virtualenv_mod.py b/salt/modules/virtualenv_mod.py index 77164a31a3..8c2849e2ef 100644 --- a/salt/modules/virtualenv_mod.py +++ b/salt/modules/virtualenv_mod.py 
@@ -14,8 +14,9 @@ import os import sys # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path +import salt.utils.platform import salt.utils.verify from salt.exceptions import CommandExecutionError, SaltInvocationError from salt.ext.six import string_types @@ -29,7 +30,7 @@ KNOWN_BINARY_NAMES = frozenset([ log = logging.getLogger(__name__) __opts__ = { - 'venv_bin': salt.utils.which_bin(KNOWN_BINARY_NAMES) or 'virtualenv' + 'venv_bin': salt.utils.path.which_bin(KNOWN_BINARY_NAMES) or 'virtualenv' } __pillar__ = {} @@ -185,7 +186,7 @@ def create(path, cmd.append('--distribute') if python is not None and python.strip() != '': - if not salt.utils.which(python): + if not salt.utils.path.which(python): raise CommandExecutionError( 'Cannot find requested python ({0}).'.format(python) ) @@ -259,7 +260,7 @@ def create(path, return ret # Check if distribute and pip are already installed - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): venv_python = os.path.join(path, 'Scripts', 'python.exe') venv_pip = os.path.join(path, 'Scripts', 'pip.exe') venv_setuptools = os.path.join(path, 'Scripts', 'easy_install.exe') @@ -453,12 +454,12 @@ def get_resource_content(venv, def _install_script(source, cwd, python, user, saltenv='base', use_vt=False): - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): tmppath = salt.utils.files.mkstemp(dir=cwd) else: tmppath = __salt__['cp.cache_file'](source, saltenv) - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): fn_ = __salt__['cp.cache_file'](source, saltenv) shutil.copyfile(fn_, tmppath) os.chmod(tmppath, 0o500) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 7744b3983d..326294c7e6 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -169,14 +169,15 @@ import inspect from functools import wraps # Import Salt Libs -import salt.ext.six as six +from salt.ext import six import salt.utils -import salt.utils.vmware +import salt.utils.args +import salt.utils.dictupdate as dictupdate import salt.utils.http -from salt.utils import dictupdate +import salt.utils.path +import salt.utils.vmware from salt.exceptions import CommandExecutionError, VMwareSaltError from salt.utils.decorators import depends, ignores_kwargs -from salt.utils import clean_kwargs # Import Third Party Libs try: @@ -185,7 +186,7 @@ try: except ImportError: HAS_PYVMOMI = False -esx_cli = salt.utils.which('esxcli') +esx_cli = salt.utils.path.which('esxcli') if esx_cli: HAS_ESX_CLI = True else: @@ -194,7 +195,7 @@ else: log = logging.getLogger(__name__) __virtualname__ = 'vsphere' -__proxyenabled__ = ['esxi'] +__proxyenabled__ = ['esxi', 'esxdatacenter'] def __virtual__(): @@ -226,6 +227,8 @@ def _get_proxy_connection_details(): proxytype = get_proxy_type() if proxytype == 'esxi': details = __salt__['esxi.get_details']() + elif proxytype == 'esxdatacenter': + details = __salt__['esxdatacenter.get_details']() else: raise CommandExecutionError('\'{0}\' proxy is not supported' ''.format(proxytype)) @@ -253,7 +256,7 @@ def supports_proxies(*proxy_types): raise CommandExecutionError( '\'{0}\' proxy is not supported by function {1}' ''.format(proxy_type, fn.__name__)) - return fn(*args, **clean_kwargs(**kwargs)) + return fn(*args, **salt.utils.args.clean_kwargs(**kwargs)) return __supports_proxies return _supports_proxies @@ -264,6 +267,8 @@ def gets_service_instance_via_proxy(fn): proxy details and passes the connection (vim.ServiceInstance) to the decorated function. 
+ Supported proxies: esxi, esxdatacenter. + Notes: 1. The decorated function must have a ``service_instance`` parameter or a ``**kwarg`` type argument (name of argument is not important); @@ -334,7 +339,7 @@ def gets_service_instance_via_proxy(fn): *connection_details) kwargs['service_instance'] = local_service_instance try: - ret = fn(*args, **clean_kwargs(**kwargs)) + ret = fn(*args, **salt.utils.args.clean_kwargs(**kwargs)) # Disconnect if connected in the decorator if local_service_instance: salt.utils.vmware.disconnect(local_service_instance) @@ -349,7 +354,7 @@ def gets_service_instance_via_proxy(fn): @depends(HAS_PYVMOMI) -@supports_proxies('esxi') +@supports_proxies('esxi', 'esxdatacenter') def get_service_instance_via_proxy(service_instance=None): ''' Returns a service instance to the proxied endpoint (vCenter/ESXi host). @@ -368,7 +373,8 @@ def get_service_instance_via_proxy(service_instance=None): return salt.utils.vmware.get_service_instance(*connection_details) -@supports_proxies('esxi') +@depends(HAS_PYVMOMI) +@supports_proxies('esxi', 'esxdatacenter') def disconnect(service_instance): ''' Disconnects from a vCenter or ESXi host @@ -1903,7 +1909,7 @@ def get_vsan_eligible_disks(host, username, password, protocol=None, port=None, @depends(HAS_PYVMOMI) -@supports_proxies('esxi') +@supports_proxies('esxi', 'esxdatacenter') @gets_service_instance_via_proxy def test_vcenter_connection(service_instance=None): ''' @@ -4278,3 +4284,13 @@ def add_host_to_dvs(host, username, password, vmknic_name, vmnic_name, raise return ret + + +def _get_esxdatacenter_proxy_details(): + ''' + Returns the running esxdatacenter's proxy details + ''' + det = __salt__['esxdatacenter.get_details']() + return det.get('vcenter'), det.get('username'), det.get('password'), \ + det.get('protocol'), det.get('port'), det.get('mechanism'), \ + det.get('principal'), det.get('domain'), det.get('datacenter') diff --git a/salt/modules/win_autoruns.py b/salt/modules/win_autoruns.py index a31f0946f0..e630e7209a 100644 --- a/salt/modules/win_autoruns.py +++ b/salt/modules/win_autoruns.py @@ -4,12 +4,12 @@ Module for listing programs that automatically run on startup (very alpha...not tested on anything but my Win 7x64) ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import os -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform # Define a function alias in order not to shadow built-in's @@ -26,7 +26,7 @@ def __virtual__(): Only works on Windows systems ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return __virtualname__ return (False, "Module win_autoruns: module only works on Windows systems") diff --git a/salt/modules/win_certutil.py b/salt/modules/win_certutil.py index de11351202..c45adb3f76 100644 --- a/salt/modules/win_certutil.py +++ b/salt/modules/win_certutil.py @@ -14,7 +14,7 @@ import re import logging # Import Salt Libs -import salt.utils +import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = "certutil" @@ -24,7 +24,7 @@ def __virtual__(): ''' Only work on Windows ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return __virtualname__ return False diff --git a/salt/modules/win_dacl.py b/salt/modules/win_dacl.py index a72da3be85..e4421bd5e5 100644 --- a/salt/modules/win_dacl.py +++ b/salt/modules/win_dacl.py @@ -16,8 +16,8 @@ import re # may also need to add the ability to take ownership of an object to set # permissions if the minion is running as a user and not 
LOCALSYSTEM -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform from salt.exceptions import CommandExecutionError from salt.ext.six import string_types from salt.ext.six.moves import range # pylint: disable=redefined-builtin @@ -342,7 +342,7 @@ def __virtual__(): ''' Only works on Windows systems ''' - if salt.utils.is_windows() and HAS_WINDOWS_MODULES: + if salt.utils.platform.is_windows() and HAS_WINDOWS_MODULES: return __virtualname__ return (False, "Module win_dacl: module only works on Windows systems") diff --git a/salt/modules/win_disk.py b/salt/modules/win_disk.py index 52c95914bb..278a3de90a 100644 --- a/salt/modules/win_disk.py +++ b/salt/modules/win_disk.py @@ -6,13 +6,15 @@ Module for gathering disk information on Windows ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import ctypes import string -# Import salt libs -import salt.ext.six as six -import salt.utils +# Import Salt libs +import salt.utils.platform + +# Import 3rd-party libs +from salt.ext import six try: import win32api @@ -33,7 +35,7 @@ def __virtual__(): ''' Only works on Windows systems ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return __virtualname__ return (False, "Module win_disk: module only works on Windows systems") diff --git a/salt/modules/win_dism.py b/salt/modules/win_dism.py index ef09af2e0a..20fb33d1b8 100644 --- a/salt/modules/win_dism.py +++ b/salt/modules/win_dism.py @@ -7,12 +7,13 @@ Windows 10. ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import re import logging -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform +import salt.utils.versions log = logging.getLogger(__name__) __virtualname__ = "dism" @@ -22,7 +23,7 @@ def __virtual__(): ''' Only work on Windows ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return False, "Only available on Windows systems" return __virtualname__ @@ -73,7 +74,7 @@ def add_capability(capability, salt '*' dism.add_capability Tools.Graphics.DirectX~~~~0.0.1.0 ''' - if salt.utils.version_cmp(__grains__['osversion'], '10') == -1: + if salt.utils.versions.version_cmp(__grains__['osversion'], '10') == -1: raise NotImplementedError( '`install_capability` is not available on this version of Windows: ' '{0}'.format(__grains__['osversion'])) @@ -118,7 +119,7 @@ def remove_capability(capability, image=None, restart=False): salt '*' dism.remove_capability Tools.Graphics.DirectX~~~~0.0.1.0 ''' - if salt.utils.version_cmp(__grains__['osversion'], '10') == -1: + if salt.utils.versions.version_cmp(__grains__['osversion'], '10') == -1: raise NotImplementedError( '`uninstall_capability` is not available on this version of ' 'Windows: {0}'.format(__grains__['osversion'])) @@ -157,7 +158,7 @@ def get_capabilities(image=None): salt '*' dism.get_capabilities ''' - if salt.utils.version_cmp(__grains__['osversion'], '10') == -1: + if salt.utils.versions.version_cmp(__grains__['osversion'], '10') == -1: raise NotImplementedError( '`installed_capabilities` is not available on this version of ' 'Windows: {0}'.format(__grains__['osversion'])) @@ -197,7 +198,7 @@ def installed_capabilities(image=None): salt '*' dism.installed_capabilities ''' - if salt.utils.version_cmp(__grains__['osversion'], '10') == -1: + if salt.utils.versions.version_cmp(__grains__['osversion'], '10') == -1: raise NotImplementedError( '`installed_capabilities` is not available on this version of ' 'Windows: 
{0}'.format(__grains__['osversion'])) @@ -226,7 +227,7 @@ def available_capabilities(image=None): salt '*' dism.installed_capabilities ''' - if salt.utils.version_cmp(__grains__['osversion'], '10') == -1: + if salt.utils.versions.version_cmp(__grains__['osversion'], '10') == -1: raise NotImplementedError( '`installed_capabilities` is not available on this version of ' 'Windows: {0}'.format(__grains__['osversion'])) diff --git a/salt/modules/win_dns_client.py b/salt/modules/win_dns_client.py index 29e6997656..e030a2ce6f 100644 --- a/salt/modules/win_dns_client.py +++ b/salt/modules/win_dns_client.py @@ -4,11 +4,12 @@ Module for configuring DNS Client on Windows systems ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import logging -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform + try: import wmi except ImportError: @@ -21,7 +22,7 @@ def __virtual__(): ''' Only works on Windows systems ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return 'win_dns_client' return (False, "Module win_dns_client: module only works on Windows systems") diff --git a/salt/modules/win_dsc.py b/salt/modules/win_dsc.py index 90aef68630..56a2b28052 100644 --- a/salt/modules/win_dsc.py +++ b/salt/modules/win_dsc.py @@ -17,13 +17,14 @@ the Minion. ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import logging import json import os -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform +import salt.utils.versions from salt.exceptions import CommandExecutionError, SaltInvocationError # Set up logging @@ -38,7 +39,7 @@ def __virtual__(): Set the system module of the kernel is Windows ''' # Verify Windows - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): log.debug('Module DSC: Only available on Windows systems') return False, 'Module DSC: Only available on Windows systems' @@ -49,7 +50,7 @@ def __virtual__(): return False, 'Module DSC: Requires PowerShell' # Verify PowerShell 5.0 or greater - if salt.utils.compare_versions(powershell_info['version'], '<', '5.0'): + if salt.utils.versions.compare(powershell_info['version'], '<', '5.0'): log.debug('Module DSC: Requires PowerShell 5 or later') return False, 'Module DSC: Requires PowerShell 5 or later' diff --git a/salt/modules/win_file.py b/salt/modules/win_file.py index 12d9319031..26fa0a3c1a 100644 --- a/salt/modules/win_file.py +++ b/salt/modules/win_file.py @@ -44,6 +44,7 @@ from salt.exceptions import CommandExecutionError, SaltInvocationError # Import salt libs import salt.utils import salt.utils.path +import salt.utils.platform from salt.modules.file import (check_hash, # pylint: disable=W0611 directory_exists, get_managed, check_managed, check_managed_changes, source_list, @@ -65,7 +66,7 @@ from salt.utils import namespaced_function as _namespaced_function HAS_WINDOWS_MODULES = False try: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): import win32api import win32file import win32con @@ -83,7 +84,7 @@ except ImportError: HAS_WIN_DACL = False try: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): import salt.utils.win_dacl HAS_WIN_DACL = True except ImportError: @@ -99,7 +100,7 @@ def __virtual__(): ''' Only works on Windows systems ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): if HAS_WINDOWS_MODULES: # Load functions from file.py global get_managed, manage_file @@ -1687,9 +1688,6 @@ def check_perms(path, 
perms = changes[user]['perms'] try: - log.debug('*' * 68) - log.debug(perms) - log.debug('*' * 68) salt.utils.win_dacl.set_permissions( path, user, perms, 'deny', applies_to) ret['changes']['deny_perms'][user] = changes[user] diff --git a/salt/modules/win_firewall.py b/salt/modules/win_firewall.py index e59bde3148..927ec231c0 100644 --- a/salt/modules/win_firewall.py +++ b/salt/modules/win_firewall.py @@ -4,11 +4,11 @@ Module for configuring Windows Firewall using ``netsh`` ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import re -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform from salt.exceptions import CommandExecutionError # Define the module's virtual name @@ -19,7 +19,7 @@ def __virtual__(): ''' Only works on Windows systems ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return False, "Module win_firewall: module only available on Windows" return __virtualname__ diff --git a/salt/modules/win_groupadd.py b/salt/modules/win_groupadd.py index d466380d70..4434615244 100644 --- a/salt/modules/win_groupadd.py +++ b/salt/modules/win_groupadd.py @@ -10,8 +10,8 @@ Manage groups on Windows ''' from __future__ import absolute_import -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform try: @@ -30,7 +30,7 @@ def __virtual__(): ''' Set the group module if the kernel is Windows ''' - if salt.utils.is_windows() and HAS_DEPENDENCIES: + if salt.utils.platform.is_windows() and HAS_DEPENDENCIES: return __virtualname__ return (False, "Module win_groupadd: module only works on Windows systems") diff --git a/salt/modules/win_iis.py b/salt/modules/win_iis.py index d0fe647b03..f206c91116 100644 --- a/salt/modules/win_iis.py +++ b/salt/modules/win_iis.py @@ -18,10 +18,10 @@ import os import decimal # Import salt libs +import salt.utils.platform from salt.ext.six.moves import range from salt.exceptions import SaltInvocationError, CommandExecutionError from salt.ext import six -import salt.utils log = logging.getLogger(__name__) @@ -38,7 +38,7 @@ def __virtual__(): Load only on Windows Requires PowerShell and the WebAdministration module ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return False, 'Only available on Windows systems' powershell_info = __salt__['cmd.shell_info']('powershell', True) @@ -856,7 +856,7 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443, new_cert_bindings = list_cert_bindings(site) - if binding_info not in new_cert_bindings(site): + if binding_info not in new_cert_bindings: log.error('Binding not present: {0}'.format(binding_info)) return False diff --git a/salt/modules/win_ip.py b/salt/modules/win_ip.py index dc7416b78e..7f818e56e5 100644 --- a/salt/modules/win_ip.py +++ b/salt/modules/win_ip.py @@ -4,13 +4,13 @@ The networking module for Windows based systems ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import logging import time -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.network +import salt.utils.platform import salt.utils.validate.net from salt.exceptions import ( CommandExecutionError, @@ -29,7 +29,7 @@ def __virtual__(): ''' Confine this module to Windows systems ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return __virtualname__ return (False, "Module win_ip: module only works on Windows systems") diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index 
514ac6f003..8ff47b9014 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -38,22 +38,20 @@ Current known limitations - struct - salt.modules.reg ''' - -# Import python libs +# Import Python libs from __future__ import absolute_import import os import logging import re -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.files -from salt.exceptions import CommandExecutionError -from salt.exceptions import SaltInvocationError +import salt.utils.platform import salt.utils.dictupdate as dictupdate +from salt.exceptions import CommandExecutionError, SaltInvocationError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range log = logging.getLogger(__name__) @@ -2650,7 +2648,7 @@ def __virtual__(): ''' Only works on Windows systems ''' - if salt.utils.is_windows() and HAS_WINDOWS_MODULES: + if salt.utils.platform.is_windows() and HAS_WINDOWS_MODULES: return __virtualname__ return False @@ -3983,78 +3981,77 @@ def _write_regpol_data(data_to_write, gpt_extension_guid: admx registry extension guid for the class ''' try: - if data_to_write: - reg_pol_header = u'\u5250\u6765\x01\x00' - if not os.path.exists(policy_file_path): - ret = __salt__['file.makedirs'](policy_file_path) - with salt.utils.files.fopen(policy_file_path, 'wb') as pol_file: - if not data_to_write.startswith(reg_pol_header): - pol_file.write(reg_pol_header.encode('utf-16-le')) - pol_file.write(data_to_write.encode('utf-16-le')) - try: - gpt_ini_data = '' - if os.path.exists(gpt_ini_path): - with salt.utils.files.fopen(gpt_ini_path, 'rb') as gpt_file: - gpt_ini_data = gpt_file.read() - if not _regexSearchRegPolData(r'\[General\]\r\n', gpt_ini_data): - gpt_ini_data = '[General]\r\n' + gpt_ini_data - if _regexSearchRegPolData(r'{0}='.format(re.escape(gpt_extension)), gpt_ini_data): - # ensure the line contains the ADM guid - gpt_ext_loc = re.search(r'^{0}=.*\r\n'.format(re.escape(gpt_extension)), - gpt_ini_data, - re.IGNORECASE | re.MULTILINE) - gpt_ext_str = gpt_ini_data[gpt_ext_loc.start():gpt_ext_loc.end()] - if not _regexSearchRegPolData(r'{0}'.format(re.escape(gpt_extension_guid)), - gpt_ext_str): - gpt_ext_str = gpt_ext_str.split('=') - gpt_ext_str[1] = gpt_extension_guid + gpt_ext_str[1] - gpt_ext_str = '='.join(gpt_ext_str) - gpt_ini_data = gpt_ini_data[0:gpt_ext_loc.start()] + gpt_ext_str + gpt_ini_data[gpt_ext_loc.end():] - else: - general_location = re.search(r'^\[General\]\r\n', - gpt_ini_data, - re.IGNORECASE | re.MULTILINE) - gpt_ini_data = "{0}{1}={2}\r\n{3}".format( - gpt_ini_data[general_location.start():general_location.end()], - gpt_extension, gpt_extension_guid, - gpt_ini_data[general_location.end():]) - # https://technet.microsoft.com/en-us/library/cc978247.aspx - if _regexSearchRegPolData(r'Version=', gpt_ini_data): - version_loc = re.search(r'^Version=.*\r\n', - gpt_ini_data, - re.IGNORECASE | re.MULTILINE) - version_str = gpt_ini_data[version_loc.start():version_loc.end()] - version_str = version_str.split('=') - version_nums = struct.unpack('>2H', struct.pack('>I', int(version_str[1]))) - if gpt_extension.lower() == 'gPCMachineExtensionNames'.lower(): - version_nums = (version_nums[0], version_nums[1] + 1) - elif gpt_extension.lower() == 'gPCUserExtensionNames'.lower(): - version_nums = (version_nums[0] + 1, version_nums[1]) - version_num = struct.unpack('>I', struct.pack('>2H', *version_nums))[0] - gpt_ini_data = "{0}{1}={2}\r\n{3}".format( - gpt_ini_data[0:version_loc.start()], - 'Version', version_num, - 
gpt_ini_data[version_loc.end():]) - else: - general_location = re.search(r'^\[General\]\r\n', - gpt_ini_data, - re.IGNORECASE | re.MULTILINE) - if gpt_extension.lower() == 'gPCMachineExtensionNames'.lower(): - version_nums = (0, 1) - elif gpt_extension.lower() == 'gPCUserExtensionNames'.lower(): - version_nums = (1, 0) - gpt_ini_data = "{0}{1}={2}\r\n{3}".format( - gpt_ini_data[general_location.start():general_location.end()], - 'Version', - int("{0}{1}".format(str(version_nums[0]).zfill(4), str(version_nums[1]).zfill(4)), 16), - gpt_ini_data[general_location.end():]) - if gpt_ini_data: - with salt.utils.files.fopen(gpt_ini_path, 'wb') as gpt_file: - gpt_file.write(gpt_ini_data) - except Exception as e: - msg = 'An error occurred attempting to write to {0}, the exception was {1}'.format( - gpt_ini_path, e) - raise CommandExecutionError(msg) + reg_pol_header = u'\u5250\u6765\x01\x00' + if not os.path.exists(policy_file_path): + ret = __salt__['file.makedirs'](policy_file_path) + with salt.utils.files.fopen(policy_file_path, 'wb') as pol_file: + if not data_to_write.startswith(reg_pol_header): + pol_file.write(reg_pol_header.encode('utf-16-le')) + pol_file.write(data_to_write.encode('utf-16-le')) + try: + gpt_ini_data = '' + if os.path.exists(gpt_ini_path): + with salt.utils.files.fopen(gpt_ini_path, 'rb') as gpt_file: + gpt_ini_data = gpt_file.read() + if not _regexSearchRegPolData(r'\[General\]\r\n', gpt_ini_data): + gpt_ini_data = '[General]\r\n' + gpt_ini_data + if _regexSearchRegPolData(r'{0}='.format(re.escape(gpt_extension)), gpt_ini_data): + # ensure the line contains the ADM guid + gpt_ext_loc = re.search(r'^{0}=.*\r\n'.format(re.escape(gpt_extension)), + gpt_ini_data, + re.IGNORECASE | re.MULTILINE) + gpt_ext_str = gpt_ini_data[gpt_ext_loc.start():gpt_ext_loc.end()] + if not _regexSearchRegPolData(r'{0}'.format(re.escape(gpt_extension_guid)), + gpt_ext_str): + gpt_ext_str = gpt_ext_str.split('=') + gpt_ext_str[1] = gpt_extension_guid + gpt_ext_str[1] + gpt_ext_str = '='.join(gpt_ext_str) + gpt_ini_data = gpt_ini_data[0:gpt_ext_loc.start()] + gpt_ext_str + gpt_ini_data[gpt_ext_loc.end():] + else: + general_location = re.search(r'^\[General\]\r\n', + gpt_ini_data, + re.IGNORECASE | re.MULTILINE) + gpt_ini_data = "{0}{1}={2}\r\n{3}".format( + gpt_ini_data[general_location.start():general_location.end()], + gpt_extension, gpt_extension_guid, + gpt_ini_data[general_location.end():]) + # https://technet.microsoft.com/en-us/library/cc978247.aspx + if _regexSearchRegPolData(r'Version=', gpt_ini_data): + version_loc = re.search(r'^Version=.*\r\n', + gpt_ini_data, + re.IGNORECASE | re.MULTILINE) + version_str = gpt_ini_data[version_loc.start():version_loc.end()] + version_str = version_str.split('=') + version_nums = struct.unpack('>2H', struct.pack('>I', int(version_str[1]))) + if gpt_extension.lower() == 'gPCMachineExtensionNames'.lower(): + version_nums = (version_nums[0], version_nums[1] + 1) + elif gpt_extension.lower() == 'gPCUserExtensionNames'.lower(): + version_nums = (version_nums[0] + 1, version_nums[1]) + version_num = struct.unpack('>I', struct.pack('>2H', *version_nums))[0] + gpt_ini_data = "{0}{1}={2}\r\n{3}".format( + gpt_ini_data[0:version_loc.start()], + 'Version', version_num, + gpt_ini_data[version_loc.end():]) + else: + general_location = re.search(r'^\[General\]\r\n', + gpt_ini_data, + re.IGNORECASE | re.MULTILINE) + if gpt_extension.lower() == 'gPCMachineExtensionNames'.lower(): + version_nums = (0, 1) + elif gpt_extension.lower() == 'gPCUserExtensionNames'.lower(): + 
version_nums = (1, 0) + gpt_ini_data = "{0}{1}={2}\r\n{3}".format( + gpt_ini_data[general_location.start():general_location.end()], + 'Version', + int("{0}{1}".format(str(version_nums[0]).zfill(4), str(version_nums[1]).zfill(4)), 16), + gpt_ini_data[general_location.end():]) + if gpt_ini_data: + with salt.utils.files.fopen(gpt_ini_path, 'wb') as gpt_file: + gpt_file.write(gpt_ini_data) + except Exception as e: + msg = 'An error occurred attempting to write to {0}, the exception was {1}'.format( + gpt_ini_path, e) + raise CommandExecutionError(msg) except Exception as e: msg = 'An error occurred attempting to write to {0}, the exception was {1}'.format(policy_file_path, e) raise CommandExecutionError(msg) diff --git a/salt/modules/win_license.py b/salt/modules/win_license.py index 10598424fd..ce990b7959 100644 --- a/salt/modules/win_license.py +++ b/salt/modules/win_license.py @@ -13,7 +13,7 @@ import re import logging # Import Salt Libs -import salt.utils +import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = 'license' @@ -23,7 +23,7 @@ def __virtual__(): ''' Only work on Windows ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return __virtualname__ return False diff --git a/salt/modules/win_network.py b/salt/modules/win_network.py index 3ee8fc8c5d..d01ea6d3e8 100644 --- a/salt/modules/win_network.py +++ b/salt/modules/win_network.py @@ -4,18 +4,16 @@ Module for gathering and managing network information ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import re - -# Import salt libs -import salt.utils import hashlib import datetime import socket -# Import salt libs +# Import Salt libs import salt.utils import salt.utils.network +import salt.utils.platform import salt.utils.validate.net from salt.modules.network import (wol, get_hostname, interface, interface_ip, subnets6, ip_in_subnet, convert_cidr, @@ -44,7 +42,7 @@ def __virtual__(): ''' Only works on Windows systems ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return False, "Module win_network: Only available on Windows" if not HAS_DEPENDENCIES: diff --git a/salt/modules/win_ntp.py b/salt/modules/win_ntp.py index 11051b9a76..3a3109aee3 100644 --- a/salt/modules/win_ntp.py +++ b/salt/modules/win_ntp.py @@ -6,11 +6,11 @@ Management of NTP servers on Windows ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import logging -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform log = logging.getLogger(__name__) @@ -22,7 +22,7 @@ def __virtual__(): ''' This only supports Windows ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return (False, "Module win_system: module only works on Windows systems") return __virtualname__ diff --git a/salt/modules/win_path.py b/salt/modules/win_path.py index 3b446f7cea..27bba5f719 100644 --- a/salt/modules/win_path.py +++ b/salt/modules/win_path.py @@ -8,13 +8,16 @@ http://support.microsoft.com/kb/104011 ''' from __future__ import absolute_import -# Python Libs +# Import Python libs import logging -import re import os -from salt.ext.six.moves import map +import re -# Third party libs +# Import Salt libs +import salt.utils.platform + +# Import 3rd-party libs +from salt.ext.six.moves import map try: from win32con import HWND_BROADCAST, WM_SETTINGCHANGE from win32api import SendMessage @@ -22,9 +25,6 @@ try: except ImportError: HAS_WIN32 = False -# Import salt libs -import salt.utils - # 
Settings log = logging.getLogger(__name__) @@ -33,7 +33,7 @@ def __virtual__(): ''' Load only on Windows ''' - if salt.utils.is_windows() and HAS_WIN32: + if salt.utils.platform.is_windows() and HAS_WIN32: return 'win_path' return (False, "Module win_path: module only works on Windows systems") diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 02133c0041..156681f8b6 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -42,7 +42,7 @@ import time from functools import cmp_to_key # Import third party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error,no-name-in-module from salt.ext.six.moves.urllib.parse import urlparse as _urlparse @@ -50,9 +50,12 @@ from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import (CommandExecutionError, SaltInvocationError, SaltRenderError) -import salt.utils +import salt.utils # Can be removed once is_true, get_hash, compare_dicts are moved +import salt.utils.args import salt.utils.files import salt.utils.pkg +import salt.utils.platform +import salt.utils.versions import salt.syspaths import salt.payload from salt.exceptions import MinionError @@ -68,7 +71,7 @@ def __virtual__(): ''' Set the virtual pkg module if the os is Windows ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return __virtualname__ return (False, "Module win_pkg: module only works on Windows systems") @@ -147,7 +150,7 @@ def latest_version(*names, **kwargs): # check, whether latest available version # is newer than latest installed version - if salt.utils.compare_versions(ver1=str(latest_available), + if salt.utils.versions.compare(ver1=str(latest_available), oper='>', ver2=str(latest_installed)): log.debug('Upgrade of {0} from {1} to {2} ' @@ -946,27 +949,63 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): packages listed under ``pkgs`` will be installed via a single command. + You can specify a version by passing the item as a dict: + + CLI Example: + + .. code-block:: bash + + # will install the latest version of foo and bar + salt '*' pkg.install pkgs='["foo", "bar"]' + + # will install the latest version of foo and version 1.2.3 of bar + salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3"}]' + Kwargs: version (str): - The specific version to install. If omitted, the latest version - will be installed. If passed with multiple install, the version - will apply to all packages. Recommended for single installation - only. + The specific version to install. If omitted, the latest version will + be installed. Recommend for use when installing a single package. + + If passed with a list of packages in the ``pkgs`` parameter, the + version will be ignored. + + CLI Example: + + .. code-block:: bash + + # Version is ignored + salt '*' pkg.install pkgs="['foo', 'bar']" version=1.2.3 + + If passed with a comma seperated list in the ``name`` parameter, the + version will apply to all packages in the list. + + CLI Example: + + .. code-block:: bash + + # Version 1.2.3 will apply to packages foo and bar + salt '*' pkg.install foo,bar version=1.2.3 cache_file (str): - A single file to copy down for use with the installer. Copied to - the same location as the installer. Use this over ``cache_dir`` if - there are many files in the directory and you only need a specific - file and don't want to cache additional files that may reside in - the installer directory. Only applies to files on ``salt://`` + A single file to copy down for use with the installer. 
Copied to the + same location as the installer. Use this over ``cache_dir`` if there + are many files in the directory and you only need a specific file + and don't want to cache additional files that may reside in the + installer directory. Only applies to files on ``salt://`` cache_dir (bool): True will copy the contents of the installer directory. This is - useful for installations that are not a single file. Only applies - to directories on ``salt://`` + useful for installations that are not a single file. Only applies to + directories on ``salt://`` - saltenv (str): Salt environment. Default 'base' + extra_install_flags (str): + Additional install flags that will be appended to the + ``install_flags`` defined in the software definition file. Only + applies when single package is passed. + + saltenv (str): + Salt environment. Default 'base' report_reboot_exit_codes (bool): If the installer exits with a recognized exit code indicating that @@ -1062,13 +1101,22 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # "sources" argument pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0] + if len(pkg_params) > 1: + if kwargs.get('extra_install_flags') is not None: + log.warning('\'extra_install_flags\' argument will be ignored for ' + 'multiple package targets') + + # Windows expects an Options dictionary containing 'version' + for pkg in pkg_params: + pkg_params[pkg] = {'version': pkg_params[pkg]} + if pkg_params is None or len(pkg_params) == 0: log.error('No package definition found') return {} if not pkgs and len(pkg_params) == 1: - # Only use the 'version' param if 'name' was not specified as a - # comma-separated list + # Only use the 'version' param if a single item was passed to the 'name' + # parameter pkg_params = { name: { 'version': kwargs.get('version'), @@ -1237,6 +1285,18 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): #Compute msiexec string use_msiexec, msiexec = _get_msiexec(pkginfo[version_num].get('msiexec', False)) + # Build cmd and arguments + # cmd and arguments must be seperated for use with the task scheduler + if use_msiexec: + cmd = msiexec + arguments = ['/i', cached_pkg] + if pkginfo['version_num'].get('allusers', True): + arguments.append('ALLUSERS="1"') + arguments.extend(salt.utils.shlex_split(install_flags)) + else: + cmd = cached_pkg + arguments = salt.utils.shlex_split(install_flags) + # Install the software # Check Use Scheduler Option if pkginfo[version_num].get('use_scheduler', False): @@ -1247,10 +1307,10 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): arguments = ['/i', cached_pkg] if pkginfo['version_num'].get('allusers', True): arguments.append('ALLUSERS="1"') - arguments.extend(salt.utils.shlex_split(install_flags)) + arguments.extend(salt.utils.args.shlex_split(install_flags)) else: cmd = cached_pkg - arguments = salt.utils.shlex_split(install_flags) + arguments = salt.utils.args.shlex_split(install_flags) # Create Scheduled Task __salt__['task.create_task'](name='update-salt-software', @@ -1265,21 +1325,43 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): start_time='01:00', ac_only=False, stop_if_on_batteries=False) + # Run Scheduled Task - if not __salt__['task.run_wait'](name='update-salt-software'): - log.error('Failed to install {0}'.format(pkg_name)) - log.error('Scheduled Task failed to run') - ret[pkg_name] = {'install status': 'failed'} - else: - # Build the install command - cmd = [] - if use_msiexec: - cmd.extend([msiexec, '/i', cached_pkg]) - if 
pkginfo[version_num].get('allusers', True): - cmd.append('ALLUSERS="1"') + # Special handling for installing salt + if pkg_name in ['salt-minion', 'salt-minion-py3']: + ret[pkg_name] = {'install status': 'task started'} + if not __salt__['task.run'](name='update-salt-software'): + log.error('Failed to install {0}'.format(pkg_name)) + log.error('Scheduled Task failed to run') + ret[pkg_name] = {'install status': 'failed'} + else: + + # Make sure the task is running, try for 5 secs + from time import time + t_end = time() + 5 + while time() < t_end: + task_running = __salt__['task.status']( + 'update-salt-software') == 'Running' + if task_running: + break + + if not task_running: + log.error( + 'Failed to install {0}'.format(pkg_name)) + log.error('Scheduled Task failed to run') + ret[pkg_name] = {'install status': 'failed'} + + # All other packages run with task scheduler else: - cmd.append(cached_pkg) - cmd.extend(salt.utils.shlex_split(install_flags)) + if not __salt__['task.run_wait'](name='update-salt-software'): + log.error('Failed to install {0}'.format(pkg_name)) + log.error('Scheduled Task failed to run') + ret[pkg_name] = {'install status': 'failed'} + else: + + # Combine cmd and arguments + cmd = [cmd].extend(arguments) + # Launch the command result = __salt__['cmd.run_all'](cmd, cache_path, @@ -1550,10 +1632,10 @@ def remove(name=None, pkgs=None, version=None, **kwargs): if use_msiexec: cmd = msiexec arguments = ['/x'] - arguments.extend(salt.utils.shlex_split(uninstall_flags)) + arguments.extend(salt.utils.args.shlex_split(uninstall_flags)) else: cmd = expanded_cached_pkg - arguments = salt.utils.shlex_split(uninstall_flags) + arguments = salt.utils.args.shlex_split(uninstall_flags) # Create Scheduled Task __salt__['task.create_task'](name='update-salt-software', @@ -1580,7 +1662,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs): cmd.extend([msiexec, '/x', expanded_cached_pkg]) else: cmd.append(expanded_cached_pkg) - cmd.extend(salt.utils.shlex_split(uninstall_flags)) + cmd.extend(salt.utils.args.shlex_split(uninstall_flags)) # Launch the command result = __salt__['cmd.run_all']( cmd, @@ -1768,4 +1850,4 @@ def compare_versions(ver1='', oper='==', ver2=''): salt '*' pkg.compare_versions 1.2 >= 1.3 ''' - return salt.utils.compare_versions(ver1, oper, ver2) + return salt.utils.versions.compare(ver1, oper, ver2) diff --git a/salt/modules/win_pki.py b/salt/modules/win_pki.py index 6000bd8bdd..ef277f2baf 100644 --- a/salt/modules/win_pki.py +++ b/salt/modules/win_pki.py @@ -1,22 +1,31 @@ # -*- coding: utf-8 -*- ''' -Microsoft certificate management via the Pki PowerShell module. +Microsoft certificate management via the PKI Client PowerShell module. +https://technet.microsoft.com/en-us/itpro/powershell/windows/pkiclient/pkiclient + +The PKI Client PowerShell module is only available on Windows 8+ and Windows +Server 2012+. +https://technet.microsoft.com/en-us/library/hh848636(v=wps.620).aspx :platform: Windows +:depends: + - PowerShell 4 + - PKI Client Module (Windows 8+ / Windows Server 2012+) + .. 
versionadded:: 2016.11.0 ''' -# Import python libs +# Import Python libs from __future__ import absolute_import +import ast import json import logging +import os # Import salt libs -from salt.exceptions import SaltInvocationError -import ast -import os -import salt.utils +import salt.utils.platform import salt.utils.powershell +from salt.exceptions import SaltInvocationError _DEFAULT_CONTEXT = 'LocalMachine' _DEFAULT_FORMAT = 'cer' @@ -29,11 +38,17 @@ __virtualname__ = 'win_pki' def __virtual__(): ''' - Only works on Windows systems with the PKI PowerShell module installed. + Requires Windows + Requires Windows 8+ / Windows Server 2012+ + Requires PowerShell + Requires PKI Client PowerShell module installed. ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return False, 'Only available on Windows Systems' + if salt.utils.version_cmp(__grains__['osversion'], '6.2.9200') == -1: + return False, 'Only available on Windows 8+ / Windows Server 2012 +' + if not __salt__['cmd.shell_info']('powershell')['installed']: return False, 'Powershell not available' diff --git a/salt/modules/win_psget.py b/salt/modules/win_psget.py index c310349efd..a3d020795c 100644 --- a/salt/modules/win_psget.py +++ b/salt/modules/win_psget.py @@ -10,13 +10,14 @@ Support for PowerShell ''' from __future__ import absolute_import, unicode_literals -# Import python libs +# Import Python libs import copy import logging import json -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform +import salt.utils.versions from salt.exceptions import CommandExecutionError # Set up logging @@ -31,7 +32,7 @@ def __virtual__(): Set the system module of the kernel is Windows ''' # Verify Windows - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): log.debug('Module PSGet: Only available on Windows systems') return False, 'Module PSGet: Only available on Windows systems' @@ -42,7 +43,7 @@ def __virtual__(): return False, 'Module PSGet: Requires PowerShell' # Verify PowerShell 5.0 or greater - if salt.utils.compare_versions(powershell_info['version'], '<', '5.0'): + if salt.utils.versions.compare(powershell_info['version'], '<', '5.0'): log.debug('Module PSGet: Requires PowerShell 5 or newer') return False, 'Module PSGet: Requires PowerShell 5 or newer.' 
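A side note on the win_pkg.py install() hunk above: the scheduler path keeps the executable (``cmd``) and its argument list (``arguments``) separate, and the non-scheduler path then combines them before handing the result to ``cmd.run_all``. Combining them via ``list.extend()`` is a classic pitfall, because ``extend()`` mutates in place and returns ``None``, so the combined command has to be built by concatenation or by extending a named list. A minimal standalone sketch, not part of the patch; the executable name and flags below are made-up placeholders:

# Standalone illustration of combining a command with its argument list.
# 'msiexec' and the flags here are placeholder values, not taken from the patch.
cmd = 'msiexec'
arguments = ['/i', 'C:\\cache\\pkg.msi', '/qn']

broken = [cmd].extend(arguments)   # extend() mutates in place and returns None
assert broken is None

combined = [cmd] + arguments       # concatenation yields the full command list
assert combined == ['msiexec', '/i', 'C:\\cache\\pkg.msi', '/qn']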
diff --git a/salt/modules/win_repo.py b/salt/modules/win_repo.py index bf2a05d650..975ba40496 100644 --- a/salt/modules/win_repo.py +++ b/salt/modules/win_repo.py @@ -16,6 +16,8 @@ import os # Import salt libs import salt.output import salt.utils +import salt.utils.path +import salt.utils.platform import salt.loader import salt.template from salt.exceptions import CommandExecutionError, SaltRenderError @@ -45,7 +47,7 @@ def __virtual__(): ''' Set the winrepo module if the OS is Windows ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): global _genrepo, _update_git_repos _genrepo = salt.utils.namespaced_function(_genrepo, globals()) _update_git_repos = \ @@ -113,7 +115,7 @@ def update_git_repos(clean=False): salt-call winrepo.update_git_repos ''' - if not salt.utils.which('git'): + if not salt.utils.path.which('git'): raise CommandExecutionError( 'Git for Windows is not installed, or not configured to be ' 'accessible from the Command Prompt' diff --git a/salt/modules/win_servermanager.py b/salt/modules/win_servermanager.py index 53d1333d2e..46c3b1d953 100644 --- a/salt/modules/win_servermanager.py +++ b/salt/modules/win_servermanager.py @@ -18,9 +18,10 @@ try: except ImportError: from pipes import quote as _cmd_quote -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform import salt.utils.powershell +import salt.utils.versions log = logging.getLogger(__name__) @@ -31,10 +32,10 @@ def __virtual__(): ''' Load only on windows with servermanager module ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return False - if salt.utils.version_cmp(__grains__['osversion'], '6.1.7600') == -1: + if salt.utils.versions.version_cmp(__grains__['osversion'], '6.1.7600') == -1: return False, 'Failed to load win_servermanager module: ' \ 'Requires Remote Server Administration Tools which ' \ 'is only available on Windows 2008 R2 and later.' @@ -193,7 +194,7 @@ def install(feature, recurse=False, restart=False, source=None, exclude=None): # with old behavior. command = 'Add-WindowsFeature' management_tools = '' - if salt.utils.version_cmp(__grains__['osversion'], '6.2') >= 0: + if salt.utils.versions.version_cmp(__grains__['osversion'], '6.2') >= 0: command = 'Install-WindowsFeature' management_tools = '-IncludeManagementTools' @@ -327,7 +328,7 @@ def remove(feature, remove_payload=False, restart=False): command = 'Remove-WindowsFeature' management_tools = '' _remove_payload = '' - if salt.utils.version_cmp(__grains__['osversion'], '6.2') >= 0: + if salt.utils.versions.version_cmp(__grains__['osversion'], '6.2') >= 0: command = 'Uninstall-WindowsFeature' management_tools = '-IncludeManagementTools' diff --git a/salt/modules/win_service.py b/salt/modules/win_service.py index 2c0e9954b4..8c017c89e7 100644 --- a/salt/modules/win_service.py +++ b/salt/modules/win_service.py @@ -5,13 +5,15 @@ Windows Service module. .. versionchanged:: 2016.11.0 - Rewritten to use PyWin32 ''' -# Import python libs +# Import Python libs from __future__ import absolute_import -import salt.utils -import time -import logging import fnmatch +import logging import re +import time + +# Import Salt libs +import salt.utils.platform from salt.exceptions import CommandExecutionError # Import 3rd party libs @@ -93,7 +95,7 @@ def __virtual__(): ''' Only works on Windows systems with PyWin32 installed ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return False, 'Module win_service: module only works on Windows.' 
if not HAS_WIN32_MODS: diff --git a/salt/modules/win_shadow.py b/salt/modules/win_shadow.py index dfbaa691c0..c6318bfcbe 100644 --- a/salt/modules/win_shadow.py +++ b/salt/modules/win_shadow.py @@ -8,9 +8,11 @@ Manage the shadow file *'shadow.info' is not available*), see :ref:`here `. ''' +# Import Python libs from __future__ import absolute_import -import salt.utils +# Import Salt libs +import salt.utils.platform # Define the module's virtual name __virtualname__ = 'shadow' @@ -20,7 +22,7 @@ def __virtual__(): ''' Only works on Windows systems ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return __virtualname__ return (False, 'Module win_shadow: module only works on Windows systems.') diff --git a/salt/modules/win_smtp_server.py b/salt/modules/win_smtp_server.py index 4ccdf7e7bf..399d7f085f 100644 --- a/salt/modules/win_smtp_server.py +++ b/salt/modules/win_smtp_server.py @@ -23,12 +23,13 @@ from __future__ import absolute_import import logging import re -# Import 3rd-party libs -import salt.ext.six as six - -# Import salt libs +# Import Salt libs from salt.exceptions import SaltInvocationError -import salt.utils +import salt.utils.args +import salt.utils.platform + +# Import 3rd-party libs +from salt.ext import six try: import wmi @@ -49,7 +50,7 @@ def __virtual__(): ''' Only works on Windows systems. ''' - if salt.utils.is_windows() and _HAS_MODULE_DEPENDENCIES: + if salt.utils.platform.is_windows() and _HAS_MODULE_DEPENDENCIES: return __virtualname__ return False @@ -102,7 +103,7 @@ def _normalize_server_settings(**settings): Convert setting values that had been improperly converted to a dict back to a string. ''' ret = dict() - settings = salt.utils.clean_kwargs(**settings) + settings = salt.utils.args.clean_kwargs(**settings) for setting in settings: if isinstance(settings[setting], dict): diff --git a/salt/modules/win_snmp.py b/salt/modules/win_snmp.py index 15653a7510..52ce41e4ac 100644 --- a/salt/modules/win_snmp.py +++ b/salt/modules/win_snmp.py @@ -3,14 +3,13 @@ Module for managing SNMP service settings on Windows servers. The Windows feature 'SNMP-Service' must be installed. ''' - -# Import python libs +# Import Python libs from __future__ import absolute_import import logging -# Import salt libs +# Import Salt libs +import salt.utils.platform from salt.exceptions import SaltInvocationError -import salt.utils # Import 3rd party libs from salt.ext import six @@ -42,7 +41,7 @@ def __virtual__(): ''' Only works on Windows systems. ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return False, 'Module win_snmp: Requires Windows' if not __salt__['reg.read_value'](_HKEY, _SNMP_KEY)['success']: @@ -303,6 +302,11 @@ def get_community_names(): # Windows SNMP service GUI. 
if isinstance(current_values, list): for current_value in current_values: + + # Ignore error values + if not isinstance(current_value, dict): + continue + permissions = str() for permission_name in _PERMISSION_TYPES: if current_value['vdata'] == _PERMISSION_TYPES[permission_name]: diff --git a/salt/modules/win_status.py b/salt/modules/win_status.py index 7197152a06..9accd79ce8 100644 --- a/salt/modules/win_status.py +++ b/salt/modules/win_status.py @@ -18,8 +18,9 @@ import subprocess log = logging.getLogger(__name__) # Import Salt Libs -import salt.utils import salt.utils.event +import salt.utils.platform +import salt.utils.stringutils from salt.utils.network import host_to_ips as _host_to_ips from salt.utils import namespaced_function as _namespaced_function @@ -31,7 +32,7 @@ import copy # Import 3rd Party Libs try: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): import wmi import salt.utils.winapi HAS_WMI = True @@ -41,7 +42,7 @@ except ImportError: HAS_WMI = False HAS_PSUTIL = False -if salt.utils.is_windows(): +if salt.utils.platform.is_windows(): import psutil HAS_PSUTIL = True @@ -53,7 +54,7 @@ def __virtual__(): ''' Only works on Windows systems with WMI and WinAPI ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return False, 'win_status.py: Requires Windows' if not HAS_WMI: @@ -215,8 +216,8 @@ def _get_process_info(proc): ''' Return process information ''' - cmd = salt.utils.to_str(proc.CommandLine or '') - name = salt.utils.to_str(proc.Name) + cmd = salt.utils.stringutils.to_str(proc.CommandLine or '') + name = salt.utils.stringutils.to_str(proc.Name) info = dict( cmd=cmd, name=name, @@ -230,13 +231,13 @@ def _get_process_owner(process): domain, error_code, user = None, None, None try: domain, error_code, user = process.GetOwner() - owner['user'] = salt.utils.to_str(user) - owner['user_domain'] = salt.utils.to_str(domain) + owner['user'] = salt.utils.stringutils.to_str(user) + owner['user_domain'] = salt.utils.stringutils.to_str(domain) except Exception as exc: pass if not error_code and all((user, domain)): - owner['user'] = salt.utils.to_str(user) - owner['user_domain'] = salt.utils.to_str(domain) + owner['user'] = salt.utils.stringutils.to_str(user) + owner['user_domain'] = salt.utils.stringutils.to_str(domain) elif process.ProcessId in [0, 4] and error_code == 2: # Access Denied for System Idle Process and System owner['user'] = 'SYSTEM' @@ -303,7 +304,7 @@ def master(master=None, connected=True): log.error('Failed netstat') raise - lines = salt.utils.to_str(data).split('\n') + lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue diff --git a/salt/modules/win_system.py b/salt/modules/win_system.py index a92f0923e5..15652d8493 100644 --- a/salt/modules/win_system.py +++ b/salt/modules/win_system.py @@ -14,13 +14,20 @@ Support for reboot, shutdown, etc ''' from __future__ import absolute_import -# Import python libs +# Import Python libs +import ctypes import logging import time -import ctypes from datetime import datetime -# Import 3rd Party Libs +# Import salt libs +import salt.utils +import salt.utils.locales +import salt.utils.platform +from salt.exceptions import CommandExecutionError + +# Import 3rd-party Libs +from salt.ext import six try: import pythoncom import wmi @@ -33,11 +40,6 @@ try: except ImportError: HAS_WIN32NET_MODS = False -# Import salt libs -import salt.utils -import salt.utils.locales -import salt.ext.six as six - # Set up logging log = 
logging.getLogger(__name__) @@ -49,7 +51,7 @@ def __virtual__(): ''' Only works on Windows Systems with Win32 Modules ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return False, 'Module win_system: Requires Windows' if not HAS_WIN32NET_MODS: @@ -273,7 +275,7 @@ def shutdown(message=None, timeout=5, force_close=True, reboot=False, # pylint: if only_on_pending_reboot and not get_pending_reboot(): return False - if message and not isinstance(message, str): + if message and not isinstance(message, six.string_types): message = message.decode('utf-8') try: win32api.InitiateSystemShutdown('127.0.0.1', message, timeout, @@ -622,7 +624,11 @@ def join_domain(domain, .. versionadded:: 2015.8.2/2015.5.7 Returns: - dict: Dictionary if successful, otherwise False + dict: Dictionary if successful + + Raises: + CommandExecutionError: Raises an error if _join_domain returns anything + other than 0 CLI Example: @@ -651,10 +657,60 @@ def join_domain(domain, return 'Must specify a password if you pass a username' # remove any escape characters - if isinstance(account_ou, str): + if isinstance(account_ou, six.string_types): account_ou = account_ou.split('\\') account_ou = ''.join(account_ou) + err = _join_domain(domain=domain, username=username, password=password, + account_ou=account_ou, account_exists=account_exists) + + if not err: + ret = {'Domain': domain, + 'Restart': False} + if restart: + ret['Restart'] = reboot() + return ret + + raise CommandExecutionError(win32api.FormatMessage(err).rstrip()) + + +def _join_domain(domain, + username=None, + password=None, + account_ou=None, + account_exists=False): + ''' + Helper function to join the domain. + + Args: + domain (str): The domain to which the computer should be joined, e.g. + ``example.com`` + + username (str): Username of an account which is authorized to join + computers to the specified domain. Need to be either fully qualified + like ``user@domain.tld`` or simply ``user`` + + password (str): Password of the specified user + + account_ou (str): The DN of the OU below which the account for this + computer should be created when joining the domain, e.g. + ``ou=computers,ou=departm_432,dc=my-company,dc=com`` + + account_exists (bool): If set to ``True`` the computer will only join + the domain if the account already exists. If set to ``False`` the + computer account will be created if it does not exist, otherwise it + will use the existing account. Default is False. 
+ + Returns: + int: + + :param domain: + :param username: + :param password: + :param account_ou: + :param account_exists: + :return: + ''' NETSETUP_JOIN_DOMAIN = 0x1 # pylint: disable=invalid-name NETSETUP_ACCOUNT_CREATE = 0x2 # pylint: disable=invalid-name NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20 # pylint: disable=invalid-name @@ -670,23 +726,13 @@ def join_domain(domain, pythoncom.CoInitialize() conn = wmi.WMI() comp = conn.Win32_ComputerSystem()[0] - err = comp.JoinDomainOrWorkgroup(Name=domain, - Password=password, - UserName=username, - AccountOU=account_ou, - FJoinOptions=join_options) - # you have to do this because JoinDomainOrWorkgroup returns a strangely - # formatted value that looks like (0,) - if not err[0]: - ret = {'Domain': domain, - 'Restart': False} - if restart: - ret['Restart'] = reboot() - return ret - - log.error(win32api.FormatMessage(err[0]).rstrip()) - return False + # Return the results of the command as an error + # JoinDomainOrWorkgroup returns a strangely formatted value that looks like + # (0,) so return the first item + return comp.JoinDomainOrWorkgroup( + Name=domain, Password=password, UserName=username, AccountOU=account_ou, + FJoinOptions=join_options)[0] def unjoin_domain(username=None, @@ -919,7 +965,11 @@ def set_system_date_time(years=None, seconds (int): Seconds digit: 0 - 59 Returns: - bool: True if successful, otherwise False. + bool: True if successful + + Raises: + CommandExecutionError: Raises an error if ``SetLocalTime`` function + fails CLI Example: @@ -972,12 +1022,15 @@ def set_system_date_time(years=None, system_time.wSecond = int(seconds) system_time_ptr = ctypes.pointer(system_time) succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr) - return succeeded is not 0 - except OSError: + if succeeded is not 0: + return True + else: + log.error('Failed to set local time') + raise CommandExecutionError( + win32api.FormatMessage(succeeded).rstrip()) + except OSError as err: log.error('Failed to set local time') - return False - - return True + raise CommandExecutionError(err) def get_system_date(): diff --git a/salt/modules/win_task.py b/salt/modules/win_task.py index 22285223e4..c41410adea 100644 --- a/salt/modules/win_task.py +++ b/salt/modules/win_task.py @@ -9,15 +9,16 @@ You can add and edit existing tasks. You can add and clear triggers and actions. You can list all tasks, folders, triggers, and actions. 
''' - -# Import python libs +# Import Python libs from __future__ import absolute_import -import salt.utils -from datetime import datetime import logging import time +from datetime import datetime -# Import 3rd Party Libraries +# Import Salt libs +import salt.utils.platform + +# Import 3rd-party libraries try: import pythoncom import win32com.client @@ -161,7 +162,7 @@ def __virtual__(): ''' Only works on Windows systems ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): if not HAS_DEPENDENCIES: log.warning('Could not load dependencies for {0}'.format(__virtualname__)) return __virtualname__ @@ -1259,7 +1260,7 @@ def status(name, location='\\'): task_service = win32com.client.Dispatch("Schedule.Service") task_service.Connect() - # get the folder to delete the folder from + # get the folder where the task is defined task_folder = task_service.GetFolder(location) task = task_folder.GetTask(name) diff --git a/salt/modules/win_timezone.py b/salt/modules/win_timezone.py index 5ad07d9a20..4c7b4d2881 100644 --- a/salt/modules/win_timezone.py +++ b/salt/modules/win_timezone.py @@ -4,11 +4,14 @@ Module for managing timezone on Windows systems. ''' from __future__ import absolute_import -# Import python libs -import salt.utils +# Import Python libs import logging import re +# Import Salt libs +import salt.utils.path +import salt.utils.platform + log = logging.getLogger(__name__) # Maybe put in a different file ... ? %-0 @@ -458,7 +461,7 @@ def __virtual__(): ''' Only load on windows ''' - if salt.utils.is_windows() and salt.utils.which('tzutil'): + if salt.utils.platform.is_windows() and salt.utils.path.which('tzutil'): return __virtualname__ return (False, "Module win_timezone: tzutil not found or is not on Windows client") diff --git a/salt/modules/win_update.py b/salt/modules/win_update.py index 3226683cae..4b75e66a82 100644 --- a/salt/modules/win_update.py +++ b/salt/modules/win_update.py @@ -74,9 +74,10 @@ except ImportError: HAS_DEPENDENCIES = False # pylint: enable=import-error -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform import salt.utils.locales +import salt.utils.versions log = logging.getLogger(__name__) @@ -85,8 +86,8 @@ def __virtual__(): ''' Only works on Windows systems ''' - if salt.utils.is_windows() and HAS_DEPENDENCIES: - salt.utils.warn_until( + if salt.utils.platform.is_windows() and HAS_DEPENDENCIES: + salt.utils.versions.warn_until( 'Fluorine', 'The \'win_update\' module is being deprecated and will be removed ' 'in Salt {version}. Please use the \'win_wua\' module instead.' @@ -383,7 +384,7 @@ class PyWinUpdater(object): update_dict = {} for f in update_com_fields: v = getattr(update, f) - if not any([isinstance(v, bool), isinstance(v, str)]): + if not any([isinstance(v, bool), isinstance(v, six.string_types)]): # Fields that require special evaluation. if f in simple_enums: v = [x for x in v] diff --git a/salt/modules/win_useradd.py b/salt/modules/win_useradd.py index 50aebade96..9c11e31226 100644 --- a/salt/modules/win_useradd.py +++ b/salt/modules/win_useradd.py @@ -23,21 +23,24 @@ Module for managing Windows Users .. 
note::
     This currently only works with local user accounts, not domain accounts
 '''
+# Import Python libs
 from __future__ import absolute_import
-from datetime import datetime
+import logging
 import time
+from datetime import datetime
 
 try:
     from shlex import quote as _cmd_quote  # pylint: disable=E0611
 except:  # pylint: disable=W0702
     from pipes import quote as _cmd_quote
 
-# Import salt libs
+# Import Salt libs
 import salt.utils
+import salt.utils.args
+import salt.utils.platform
 from salt.ext import six
 from salt.ext.six import string_types
 from salt.exceptions import CommandExecutionError
-import logging
 
 log = logging.getLogger(__name__)
@@ -64,7 +67,7 @@ def __virtual__():
     '''
     Requires Windows and Windows Modules
     '''
-    if not salt.utils.is_windows():
+    if not salt.utils.platform.is_windows():
         return False, 'Module win_useradd: Windows Only'
 
     if not HAS_WIN32NET_MODS:
@@ -587,10 +590,10 @@ def chhome(name, home, **kwargs):
     name = _to_unicode(name)
     home = _to_unicode(home)
 
-    kwargs = salt.utils.clean_kwargs(**kwargs)
+    kwargs = salt.utils.args.clean_kwargs(**kwargs)
     persist = kwargs.pop('persist', False)
     if kwargs:
-        salt.utils.invalid_kwargs(kwargs)
+        salt.utils.args.invalid_kwargs(kwargs)
     if persist:
         log.info('Ignoring unsupported \'persist\' argument to user.chhome')
diff --git a/salt/modules/win_wua.py b/salt/modules/win_wua.py
index 638452f2ae..5549b3e2bf 100644
--- a/salt/modules/win_wua.py
+++ b/salt/modules/win_wua.py
@@ -2,24 +2,67 @@
 '''
 Module for managing Windows Updates using the Windows Update Agent.
 
+List updates on the system using the following functions:
+
+- :ref:`available`
+- :ref:`list`
+
+This is an easy way to find additional information about updates available
+to the system, such as the GUID, KB number, or description.
+
+Once you have the GUID or a KB number for the update you can get information
+about the update, download, install, or uninstall it using these functions:
+
+- :ref:`get`
+- :ref:`download`
+- :ref:`install`
+- :ref:`uninstall`
+
+The get function expects a name in the form of a GUID, KB, or Title and should
+return information about a single update. The other functions accept either a
+single item or a list of items for downloading/installing/uninstalling a
+specific list of items.
+
+The :ref:`list` and :ref:`get` functions are utility functions. In addition to
+returning information about updates they can also download and install updates
+by setting ``download=True`` or ``install=True``. So, with :ref:`list` for
+example, you could run the function with the filters you want to see what is
+available. Then just add ``install=True`` to install everything on that list.
+
+If you want to download, install, or uninstall specific updates, use
+:ref:`download`, :ref:`install`, or :ref:`uninstall`. To update your system
+with the latest updates use :ref:`list` and set ``install=True``.
+
+You can also adjust the Windows Update settings using the :ref:`set_wu_settings`
+function. This function is only supported on the following operating systems:
+
+- Windows Vista / Server 2008
+- Windows 7 / Server 2008R2
+- Windows 8 / Server 2012
+- Windows 8.1 / Server 2012R2
+
+As of Windows 10 and Windows Server 2016, the ability to modify the Windows
+Update settings has been restricted. The settings can be modified in the Local
+Group Policy using the ``lgpo`` module.
+
 ..
versionadded:: 2015.8.0 :depends: - salt.utils.win_update ''' - # Import Python libs from __future__ import absolute_import from __future__ import unicode_literals import logging # Import Salt libs -from salt.ext import six -import salt.utils +import salt.utils.platform +import salt.utils.versions import salt.utils.win_update from salt.exceptions import CommandExecutionError # Import 3rd-party libs +from salt.ext import six try: import pythoncom import win32com.client @@ -34,7 +77,7 @@ def __virtual__(): ''' Only works on Windows systems with PyWin32 ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return False, 'WUA: Only available on Window systems' if not HAS_PYWIN32: @@ -54,36 +97,40 @@ def available(software=True, skip_mandatory=False, skip_reboot=False, categories=None, - severities=None, - ): + severities=None,): ''' .. versionadded:: 2017.7.0 - List updates that match the passed criteria. + List updates that match the passed criteria. This allows for more filter + options than :func:`list`. Good for finding a specific GUID or KB. Args: - software (bool): Include software updates in the results (default is - True) + software (bool): + Include software updates in the results (default is True) - drivers (bool): Include driver updates in the results (default is False) + drivers (bool): + Include driver updates in the results (default is False) summary (bool): - - True: Return a summary of updates available for each category. - - False (default): Return a detailed list of available updates. + - True: Return a summary of updates available for each category. + - False (default): Return a detailed list of available updates. - skip_installed (bool): Skip updates that are already installed. Default - is False. + skip_installed (bool): + Skip updates that are already installed. Default is False. - skip_hidden (bool): Skip updates that have been hidden. Default is True. + skip_hidden (bool): + Skip updates that have been hidden. Default is True. - skip_mandatory (bool): Skip mandatory updates. Default is False. + skip_mandatory (bool): + Skip mandatory updates. Default is False. - skip_reboot (bool): Skip updates that require a reboot. Default is - False. + skip_reboot (bool): + Skip updates that require a reboot. Default is False. - categories (list): Specify the categories to list. Must be passed as a - list. All categories returned by default. + categories (list): + Specify the categories to list. Must be passed as a list. All + categories returned by default. Categories include the following: @@ -101,8 +148,9 @@ def available(software=True, * Windows 8.1 and later drivers * Windows Defender - severities (list): Specify the severities to include. Must be passed as - a list. All severities returned by default. + severities (list): + Specify the severities to include. Must be passed as a list. All + severities returned by default. 
Severities include the following: @@ -152,28 +200,30 @@ def available(software=True, salt '*' win_wua.available # List all updates with categories of Critical Updates and Drivers - salt '*' win_wua.available categories=['Critical Updates','Drivers'] + salt '*' win_wua.available categories=["Critical Updates","Drivers"] # List all Critical Security Updates - salt '*' win_wua.available categories=['Security Updates'] severities=['Critical'] + salt '*' win_wua.available categories=["Security Updates"] severities=["Critical"] # List all updates with a severity of Critical - salt '*' win_wua.available severities=['Critical'] + salt '*' win_wua.available severities=["Critical"] # A summary of all available updates salt '*' win_wua.available summary=True # A summary of all Feature Packs and Windows 8.1 Updates - salt '*' win_wua.available categories=['Feature Packs','Windows 8.1'] summary=True + salt '*' win_wua.available categories=["Feature Packs","Windows 8.1"] summary=True ''' # Create a Windows Update Agent instance wua = salt.utils.win_update.WindowsUpdateAgent() # Look for available - updates = wua.available(skip_hidden, skip_installed, skip_mandatory, - skip_reboot, software, drivers, categories, - severities) + updates = wua.available( + skip_hidden=skip_hidden, skip_installed=skip_installed, + skip_mandatory=skip_mandatory, skip_reboot=skip_reboot, + software=software, drivers=drivers, categories=categories, + severities=severities) # Return results as Summary or Details return updates.summary() if summary else updates.list() @@ -183,23 +233,29 @@ def list_update(name, download=False, install=False): ''' .. deprecated:: 2017.7.0 Use :func:`get` instead + Returns details for all updates that match the search criteria Args: - name (str): The name of the update you're searching for. This can be the - GUID, a KB number, or any part of the name of the update. GUIDs and - KBs are preferred. Run ``list_updates`` to get the GUID for the update - you're looking for. - download (bool): Download the update returned by this function. Run this - function first to see if the update exists, then set ``download=True`` - to download the update. + name (str): + The name of the update you're searching for. This can be the GUID, a + KB number, or any part of the name of the update. GUIDs and KBs are + preferred. Run ``list_updates`` to get the GUID for the update + you're looking for. - install (bool): Install the update returned by this function. Run this - function first to see if the update exists, then set ``install=True`` to - install the update. + download (bool): + Download the update returned by this function. Run this function + first to see if the update exists, then set ``download=True`` to + download the update. + + install (bool): + Install the update returned by this function. Run this function + first to see if the update exists, then set ``install=True`` to + install the update. Returns: + dict: Returns a dict containing a list of updates that match the name if download and install are both set to False. Should usually be a single update, but can return multiple if a partial name is given. @@ -247,7 +303,7 @@ def list_update(name, download=False, install=False): # Not all updates have an associated KB salt '*' win_wua.list_update 'Microsoft Camera Codec Pack' ''' - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'This function is replaced by \'get\' as of Salt 2017.7.0. 
This' 'warning will be removed in Salt Fluorine.') @@ -258,23 +314,28 @@ def get(name, download=False, install=False): ''' .. versionadded:: 2017.7.0 - Returns details for all updates that match the search criteria + Returns details for the named update Args: - name (str): The name of the update you're searching for. This can be the - GUID, a KB number, or any part of the name of the update. GUIDs and - KBs are preferred. Run ``list`` to get the GUID for the update - you're looking for. - download (bool): Download the update returned by this function. Run this - function first to see if the update exists, then set ``download=True`` - to download the update. + name (str): + The name of the update you're searching for. This can be the GUID, a + KB number, or any part of the name of the update. GUIDs and KBs are + preferred. Run ``list`` to get the GUID for the update you're + looking for. - install (bool): Install the update returned by this function. Run this - function first to see if the update exists, then set ``install=True`` to - install the update. + download (bool): + Download the update returned by this function. Run this function + first to see if the update exists, then set ``download=True`` to + download the update. + + install (bool): + Install the update returned by this function. Run this function + first to see if the update exists, then set ``install=True`` to + install the update. Returns: + dict: Returns a dict containing a list of updates that match the name if download and install are both set to False. Should usually be a single update, but can return multiple if a partial name is given. @@ -357,30 +418,35 @@ def list_updates(software=True, install is True the same list will be downloaded and/or installed. Args: - software (bool): Include software updates in the results (default is - True) - drivers (bool): Include driver updates in the results (default is False) + software (bool): + Include software updates in the results (default is True) + + drivers (bool): + Include driver updates in the results (default is False) summary (bool): - - True: Return a summary of updates available for each category. - - False (default): Return a detailed list of available updates. + - True: Return a summary of updates available for each category. + - False (default): Return a detailed list of available updates. - skip_installed (bool): Skip installed updates in the results (default is - False) + skip_installed (bool): + Skip installed updates in the results (default is False) - download (bool): (Overrides reporting functionality) Download the list - of updates returned by this function. Run this function first with - ``download=False`` to see what will be downloaded, then set - ``download=True`` to download the updates. + download (bool): + (Overrides reporting functionality) Download the list of updates + returned by this function. Run this function first with + ``download=False`` to see what will be downloaded, then set + ``download=True`` to download the updates. - install (bool): (Overrides reporting functionality) Install the list of - updates returned by this function. Run this function first with - ``install=False`` to see what will be installed, then set - ``install=True`` to install the updates. + install (bool): + (Overrides reporting functionality) Install the list of updates + returned by this function. Run this function first with + ``install=False`` to see what will be installed, then set + ``install=True`` to install the updates. 
- categories (list): Specify the categories to list. Must be passed as a - list. All categories returned by default. + categories (list): + Specify the categories to list. Must be passed as a list. All + categories returned by default. Categories include the following: @@ -398,8 +464,9 @@ def list_updates(software=True, * Windows 8.1 and later drivers * Windows Defender - severities (list): Specify the severities to include. Must be passed as - a list. All severities returned by default. + severities (list): + Specify the severities to include. Must be passed as a list. All + severities returned by default. Severities include the following: @@ -463,7 +530,7 @@ def list_updates(software=True, # A summary of all Feature Packs and Windows 8.1 Updates salt '*' win_wua.list_updates categories=['Feature Packs','Windows 8.1'] summary=True ''' - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'This function is replaced by \'list\' as of Salt 2017.7.0. This' 'warning will be removed in Salt Fluorine.') @@ -486,30 +553,35 @@ def list(software=True, install is True the same list will be downloaded and/or installed. Args: - software (bool): Include software updates in the results (default is - True) - drivers (bool): Include driver updates in the results (default is False) + software (bool): + Include software updates in the results (default is True) + + drivers (bool): + Include driver updates in the results (default is False) summary (bool): - - True: Return a summary of updates available for each category. - - False (default): Return a detailed list of available updates. + - True: Return a summary of updates available for each category. + - False (default): Return a detailed list of available updates. - skip_installed (bool): Skip installed updates in the results (default is - False) + skip_installed (bool): + Skip installed updates in the results (default is False) - download (bool): (Overrides reporting functionality) Download the list - of updates returned by this function. Run this function first with - ``download=False`` to see what will be downloaded, then set - ``download=True`` to download the updates. + download (bool): + (Overrides reporting functionality) Download the list of updates + returned by this function. Run this function first with + ``download=False`` to see what will be downloaded, then set + ``download=True`` to download the updates. - install (bool): (Overrides reporting functionality) Install the list of - updates returned by this function. Run this function first with - ``install=False`` to see what will be installed, then set - ``install=True`` to install the updates. + install (bool): + (Overrides reporting functionality) Install the list of updates + returned by this function. Run this function first with + ``install=False`` to see what will be installed, then set + ``install=True`` to install the updates. - categories (list): Specify the categories to list. Must be passed as a - list. All categories returned by default. + categories (list): + Specify the categories to list. Must be passed as a list. All + categories returned by default. Categories include the following: @@ -527,8 +599,9 @@ def list(software=True, * Windows 8.1 and later drivers * Windows Defender - severities (list): Specify the severities to include. Must be passed as - a list. All severities returned by default. + severities (list): + Specify the severities to include. Must be passed as a list. All + severities returned by default. 
Severities include the following: @@ -575,22 +648,22 @@ def list(software=True, .. code-block:: bash # Normal Usage (list all software updates) - salt '*' win_wua.list_updates + salt '*' win_wua.list # List all updates with categories of Critical Updates and Drivers - salt '*' win_wua.list_updates categories=['Critical Updates','Drivers'] + salt '*' win_wua.list categories=['Critical Updates','Drivers'] # List all Critical Security Updates - salt '*' win_wua.list_updates categories=['Security Updates'] severities=['Critical'] + salt '*' win_wua.list categories=['Security Updates'] severities=['Critical'] # List all updates with a severity of Critical - salt '*' win_wua.list_updates severities=['Critical'] + salt '*' win_wua.list severities=['Critical'] # A summary of all available updates - salt '*' win_wua.list_updates summary=True + salt '*' win_wua.list summary=True # A summary of all Feature Packs and Windows 8.1 Updates - salt '*' win_wua.list_updates categories=['Feature Packs','Windows 8.1'] summary=True + salt '*' win_wua.list categories=['Feature Packs','Windows 8.1'] summary=True ''' # Create a Windows Update Agent instance wua = salt.utils.win_update.WindowsUpdateAgent() @@ -604,11 +677,11 @@ def list(software=True, # Download if download or install: - ret['Download'] = wua.download(updates.updates) + ret['Download'] = wua.download(updates) # Install if install: - ret['Install'] = wua.install(updates.updates) + ret['Install'] = wua.install(updates) if not ret: return updates.summary() if summary else updates.list() @@ -625,13 +698,16 @@ def download_update(name): Args: - name (str): The name of the update to download. This can be a GUID, a KB - number, or any part of the name. To ensure a single item is matched the - GUID is preferred. + name (str): + The name of the update to download. This can be a GUID, a KB number, + or any part of the name. To ensure a single item is matched the GUID + is preferred. - .. note:: If more than one result is returned an error will be raised. + .. note:: + If more than one result is returned an error will be raised. Returns: + dict: A dictionary containing the results of the download CLI Examples: @@ -641,9 +717,8 @@ def download_update(name): salt '*' win_wua.download_update 12345678-abcd-1234-abcd-1234567890ab salt '*' win_wua.download_update KB12312321 - ''' - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'This function is replaced by \'download\' as of Salt 2017.7.0. This' 'warning will be removed in Salt Fluorine.') @@ -660,8 +735,9 @@ def download_updates(names): Args: - names (list): A list of updates to download. This can be any combination - of GUIDs, KB numbers, or names. GUIDs or KBs are preferred. + names (list): + A list of updates to download. This can be any combination of GUIDs, + KB numbers, or names. GUIDs or KBs are preferred. Returns: @@ -672,9 +748,9 @@ def download_updates(names): .. code-block:: bash # Normal Usage - salt '*' win_wua.download guid=['12345678-abcd-1234-abcd-1234567890ab', 'KB2131233'] + salt '*' win_wua.download_updates guid=['12345678-abcd-1234-abcd-1234567890ab', 'KB2131233'] ''' - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'This function is replaced by \'download\' as of Salt 2017.7.0. This' 'warning will be removed in Salt Fluorine.') @@ -690,9 +766,14 @@ def download(names): Args: - names (str, list): A single update or a list of updates to download. - This can be any combination of GUIDs, KB numbers, or names. GUIDs or KBs - are preferred. 
+ names (str, list): + A single update or a list of updates to download. This can be any + combination of GUIDs, KB numbers, or names. GUIDs or KBs are + preferred. + + .. note:: + An error will be raised if there are more results than there are items + in the names parameter Returns: @@ -703,7 +784,7 @@ def download(names): .. code-block:: bash # Normal Usage - salt '*' win_wua.download guid=['12345678-abcd-1234-abcd-1234567890ab', 'KB2131233'] + salt '*' win_wua.download names=['12345678-abcd-1234-abcd-1234567890ab', 'KB2131233'] ''' # Create a Windows Update Agent instance wua = salt.utils.win_update.WindowsUpdateAgent() @@ -714,6 +795,13 @@ def download(names): if updates.count() == 0: raise CommandExecutionError('No updates found') + # Make sure it's a list so count comparison is correct + if isinstance(names, six.string_types): + names = [names] + + if isinstance(names, six.integer_types): + names = [str(names)] + if updates.count() > len(names): raise CommandExecutionError('Multiple updates found, names need to be ' 'more specific') @@ -734,10 +822,12 @@ def install_update(name): number, or any part of the name. To ensure a single item is matched the GUID is preferred. - .. note:: If no results or more than one result is returned an error - will be raised. + .. note:: + If no results or more than one result is returned an error will be + raised. Returns: + dict: A dictionary containing the results of the install CLI Examples: @@ -748,7 +838,7 @@ def install_update(name): salt '*' win_wua.install_update KB12312231 ''' - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'This function is replaced by \'install\' as of Salt 2017.7.0. This' 'warning will be removed in Salt Fluorine.') @@ -779,7 +869,7 @@ def install_updates(names): # Normal Usage salt '*' win_wua.install_updates guid=['12345678-abcd-1234-abcd-1234567890ab', 'KB12323211'] ''' - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'This function is replaced by \'install\' as of Salt 2017.7.0. This' 'warning will be removed in Salt Fluorine.') @@ -795,9 +885,14 @@ def install(names): Args: - names (str, list): A single update or a list of updates to install. - This can be any combination of GUIDs, KB numbers, or names. GUIDs or KBs - are preferred. + names (str, list): + A single update or a list of updates to install. This can be any + combination of GUIDs, KB numbers, or names. GUIDs or KBs are + preferred. + + .. note:: + An error will be raised if there are more results than there are items + in the names parameter Returns: @@ -808,7 +903,7 @@ def install(names): .. code-block:: bash # Normal Usage - salt '*' win_wua.install_updates guid=['12345678-abcd-1234-abcd-1234567890ab', 'KB12323211'] + salt '*' win_wua.install KB12323211 ''' # Create a Windows Update Agent instance wua = salt.utils.win_update.WindowsUpdateAgent() @@ -819,6 +914,13 @@ def install(names): if updates.count() == 0: raise CommandExecutionError('No updates found') + # Make sure it's a list so count comparison is correct + if isinstance(names, six.string_types): + names = [names] + + if isinstance(names, six.integer_types): + names = [str(names)] + if updates.count() > len(names): raise CommandExecutionError('Multiple updates found, names need to be ' 'more specific') @@ -834,9 +936,10 @@ def uninstall(names): Args: - names (str, list): A single update or a list of updates to uninstall. - This can be any combination of GUIDs, KB numbers, or names. GUIDs or KBs - are preferred. 
+ names (str, list): + A single update or a list of updates to uninstall. This can be any + combination of GUIDs, KB numbers, or names. GUIDs or KBs are + preferred. Returns: @@ -875,33 +978,50 @@ def set_wu_settings(level=None, Change Windows Update settings. If no parameters are passed, the current value will be returned. - :param int level: - Number from 1 to 4 indicating the update level: + Supported: + - Windows Vista / Server 2008 + - Windows 7 / Server 2008R2 + - Windows 8 / Server 2012 + - Windows 8.1 / Server 2012R2 + + .. note: + Microsoft began using the Unified Update Platform (UUP) starting with + Windows 10 / Server 2016. The Windows Update settings have changed and + the ability to 'Save' Windows Update settings has been removed. Windows + Update settings are read-only. See MSDN documentation: + https://msdn.microsoft.com/en-us/library/aa385829(v=vs.85).aspx + + Args: + + level (int): + Number from 1 to 4 indicating the update level: + 1. Never check for updates 2. Check for updates but let me choose whether to download and install them 3. Download updates but let me choose whether to install them 4. Install updates automatically - :param bool recommended: - Boolean value that indicates whether to include optional or recommended - updates when a search for updates and installation of updates is - performed. - :param bool featured: - Boolean value that indicates whether to display notifications for - featured updates. + recommended (bool): + Boolean value that indicates whether to include optional or + recommended updates when a search for updates and installation of + updates is performed. - :param bool elevated: - Boolean value that indicates whether non-administrators can perform some - update-related actions without administrator approval. + featured (bool): + Boolean value that indicates whether to display notifications for + featured updates. - :param bool msupdate: - Boolean value that indicates whether to turn on Microsoft Update for - other Microsoft products + elevated (bool): + Boolean value that indicates whether non-administrators can perform + some update-related actions without administrator approval. + + msupdate (bool): + Boolean value that indicates whether to turn on Microsoft Update for + other Microsoft products + + day (str): + Days of the week on which Automatic Updates installs or uninstalls + updates. Accepted values: - :param str day: - Days of the week on which Automatic Updates installs or uninstalls - updates. - Accepted values: - Everyday - Monday - Tuesday @@ -910,21 +1030,43 @@ def set_wu_settings(level=None, - Friday - Saturday - :param str time: - Time at which Automatic Updates installs or uninstalls updates. Must be - in the ##:## 24hr format, eg. 3:00 PM would be 15:00 + time (str): + Time at which Automatic Updates installs or uninstalls updates. Must + be in the ##:## 24hr format, eg. 3:00 PM would be 15:00. Must be in + 1 hour increments. - :return: Returns a dictionary containing the results. + Returns: + + dict: Returns a dictionary containing the results. CLI Examples: .. code-block:: bash salt '*' win_wua.set_wu_settings level=4 recommended=True featured=False - ''' - ret = {} - ret['Success'] = True + # The AutomaticUpdateSettings.Save() method used in this function does not + # work on Windows 10 / Server 2016. 
It is called in throughout this function + # like this: + # + # obj_au = win32com.client.Dispatch('Microsoft.Update.AutoUpdate') + # obj_au_settings = obj_au.Settings + # obj_au_settings.Save() + # + # The `Save()` method reports success but doesn't actually change anything. + # Windows Update settings are read-only in Windows 10 / Server 2016. There's + # a little blurb on MSDN that mentions this, but gives no alternative for + # changing these settings in Windows 10 / Server 2016. + # + # https://msdn.microsoft.com/en-us/library/aa385829(v=vs.85).aspx + # + # Apparently the Windows Update framework in Windows Vista - Windows 8.1 has + # been changed quite a bit in Windows 10 / Server 2016. It is now called the + # Unified Update Platform (UUP). I haven't found an API or a Powershell + # commandlet for working with the the UUP. Perhaps there will be something + # forthcoming. The `win_lgpo` module might be an option for changing the + # Windows Update settings using local group policy. + ret = {'Success': True} # Initialize the PyCom system pythoncom.CoInitialize() @@ -1076,30 +1218,31 @@ def get_wu_settings(): Boolean value that indicates whether to display notifications for featured updates. Group Policy Required (Read-only): - Boolean value that indicates whether Group Policy requires the Automatic - Updates service. + Boolean value that indicates whether Group Policy requires the + Automatic Updates service. Microsoft Update: Boolean value that indicates whether to turn on Microsoft Update for other Microsoft Products Needs Reboot: - Boolean value that indicates whether the machine is in a reboot pending - state. + Boolean value that indicates whether the machine is in a reboot + pending state. Non Admins Elevated: - Boolean value that indicates whether non-administrators can perform some - update-related actions without administrator approval. + Boolean value that indicates whether non-administrators can perform + some update-related actions without administrator approval. Notification Level: Number 1 to 4 indicating the update level: 1. Never check for updates - 2. Check for updates but let me choose whether to download and install them + 2. Check for updates but let me choose whether to download and + install them 3. Download updates but let me choose whether to install them 4. Install updates automatically Read Only (Read-only): Boolean value that indicates whether the Automatic Update settings are read-only. Recommended Updates: - Boolean value that indicates whether to include optional or recommended - updates when a search for updates and installation of updates is - performed. + Boolean value that indicates whether to include optional or + recommended updates when a search for updates and installation of + updates is performed. Scheduled Day: Days of the week on which Automatic Updates installs or uninstalls updates. @@ -1182,13 +1325,12 @@ def get_needs_reboot(): Returns: - bool: True if the system requires a reboot, False if not + bool: True if the system requires a reboot, otherwise False CLI Examples: .. 
code-block:: bash salt '*' win_wua.get_needs_reboot - ''' return salt.utils.win_update.needs_reboot() diff --git a/salt/modules/x509.py b/salt/modules/x509.py index 286f90d304..7344bc7d6c 100644 --- a/salt/modules/x509.py +++ b/salt/modules/x509.py @@ -23,10 +23,10 @@ import datetime import ast # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path import salt.exceptions -import salt.ext.six as six +from salt.ext import six from salt.utils.odict import OrderedDict # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import range @@ -166,7 +166,7 @@ def _parse_openssl_req(csr_filename): Parses openssl command line output, this is a workaround for M2Crypto's inability to get them from CSR objects. ''' - if not salt.utils.which('openssl'): + if not salt.utils.path.which('openssl'): raise salt.exceptions.SaltInvocationError( 'openssl binary not found in path' ) @@ -215,7 +215,7 @@ def _parse_openssl_crl(crl_filename): Parses openssl command line output, this is a workaround for M2Crypto's inability to get them from CSR objects. ''' - if not salt.utils.which('openssl'): + if not salt.utils.path.which('openssl'): raise salt.exceptions.SaltInvocationError( 'openssl binary not found in path' ) @@ -331,10 +331,14 @@ def _parse_subject(subject): for nid_name, nid_num in six.iteritems(subject.nid): if nid_num in nids: continue - val = getattr(subject, nid_name) - if val: - ret[nid_name] = val - nids.append(nid_num) + try: + val = getattr(subject, nid_name) + if val: + ret[nid_name] = val + nids.append(nid_num) + except TypeError as e: + if e.args and e.args[0] == 'No string argument provided': + pass return ret @@ -1374,10 +1378,19 @@ def create_certificate( ['listen_in', 'preqrequired', '__prerequired__']: kwargs.pop(ignore, None) - cert_txt = __salt__['publish.publish']( + certs = __salt__['publish.publish']( tgt=ca_server, fun='x509.sign_remote_certificate', - arg=str(kwargs))[ca_server] + arg=str(kwargs)) + + if not any(certs): + raise salt.exceptions.SaltInvocationError( + 'ca_server did not respond' + ' salt master must permit peers to' + ' call the sign_remote_certificate function.') + + cert_txt = certs[ca_server] + if path: return write_pem( text=cert_txt, @@ -1737,7 +1750,7 @@ def verify_crl(crl, cert): salt '*' x509.verify_crl crl=/etc/pki/myca.crl cert=/etc/pki/myca.crt ''' - if not salt.utils.which('openssl'): + if not salt.utils.path.which('openssl'): raise salt.exceptions.SaltInvocationError( 'openssl binary not found in path' ) diff --git a/salt/modules/xapi.py b/salt/modules/xapi.py index 240b9c15b5..aa031b2f4f 100644 --- a/salt/modules/xapi.py +++ b/salt/modules/xapi.py @@ -33,10 +33,10 @@ except ImportError: HAS_IMPORTLIB = False # Import salt libs -from salt.exceptions import CommandExecutionError -import salt.utils import salt.utils.files +import salt.utils.path import salt.modules.cmdmod +from salt.exceptions import CommandExecutionError # Define the module's virtual name __virtualname__ = 'virt' @@ -115,7 +115,7 @@ def _get_xtool(): Internal, returns xl or xm command line path ''' for xtool in ['xl', 'xm']: - path = salt.utils.which(xtool) + path = salt.utils.path.which(xtool) if path is not None: return path diff --git a/salt/modules/xbpspkg.py b/salt/modules/xbpspkg.py index d4a3064bc9..732adcf3aa 100644 --- a/salt/modules/xbpspkg.py +++ b/salt/modules/xbpspkg.py @@ -16,8 +16,9 @@ import logging import glob # Import salt libs -import salt.utils +import salt.utils # Can be removed when is_true and compare_dicts are moved 
import salt.utils.files +import salt.utils.path import salt.utils.pkg import salt.utils.decorators as decorators from salt.exceptions import CommandExecutionError, MinionError @@ -42,7 +43,7 @@ def _check_xbps(): ''' Looks to see if xbps-install is present on the system, return full path ''' - return salt.utils.which('xbps-install') + return salt.utils.path.which('xbps-install') @decorators.memoize diff --git a/salt/modules/xfs.py b/salt/modules/xfs.py index f016dbe5a6..65e060b898 100644 --- a/salt/modules/xfs.py +++ b/salt/modules/xfs.py @@ -33,12 +33,13 @@ import time import logging # Import Salt libs -import salt.utils import salt.utils.files +import salt.utils.path +import salt.utils.platform from salt.exceptions import CommandExecutionError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin log = logging.getLogger(__name__) @@ -48,7 +49,8 @@ def __virtual__(): ''' Only work on POSIX-like systems ''' - return not salt.utils.is_windows() and __grains__.get('kernel') == 'Linux' + return not salt.utils.platform.is_windows() \ + and __grains__.get('kernel') == 'Linux' def _verify_run(out, cmd=None): @@ -184,7 +186,7 @@ def dump(device, destination, level=0, label=None, noerase=None): salt '*' xfs.dump /dev/sda1 /detination/on/the/client label='Company accountancy' salt '*' xfs.dump /dev/sda1 /detination/on/the/client noerase=True ''' - if not salt.utils.which("xfsdump"): + if not salt.utils.path.which("xfsdump"): raise CommandExecutionError("Utility \"xfsdump\" has to be installed or missing.") label = label and label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device), diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index 821a0fb60a..e6e9ac8438 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -17,7 +17,6 @@ Support for YUM/DNF # Import python libs from __future__ import absolute_import import contextlib -import copy import datetime import fnmatch import itertools @@ -39,20 +38,25 @@ except ImportError: HAS_YUM = False # pylint: enable=import-error,redefined-builtin -# Import salt libs +# Import Salt libs import salt.utils +import salt.utils.args +import salt.utils.decorators.path import salt.utils.files -import salt.utils.pkg -import salt.ext.six as six import salt.utils.itertools -import salt.utils.systemd -import salt.utils.decorators as decorators +import salt.utils.lazy +import salt.utils.pkg import salt.utils.pkg.rpm +import salt.utils.systemd +import salt.utils.versions from salt.utils.versions import LooseVersion as _LooseVersion from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) +# Import 3rd-party libs +from salt.ext import six + log = logging.getLogger(__name__) __HOLD_PATTERN = r'\w+(?:[.-][^-]+)*' @@ -182,7 +186,16 @@ def _check_versionlock(): Ensure that the appropriate versionlock plugin is present ''' if _yum() == 'dnf': - vl_plugin = 'python-dnf-plugins-extras-versionlock' + if int(__grains__.get('osmajorrelease')) >= 26: + if six.PY3: + vl_plugin = 'python3-dnf-plugin-versionlock' + else: + vl_plugin = 'python2-dnf-plugin-versionlock' + else: + if six.PY3: + vl_plugin = 'python3-dnf-plugins-extras-versionlock' + else: + vl_plugin = 'python-dnf-plugins-extras-versionlock' else: vl_plugin = 'yum-versionlock' \ if __grains__.get('osmajorrelease') == '5' \ @@ -262,7 +275,7 @@ def _get_extra_options(**kwargs): Returns list of extra options for yum ''' ret = 
[] - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) for key, value in six.iteritems(kwargs): if isinstance(key, six.string_types): ret.append('--{0}=\'{1}\''.format(key, value)) @@ -476,7 +489,7 @@ def latest_version(*names, **kwargs): # If any installed version is greater than (or equal to) the # one found by yum/dnf list available, then it is not an # upgrade. - if salt.utils.compare_versions(ver1=installed_version, + if salt.utils.versions.compare(ver1=installed_version, oper='>=', ver2=pkg.version, cmp_func=version_cmp): @@ -585,15 +598,34 @@ def version_cmp(pkg1, pkg2, ignore_epoch=False): def list_pkgs(versions_as_list=False, **kwargs): ''' - List the packages currently installed in a dict:: + List the packages currently installed as a dict. By default, the dict + contains versions as a comma separated string:: - {'': ''} + {'': '[,...]'} + + versions_as_list: + If set to true, the versions are provided as a list + + {'': ['', '']} + + attr: + If a list of package attributes is specified, returned value will + contain them in addition to version, eg.:: + + {'': [{'version' : 'version', 'arch' : 'arch'}]} + + Valid attributes are: ``version``, ``arch``, ``install_date``, ``install_date_time_t``. + + If ``all`` is specified, all valid attributes will be returned. + + .. versionadded:: Oxygen CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs + salt '*' pkg.list_pkgs attr='["version", "arch"]' ''' versions_as_list = salt.utils.is_true(versions_as_list) # not yet implemented or not applicable @@ -601,17 +633,14 @@ def list_pkgs(versions_as_list=False, **kwargs): for x in ('removed', 'purge_desired')]): return {} + attr = kwargs.get("attr") if 'pkg.list_pkgs' in __context__: - if versions_as_list: - return __context__['pkg.list_pkgs'] - else: - ret = copy.deepcopy(__context__['pkg.list_pkgs']) - __salt__['pkg_resource.stringify'](ret) - return ret + cached = __context__['pkg.list_pkgs'] + return __salt__['pkg_resource.format_pkg_list'](cached, versions_as_list, attr) ret = {} cmd = ['rpm', '-qa', '--queryformat', - salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', '(none)\n')] + salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', '(none)') + '\n'] output = __salt__['cmd.run'](cmd, python_shell=False, output_loglevel='trace') @@ -621,15 +650,16 @@ def list_pkgs(versions_as_list=False, **kwargs): osarch=__grains__['osarch'] ) if pkginfo is not None: - __salt__['pkg_resource.add_pkg'](ret, - pkginfo.name, - pkginfo.version) + all_attr = {'version': pkginfo.version, 'arch': pkginfo.arch, 'install_date': pkginfo.install_date, + 'install_date_time_t': pkginfo.install_date_time_t} + __salt__['pkg_resource.add_pkg'](ret, pkginfo.name, all_attr) - __salt__['pkg_resource.sort_pkglist'](ret) - __context__['pkg.list_pkgs'] = copy.deepcopy(ret) - if not versions_as_list: - __salt__['pkg_resource.stringify'](ret) - return ret + for pkgname in ret: + ret[pkgname] = sorted(ret[pkgname], key=lambda d: d['version']) + + __context__['pkg.list_pkgs'] = ret + + return __salt__['pkg_resource.format_pkg_list'](ret, versions_as_list, attr) def list_repo_pkgs(*args, **kwargs): @@ -1035,6 +1065,11 @@ def refresh_db(**kwargs): clean_cmd = [_yum(), '--quiet', 'clean', 'expire-cache'] update_cmd = [_yum(), '--quiet', 'check-update'] + + if __grains__.get('os_family') == 'RedHat' and __grains__.get('osmajorrelease') == '7': + # This feature is disable because it is not used by Salt and lasts a lot with using large repo like EPEL + 
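The ``list_pkgs`` changes above stop stringifying results locally and instead pass the cached structure, the ``versions_as_list`` flag, and the new ``attr`` list to ``pkg_resource.format_pkg_list``. A rough sketch of the filtering that call is expected to perform, modeled on the ``_format_pkg_list`` helper removed from ``zypper.py`` further down in this patch rather than on the actual ``pkg_resource`` implementation:

.. code-block:: python

    import copy

    def format_pkg_list(packages, versions_as_list, attr):
        '''
        Reduce {name: [{attribute: value, ...}, ...]} data to what was requested.
        '''
        ret = copy.deepcopy(packages)
        if attr:
            requested = set(['version', 'arch', 'install_date', 'install_date_time_t'])
            if attr != 'all':
                # 'version' is always kept alongside whatever was asked for.
                requested &= set(list(attr) + ['version'])
            for name in ret:
                ret[name] = [dict((key, entry[key]) for key in requested)
                             for entry in ret[name]]
            return ret
        # No attribute list: collapse each entry back to a bare version string.
        for name in ret:
            ret[name] = [entry['version'] for entry in ret[name]]
        if not versions_as_list:
            ret = dict((name, ','.join(versions)) for name, versions in ret.items())
        return ret

For example, ``attr=['arch']`` would leave each package entry as ``{'version': ..., 'arch': ...}``, while omitting ``attr`` keeps the historical comma-separated version strings.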
update_cmd.append('--setopt=autocheck_running_kernel=false') + for args in (repo_arg, exclude_arg, branch_arg): if args: clean_cmd.extend(args) @@ -1066,6 +1101,21 @@ def clean_metadata(**kwargs): return refresh_db(**kwargs) +class AvailablePackages(salt.utils.lazy.LazyDict): + def __init__(self, *args, **kwargs): + super(AvailablePackages, self).__init__() + self._args = args + self._kwargs = kwargs + + def _load(self, key): + self._load_all() + return True + + def _load_all(self): + self._dict = list_repo_pkgs(*self._args, **self._kwargs) + self.loaded = True + + def install(name=None, refresh=False, skip_verify=False, @@ -1219,11 +1269,41 @@ def install(name=None, .. versionadded:: 2014.7.0 + diff_attr: + If a list of package attributes is specified, returned value will + contain them, eg.:: + + {'': { + 'old': { + 'version': '', + 'arch': ''}, + + 'new': { + 'version': '', + 'arch': ''}}} + + Valid attributes are: ``version``, ``arch``, ``install_date``, ``install_date_time_t``. + + If ``all`` is specified, all valid attributes will be returned. + + .. versionadded:: Oxygen Returns a dict containing the new package names and versions:: {'': {'old': '', 'new': ''}} + + If an attribute list in diff_attr is specified, the dict will also contain + any specified attribute, eg.:: + + {'': { + 'old': { + 'version': '', + 'arch': ''}, + + 'new': { + 'version': '', + 'arch': ''}}} ''' repo_arg = _get_repo_options(**kwargs) exclude_arg = _get_excludes_option(**kwargs) @@ -1245,10 +1325,11 @@ def install(name=None, version_num = kwargs.get('version') - old = list_pkgs(versions_as_list=False) if not downloadonly else list_downloaded() + diff_attr = kwargs.get("diff_attr") + old = list_pkgs(versions_as_list=False, attr=diff_attr) if not downloadonly else list_downloaded() # Use of __context__ means no duplicate work here, just accessing # information already in __context__ from the previous call to list_pkgs() - old_as_list = list_pkgs(versions_as_list=True) if not downloadonly else list_downloaded() + old_as_list = list_pkgs(versions_as_list=True, attr=diff_attr) if not downloadonly else list_downloaded() to_install = [] to_downgrade = [] @@ -1279,7 +1360,7 @@ def install(name=None, has_comparison.append(pkgname) except (TypeError, ValueError): continue - _available = list_repo_pkgs( + _available = AvailablePackages( *has_wildcards + has_comparison, byrepo=False, **kwargs) @@ -1414,7 +1495,7 @@ def install(name=None, if reinstall and cver: for ver in cver: ver = norm_epoch(ver, version_num) - if salt.utils.compare_versions(ver1=version_num, + if salt.utils.versions.compare(ver1=version_num, oper='==', ver2=ver, cmp_func=version_cmp): @@ -1428,7 +1509,7 @@ def install(name=None, else: for ver in cver: ver = norm_epoch(ver, version_num) - if salt.utils.compare_versions(ver1=version_num, + if salt.utils.versions.compare(ver1=version_num, oper='>=', ver2=ver, cmp_func=version_cmp): @@ -1583,7 +1664,7 @@ def install(name=None, errors.append(out['stdout']) __context__.pop('pkg.list_pkgs', None) - new = list_pkgs(versions_as_list=False) if not downloadonly else list_downloaded() + new = list_pkgs(versions_as_list=False, attr=diff_attr) if not downloadonly else list_downloaded() ret = salt.utils.compare_dicts(old, new) @@ -2895,7 +2976,7 @@ def modified(*packages, **flags): return __salt__['lowpkg.modified'](*packages, **flags) -@decorators.which('yumdownloader') +@salt.utils.decorators.path.which('yumdownloader') def download(*packages): ''' .. 
versionadded:: 2015.5.0 diff --git a/salt/modules/zabbix.py b/salt/modules/zabbix.py index 2403d2e2b9..7fc76f4a09 100644 --- a/salt/modules/zabbix.py +++ b/salt/modules/zabbix.py @@ -31,6 +31,7 @@ import json # Import salt libs import salt.utils +import salt.utils.path from salt.utils.versions import LooseVersion as _LooseVersion from salt.ext.six.moves.urllib.error import HTTPError, URLError # pylint: disable=import-error,no-name-in-module @@ -46,7 +47,7 @@ def __virtual__(): ''' Only load the module if Zabbix server is installed ''' - if salt.utils.which('zabbix_server'): + if salt.utils.path.which('zabbix_server'): return __virtualname__ return (False, 'The zabbix execution module cannot be loaded: zabbix not installed.') diff --git a/salt/modules/zcbuildout.py b/salt/modules/zcbuildout.py index 5707d6705d..66f111f0df 100644 --- a/salt/modules/zcbuildout.py +++ b/salt/modules/zcbuildout.py @@ -35,7 +35,7 @@ import copy # Import 3rd-party libs # pylint: disable=import-error,no-name-in-module,redefined-builtin -from salt.ext.six import string_types, text_type +from salt.ext import six from salt.ext.six.moves import range from salt.ext.six.moves.urllib.request import urlopen as _urlopen # pylint: enable=import-error,no-name-in-module,redefined-builtin @@ -124,7 +124,7 @@ def _salt_callback(func, **kwargs): LOG.clear() # before returning, trying to compact the log output for k in ['comment', 'out', 'outlog']: - if status[k] and isinstance(status[k], string_types): + if status[k] and isinstance(status[k], six.string_types): status[k] = '\n'.join([ log for log in status[k].split('\n') @@ -142,7 +142,7 @@ class _Logger(object): self._by_level = {} def _log(self, level, msg): - if not isinstance(msg, text_type): + if not isinstance(msg, six.text_type): msg = msg.decode('utf-8') if level not in self._by_level: self._by_level[level] = [] @@ -185,7 +185,7 @@ LOG = _Logger() def _encode_string(string): - if isinstance(string, text_type): + if isinstance(string, six.text_type): string = string.encode('utf-8') return string @@ -217,7 +217,7 @@ def _set_status(m, m['logs_by_level'] = LOG.by_level.copy() outlog, outlog_by_level = '', '' m['comment'] = comment - if out and isinstance(out, string_types): + if out and isinstance(out, six.string_types): outlog += HR outlog += 'OUTPUT:\n' outlog += '{0}\n'.format(_encode_string(out)) @@ -1029,17 +1029,17 @@ def _check_onlyif_unless(onlyif, unless, directory, runas=None, env=()): status['status'] = False retcode = __salt__['cmd.retcode'] if onlyif is not None: - if not isinstance(onlyif, string_types): + if not isinstance(onlyif, six.string_types): if not onlyif: _valid(status, 'onlyif execution failed') - elif isinstance(onlyif, string_types): + elif isinstance(onlyif, six.string_types): if retcode(onlyif, cwd=directory, runas=runas, env=env) != 0: _valid(status, 'onlyif execution failed') if unless is not None: - if not isinstance(unless, string_types): + if not isinstance(unless, six.string_types): if unless: _valid(status, 'unless execution succeeded') - elif isinstance(unless, string_types): + elif isinstance(unless, six.string_types): if retcode(unless, cwd=directory, runas=runas, env=env, python_shell=False) == 0: _valid(status, 'unless execution succeeded') if status['status']: diff --git a/salt/modules/zfs.py b/salt/modules/zfs.py index 5053ca6605..dc42400796 100644 --- a/salt/modules/zfs.py +++ b/salt/modules/zfs.py @@ -11,7 +11,8 @@ from __future__ import absolute_import import logging # Import Salt libs -import salt.utils +import salt.utils.args 
+import salt.utils.path import salt.modules.cmdmod import salt.utils.decorators as decorators from salt.utils.odict import OrderedDict @@ -30,7 +31,7 @@ def _check_zfs(): Looks to see if zfs is present on the system. ''' # Get the path to the zfs binary. - return salt.utils.which('zfs') + return salt.utils.path.which('zfs') @decorators.memoize @@ -39,7 +40,7 @@ def _check_features(): Looks to see if zpool-features is available ''' # get man location - man = salt.utils.which('man') + man = salt.utils.path.which('man') if not man: return False @@ -62,19 +63,24 @@ def __virtual__(): if on_freebsd: cmd = 'kldstat -q -m zfs' elif on_linux: - modinfo = salt.utils.which('modinfo') + modinfo = salt.utils.path.which('modinfo') if modinfo: cmd = '{0} zfs'.format(modinfo) else: cmd = 'ls /sys/module/zfs' elif on_solaris: - # not using salt.utils.which('zfs') to keep compatible with others + # not using salt.utils.path.which('zfs') to keep compatible with others cmd = 'which zfs' if cmd and salt.modules.cmdmod.retcode( cmd, output_loglevel='quiet', ignore_retcode=True ) == 0: return 'zfs' + + _zfs_fuse = lambda f: __salt__['service.' + f]('zfs-fuse') + if _zfs_fuse('available') and (_zfs_fuse('status') or _zfs_fuse('start')): + return 'zfs' + return (False, "The zfs module cannot be loaded: zfs not found") @@ -1142,7 +1148,7 @@ def set(*dataset, **kwargs): ret['error'] = 'one or more snapshots must be specified' # clean kwargs - properties = salt.utils.clean_kwargs(**kwargs) + properties = salt.utils.args.clean_kwargs(**kwargs) if len(properties) < 1: ret['error'] = '{0}one or more properties must be specified'.format( '{0},\n'.format(ret['error']) if 'error' in ret else '' diff --git a/salt/modules/znc.py b/salt/modules/znc.py index 23a2d59a5a..8da86dfa85 100644 --- a/salt/modules/znc.py +++ b/salt/modules/znc.py @@ -16,7 +16,7 @@ import random import signal # Import salt libs -import salt.utils +import salt.utils.path from salt.ext.six.moves import range log = logging.getLogger(__name__) @@ -26,7 +26,7 @@ def __virtual__(): ''' Only load the module if znc is installed ''' - if salt.utils.which('znc'): + if salt.utils.path.which('znc'): return 'znc' return (False, "Module znc: znc binary not found") diff --git a/salt/modules/zoneadm.py b/salt/modules/zoneadm.py index 4edbe15679..0ee2bead73 100644 --- a/salt/modules/zoneadm.py +++ b/salt/modules/zoneadm.py @@ -17,7 +17,7 @@ from __future__ import absolute_import import logging # Import Salt libs -import salt.utils +import salt.utils.path import salt.utils.decorators from salt.ext.six.moves import range @@ -61,11 +61,11 @@ def __virtual__(): We are available if we are have zoneadm and are the global zone on Solaris 10, OmniOS, OpenIndiana, OpenSolaris, or Smartos. 
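The ``zfs.py`` ``__virtual__`` hunk above adds a fallback that loads the module when the ``zfs-fuse`` service is usable instead of the kernel module. Rewritten as a plain helper for clarity (``__salt__`` is assumed to be the loader-injected dict of execution modules, as it is inside a loaded module), the check amounts to:

.. code-block:: python

    def zfs_fuse_usable(__salt__):
        '''
        Return True if the zfs-fuse service exists and is either already
        running or can be started on demand.
        '''
        def svc(action):
            return __salt__['service.' + action]('zfs-fuse')

        return bool(svc('available') and (svc('status') or svc('start')))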
''' - ## note: we depend on PR#37472 to distinguish between Solaris and Oracle Solaris - if _is_globalzone() and salt.utils.which('zoneadm'): - if __grains__['os'] in ['Solaris', 'OpenSolaris', 'SmartOS', 'OmniOS', 'OpenIndiana']: + if _is_globalzone() and salt.utils.path.which('zoneadm'): + if __grains__['os'] in ['OpenSolaris', 'SmartOS', 'OmniOS', 'OpenIndiana']: + return __virtualname__ + elif __grains__['os'] == 'Oracle Solaris' and int(__grains__['osmajorrelease']) == 10: return __virtualname__ - return ( False, '{0} module can only be loaded in a solaris globalzone.'.format( diff --git a/salt/modules/zonecfg.py b/salt/modules/zonecfg.py index 2aa9728c06..54e1747381 100644 --- a/salt/modules/zonecfg.py +++ b/salt/modules/zonecfg.py @@ -19,12 +19,15 @@ import logging import re # Import Salt libs -import salt.ext.six as six -import salt.utils -import salt.utils.files +import salt.utils.args import salt.utils.decorators +import salt.utils.files +import salt.utils.path from salt.utils.odict import OrderedDict +# Import 3rd-party libs +from salt.ext import six + log = logging.getLogger(__name__) # Define the module's virtual name @@ -97,11 +100,11 @@ def __virtual__(): We are available if we are have zonecfg and are the global zone on Solaris 10, OmniOS, OpenIndiana, OpenSolaris, or Smartos. ''' - # note: we depend on PR#37472 to distinguish between Solaris and Oracle Solaris - if _is_globalzone() and salt.utils.which('zonecfg'): - if __grains__['os'] in ['Solaris', 'OpenSolaris', 'SmartOS', 'OmniOS', 'OpenIndiana']: + if _is_globalzone() and salt.utils.path.which('zonecfg'): + if __grains__['os'] in ['OpenSolaris', 'SmartOS', 'OmniOS', 'OpenIndiana']: + return __virtualname__ + elif __grains__['os'] == 'Oracle Solaris' and int(__grains__['osmajorrelease']) == 10: return __virtualname__ - return ( False, '{0} module can only be loaded in a solaris globalzone.'.format( @@ -124,7 +127,7 @@ def _parse_value(value): '''Internal helper for parsing configuration values into python values''' if isinstance(value, bool): return 'true' if value else 'false' - elif isinstance(value, str): + elif isinstance(value, six.string_types): # parse compacted notation to dict listparser = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''') @@ -503,7 +506,7 @@ def _resource(methode, zone, resource_type, resource_selector, **kwargs): ret = {'status': True} # parse kwargs - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) for k in kwargs: if isinstance(kwargs[k], dict) or isinstance(kwargs[k], list): kwargs[k] = _sanitize_value(kwargs[k]) diff --git a/salt/modules/zookeeper.py b/salt/modules/zookeeper.py index 6ed4a22c4f..c14c539e38 100644 --- a/salt/modules/zookeeper.py +++ b/salt/modules/zookeeper.py @@ -75,6 +75,7 @@ except ImportError: # Import Salt libraries import salt.utils +import salt.utils.stringutils __virtualname__ = 'zookeeper' @@ -183,7 +184,7 @@ def create(path, value='', acls=None, ephemeral=False, sequence=False, makepath= acls = [make_digest_acl(**acl) for acl in acls] conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme, username=username, password=password, default_acl=default_acl) - return conn.create(path, salt.utils.to_bytes(value), acls, ephemeral, sequence, makepath) + return conn.create(path, salt.utils.stringutils.to_bytes(value), acls, ephemeral, sequence, makepath) def ensure_path(path, acls=None, profile=None, hosts=None, scheme=None, @@ -302,7 +303,7 @@ def get(path, profile=None, hosts=None, scheme=None, username=None, 
password=Non conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme, username=username, password=password, default_acl=default_acl) ret, _ = conn.get(path) - return salt.utils.to_str(ret) + return salt.utils.stringutils.to_str(ret) def get_children(path, profile=None, hosts=None, scheme=None, username=None, password=None, default_acl=None): @@ -384,7 +385,7 @@ def set(path, value, version=-1, profile=None, hosts=None, scheme=None, ''' conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme, username=username, password=password, default_acl=default_acl) - return conn.set(path, salt.utils.to_bytes(value), version=version) + return conn.set(path, salt.utils.stringutils.to_bytes(value), version=version) def get_acls(path, profile=None, hosts=None, scheme=None, username=None, password=None, default_acl=None): diff --git a/salt/modules/zpool.py b/salt/modules/zpool.py index 406530310e..9433c87abd 100644 --- a/salt/modules/zpool.py +++ b/salt/modules/zpool.py @@ -12,8 +12,9 @@ import stat import logging # Import Salt libs -import salt.utils -import salt.utils.decorators as decorators +import salt.utils.decorators +import salt.utils.decorators.path +import salt.utils.path from salt.utils.odict import OrderedDict log = logging.getLogger(__name__) @@ -24,21 +25,21 @@ __func_alias__ = { } -@decorators.memoize +@salt.utils.decorators.memoize def _check_zpool(): ''' Looks to see if zpool is present on the system ''' - return salt.utils.which('zpool') + return salt.utils.path.which('zpool') -@decorators.memoize +@salt.utils.decorators.memoize def _check_features(): ''' Looks to see if zpool-features is available ''' # get man location - man = salt.utils.which('man') + man = salt.utils.path.which('man') if not man: return False @@ -49,12 +50,12 @@ def _check_features(): return res['retcode'] == 0 -@decorators.memoize +@salt.utils.decorators.memoize def _check_mkfile(): ''' Looks to see if mkfile is present on the system ''' - return salt.utils.which('mkfile') + return salt.utils.path.which('mkfile') def __virtual__(): @@ -918,7 +919,7 @@ def replace(zpool, old_device, new_device=None, force=False): return ret -@salt.utils.decorators.which('mkfile') +@salt.utils.decorators.path.which('mkfile') def create_file_vdev(size, *vdevs): ''' .. 
versionchanged:: 2016.3.0 diff --git a/salt/modules/zypper.py b/salt/modules/zypper.py index ed16544545..60b920d929 100644 --- a/salt/modules/zypper.py +++ b/salt/modules/zypper.py @@ -14,20 +14,16 @@ Package support for openSUSE via the zypper package manager # Import python libs from __future__ import absolute_import -import copy import fnmatch import logging import re import os import time import datetime -from salt.utils.versions import LooseVersion # Import 3rd-party libs # pylint: disable=import-error,redefined-builtin,no-name-in-module -import salt.ext.six as six -from salt.exceptions import SaltInvocationError -import salt.utils.event +from salt.ext import six from salt.ext.six.moves import configparser from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: enable=import-error,redefined-builtin,no-name-in-module @@ -37,11 +33,13 @@ from xml.parsers.expat import ExpatError # Import salt libs import salt.utils +import salt.utils.event import salt.utils.files +import salt.utils.path import salt.utils.pkg import salt.utils.systemd -from salt.exceptions import ( - CommandExecutionError, MinionError) +from salt.utils.versions import LooseVersion +from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError log = logging.getLogger(__name__) @@ -62,7 +60,7 @@ def __virtual__(): if __grains__.get('os_family', '') != 'Suse': return (False, "Module zypper: non SUSE OS not suppored by zypper package manager") # Not all versions of SUSE use zypper, check that it is available - if not salt.utils.which('zypper'): + if not salt.utils.path.which('zypper'): return (False, "Module zypper: zypper package manager not found") return __virtualname__ @@ -695,7 +693,8 @@ def list_pkgs(versions_as_list=False, **kwargs): attr = kwargs.get("attr") if 'pkg.list_pkgs' in __context__: - return _format_pkg_list(__context__['pkg.list_pkgs'], versions_as_list, attr) + cached = __context__['pkg.list_pkgs'] + return __salt__['pkg_resource.format_pkg_list'](cached, versions_as_list, attr) cmd = ['rpm', '-qa', '--queryformat', ( "%{NAME}_|-%{VERSION}_|-%{RELEASE}_|-%{ARCH}_|-" @@ -719,35 +718,7 @@ def list_pkgs(versions_as_list=False, **kwargs): __context__['pkg.list_pkgs'] = ret - return _format_pkg_list(ret, versions_as_list, attr) - - -def _format_pkg_list(packages, versions_as_list, attr): - ''' - Formats packages according to parameters. 
- ''' - ret = copy.deepcopy(packages) - if attr: - requested_attr = set(['version', 'arch', 'install_date', 'install_date_time_t']) - - if attr != 'all': - requested_attr &= set(attr + ['version']) - - for name in ret: - versions = [] - for all_attr in ret[name]: - filtered_attr = {} - for key in requested_attr: - filtered_attr[key] = all_attr[key] - versions.append(filtered_attr) - ret[name] = versions - return ret - - for name in ret: - ret[name] = [d['version'] for d in ret[name]] - if not versions_as_list: - __salt__['pkg_resource.stringify'](ret) - return ret + return __salt__['pkg_resource.format_pkg_list'](ret, versions_as_list, attr) def _get_configured_repos(): diff --git a/salt/netapi/__init__.py b/salt/netapi/__init__.py index eabb9981a0..99a33697f2 100644 --- a/salt/netapi/__init__.py +++ b/salt/netapi/__init__.py @@ -19,7 +19,7 @@ import salt.client.ssh.client import salt.exceptions # Import third party libs -import salt.ext.six as six +from salt.ext import six class NetapiClient(object): diff --git a/salt/netapi/rest_cherrypy/app.py b/salt/netapi/rest_cherrypy/app.py index 9626690347..5e9ea47cf7 100644 --- a/salt/netapi/rest_cherrypy/app.py +++ b/salt/netapi/rest_cherrypy/app.py @@ -580,25 +580,33 @@ import signal import tarfile from multiprocessing import Process, Pipe -# Import third-party libs -# pylint: disable=import-error -import cherrypy # pylint: disable=3rd-party-module-not-gated -import yaml -import salt.ext.six as six -# pylint: enable=import-error +logger = logging.getLogger(__name__) +# Import third-party libs +# pylint: disable=import-error, 3rd-party-module-not-gated +import cherrypy +try: + from cherrypy.lib import cpstats +except ImportError: + cpstats = None + logger.warn('Import of cherrypy.cpstats failed. ' + 'Possible upstream bug: ' + 'https://github.com/cherrypy/cherrypy/issues/1444') + +import yaml +# pylint: enable=import-error, 3rd-party-module-not-gated # Import Salt libs import salt import salt.auth import salt.utils import salt.utils.event +import salt.utils.stringutils +from salt.ext import six # Import salt-api libs import salt.netapi -logger = logging.getLogger(__name__) - # Imports related to websocket try: from .tools import websockets @@ -868,7 +876,7 @@ def hypermedia_handler(*args, **kwargs): try: response = out(ret) if six.PY3: - response = salt.utils.to_bytes(response) + response = salt.utils.stringutils.to_bytes(response) return response except Exception: msg = 'Could not serialize the return data from Salt.' @@ -926,7 +934,7 @@ def urlencoded_processor(entity): entity.fp.read(fp_out=contents) contents.seek(0) body_str = contents.read() - body_bytes = salt.utils.to_bytes(body_str) + body_bytes = salt.utils.stringutils.to_bytes(body_str) body_bytes = six.BytesIO(body_bytes) body_bytes.seek(0) # Patch fp @@ -2716,13 +2724,6 @@ class Stats(object): :status 406: |406| ''' if hasattr(logging, 'statistics'): - # Late import - try: - from cherrypy.lib import cpstats - except ImportError: - logger.error('Import of cherrypy.cpstats failed. 
Possible ' - 'upstream bug here: https://github.com/cherrypy/cherrypy/issues/1444') - return {} return cpstats.extrapolate_statistics(logging.statistics) return {} @@ -2842,13 +2843,14 @@ class API(object): 'tools.trailing_slash.on': True, 'tools.gzip.on': True, - 'tools.cpstats.on': self.apiopts.get('collect_stats', False), - 'tools.html_override.on': True, 'tools.cors_tool.on': True, }, } + if cpstats and self.apiopts.get('collect_stats', False): + conf['/']['tools.cpstats.on'] = True + if 'favicon' in self.apiopts: conf['/favicon.ico'] = { 'tools.staticfile.on': True, diff --git a/salt/netapi/rest_cherrypy/event_processor.py b/salt/netapi/rest_cherrypy/event_processor.py index 3301e41e92..58ec010b00 100644 --- a/salt/netapi/rest_cherrypy/event_processor.py +++ b/salt/netapi/rest_cherrypy/event_processor.py @@ -6,7 +6,7 @@ import json import logging # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Import Salt libs import salt.netapi diff --git a/salt/netapi/rest_tornado/__init__.py b/salt/netapi/rest_tornado/__init__.py index 56a88e34d3..fc547a02a3 100644 --- a/salt/netapi/rest_tornado/__init__.py +++ b/salt/netapi/rest_tornado/__init__.py @@ -115,6 +115,7 @@ def start(): ssl_opts.update({'keyfile': mod_opts['ssl_key']}) kwargs['ssl_options'] = ssl_opts + import tornado.httpserver http_server = tornado.httpserver.HTTPServer(get_application(__opts__), **kwargs) try: http_server.bind(mod_opts['port'], diff --git a/salt/netapi/rest_tornado/event_processor.py b/salt/netapi/rest_tornado/event_processor.py index b8e947a591..c644f12361 100644 --- a/salt/netapi/rest_tornado/event_processor.py +++ b/salt/netapi/rest_tornado/event_processor.py @@ -3,7 +3,7 @@ from __future__ import absolute_import import json import logging import threading -import salt.ext.six as six +from salt.ext import six import salt.netapi diff --git a/salt/netapi/rest_tornado/saltnado.py b/salt/netapi/rest_tornado/saltnado.py index cf7bd652a9..d40edf44e0 100644 --- a/salt/netapi/rest_tornado/saltnado.py +++ b/salt/netapi/rest_tornado/saltnado.py @@ -206,7 +206,7 @@ import tornado.web import tornado.gen from tornado.concurrent import Future from zmq.eventloop import ioloop -import salt.ext.six as six +from salt.ext import six # pylint: enable=import-error # instantiate the zmq IOLoop (specialized poller) @@ -523,8 +523,7 @@ class BaseSaltAPIHandler(tornado.web.RequestHandler, SaltClientsMixIn): # pylin try: # Use cgi.parse_header to correctly separate parameters from value - header = cgi.parse_header(self.request.headers['Content-Type']) - value, parameters = header + value, parameters = cgi.parse_header(self.request.headers['Content-Type']) return ct_in_map[value](tornado.escape.native_str(data)) except KeyError: self.send_error(406) @@ -538,7 +537,7 @@ class BaseSaltAPIHandler(tornado.web.RequestHandler, SaltClientsMixIn): # pylin if not self.request.body: return data = self.deserialize(self.request.body) - self.raw_data = copy(data) + self.request_payload = copy(data) if data and 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] @@ -696,15 +695,13 @@ class SaltAuthHandler(BaseSaltAPIHandler): # pylint: disable=W0223 }} ''' try: - request_payload = self.deserialize(self.request.body) - - if not isinstance(request_payload, dict): + if not isinstance(self.request_payload, dict): self.send_error(400) return - creds = {'username': request_payload['username'], - 'password': request_payload['password'], - 'eauth': request_payload['eauth'], + creds = {'username': 
self.request_payload['username'], + 'password': self.request_payload['password'], + 'eauth': self.request_payload['eauth'], } # if any of the args are missing, its a bad request except KeyError: @@ -1641,7 +1638,7 @@ class WebhookSaltAPIHandler(SaltAPIHandler): # pylint: disable=W0223 value = value[0] arguments[argname] = value ret = self.event.fire_event({ - 'post': self.raw_data, + 'post': self.request_payload, 'get': arguments, # In Tornado >= v4.0.3, the headers come # back as an HTTPHeaders instance, which diff --git a/salt/output/__init__.py b/salt/output/__init__.py index 9cff2d5d99..6cb90a47e1 100644 --- a/salt/output/__init__.py +++ b/salt/output/__init__.py @@ -4,22 +4,24 @@ Used to manage the outputter system. This package is the modular system used for managing outputters. ''' -# Import python libs -from __future__ import print_function +# Import Python libs from __future__ import absolute_import -import re -import os -import sys +from __future__ import print_function import errno import logging +import os +import re +import sys import traceback -# Import salt libs +# Import Salt libs import salt.loader import salt.utils import salt.utils.files -import salt.ext.six as six -from salt.utils import print_cli +import salt.utils.platform + +# Import 3rd-party libs +from salt.ext import six # Are you really sure !!! # dealing with unicode is not as simple as setting defaultencoding @@ -125,7 +127,7 @@ def display_output(data, out=None, opts=None, **kwargs): ofh.close() return if display_data: - print_cli(display_data) + salt.utils.print_cli(display_data) except IOError as exc: # Only raise if it's NOT a broken pipe if exc.errno != errno.EPIPE: @@ -165,10 +167,17 @@ def get_printout(out, opts=None, **kwargs): if opts.get('force_color', False): opts['color'] = True - elif opts.get('no_color', False) or is_pipe() or salt.utils.is_windows(): + elif opts.get('no_color', False) or is_pipe() or salt.utils.platform.is_windows(): opts['color'] = False else: opts['color'] = True + else: + if opts.get('force_color', False): + opts['color'] = True + elif opts.get('no_color', False) or salt.utils.platform.is_windows(): + opts['color'] = False + else: + pass outputters = salt.loader.outputters(opts) if out not in outputters: diff --git a/salt/output/highstate.py b/salt/output/highstate.py index 0de81af124..2f30d6c8f9 100644 --- a/salt/output/highstate.py +++ b/salt/output/highstate.py @@ -109,11 +109,12 @@ import textwrap # Import salt libs import salt.utils +import salt.utils.stringutils import salt.output from salt.utils.locales import sdecode # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six import logging @@ -168,7 +169,7 @@ def _format_host(host, data): nchanges = 0 strip_colors = __opts__.get('strip_colors', True) - if isinstance(data, int) or isinstance(data, str): + if isinstance(data, int) or isinstance(data, six.string_types): # Data in this format is from saltmod.function, # so it is always a 'change' nchanges = 1 @@ -319,7 +320,7 @@ def _format_host(host, data): if six.PY2: ret['comment'] = str(ret['comment']).decode('utf-8') else: - ret['comment'] = salt.utils.to_str(ret['comment']) + ret['comment'] = salt.utils.stringutils.to_str(ret['comment']) except UnicodeDecodeError: # but try to continue on errors pass @@ -547,7 +548,7 @@ def _format_terse(tcolor, comps, ret, colors, tabular): if __opts__.get('state_output_profile', True) and 'start_time' in ret: fmt_string += u'{6[start_time]!s} [{6[duration]!s:>7} ms] ' fmt_string += u'{2:>10}.{3:<10} {4:7} Name: 
{1}{5}' - elif isinstance(tabular, str): + elif isinstance(tabular, six.string_types): fmt_string = tabular else: fmt_string = '' diff --git a/salt/output/nested.py b/salt/output/nested.py index 7d21dba49f..8ef0e4c046 100644 --- a/salt/output/nested.py +++ b/salt/output/nested.py @@ -25,14 +25,14 @@ Example output:: ''' from __future__ import absolute_import # Import python libs -import salt.utils.odict from numbers import Number # Import salt libs import salt.output -from salt.ext.six import string_types -from salt.utils import get_colors import salt.utils.locales +import salt.utils.odict +from salt.utils import get_colors +from salt.ext.six import string_types class NestDisplay(object): diff --git a/salt/output/newline_values_only.py b/salt/output/newline_values_only.py index bd186a6208..b5bd440b3b 100644 --- a/salt/output/newline_values_only.py +++ b/salt/output/newline_values_only.py @@ -73,7 +73,7 @@ Output from __future__ import absolute_import # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def _get_values(data): diff --git a/salt/output/no_return.py b/salt/output/no_return.py index d0f3394fd8..781db538de 100644 --- a/salt/output/no_return.py +++ b/salt/output/no_return.py @@ -17,6 +17,9 @@ from __future__ import absolute_import # Import salt libs import salt.utils +# Import 3rd-party libs +from salt.ext import six + class NestDisplay(object): ''' @@ -24,32 +27,32 @@ class NestDisplay(object): ''' def __init__(self): self.colors = salt.utils.get_colors( - __opts__.get('color'), - __opts__.get('color_theme')) + __opts__.get(u'color'), + __opts__.get(u'color_theme')) def display(self, ret, indent, prefix, out): ''' Recursively iterate down through data structures to determine output ''' - if isinstance(ret, str): - lines = ret.split('\n') + if isinstance(ret, six.string_types): + lines = ret.split(u'\n') for line in lines: - out += '{0}{1}{2}{3}{4}\n'.format( - self.colors['RED'], - ' ' * indent, + out += u'{0}{1}{2}{3}{4}\n'.format( + self.colors[u'RED'], + u' ' * indent, prefix, line, - self.colors['ENDC']) + self.colors[u'ENDC']) elif isinstance(ret, dict): for key in sorted(ret): val = ret[key] - out += '{0}{1}{2}{3}{4}:\n'.format( - self.colors['CYAN'], + out += u'{0}{1}{2}{3}{4}:\n'.format( + self.colors[u'CYAN'], ' ' * indent, prefix, key, - self.colors['ENDC']) - out = self.display(val, indent + 4, '', out) + self.colors[u'ENDC']) + out = self.display(val, indent + 4, u'', out) return out @@ -58,4 +61,4 @@ def output(ret, **kwargs): # pylint: disable=unused-argument Display ret data ''' nest = NestDisplay() - return nest.display(ret, 0, '', '') + return nest.display(ret, 0, u'', u'') diff --git a/salt/output/overstatestage.py b/salt/output/overstatestage.py index ef77e10dc6..d35dd5a311 100644 --- a/salt/output/overstatestage.py +++ b/salt/output/overstatestage.py @@ -14,7 +14,7 @@ from __future__ import absolute_import import salt.utils # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # [{'group2': {'match': ['fedora17-2', 'fedora17-3'], # 'require': ['group1'], diff --git a/salt/output/pony.py b/salt/output/pony.py index 816df86ee9..dbdddd396e 100644 --- a/salt/output/pony.py +++ b/salt/output/pony.py @@ -46,15 +46,15 @@ from __future__ import absolute_import import subprocess # Import Salt libs -import salt.utils import salt.utils.locales +import salt.utils.path __virtualname__ = 'pony' def __virtual__(): - if salt.utils.which('ponysay'): + if salt.utils.path.which('ponysay'): return __virtualname__ return False diff 
--git a/salt/output/txt.py b/salt/output/txt.py index c0ae9eae2c..62c55ed596 100644 --- a/salt/output/txt.py +++ b/salt/output/txt.py @@ -17,18 +17,21 @@ def output(data, **kwargs): # pylint: disable=unused-argument ''' Output the data in lines, very nice for running commands ''' - ret = '' - if hasattr(data, 'keys'): + ret = u'' + if hasattr(data, u'keys'): for key in data: value = data[key] # Don't blow up on non-strings try: for line in value.splitlines(): - ret += '{0}: {1}\n'.format(key, line) + ret += u'{0}: {1}\n'.format(key, line) except AttributeError: - ret += '{0}: {1}\n'.format(key, value) + ret += u'{0}: {1}\n'.format(key, value) else: - # For non-dictionary data, just use print - ret += '{0}\n'.format(pprint.pformat(data)) + try: + ret += data + u'\n' + except TypeError: + # For non-dictionary, non-string data, just use print + ret += u'{0}\n'.format(pprint.pformat(data)) return ret diff --git a/salt/output/virt_query.py b/salt/output/virt_query.py index 491c96ef35..6f988524cb 100644 --- a/salt/output/virt_query.py +++ b/salt/output/virt_query.py @@ -11,7 +11,7 @@ runner. from __future__ import absolute_import # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def output(data, **kwargs): # pylint: disable=unused-argument diff --git a/salt/payload.py b/salt/payload.py index 1235a2e8b5..70492e4faa 100644 --- a/salt/payload.py +++ b/salt/payload.py @@ -16,11 +16,11 @@ import datetime import salt.log import salt.crypt import salt.transport.frame +import salt.utils.immutabletypes as immutabletypes from salt.exceptions import SaltReqTimeoutError -from salt.utils import immutabletypes # Import third party libs -import salt.ext.six as six +from salt.ext import six try: import zmq except ImportError: @@ -46,15 +46,15 @@ except ImportError: except ImportError: # TODO: Come up with a sane way to get a configured logfile # and write to the logfile when this error is hit also - LOG_FORMAT = '[%(levelname)-8s] %(message)s' + LOG_FORMAT = u'[%(levelname)-8s] %(message)s' salt.log.setup_console_logger(log_format=LOG_FORMAT) - log.fatal('Unable to import msgpack or msgpack_pure python modules') + log.fatal(u'Unable to import msgpack or msgpack_pure python modules') # Don't exit if msgpack is not available, this is to make local mode # work without msgpack #sys.exit(salt.defaults.exitcodes.EX_GENERIC) -if HAS_MSGPACK and not hasattr(msgpack, 'exceptions'): +if HAS_MSGPACK and not hasattr(msgpack, u'exceptions'): class PackValueError(Exception): ''' older versions of msgpack do not have PackValueError @@ -89,11 +89,11 @@ def format_payload(enc, **kwargs): Pass in the required arguments for a payload, the enc type and the cmd, then a list of keyword args to generate the body of the load dict. 
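The ``format_payload`` helper whose docstring ends above wraps its keyword arguments in a ``load`` dict under the given encryption type before handing the result to ``package()`` for serialization. As an illustration, ``format_payload('clear', cmd='_auth', id='minion-01')`` would build the following intermediate structure (the ``cmd`` and ``id`` values here are placeholders, not taken from this patch):

.. code-block:: python

    payload = {
        u'enc': u'clear',
        u'load': {
            u'cmd': u'_auth',
            u'id': u'minion-01',
        },
    }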
''' - payload = {'enc': enc} + payload = {u'enc': enc} load = {} for key in kwargs: load[key] = kwargs[key] - payload['load'] = load + payload[u'load'] = load return package(payload) @@ -104,11 +104,11 @@ class Serial(object): ''' def __init__(self, opts): if isinstance(opts, dict): - self.serial = opts.get('serial', 'msgpack') - elif isinstance(opts, str): + self.serial = opts.get(u'serial', u'msgpack') + elif isinstance(opts, six.string_types): self.serial = opts else: - self.serial = 'msgpack' + self.serial = u'msgpack' def loads(self, msg, encoding=None, raw=False): ''' @@ -140,10 +140,13 @@ class Serial(object): if six.PY3 and encoding is None and not raw: ret = salt.transport.frame.decode_embedded_strs(ret) except Exception as exc: - log.critical('Could not deserialize msgpack message.' - 'This often happens when trying to read a file not in binary mode' - 'To see message payload, enable debug logging and retry. Exception: {0}'.format(exc)) - log.debug('Msgpack deserialization failure on message: {0}'.format(msg)) + log.critical( + u'Could not deserialize msgpack message. This often happens ' + u'when trying to read a file not in binary mode. ' + u'To see message payload, enable debug logging and retry. ' + u'Exception: %s', exc + ) + log.debug(u'Msgpack deserialization failure on message: %s', msg) gc.collect() raise finally: @@ -158,7 +161,7 @@ class Serial(object): fn_.close() if data: if six.PY3: - return self.loads(data, encoding='utf-8') + return self.loads(data, encoding=u'utf-8') else: return self.loads(data) @@ -215,7 +218,7 @@ class Serial(object): return msgpack.ExtType(78, obj) def dt_encode(obj): - datetime_str = obj.strftime("%Y%m%dT%H:%M:%S.%f") + datetime_str = obj.strftime(u"%Y%m%dT%H:%M:%S.%f") if msgpack.version >= (0, 4, 0): return msgpack.packb(datetime_str, default=default, use_bin_type=use_bin_type) else: @@ -241,7 +244,7 @@ class Serial(object): return obj def immutable_encoder(obj): - log.debug('IMMUTABLE OBJ: {0}'.format(obj)) + log.debug(u'IMMUTABLE OBJ: %s', obj) if isinstance(obj, immutabletypes.ImmutableDict): return dict(obj) if isinstance(obj, immutabletypes.ImmutableList): @@ -249,12 +252,12 @@ class Serial(object): if isinstance(obj, immutabletypes.ImmutableSet): return set(obj) - if "datetime.datetime" in str(e): + if u"datetime.datetime" in str(e): if msgpack.version >= (0, 4, 0): return msgpack.dumps(datetime_encoder(msg), use_bin_type=use_bin_type) else: return msgpack.dumps(datetime_encoder(msg)) - elif "Immutable" in str(e): + elif u"Immutable" in str(e): if msgpack.version >= (0, 4, 0): return msgpack.dumps(msg, default=immutable_encoder, use_bin_type=use_bin_type) else: @@ -287,9 +290,10 @@ class Serial(object): else: return msgpack.dumps(odict_encoder(msg)) except (SystemError, TypeError) as exc: # pylint: disable=W0705 - log.critical('Unable to serialize message! Consider upgrading msgpack. ' - 'Message which failed was {failed_message} ' - 'with exception {exception_message}').format(msg, exc) + log.critical( + u'Unable to serialize message! Consider upgrading msgpack. ' + u'Message which failed was %s, with exception %s', msg, exc + ) def dump(self, msg, fn_): ''' @@ -309,7 +313,7 @@ class SREQ(object): ''' Create a generic interface to wrap salt zeromq req calls. 
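The ``SREQ`` class whose docstring ends above wraps a ZeroMQ REQ socket with lazy connection setup, TCP keepalive options, and retry handling. A hypothetical usage sketch (master address, minion identity, and load contents are placeholders):

.. code-block:: python

    from salt.payload import SREQ

    req = SREQ(u'tcp://127.0.0.1:4506', id_=u'minion-01',
               opts={u'tcp_keepalive': 1, u'tcp_keepalive_idle': 300})
    try:
        # send() serializes {'enc': ..., 'load': ...}, polls for a reply and
        # raises SaltReqTimeoutError once all tries are exhausted.
        reply = req.send(u'clear', {u'cmd': u'_pillar', u'id': u'minion-01'},
                         tries=3, timeout=60)
    finally:
        req.destroy()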
''' - def __init__(self, master, id_='', serial='msgpack', linger=0, opts=None): + def __init__(self, master, id_=u'', serial=u'msgpack', linger=0, opts=None): self.master = master self.id_ = id_ self.serial = Serial(serial) @@ -323,20 +327,20 @@ class SREQ(object): ''' Lazily create the socket. ''' - if not hasattr(self, '_socket'): + if not hasattr(self, u'_socket'): # create a new one self._socket = self.context.socket(zmq.REQ) - if hasattr(zmq, 'RECONNECT_IVL_MAX'): + if hasattr(zmq, u'RECONNECT_IVL_MAX'): self._socket.setsockopt( zmq.RECONNECT_IVL_MAX, 5000 ) self._set_tcp_keepalive() - if self.master.startswith('tcp://['): + if self.master.startswith(u'tcp://['): # Hint PF type if bracket enclosed IPv6 address - if hasattr(zmq, 'IPV6'): + if hasattr(zmq, u'IPV6'): self._socket.setsockopt(zmq.IPV6, 1) - elif hasattr(zmq, 'IPV4ONLY'): + elif hasattr(zmq, u'IPV4ONLY'): self._socket.setsockopt(zmq.IPV4ONLY, 0) self._socket.linger = self.linger if self.id_: @@ -345,37 +349,37 @@ class SREQ(object): return self._socket def _set_tcp_keepalive(self): - if hasattr(zmq, 'TCP_KEEPALIVE') and self.opts: - if 'tcp_keepalive' in self.opts: + if hasattr(zmq, u'TCP_KEEPALIVE') and self.opts: + if u'tcp_keepalive' in self.opts: self._socket.setsockopt( - zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive'] + zmq.TCP_KEEPALIVE, self.opts[u'tcp_keepalive'] ) - if 'tcp_keepalive_idle' in self.opts: + if u'tcp_keepalive_idle' in self.opts: self._socket.setsockopt( - zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle'] + zmq.TCP_KEEPALIVE_IDLE, self.opts[u'tcp_keepalive_idle'] ) - if 'tcp_keepalive_cnt' in self.opts: + if u'tcp_keepalive_cnt' in self.opts: self._socket.setsockopt( - zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt'] + zmq.TCP_KEEPALIVE_CNT, self.opts[u'tcp_keepalive_cnt'] ) - if 'tcp_keepalive_intvl' in self.opts: + if u'tcp_keepalive_intvl' in self.opts: self._socket.setsockopt( - zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl'] + zmq.TCP_KEEPALIVE_INTVL, self.opts[u'tcp_keepalive_intvl'] ) def clear_socket(self): ''' delete socket if you have it ''' - if hasattr(self, '_socket'): + if hasattr(self, u'_socket'): if isinstance(self.poller.sockets, dict): sockets = list(self.poller.sockets.keys()) for socket in sockets: - log.trace('Unregistering socket: {0}'.format(socket)) + log.trace(u'Unregistering socket: %s', socket) self.poller.unregister(socket) else: for socket in self.poller.sockets: - log.trace('Unregistering socket: {0}'.format(socket)) + log.trace(u'Unregistering socket: %s', socket) self.poller.unregister(socket[0]) del self._socket @@ -383,8 +387,8 @@ class SREQ(object): ''' Takes two arguments, the encryption type and the base payload ''' - payload = {'enc': enc} - payload['load'] = load + payload = {u'enc': enc} + payload[u'load'] = load pkg = self.serial.dumps(payload) self.socket.send(pkg) self.poller.register(self.socket, zmq.POLLIN) @@ -395,12 +399,15 @@ class SREQ(object): if polled: break if tries > 1: - log.info('SaltReqTimeoutError: after {0} seconds. (Try {1} of {2})'.format( - timeout, tried, tries)) + log.info( + u'SaltReqTimeoutError: after %s seconds. 
(Try %s of %s)', + timeout, tried, tries + ) if tried >= tries: self.clear_socket() raise SaltReqTimeoutError( - 'SaltReqTimeoutError: after {0} seconds, ran {1} tries'.format(timeout * tried, tried) + u'SaltReqTimeoutError: after {0} seconds, ran {1} ' + u'tries'.format(timeout * tried, tried) ) return self.serial.loads(self.socket.recv()) @@ -408,8 +415,8 @@ class SREQ(object): ''' Detect the encryption type based on the payload ''' - enc = payload.get('enc', 'clear') - load = payload.get('load', {}) + enc = payload.get(u'enc', u'clear') + load = payload.get(u'load', {}) return self.send(enc, load, tries, timeout) def destroy(self): diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index 3af3dce7d1..ce6c5621f0 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -30,7 +30,7 @@ from salt.utils.odict import OrderedDict from salt.version import __version__ # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/pillar/django_orm.py b/salt/pillar/django_orm.py index 833a13d101..b87260110d 100644 --- a/salt/pillar/django_orm.py +++ b/salt/pillar/django_orm.py @@ -96,8 +96,9 @@ import os import sys import salt.exceptions -import salt.ext.six as six +from salt.ext import six import salt.utils +import salt.utils.stringutils HAS_VIRTUALENV = False @@ -178,14 +179,14 @@ def ext_pillar(minion_id, # pylint: disable=W0613 base_env = {} proc = subprocess.Popen(['bash', '-c', 'env'], stdout=subprocess.PIPE) for line in proc.stdout: - (key, _, value) = salt.utils.to_str(line).partition('=') + (key, _, value) = salt.utils.stringutils.to_str(line).partition('=') base_env[key] = value command = ['bash', '-c', 'source {0} && env'.format(env_file)] proc = subprocess.Popen(command, stdout=subprocess.PIPE) for line in proc.stdout: - (key, _, value) = salt.utils.to_str(line).partition('=') + (key, _, value) = salt.utils.stringutils.to_str(line).partition('=') # only add a key if it is different or doesn't already exist if key not in base_env or base_env[key] != value: os.environ[key] = value.rstrip('\n') diff --git a/salt/pillar/git_pillar.py b/salt/pillar/git_pillar.py index 8b85d14ab3..55d8da6897 100644 --- a/salt/pillar/git_pillar.py +++ b/salt/pillar/git_pillar.py @@ -473,14 +473,15 @@ import hashlib import os # Import salt libs -import salt.utils import salt.utils.gitfs import salt.utils.dictupdate +import salt.utils.stringutils +import salt.utils.versions from salt.exceptions import FileserverConfigError from salt.pillar import Pillar # Import third party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error try: import git @@ -630,7 +631,7 @@ class _LegacyGitPillar(object): hash_type = getattr(hashlib, opts['hash_type']) hash_str = '{0} {1}'.format(self.branch, self.rp_location) - repo_hash = hash_type(salt.utils.to_bytes(hash_str)).hexdigest() + repo_hash = hash_type(salt.utils.stringutils.to_bytes(hash_str)).hexdigest() rp_ = os.path.join(self.opts['cachedir'], 'pillar_gitfs', repo_hash) if not os.path.isdir(rp_): @@ -737,7 +738,7 @@ def _legacy_git_pillar(minion_id, repo_string, pillar_dirs): ''' Support pre-Beryllium config schema ''' - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'The git ext_pillar configuration is deprecated. 
Please refer to the ' 'documentation at ' diff --git a/salt/pillar/hg_pillar.py b/salt/pillar/hg_pillar.py index f43d83ddd1..32c5bd9a04 100644 --- a/salt/pillar/hg_pillar.py +++ b/salt/pillar/hg_pillar.py @@ -28,9 +28,10 @@ import os # Import Salt Libs import salt.pillar import salt.utils +import salt.utils.stringutils # Import Third Party Libs -import salt.ext.six as six +from salt.ext import six try: import hglib except ImportError: @@ -105,7 +106,7 @@ class Repo(object): if six.PY2: repo_hash = hash_type(repo_uri).hexdigest() else: - repo_hash = hash_type(salt.utils.to_bytes(repo_uri)).hexdigest() + repo_hash = hash_type(salt.utils.stringutils.to_bytes(repo_uri)).hexdigest() self.working_dir = os.path.join(cachedir, repo_hash) if not os.path.isdir(self.working_dir): self.repo = hglib.clone(repo_uri, self.working_dir) diff --git a/salt/pillar/hiera.py b/salt/pillar/hiera.py index 5ad5a52395..5a1a8d4031 100644 --- a/salt/pillar/hiera.py +++ b/salt/pillar/hiera.py @@ -3,16 +3,16 @@ Use hiera data as a Pillar source ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import logging +import yaml # Import salt libs -import salt.utils +import salt.utils.path -# Import third party libs -import yaml -import salt.ext.six as six +# Import 3rd-party libs +from salt.ext import six # Set up logging @@ -23,7 +23,7 @@ def __virtual__(): ''' Only return if hiera is installed ''' - return 'hiera' if salt.utils.which('hiera') else False + return 'hiera' if salt.utils.path.which('hiera') else False def ext_pillar(minion_id, # pylint: disable=W0613 diff --git a/salt/pillar/libvirt.py b/salt/pillar/libvirt.py index a209defdcf..4ff98d42e0 100644 --- a/salt/pillar/libvirt.py +++ b/salt/pillar/libvirt.py @@ -15,12 +15,12 @@ import os import subprocess # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path def __virtual__(): - return salt.utils.which('certtool') is not None + return salt.utils.path.which('certtool') is not None def ext_pillar(minion_id, diff --git a/salt/pillar/makostack.py b/salt/pillar/makostack.py index f233dcfd21..83494e4492 100644 --- a/salt/pillar/makostack.py +++ b/salt/pillar/makostack.py @@ -381,7 +381,7 @@ from functools import partial import yaml # Import Salt libs -import salt.ext.six as six +from salt.ext import six try: from mako.lookup import TemplateLookup diff --git a/salt/pillar/nodegroups.py b/salt/pillar/nodegroups.py index 7ebe5e6d40..bf74c2f8b6 100644 --- a/salt/pillar/nodegroups.py +++ b/salt/pillar/nodegroups.py @@ -43,7 +43,7 @@ from __future__ import absolute_import from salt.utils.minions import CkMinions # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six __version__ = '0.0.2' diff --git a/salt/pillar/pepa.py b/salt/pillar/pepa.py index 6a0c74b7bb..01fd90a559 100644 --- a/salt/pillar/pepa.py +++ b/salt/pillar/pepa.py @@ -280,7 +280,7 @@ import re from os.path import isfile, join # Import Salt libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import input # pylint: disable=import-error,redefined-builtin import salt.utils.files from salt.utils.yamlloader import SaltYamlSafeLoader diff --git a/salt/pillar/reclass_adapter.py b/salt/pillar/reclass_adapter.py index 7f3c1c20e2..dd83b3e0f5 100644 --- a/salt/pillar/reclass_adapter.py +++ b/salt/pillar/reclass_adapter.py @@ -63,7 +63,7 @@ from salt.utils.reclass import ( ) # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Define the module's virtual name __virtualname__ = 
'reclass' diff --git a/salt/pillar/s3.py b/salt/pillar/s3.py index 23ba836d1a..e65dfd8b36 100644 --- a/salt/pillar/s3.py +++ b/salt/pillar/s3.py @@ -98,7 +98,7 @@ from copy import deepcopy # Import 3rd-party libs # pylint: disable=import-error,no-name-in-module,redefined-builtin -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import filter from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: enable=import-error,no-name-in-module,redefined-builtin diff --git a/salt/pillar/sql_base.py b/salt/pillar/sql_base.py index e7abceb134..ca6c9d5452 100644 --- a/salt/pillar/sql_base.py +++ b/salt/pillar/sql_base.py @@ -251,7 +251,7 @@ class SqlBaseExtPillar(six.with_metaclass(abc.ABCMeta, object)): # Filter out values that don't have queries. qbuffer = [x for x in qbuffer if ( - (isinstance(x[1], str) and len(x[1])) + (isinstance(x[1], six.string_types) and len(x[1])) or (isinstance(x[1], (list, tuple)) and (len(x[1]) > 0) and x[1][0]) or @@ -266,7 +266,7 @@ class SqlBaseExtPillar(six.with_metaclass(abc.ABCMeta, object)): 'with_lists': None, 'ignore_null': False } - if isinstance(qb[1], str): + if isinstance(qb[1], six.string_types): defaults['query'] = qb[1] elif isinstance(qb[1], (list, tuple)): defaults['query'] = qb[1][0] diff --git a/salt/pillar/stack.py b/salt/pillar/stack.py index 42fae41b7c..6165f90d36 100644 --- a/salt/pillar/stack.py +++ b/salt/pillar/stack.py @@ -386,7 +386,7 @@ import yaml from jinja2 import FileSystemLoader, Environment # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils diff --git a/salt/pillar/vault.py b/salt/pillar/vault.py index c57ed33707..96e25df586 100644 --- a/salt/pillar/vault.py +++ b/salt/pillar/vault.py @@ -48,10 +48,12 @@ Multiple Vault sources may also be used: - vault: path=secret/minions/{minion}/pass ''' -# import python libs +# Import Python libs from __future__ import absolute_import import logging -import salt.utils + +# Import Salt libs +import salt.utils.versions log = logging.getLogger(__name__) @@ -76,7 +78,7 @@ def ext_pillar(minion_id, # pylint: disable=W0613 comps = conf.split() if not comps[0].startswith('path='): - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'The \'profile\' argument has been deprecated. Any parts up until ' 'and following the first "path=" are discarded' diff --git a/salt/pillar/vmware_pillar.py b/salt/pillar/vmware_pillar.py index 314f7dc55c..04dd5a64b8 100644 --- a/salt/pillar/vmware_pillar.py +++ b/salt/pillar/vmware_pillar.py @@ -147,11 +147,11 @@ from __future__ import absolute_import import logging # Import salt libs -from salt.utils import dictupdate +import salt.utils.dictupdate as dictupdate import salt.utils.vmware # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six try: from pyVmomi import vim from pyVim.connect import Disconnect diff --git a/salt/proxy/esxdatacenter.py b/salt/proxy/esxdatacenter.py new file mode 100644 index 0000000000..186b880c2c --- /dev/null +++ b/salt/proxy/esxdatacenter.py @@ -0,0 +1,298 @@ +# -*- coding: utf-8 -*- +''' +Proxy Minion interface module for managing VMWare ESXi clusters. + +Dependencies +============ + +- pyVmomi +- jsonschema + +Configuration +============= +To use this integration proxy module, please configure the following: + +Pillar +------ + +Proxy minions get their configuration from Salt's Pillar. This can now happen +from the proxy's configuration file. + +Example pillars: + +``userpass`` mechanism: + +.. 
code-block:: yaml + + proxy: + proxytype: esxdatacenter + datacenter: + vcenter: + mechanism: userpass + username: + passwords: (required if userpass is used) + - first_password + - second_password + - third_password + +``sspi`` mechanism: + +.. code-block:: yaml + + proxy: + proxytype: esxdatacenter + datacenter: + vcenter: + mechanism: sspi + domain: + principal: + +proxytype +^^^^^^^^^ +To use this Proxy Module, set this to ``esxdatacenter``. + +datacenter +^^^^^^^^^^ +Name of the managed datacenter. Required. + +vcenter +^^^^^^^ +The location of the VMware vCenter server (host of ip) where the datacenter +should be managed. Required. + +mechanism +^^^^^^^^ +The mechanism used to connect to the vCenter server. Supported values are +``userpass`` and ``sspi``. Required. + +Note: + Connections are attempted using all (``username``, ``password``) + combinations on proxy startup. + +username +^^^^^^^^ +The username used to login to the host, such as ``root``. Required if mechanism +is ``userpass``. + +passwords +^^^^^^^^^ +A list of passwords to be used to try and login to the vCenter server. At least +one password in this list is required if mechanism is ``userpass``. When the +proxy comes up, it will try the passwords listed in order. + +domain +^^^^^^ +User domain. Required if mechanism is ``sspi``. + +principal +^^^^^^^^ +Kerberos principal. Rquired if mechanism is ``sspi``. + +protocol +^^^^^^^^ +If the ESXi host is not using the default protocol, set this value to an +alternate protocol. Default is ``https``. + +port +^^^^ +If the ESXi host is not using the default port, set this value to an +alternate port. Default is ``443``. + +Salt Proxy +---------- + +After your pillar is in place, you can test the proxy. The proxy can run on +any machine that has network connectivity to your Salt Master and to the +vCenter server in the pillar. SaltStack recommends that the machine running the +salt-proxy process also run a regular minion, though it is not strictly +necessary. + +To start a proxy minion one needs to establish its identity : + +.. code-block:: bash + + salt-proxy --proxyid + +On the machine that will run the proxy, make sure there is a configuration file +present. By default this is ``/etc/salt/proxy``. If in a different location, the +```` has to be specified when running the proxy: +file with at least the following in it: + +.. code-block:: bash + + salt-proxy --proxyid -c + +Commands +-------- + +Once the proxy is running it will connect back to the specified master and +individual commands can be runs against it: + +.. code-block:: bash + + # Master - minion communication + salt test.ping + + # Test vcenter connection + salt vsphere.test_vcenter_connection + +States +------ + +Associated states are documented in +:mod:`salt.states.esxdatacenter `. +Look there to find an example structure for Pillar as well as an example +``.sls`` file for configuring an ESX datacenter from scratch. +''' + +# Import Python Libs +from __future__ import absolute_import +import logging +import os + +# Import Salt Libs +import salt.exceptions +from salt.config.schemas.esxdatacenter import EsxdatacenterProxySchema + +# This must be present or the Salt loader won't load this module. +__proxyenabled__ = ['esxdatacenter'] + +# External libraries +try: + import jsonschema + HAS_JSONSCHEMA = True +except ImportError: + HAS_JSONSCHEMA = False + +# Variables are scoped to this module so we can have persistent data +# across calls to fns in here. 
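Collecting the keys that ``init()`` below reads into one place, a hypothetical ``userpass`` configuration for this proxy (shown as the Python dict ``opts['proxy']`` would contain; the vCenter host and credentials are invented placeholders) might look like:

.. code-block:: python

    proxy_config = {
        'proxytype': 'esxdatacenter',
        'datacenter': 'dc1',                 # name of the managed datacenter
        'vcenter': 'vcenter01.example.com',  # vCenter host or IP
        'mechanism': 'userpass',
        'username': 'root',
        'passwords': ['first_password', 'second_password'],
        # Optional; the defaults documented above are 'https' and 443.
        'protocol': 'https',
        'port': 443,
    }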
+DETAILS = {} + + +# Set up logging +log = logging.getLogger(__name__) +# Define the module's virtual name +__virtualname__ = 'esxdatacenter' + + +def __virtual__(): + ''' + Only load if the vsphere execution module is available. + ''' + if HAS_JSONSCHEMA: + return __virtualname__ + + return False, 'The esxdatacenter proxy module did not load.' + + +def init(opts): + ''' + This function gets called when the proxy starts up. + All login details are cached. + ''' + log.debug('Initting esxdatacenter proxy module in process ' + '{}'.format(os.getpid())) + log.trace('Validating esxdatacenter proxy input') + schema = EsxdatacenterProxySchema.serialize() + log.trace('schema = {}'.format(schema)) + try: + jsonschema.validate(opts['proxy'], schema) + except jsonschema.exceptions.ValidationError as exc: + raise salt.exceptions.InvalidConfigError(exc) + + # Save mandatory fields in cache + for key in ('vcenter', 'datacenter', 'mechanism'): + DETAILS[key] = opts['proxy'][key] + + # Additional validation + if DETAILS['mechanism'] == 'userpass': + if 'username' not in opts['proxy']: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'userpass\', but no ' + '\'username\' key found in proxy config.') + if 'passwords' not in opts['proxy']: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'userpass\', but no ' + '\'passwords\' key found in proxy config.') + for key in ('username', 'passwords'): + DETAILS[key] = opts['proxy'][key] + else: + if 'domain' not in opts['proxy']: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'sspi\', but no ' + '\'domain\' key found in proxy config.') + if 'principal' not in opts['proxy']: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'sspi\', but no ' + '\'principal\' key found in proxy config.') + for key in ('domain', 'principal'): + DETAILS[key] = opts['proxy'][key] + + # Save optional + DETAILS['protocol'] = opts['proxy'].get('protocol') + DETAILS['port'] = opts['proxy'].get('port') + + # Test connection + if DETAILS['mechanism'] == 'userpass': + # Get the correct login details + log.debug('Retrieving credentials and testing vCenter connection for ' + 'mehchanism \'userpass\'') + try: + username, password = find_credentials() + DETAILS['password'] = password + except salt.exceptions.SaltSystemExit as err: + log.critical('Error: {0}'.format(err)) + return False + return True + + +def ping(): + ''' + Returns True. + + CLI Example: + + .. code-block:: bash + + salt dc_id test.ping + ''' + return True + + +def shutdown(): + ''' + Shutdown the connection to the proxy device. For this proxy, + shutdown is a no-op. + ''' + log.debug('esxdatacenter proxy shutdown() called...') + + +def find_credentials(): + ''' + Cycle through all the possible credentials and return the first one that + works. + ''' + + # if the username and password were already found don't fo though the + # connection process again + if 'username' in DETAILS and 'password' in DETAILS: + return DETAILS['username'], DETAILS['password'] + + passwords = DETAILS['passwords'] + for password in passwords: + DETAILS['password'] = password + if not __salt__['vsphere.test_vcenter_connection'](): + # We are unable to authenticate + continue + # If we have data returned from above, we've successfully authenticated. + return DETAILS['username'], password + # We've reached the end of the list without successfully authenticating. 
+ raise salt.exceptions.VMwareConnectionError('Cannot complete login due to ' + 'incorrect credentials.') + + +def get_details(): + ''' + Function that returns the cached details + ''' + return DETAILS diff --git a/salt/proxy/fx2.py b/salt/proxy/fx2.py index e90c345a45..df03bb1ce2 100644 --- a/salt/proxy/fx2.py +++ b/salt/proxy/fx2.py @@ -175,8 +175,8 @@ from __future__ import absolute_import # Import python libs import logging -import salt.utils import salt.utils.http +import salt.utils.path # This must be present or the Salt loader won't load this module __proxyenabled__ = ['fx2'] @@ -195,7 +195,7 @@ def __virtual__(): ''' Only return if all the modules are available ''' - if not salt.utils.which('racadm'): + if not salt.utils.path.which('racadm'): log.critical('fx2 proxy minion needs "racadm" to be installed.') return False diff --git a/salt/proxy/napalm.py b/salt/proxy/napalm.py index 9ae4fd5316..1acee33148 100644 --- a/salt/proxy/napalm.py +++ b/salt/proxy/napalm.py @@ -67,6 +67,17 @@ provider: ``napalm_base`` .. versionadded:: 2017.7.1 +multiprocessing: ``False`` + Overrides the :conf_minion:`multiprocessing` option, per proxy minion. + The ``multiprocessing`` option must be turned off for SSH-based proxies. + However, some NAPALM drivers (e.g. Arista, NX-OS) are not SSH-based. + As multiple proxy minions may share the same configuration file, + this option permits the configuration of the ``multiprocessing`` option + more specifically, for some proxy minions. + + .. versionadded:: 2017.7.2 + + .. _`NAPALM Read the Docs page`: https://napalm.readthedocs.io/en/latest/#supported-network-operating-systems .. _`optional arguments`: http://napalm.readthedocs.io/en/latest/support/index.html#list-of-supported-optional-arguments diff --git a/salt/proxy/panos.py b/salt/proxy/panos.py new file mode 100644 index 0000000000..324a2e8ea3 --- /dev/null +++ b/salt/proxy/panos.py @@ -0,0 +1,418 @@ +# -*- coding: utf-8 -*- +''' + +Proxy Minion interface module for managing Palo Alto firewall devices. + +:codeauthor: :email:`Spencer Ervin ` +:maturity: new +:depends: none +:platform: unix + +This proxy minion enables Palo Alto firewalls (hereafter referred to +as simply 'panos') to be treated individually like a Salt Minion. + +The panos proxy leverages the XML API functionality on the Palo Alto +firewall. The Salt proxy must have access to the Palo Alto firewall on +HTTPS (tcp/443). + +More in-depth conceptual reading on Proxy Minions can be found in the +:ref:`Proxy Minion ` section of Salt's +documentation. + + +Configuration +============= +To use this integration proxy module, please configure the following: + +Pillar +------ + +Proxy minions get their configuration from Salt's Pillar. Every proxy must +have a stanza in Pillar and a reference in the Pillar top-file that matches +the ID. There are four connection options available for the panos proxy module. + +- Direct Device (Password) +- Direct Device (API Key) +- Panorama Pass-Through (Password) +- Panorama Pass-Through (API Key) + + +Direct Device (Password) +------------------------ + +The direct device configuration configures the proxy to connect directly to +the device with username and password. + +.. 
code-block:: yaml + + proxy: + proxytype: panos + host: + username: + password: + +proxytype +^^^^^^^^^ +The ``proxytype`` key and value pair is critical, as it tells Salt which +interface to load from the ``proxy`` directory in Salt's install hierarchy, +or from ``/srv/salt/_proxy`` on the Salt Master (if you have created your +own proxy module, for example). To use this panos Proxy Module, set this to +``panos``. + +host +^^^^ +The location, or ip/dns, of the panos host. Required. + +username +^^^^^^^^ +The username used to login to the panos host. Required. + +password +^^^^^^^^ +The password used to login to the panos host. Required. + +Direct Device (API Key) +------------------------ + +Palo Alto devices allow for access to the XML API with a generated 'API key'_ +instead of username and password. + +.. _API key: https://www.paloaltonetworks.com/documentation/71/pan-os/xml-api/get-started-with-the-pan-os-xml-api/get-your-api-key + +.. code-block:: yaml + + proxy: + proxytype: panos + host: + apikey: + +proxytype +^^^^^^^^^ +The ``proxytype`` key and value pair is critical, as it tells Salt which +interface to load from the ``proxy`` directory in Salt's install hierarchy, +or from ``/srv/salt/_proxy`` on the Salt Master (if you have created your +own proxy module, for example). To use this panos Proxy Module, set this to +``panos``. + +host +^^^^ +The location, or ip/dns, of the panos host. Required. + +apikey +^^^^^^^^ +The generated XML API key for the panos host. Required. + +Panorama Pass-Through (Password) +------------------------ + +The Panorama pass-through method sends all connections through the Panorama +management system. It passes the connections to the appropriate device using +the serial number of the Palo Alto firewall. + +This option will reduce the number of connections that must be present for the +proxy server. It will only require a connection to the Panorama server. + +The username and password will be for authentication to the Panorama server, +not the panos device. + +.. code-block:: yaml + + proxy: + proxytype: panos + serial: + host: + username: + password: + +proxytype +^^^^^^^^^ +The ``proxytype`` key and value pair is critical, as it tells Salt which +interface to load from the ``proxy`` directory in Salt's install hierarchy, +or from ``/srv/salt/_proxy`` on the Salt Master (if you have created your +own proxy module, for example). To use this panos Proxy Module, set this to +``panos``. + +serial +^^^^^^ +The serial number of the panos host. Required. + +host +^^^^ +The location, or ip/dns, of the Panorama server. Required. + +username +^^^^^^^^ +The username used to login to the Panorama server. Required. + +password +^^^^^^^^ +The password used to login to the Panorama server. Required. + +Panorama Pass-Through (API Key) +------------------------ + +The Panorama server can also utilize a generated 'API key'_ for authentication. + +.. _API key: https://www.paloaltonetworks.com/documentation/71/pan-os/xml-api/get-started-with-the-pan-os-xml-api/get-your-api-key + +.. code-block:: yaml + + proxy: + proxytype: panos + serial: + host: + apikey: + +proxytype +^^^^^^^^^ +The ``proxytype`` key and value pair is critical, as it tells Salt which +interface to load from the ``proxy`` directory in Salt's install hierarchy, +or from ``/srv/salt/_proxy`` on the Salt Master (if you have created your +own proxy module, for example). To use this panos Proxy Module, set this to +``panos``. + +serial +^^^^^^ +The serial number of the panos host. Required. 
+ +host +^^^^ +The location, or ip/dns, of the Panorama server. Required. + +apikey +^^^^^^^^ +The generated XML API key for the Panorama server. Required. + +''' + +from __future__ import absolute_import + +# Import Python Libs +import logging + +# Import Salt Libs +import salt.exceptions + +# This must be present or the Salt loader won't load this module. +__proxyenabled__ = ['panos'] + +# Variables are scoped to this module so we can have persistent data. +GRAINS_CACHE = {'vendor': 'Palo Alto'} +DETAILS = {} + +# Set up logging +log = logging.getLogger(__file__) + +# Define the module's virtual name +__virtualname__ = 'panos' + + +def __virtual__(): + ''' + Only return if all the modules are available. + ''' + return __virtualname__ + + +def init(opts): + ''' + This function gets called when the proxy starts up. For + panos devices, a determination is made on the connection type + and the appropriate connection details that must be cached. + ''' + if 'host' not in opts['proxy']: + log.critical('No \'host\' key found in pillar for this proxy.') + return False + if 'apikey' not in opts['proxy']: + # If we do not have an apikey, we must have both a username and password + if 'username' not in opts['proxy']: + log.critical('No \'username\' key found in pillar for this proxy.') + return False + if 'password' not in opts['proxy']: + log.critical('No \'passwords\' key found in pillar for this proxy.') + return False + + DETAILS['url'] = 'https://{0}/api/'.format(opts['proxy']['host']) + + # Set configuration details + DETAILS['host'] = opts['proxy']['host'] + if 'serial' in opts['proxy']: + DETAILS['serial'] = opts['proxy'].get('serial') + if 'apikey' in opts['proxy']: + log.debug("Selected pan_key method for panos proxy module.") + DETAILS['method'] = 'pan_key' + DETAILS['apikey'] = opts['proxy'].get('apikey') + else: + log.debug("Selected pan_pass method for panos proxy module.") + DETAILS['method'] = 'pan_pass' + DETAILS['username'] = opts['proxy'].get('username') + DETAILS['password'] = opts['proxy'].get('password') + else: + if 'apikey' in opts['proxy']: + log.debug("Selected dev_key method for panos proxy module.") + DETAILS['method'] = 'dev_key' + DETAILS['apikey'] = opts['proxy'].get('apikey') + else: + log.debug("Selected dev_pass method for panos proxy module.") + DETAILS['method'] = 'dev_pass' + DETAILS['username'] = opts['proxy'].get('username') + DETAILS['password'] = opts['proxy'].get('password') + + # Ensure connectivity to the device + log.debug("Attempting to connect to panos proxy host.") + query = {'type': 'op', 'cmd': ''} + call(query) + log.debug("Successfully connected to panos proxy host.") + + DETAILS['initialized'] = True + + +def call(payload=None): + ''' + This function captures the query string and sends it to the Palo Alto device. 
+ ''' + ret = {} + try: + if DETAILS['method'] == 'dev_key': + # Pass the api key without the target declaration + conditional_payload = {'key': DETAILS['apikey']} + payload.update(conditional_payload) + r = __utils__['http.query'](DETAILS['url'], + data=payload, + method='POST', + decode_type='xml', + decode=True, + verify_ssl=False, + raise_error=True) + ret = r['dict'][0] + elif DETAILS['method'] == 'dev_pass': + # Pass credentials without the target declaration + r = __utils__['http.query'](DETAILS['url'], + username=DETAILS['username'], + password=DETAILS['password'], + data=payload, + method='POST', + decode_type='xml', + decode=True, + verify_ssl=False, + raise_error=True) + ret = r['dict'][0] + elif DETAILS['method'] == 'pan_key': + # Pass the api key with the target declaration + conditional_payload = {'key': DETAILS['apikey'], + 'target': DETAILS['serial']} + payload.update(conditional_payload) + r = __utils__['http.query'](DETAILS['url'], + data=payload, + method='POST', + decode_type='xml', + decode=True, + verify_ssl=False, + raise_error=True) + ret = r['dict'][0] + elif DETAILS['method'] == 'pan_pass': + # Pass credentials with the target declaration + conditional_payload = {'target': DETAILS['serial']} + payload.update(conditional_payload) + r = __utils__['http.query'](DETAILS['url'], + username=DETAILS['username'], + password=DETAILS['password'], + data=payload, + method='POST', + decode_type='xml', + decode=True, + verify_ssl=False, + raise_error=True) + ret = r['dict'][0] + except KeyError as err: + raise salt.exceptions.CommandExecutionError("Did not receive a valid response from host.") + return ret + + +def is_required_version(required_version='0.0.0'): + ''' + Because different versions of Palo Alto support different command sets, this function + will return true if the current version of Palo Alto supports the required command. + ''' + if 'sw-version' in DETAILS['grains_cache']: + current_version = DETAILS['grains_cache']['sw-version'] + else: + # If we do not have the current sw-version cached, we cannot check version requirements. 
+ return False + + required_version_split = required_version.split(".") + current_version_split = current_version.split(".") + + try: + if int(current_version_split[0]) > int(required_version_split[0]): + return True + elif int(current_version_split[0]) < int(required_version_split[0]): + return False + + if int(current_version_split[1]) > int(required_version_split[1]): + return True + elif int(current_version_split[1]) < int(required_version_split[1]): + return False + + if int(current_version_split[2]) > int(required_version_split[2]): + return True + elif int(current_version_split[2]) < int(required_version_split[2]): + return False + + # We have an exact match + return True + except Exception as err: + return False + + +def initialized(): + ''' + Since grains are loaded in many different places and some of those + places occur before the proxy can be initialized, return whether + our init() function has been called + ''' + return DETAILS.get('initialized', False) + + +def grains(): + ''' + Get the grains from the proxied device + ''' + if not DETAILS.get('grains_cache', {}): + DETAILS['grains_cache'] = GRAINS_CACHE + try: + query = {'type': 'op', 'cmd': ''} + DETAILS['grains_cache'] = call(query)['system'] + except Exception as err: + pass + return DETAILS['grains_cache'] + + +def grains_refresh(): + ''' + Refresh the grains from the proxied device + ''' + DETAILS['grains_cache'] = None + return grains() + + +def ping(): + ''' + Returns true if the device is reachable, else false. + ''' + try: + query = {'type': 'op', 'cmd': ''} + if 'result' in call(query)['system']: + return True + else: + return False + except Exception as err: + return False + + +def shutdown(): + ''' + Shutdown the connection to the proxy device. For this proxy, + shutdown is a no-op. 
+ ''' + log.debug('Panos proxy shutdown() called.') diff --git a/salt/queues/pgjsonb_queue.py b/salt/queues/pgjsonb_queue.py index 93ea8ca314..479c463b70 100644 --- a/salt/queues/pgjsonb_queue.py +++ b/salt/queues/pgjsonb_queue.py @@ -46,7 +46,7 @@ import json import sys # import salt libs -import salt.ext.six as six +from salt.ext import six from salt.exceptions import SaltInvocationError, SaltMasterError try: diff --git a/salt/queues/sqlite_queue.py b/salt/queues/sqlite_queue.py index 768fe8f228..6232361b9b 100644 --- a/salt/queues/sqlite_queue.py +++ b/salt/queues/sqlite_queue.py @@ -23,6 +23,9 @@ import re import sqlite3 as lite from salt.exceptions import SaltInvocationError +# Import 3rd-party libs +from salt.ext import six + log = logging.getLogger(__name__) # Define the module's virtual name @@ -139,7 +142,7 @@ def insert(queue, items): con = _conn(queue) with con: cur = con.cursor() - if isinstance(items, str): + if isinstance(items, six.string_types): items = _quote_escape(items) cmd = '''INSERT INTO {0}(name) VALUES('{1}')'''.format(queue, items) log.debug('SQL Query: {0}'.format(cmd)) @@ -171,7 +174,7 @@ def delete(queue, items): con = _conn(queue) with con: cur = con.cursor() - if isinstance(items, str): + if isinstance(items, six.string_types): items = _quote_escape(items) cmd = """DELETE FROM {0} WHERE name = '{1}'""".format(queue, items) log.debug('SQL Query: {0}'.format(cmd)) diff --git a/salt/renderers/gpg.py b/salt/renderers/gpg.py index 9b8e80b695..7f9b65721c 100644 --- a/salt/renderers/gpg.py +++ b/salt/renderers/gpg.py @@ -216,13 +216,13 @@ import logging from subprocess import Popen, PIPE # Import salt libs -import salt.utils +import salt.utils.path import salt.utils.stringio import salt.syspaths from salt.exceptions import SaltRenderError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -233,7 +233,7 @@ def _get_gpg_exec(): ''' return the GPG executable or raise an error ''' - gpg_exec = salt.utils.which('gpg') + gpg_exec = salt.utils.path.which('gpg') if gpg_exec: return gpg_exec else: diff --git a/salt/renderers/jinja.py b/salt/renderers/jinja.py index e7fca8b04c..a78619ac23 100644 --- a/salt/renderers/jinja.py +++ b/salt/renderers/jinja.py @@ -14,7 +14,7 @@ from salt.exceptions import SaltRenderError import salt.utils.templates # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import StringIO # pylint: disable=import-error log = logging.getLogger(__name__) @@ -72,6 +72,6 @@ def render(template_file, saltenv='base', sls='', argline='', raise SaltRenderError( tmp_data.get('data', 'Unknown render error in jinja renderer') ) - if six.PY3 and isinstance(tmp_data['data'], bytes): + if isinstance(tmp_data['data'], bytes): tmp_data['data'] = tmp_data['data'].decode(__salt_system_encoding__) return StringIO(tmp_data['data']) diff --git a/salt/renderers/mako.py b/salt/renderers/mako.py index 4704b4d054..9f8b43077d 100644 --- a/salt/renderers/mako.py +++ b/salt/renderers/mako.py @@ -7,7 +7,7 @@ Mako Renderer for Salt from __future__ import absolute_import # Import salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.templates from salt.exceptions import SaltRenderError diff --git a/salt/renderers/pass.py b/salt/renderers/pass.py index 05607e9475..381ca19448 100644 --- a/salt/renderers/pass.py +++ b/salt/renderers/pass.py @@ -48,11 +48,11 @@ from os.path import expanduser from subprocess import Popen, PIPE # Import salt libs 
-import salt.utils +import salt.utils.path from salt.exceptions import SaltRenderError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -61,7 +61,7 @@ def _get_pass_exec(): """ Return the pass executable or raise an error """ - pass_exec = salt.utils.which('pass') + pass_exec = salt.utils.path.which('pass') if pass_exec: return pass_exec else: diff --git a/salt/renderers/pydsl.py b/salt/renderers/pydsl.py index 3132c4f502..14874b5c55 100644 --- a/salt/renderers/pydsl.py +++ b/salt/renderers/pydsl.py @@ -337,8 +337,9 @@ For example: from __future__ import absolute_import import types +import salt.utils.pydsl as pydsl +import salt.utils.stringutils from salt.ext.six import exec_ -from salt.utils import pydsl, to_str from salt.utils.pydsl import PyDslError from salt.exceptions import SaltRenderError @@ -346,13 +347,14 @@ __all__ = ['render'] def render(template, saltenv='base', sls='', tmplpath=None, rendered_sls=None, **kws): - sls = to_str(sls) + sls = salt.utils.stringutils.to_str(sls) mod = types.ModuleType(sls) # Note: mod object is transient. It's existence only lasts as long as # the lowstate data structure that the highstate in the sls file # is compiled to. - mod.__name__ = sls + # __name__ can't be assigned a unicode + mod.__name__ = str(sls) # future lint: disable=non-unicode-string # to workaround state.py's use of copy.deepcopy(chunk) mod.__deepcopy__ = lambda x: mod diff --git a/salt/renderers/pyobjects.py b/salt/renderers/pyobjects.py index df8d92fc08..9f0b8430fd 100644 --- a/salt/renderers/pyobjects.py +++ b/salt/renderers/pyobjects.py @@ -306,7 +306,7 @@ import salt.utils.files import salt.loader from salt.fileclient import get_file_client from salt.utils.pyobjects import Registry, StateFactory, SaltObject, Map -import salt.ext.six as six +from salt.ext import six # our import regexes FROM_RE = re.compile(r'^\s*from\s+(salt:\/\/.*)\s+import (.*)$') diff --git a/salt/renderers/stateconf.py b/salt/renderers/stateconf.py index 75b7ba3c9c..7165b0cfbd 100644 --- a/salt/renderers/stateconf.py +++ b/salt/renderers/stateconf.py @@ -39,7 +39,7 @@ import salt.utils.files from salt.exceptions import SaltRenderError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import StringIO # pylint: disable=import-error __all__ = ['render'] diff --git a/salt/renderers/wempy.py b/salt/renderers/wempy.py index a6777e76d6..9c27e9c952 100644 --- a/salt/renderers/wempy.py +++ b/salt/renderers/wempy.py @@ -4,7 +4,7 @@ from __future__ import absolute_import # Import salt libs -import salt.ext.six as six +from salt.ext import six from salt.exceptions import SaltRenderError import salt.utils.templates diff --git a/salt/renderers/yaml.py b/salt/renderers/yaml.py index 08d1514ddd..2adb667992 100644 --- a/salt/renderers/yaml.py +++ b/salt/renderers/yaml.py @@ -19,9 +19,8 @@ import salt.utils.url from salt.utils.yamlloader import SaltYamlSafeLoader, load from salt.utils.odict import OrderedDict from salt.exceptions import SaltRenderError -import salt.ext.six as six +from salt.ext import six from salt.ext.six import string_types -from salt.ext.six.moves import range log = logging.getLogger(__name__) @@ -67,12 +66,6 @@ def render(yaml_data, saltenv='base', sls='', argline='', **kws): ) if not data: data = {} - else: - if 'config.get' in __salt__: - if __salt__['config.get']('yaml_utf8', False): - data = _yaml_result_unicode_to_utf8(data) - elif __opts__.get('yaml_utf8'): - data = 
_yaml_result_unicode_to_utf8(data) log.debug('Results of YAML rendering: \n{0}'.format(data)) def _validate_data(data): @@ -94,23 +87,3 @@ def render(yaml_data, saltenv='base', sls='', argline='', **kws): _validate_data(data) return data - - -def _yaml_result_unicode_to_utf8(data): - '''' - Replace `unicode` strings by utf-8 `str` in final yaml result - - This is a recursive function - ''' - if six.PY3: - return data - if isinstance(data, OrderedDict): - for key, elt in six.iteritems(data): - data[key] = _yaml_result_unicode_to_utf8(elt) - elif isinstance(data, list): - for i in range(len(data)): - data[i] = _yaml_result_unicode_to_utf8(data[i]) - elif isinstance(data, six.text_type): - # here also - data = data.encode('utf-8') - return data diff --git a/salt/returners/carbon_return.py b/salt/returners/carbon_return.py index 077236bd88..10c22b29f4 100644 --- a/salt/returners/carbon_return.py +++ b/salt/returners/carbon_return.py @@ -95,7 +95,7 @@ import salt.utils.jid import salt.returners # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import cPickle, map # pylint: disable=import-error,no-name-in-module,redefined-builtin log = logging.getLogger(__name__) diff --git a/salt/returners/cassandra_return.py b/salt/returners/cassandra_return.py index 47b4257828..2cae15bd9f 100644 --- a/salt/returners/cassandra_return.py +++ b/salt/returners/cassandra_return.py @@ -27,7 +27,7 @@ import logging import salt.utils.jid # Import third party libs -import salt.ext.six as six +from salt.ext import six try: import pycassa # pylint: disable=import-error HAS_PYCASSA = True diff --git a/salt/returners/elasticsearch_return.py b/salt/returners/elasticsearch_return.py index 2ddb4a26eb..5849c0e0e7 100644 --- a/salt/returners/elasticsearch_return.py +++ b/salt/returners/elasticsearch_return.py @@ -107,7 +107,7 @@ import salt.returners import salt.utils.jid # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six __virtualname__ = 'elasticsearch' diff --git a/salt/returners/local_cache.py b/salt/returners/local_cache.py index fafe39f2aa..85a1c07264 100644 --- a/salt/returners/local_cache.py +++ b/salt/returners/local_cache.py @@ -24,7 +24,7 @@ import salt.exceptions # Import 3rd-party libs import msgpack -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/returners/mattermost_returner.py b/salt/returners/mattermost_returner.py index 3d022297de..c682c65be5 100644 --- a/salt/returners/mattermost_returner.py +++ b/salt/returners/mattermost_returner.py @@ -36,7 +36,7 @@ import logging import json # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.ext.six.moves.http_client # pylint: enable=import-error,no-name-in-module,redefined-builtin diff --git a/salt/returners/mongo_future_return.py b/salt/returners/mongo_future_return.py index b36d03e02a..d7e6e1df53 100644 --- a/salt/returners/mongo_future_return.py +++ b/salt/returners/mongo_future_return.py @@ -71,7 +71,7 @@ import logging # Import Salt libs import salt.utils.jid import salt.returners -import salt.ext.six as six +from salt.ext import six # Import third party libs try: diff --git a/salt/returners/mongo_return.py b/salt/returners/mongo_return.py index 07177addd5..a0a0cac8b4 100644 --- a/salt/returners/mongo_return.py +++ b/salt/returners/mongo_return.py @@ -68,7 +68,7 @@ import logging # import Salt libs import salt.utils.jid import 
salt.returners -import salt.ext.six as six +from salt.ext import six # Import third party libs try: diff --git a/salt/returners/mysql.py b/salt/returners/mysql.py index ad5f11f35c..110c1caf6c 100644 --- a/salt/returners/mysql.py +++ b/salt/returners/mysql.py @@ -154,7 +154,7 @@ import salt.utils.jid import salt.exceptions # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six try: import MySQLdb HAS_MYSQL = True diff --git a/salt/returners/postgres_local_cache.py b/salt/returners/postgres_local_cache.py index a7d77efe4f..42957f7dbe 100644 --- a/salt/returners/postgres_local_cache.py +++ b/salt/returners/postgres_local_cache.py @@ -117,7 +117,7 @@ import sys # Import salt libs import salt.utils import salt.utils.jid -import salt.ext.six as six +from salt.ext import six # Import third party libs try: diff --git a/salt/returners/redis_return.py b/salt/returners/redis_return.py index 8ebf273f53..140af0d063 100644 --- a/salt/returners/redis_return.py +++ b/salt/returners/redis_return.py @@ -12,6 +12,19 @@ config, these are the defaults: redis.host: 'salt' redis.port: 6379 +Cluster Mode Example: + +.. code-block::yaml + + redis.db: '0' + redis.cluster_mode: true + redis.cluster.skip_full_coverage_check: true + redis.cluster.startup_nodes: + - host: redis-member-1 + port: 6379 + - host: redis-member-2 + port: 6379 + Alternative configuration values can be used by prefacing the configuration. Any values not found in the alternative configuration will be pulled from the default location: @@ -44,20 +57,46 @@ To override individual configuration items, append --return_kwargs '{"key:": "va salt '*' test.ping --return redis --return_kwargs '{"db": "another-salt"}' +Redis Cluster Mode Options: + +cluster_mode: ``False`` + Whether cluster_mode is enabled or not + +cluster.startup_nodes: + A list of host, port dictionaries pointing to cluster members. At least one is required + but multiple nodes are better + + .. code-block::yaml + + cache.redis.cluster.startup_nodes + - host: redis-member-1 + port: 6379 + - host: redis-member-2 + port: 6379 + +cluster.skip_full_coverage_check: ``False`` + Some cluster providers restrict certain redis commands such as CONFIG for enhanced security. + Set this option to true to skip checks that required advanced privileges. + + .. note:: + + Most cloud hosted redis clusters will require this to be set to ``True`` + + ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import json import logging # Import Salt libs -import salt.ext.six as six -import salt.utils -import salt.utils.jid import salt.returners +import salt.utils.jid +import salt.utils.platform -# Import third party libs +# Import 3rd-party libs +from salt.ext import six try: import redis HAS_REDIS = True @@ -66,14 +105,28 @@ except ImportError: log = logging.getLogger(__name__) +try: + from rediscluster import StrictRedisCluster + HAS_REDIS_CLUSTER = True +except ImportError: + HAS_REDIS_CLUSTER = False + # Define the module's virtual name __virtualname__ = 'redis' def __virtual__(): + ''' + The redis library must be installed for this module to work. + + The redis redis cluster library must be installed if cluster_mode is True + ''' + if not HAS_REDIS: return False, 'Could not import redis returner; ' \ 'redis python client is not installed.' 
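+    # When cluster_mode is enabled, the optional redis-py-cluster package is
+    # required in addition to the redis client.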
+ if not HAS_REDIS_CLUSTER and _get_options()['cluster_mode']: + return (False, "Please install the redis-py-cluster package.") return __virtualname__ @@ -83,13 +136,20 @@ def _get_options(ret=None): ''' attrs = {'host': 'host', 'port': 'port', - 'db': 'db'} + 'db': 'db', + 'cluster_mode': 'cluster_mode', + 'startup_nodes': 'cluster.startup_nodes', + 'skip_full_coverage_check': 'cluster.skip_full_coverage_check', + } - if salt.utils.is_proxy(): + if salt.utils.platform.is_proxy(): return { 'host': __opts__.get('redis.host', 'salt'), 'port': __opts__.get('redis.port', 6379), - 'db': __opts__.get('redis.db', '0') + 'db': __opts__.get('redis.db', '0'), + 'cluster_mode': __opts__.get('redis.cluster_mode', False), + 'startup_nodes': __opts__.get('redis.cluster.startup_nodes', {}), + 'skip_full_coverage_check': __opts__.get('redis.cluster.skip_full_coverage_check', False) } _options = salt.returners.get_returner_options(__virtualname__, @@ -103,10 +163,12 @@ def _get_options(ret=None): CONN_POOL = None -def _get_conn_pool(): +def _get_conn_pool(_options): global CONN_POOL if CONN_POOL is None: - CONN_POOL = redis.ConnectionPool() + CONN_POOL = redis.ConnectionPool(host=_options.get('host'), + port=_options.get('port'), + db=_options.get('db')) return CONN_POOL @@ -115,16 +177,14 @@ def _get_serv(ret=None): Return a redis server object ''' _options = _get_options(ret) - host = _options.get('host') - port = _options.get('port') - db = _options.get('db') - pool = _get_conn_pool() - return redis.Redis( - host=host, - port=port, - db=db, - connection_pool=pool) + if _options.get('cluster_mode'): + return StrictRedisCluster(startup_nodes=_options.get('startup_nodes'), + skip_full_coverage_check=_options.get('skip_full_coverage_check'), + decode_responses=True) + else: + pool = _get_conn_pool(_options) + return redis.StrictRedis(connection_pool=pool) def _get_ttl(): @@ -150,7 +210,7 @@ def save_load(jid, load, minions=None): Save the load to the specified jid ''' serv = _get_serv(ret=None) - serv.setex('load:{0}'.format(jid), json.dumps(load), _get_ttl()) + serv.setex('load:{0}'.format(jid), _get_ttl(), json.dumps(load)) def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument diff --git a/salt/returners/sentry_return.py b/salt/returners/sentry_return.py index 6fda73db6d..146f6cc6da 100644 --- a/salt/returners/sentry_return.py +++ b/salt/returners/sentry_return.py @@ -48,7 +48,7 @@ import logging # Import Salt libs import salt.utils.jid -import salt.ext.six as six +from salt.ext import six try: from raven import Client diff --git a/salt/returners/smtp_return.py b/salt/returners/smtp_return.py index 0dc904ebb0..78dfdc9ef5 100644 --- a/salt/returners/smtp_return.py +++ b/salt/returners/smtp_return.py @@ -113,7 +113,7 @@ import smtplib from email.utils import formatdate # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.jid import salt.returners import salt.loader diff --git a/salt/returners/syslog_return.py b/salt/returners/syslog_return.py index 67adbaa509..bcafaf614d 100644 --- a/salt/returners/syslog_return.py +++ b/salt/returners/syslog_return.py @@ -100,7 +100,7 @@ except ImportError: # Import Salt libs import salt.utils.jid import salt.returners -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) # Define the module's virtual name diff --git a/salt/returners/zabbix_return.py b/salt/returners/zabbix_return.py index 356be1bd58..6470fe31ff 100644 --- a/salt/returners/zabbix_return.py +++ 
b/salt/returners/zabbix_return.py @@ -25,7 +25,7 @@ import logging import os # Import Salt libs -import salt.ext.six as six +from salt.ext import six # Get logging started log = logging.getLogger(__name__) diff --git a/salt/roster/ansible.py b/salt/roster/ansible.py index 487a565ee0..d97b106884 100644 --- a/salt/roster/ansible.py +++ b/salt/roster/ansible.py @@ -96,12 +96,13 @@ import json import subprocess # Import Salt libs -import salt.utils +import salt.utils.args import salt.utils.files +import salt.utils.stringutils from salt.roster import get_roster_file # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six CONVERSION = { 'ansible_ssh_host': 'host', @@ -207,7 +208,7 @@ class Inventory(Target): ''' Parse lines in the inventory file that are under the same group block ''' - line_args = salt.utils.shlex_split(line) + line_args = salt.utils.args.shlex_split(line) name = line_args[0] host = {line_args[0]: dict()} for arg in line_args[1:]: @@ -246,7 +247,7 @@ class Script(Target): self.tgt = tgt self.tgt_type = tgt_type inventory, error = subprocess.Popen([inventory_file], shell=True, stdout=subprocess.PIPE).communicate() - self.inventory = json.loads(salt.utils.to_str(inventory)) + self.inventory = json.loads(salt.utils.stringutils.to_str(inventory)) self.meta = self.inventory.get('_meta', {}) self.groups = dict() self.hostvars = dict() diff --git a/salt/roster/cache.py b/salt/roster/cache.py index fb848507ff..cea0377f8a 100644 --- a/salt/roster/cache.py +++ b/salt/roster/cache.py @@ -101,9 +101,10 @@ import re # Salt libs import salt.utils.minions +import salt.utils.versions import salt.cache from salt._compat import ipaddress -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -129,17 +130,21 @@ def targets(tgt, tgt_type='glob', **kwargs): # pylint: disable=W0613 'host': ('ipv6-private', 'ipv6-global', 'ipv4-private', 'ipv4-public') }) if isinstance(roster_order, (tuple, list)): - salt.utils.warn_until('Oxygen', - 'Using legacy syntax for roster_order') + salt.utils.versions.warn_until( + 'Fluorine', + 'Using legacy syntax for roster_order' + ) roster_order = { 'host': roster_order } for config_key, order in roster_order.items(): for idx, key in enumerate(order): if key in ('public', 'private', 'local'): - salt.utils.warn_until('Oxygen', - 'roster_order {0} will include IPv6 soon. ' - 'Set order to ipv4-{0} if needed.'.format(key)) + salt.utils.versions.warn_until( + 'Fluorine', + 'roster_order {0} will include IPv6 soon. 
' + 'Set order to ipv4-{0} if needed.'.format(key) + ) order[idx] = 'ipv4-' + key # log.debug(roster_order) diff --git a/salt/roster/flat.py b/salt/roster/flat.py index 9d5af1c333..e470aea907 100644 --- a/salt/roster/flat.py +++ b/salt/roster/flat.py @@ -19,6 +19,7 @@ except ImportError: # Import Salt libs import salt.loader +import salt.config from salt.template import compile_template from salt.ext.six import string_types from salt.roster import get_roster_file @@ -43,7 +44,7 @@ def targets(tgt, tgt_type='glob', **kwargs): **kwargs) conditioned_raw = {} for minion in raw: - conditioned_raw[str(minion)] = raw[minion] + conditioned_raw[str(minion)] = salt.config.apply_sdb(raw[minion]) rmatcher = RosterMatcher(conditioned_raw, tgt, tgt_type, 'ipv4') return rmatcher.targets() diff --git a/salt/runner.py b/salt/runner.py index d9a04da015..fa71e520ee 100644 --- a/salt/runner.py +++ b/salt/runner.py @@ -12,7 +12,7 @@ import logging import salt.exceptions import salt.loader import salt.minion -import salt.utils +import salt.utils # Can be removed when get_specific_user is moved import salt.utils.args import salt.utils.event import salt.utils.files @@ -38,16 +38,16 @@ class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object): eauth user must be authorized to execute runner modules: (``@runner``). Only the :py:meth:`master_call` below supports eauth. ''' - client = 'runner' - tag_prefix = 'run' + client = u'runner' + tag_prefix = u'run' def __init__(self, opts): self.opts = opts @property def functions(self): - if not hasattr(self, '_functions'): - if not hasattr(self, 'utils'): + if not hasattr(self, u'_functions'): + if not hasattr(self, u'utils'): self.utils = salt.loader.utils(self.opts) # Must be self.functions for mixin to work correctly :-/ try: @@ -70,19 +70,19 @@ class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object): New-style: ``{'fun': 'jobs.lookup_jid', 'kwarg': {'jid': '1234'}}`` CLI-style: ``{'fun': 'jobs.lookup_jid', 'arg': ['jid="1234"']}`` ''' - fun = low.pop('fun') + fun = low.pop(u'fun') verify_fun(self.functions, fun) eauth_creds = dict([(i, low.pop(i)) for i in [ - 'username', 'password', 'eauth', 'token', 'client', 'user', 'key', + u'username', u'password', u'eauth', u'token', u'client', u'user', u'key', ] if i in low]) # Run name=value args through parse_input. We don't need to run kwargs # through because there is no way to send name=value strings in the low # dict other than by including an `arg` array. _arg, _kwarg = salt.utils.args.parse_input( - low.pop('arg', []), condition=False) - _kwarg.update(low.pop('kwarg', {})) + low.pop(u'arg', []), condition=False) + _kwarg.update(low.pop(u'kwarg', {})) # If anything hasn't been pop()'ed out of low by this point it must be # an old-style kwarg. @@ -100,7 +100,7 @@ class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object): self.opts, ignore_invalid=True) - return dict(fun=fun, kwarg={'kwarg': kwarg, 'arg': arg}, + return dict(fun=fun, kwarg={u'kwarg': kwarg, u'arg': arg}, **eauth_creds) def cmd_async(self, low): @@ -167,10 +167,10 @@ class Runner(RunnerClient): ''' Print out the documentation! 
''' - arg = self.opts.get('fun', None) + arg = self.opts.get(u'fun', None) docs = super(Runner, self).get_docs(arg) for fun in sorted(docs): - display_output('{0}:'.format(fun), 'text', self.opts) + display_output(u'{0}:'.format(fun), u'text', self.opts) print(docs[fun]) # TODO: move to mixin whenever we want a salt-wheel cli @@ -180,106 +180,117 @@ class Runner(RunnerClient): ''' import salt.minion ret = {} - if self.opts.get('doc', False): + if self.opts.get(u'doc', False): self.print_docs() else: - low = {'fun': self.opts['fun']} + low = {u'fun': self.opts[u'fun']} try: # Allocate a jid async_pub = self._gen_async_pub() - self.jid = async_pub['jid'] + self.jid = async_pub[u'jid'] fun_args = salt.utils.args.parse_input( - self.opts['arg'], - no_parse=self.opts.get('no_parse', [])) + self.opts[u'arg'], + no_parse=self.opts.get(u'no_parse', [])) - verify_fun(self.functions, low['fun']) + verify_fun(self.functions, low[u'fun']) args, kwargs = salt.minion.load_args_and_kwargs( - self.functions[low['fun']], + self.functions[low[u'fun']], fun_args, self.opts, ) - low['arg'] = args - low['kwarg'] = kwargs + low[u'arg'] = args + low[u'kwarg'] = kwargs - if self.opts.get('eauth'): - if 'token' in self.opts: + if self.opts.get(u'eauth'): + if u'token' in self.opts: try: - with salt.utils.files.fopen(os.path.join(self.opts['cachedir'], '.root_key'), 'r') as fp_: - low['key'] = fp_.readline() + with salt.utils.files.fopen(os.path.join(self.opts[u'cachedir'], u'.root_key'), u'r') as fp_: + low[u'key'] = fp_.readline() except IOError: - low['token'] = self.opts['token'] + low[u'token'] = self.opts[u'token'] # If using eauth and a token hasn't already been loaded into # low, prompt the user to enter auth credentials - if 'token' not in low and 'key' not in low and self.opts['eauth']: + if u'token' not in low and u'key' not in low and self.opts[u'eauth']: # This is expensive. Don't do it unless we need to. import salt.auth resolver = salt.auth.Resolver(self.opts) - res = resolver.cli(self.opts['eauth']) - if self.opts['mktoken'] and res: + res = resolver.cli(self.opts[u'eauth']) + if self.opts[u'mktoken'] and res: tok = resolver.token_cli( - self.opts['eauth'], + self.opts[u'eauth'], res ) if tok: - low['token'] = tok.get('token', '') + low[u'token'] = tok.get(u'token', u'') if not res: - log.error('Authentication failed') + log.error(u'Authentication failed') return ret low.update(res) - low['eauth'] = self.opts['eauth'] + low[u'eauth'] = self.opts[u'eauth'] else: user = salt.utils.get_specific_user() - if low['fun'] == 'state.orchestrate': - low['kwarg']['orchestration_jid'] = async_pub['jid'] + if low[u'fun'] == u'state.orchestrate': + low[u'kwarg'][u'orchestration_jid'] = async_pub[u'jid'] # Run the runner! - if self.opts.get('async', False): - if self.opts.get('eauth'): + if self.opts.get(u'async', False): + if self.opts.get(u'eauth'): async_pub = self.cmd_async(low) else: - async_pub = self.async(self.opts['fun'], + async_pub = self.async(self.opts[u'fun'], low, user=user, pub=async_pub) # by default: info will be not enougth to be printed out ! - log.warning('Running in async mode. Results of this execution may ' - 'be collected by attaching to the master event bus or ' - 'by examing the master job cache, if configured. ' - 'This execution is running under tag {tag}'.format(**async_pub)) - return async_pub['jid'] # return the jid + log.warning( + u'Running in async mode. 
Results of this execution may ' + u'be collected by attaching to the master event bus or ' + u'by examing the master job cache, if configured. ' + u'This execution is running under tag %s', async_pub[u'tag'] + ) + return async_pub[u'jid'] # return the jid # otherwise run it in the main process - if self.opts.get('eauth'): + if self.opts.get(u'eauth'): ret = self.cmd_sync(low) - if isinstance(ret, dict) and set(ret) == set(('data', 'outputter')): - outputter = ret['outputter'] - ret = ret['data'] + if isinstance(ret, dict) and set(ret) == set((u'data', u'outputter')): + outputter = ret[u'outputter'] + ret = ret[u'data'] else: outputter = None display_output(ret, outputter, self.opts) else: - ret = self._proc_function(self.opts['fun'], + ret = self._proc_function(self.opts[u'fun'], low, user, - async_pub['tag'], - async_pub['jid'], + async_pub[u'tag'], + async_pub[u'jid'], daemonize=False) except salt.exceptions.SaltException as exc: - evt = salt.utils.event.get_event('master', opts=self.opts) - evt.fire_event({'success': False, - 'return': "{0}".format(exc), - 'retcode': 254, - 'fun': self.opts['fun'], - 'fun_args': fun_args, - 'jid': self.jid}, - tag='salt/run/{0}/ret'.format(self.jid)) - ret = '{0}'.format(exc) - if not self.opts.get('quiet', False): - display_output(ret, 'nested', self.opts) + evt = salt.utils.event.get_event(u'master', opts=self.opts) + evt.fire_event({u'success': False, + u'return': u'{0}'.format(exc), + u'retcode': 254, + u'fun': self.opts[u'fun'], + u'fun_args': fun_args, + u'jid': self.jid}, + tag=u'salt/run/{0}/ret'.format(self.jid)) + # Attempt to grab documentation + if u'fun' in low: + ret = self.get_docs(u'{0}*'.format(low[u'fun'])) + else: + ret = None + + # If we didn't get docs returned then + # return the `not availble` message. + if not ret: + ret = u'{0}'.format(exc) + if not self.opts.get(u'quiet', False): + display_output(ret, u'nested', self.opts) else: - log.debug('Runner return: {0}'.format(ret)) + log.debug(u'Runner return: %s', ret) return ret diff --git a/salt/runners/auth.py b/salt/runners/auth.py index 8aec8c3389..7e8e64855a 100644 --- a/salt/runners/auth.py +++ b/salt/runners/auth.py @@ -17,16 +17,28 @@ import salt.netapi def mk_token(**load): - ''' + r''' Create an eauth token using provided credentials + Non-root users may specify an expiration date -- if allowed via the + :conf_master:`token_expire_user_override` setting -- by passing an + additional ``token_expire`` param. This overrides the + :conf_master:`token_expire` setting of the same name in the Master config + and is how long a token should live in seconds. + CLI Example: .. code-block:: shell salt-run auth.mk_token username=saltdev password=saltdev eauth=auto - salt-run auth.mk_token username=saltdev password=saltdev eauth=auto \\ + + # Create a token valid for three years. + salt-run auth.mk_token username=saltdev password=saltdev eauth=auto \ token_expire=94670856 + + # Calculate the number of seconds using expr. + salt-run auth.mk_token username=saltdev password=saltdev eauth=auto \ + token_expire=$(expr \( 365 \* 24 \* 60 \* 60 \) \* 3) ''' # This will hang if the master daemon is not running. 
netapi = salt.netapi.NetapiClient(__opts__) diff --git a/salt/runners/cache.py b/salt/runners/cache.py index 8260b074df..af074e4aba 100644 --- a/salt/runners/cache.py +++ b/salt/runners/cache.py @@ -10,10 +10,12 @@ import os # Import salt libs import salt.config -import salt.ext.six as six +from salt.ext import six import salt.log import salt.utils +import salt.utils.args import salt.utils.master +import salt.utils.versions import salt.payload import salt.cache from salt.exceptions import SaltInvocationError @@ -45,7 +47,7 @@ def grains(tgt=None, tgt_type='glob', **kwargs): salt-run cache.grains ''' if 'expr_form' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'The target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' @@ -76,7 +78,7 @@ def pillar(tgt=None, tgt_type='glob', **kwargs): salt-run cache.pillar ''' if 'expr_form' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'The target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' @@ -109,7 +111,7 @@ def mine(tgt=None, tgt_type='glob', **kwargs): salt-run cache.mine ''' if 'expr_form' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'The target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' @@ -167,7 +169,7 @@ def clear_pillar(tgt=None, tgt_type='glob', expr_form=None): # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' @@ -195,7 +197,7 @@ def clear_grains(tgt=None, tgt_type='glob', expr_form=None): # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' @@ -223,7 +225,7 @@ def clear_mine(tgt=None, tgt_type='glob', expr_form=None): # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' @@ -271,7 +273,7 @@ def clear_all(tgt=None, tgt_type='glob', expr_form=None): # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. 
Support for using ' @@ -321,10 +323,10 @@ def clear_git_lock(role, remote=None, **kwargs): salt-run cache.clear_git_lock git_pillar ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) type_ = salt.utils.split_input(kwargs.pop('type', ['update', 'checkout'])) if kwargs: - salt.utils.invalid_kwargs(kwargs) + salt.utils.args.invalid_kwargs(kwargs) if role == 'gitfs': git_objects = [salt.utils.gitfs.GitFS(__opts__)] diff --git a/salt/runners/cloud.py b/salt/runners/cloud.py index b00d843c68..eb46d4afa5 100644 --- a/salt/runners/cloud.py +++ b/salt/runners/cloud.py @@ -14,16 +14,13 @@ import os # Import Salt libs import salt.cloud +import salt.utils.args from salt.exceptions import SaltCloudConfigError # Get logging started log = logging.getLogger(__name__) -def _filter_kwargs(k): - return dict((x, k[x]) for x in k if not x.startswith('__')) - - def _get_client(): ''' Return cloud client @@ -114,7 +111,7 @@ def profile(prof=None, instances=None, opts=None, **kwargs): client = _get_client() if isinstance(opts, dict): client.opts.update(opts) - info = client.profile(prof, instances, **_filter_kwargs(kwargs)) + info = client.profile(prof, instances, **salt.utils.args.clean_kwargs(**kwargs)) return info @@ -123,7 +120,7 @@ def map_run(path=None, **kwargs): Execute a salt cloud map file ''' client = _get_client() - info = client.map_run(path, **_filter_kwargs(kwargs)) + info = client.map_run(path, **salt.utils.args.clean_kwargs(**kwargs)) return info @@ -154,7 +151,14 @@ def action(func=None, info = {} client = _get_client() try: - info = client.action(func, cloudmap, instances, provider, instance, **_filter_kwargs(kwargs)) + info = client.action( + func, + cloudmap, + instances, + provider, + instance, + **salt.utils.args.clean_kwargs(**kwargs) + ) except SaltCloudConfigError as err: log.error(err) return info @@ -172,12 +176,8 @@ def create(provider, instances, opts=None, **kwargs): image=ami-1624987f size='t1.micro' ssh_username=ec2-user \ securitygroup=default delvol_on_destroy=True ''' - create_kwargs = {} - for kwarg in kwargs: - if not kwarg.startswith('__'): - create_kwargs[kwarg] = kwargs[kwarg] client = _get_client() if isinstance(opts, dict): client.opts.update(opts) - info = client.create(provider, instances, **create_kwargs) + info = client.create(provider, instances, **salt.utils.args.clean_kwargs(**kwargs)) return info diff --git a/salt/runners/digicertapi.py b/salt/runners/digicertapi.py index 646a069111..70f5cd2027 100644 --- a/salt/runners/digicertapi.py +++ b/salt/runners/digicertapi.py @@ -47,7 +47,7 @@ import salt.syspaths as syspaths import salt.cache import salt.utils.files import salt.utils.http -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range from salt.exceptions import (CommandExecutionError, SaltRunnerError) from Crypto.PublicKey import RSA diff --git a/salt/runners/doc.py b/salt/runners/doc.py index d61e6b8cd8..92a06b3a44 100644 --- a/salt/runners/doc.py +++ b/salt/runners/doc.py @@ -13,7 +13,7 @@ import salt.runner import salt.wheel # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.exceptions import SaltClientError diff --git a/salt/runners/git_pillar.py b/salt/runners/git_pillar.py index 8dfc4412eb..0e8e97beb3 100644 --- a/salt/runners/git_pillar.py +++ b/salt/runners/git_pillar.py @@ -86,7 +86,8 @@ def update(branch=None, repo=None): else: pillar = salt.utils.gitfs.GitPillar(__opts__) pillar.init_remotes(pillar_conf, - 
salt.pillar.git_pillar.PER_REMOTE_OVERRIDES) + salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, + salt.pillar.git_pillar.PER_REMOTE_ONLY) for remote in pillar.remotes: # Skip this remote if it doesn't match the search criteria if branch is not None: diff --git a/salt/runners/jobs.py b/salt/runners/jobs.py index 42a7a5688d..65f1158049 100644 --- a/salt/runners/jobs.py +++ b/salt/runners/jobs.py @@ -19,7 +19,7 @@ import salt.minion import salt.returners # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.exceptions import SaltClientError try: diff --git a/salt/runners/lxc.py b/salt/runners/lxc.py index 4d95d3c471..bd1f86f971 100644 --- a/salt/runners/lxc.py +++ b/salt/runners/lxc.py @@ -14,15 +14,15 @@ import logging # Import Salt libs import salt.client -import salt.utils +import salt.utils.args +import salt.utils.cloud import salt.utils.files import salt.utils.virt -import salt.utils.cloud import salt.key from salt.utils.odict import OrderedDict as _OrderedDict # Import 3rd-party lib -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -276,7 +276,7 @@ def init(names, host=None, saltcloud_mode=False, quiet=False, **kwargs): ret['result'] = False return ret - kw = salt.utils.clean_kwargs(**kwargs) + kw = salt.utils.args.clean_kwargs(**kwargs) pub_key = kw.get('pub_key', None) priv_key = kw.get('priv_key', None) explicit_auth = pub_key and priv_key @@ -309,7 +309,7 @@ def init(names, host=None, saltcloud_mode=False, quiet=False, **kwargs): cmds = [] for name in names: args = [name] - kw = salt.utils.clean_kwargs(**kwargs) + kw = salt.utils.args.clean_kwargs(**kwargs) if saltcloud_mode: kw = copy.deepcopy(kw) kw['name'] = name diff --git a/salt/runners/manage.py b/salt/runners/manage.py index be40e0411d..e446d0e985 100644 --- a/salt/runners/manage.py +++ b/salt/runners/manage.py @@ -16,14 +16,16 @@ import logging import uuid # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves.urllib.request import urlopen as _urlopen # pylint: disable=no-name-in-module,import-error # Import salt libs import salt.key -import salt.utils +import salt.utils.compat import salt.utils.files import salt.utils.minions +import salt.utils.raetevent +import salt.utils.versions import salt.client import salt.client.ssh import salt.wheel @@ -86,7 +88,7 @@ def status(output=True, tgt='*', tgt_type='glob', expr_form=None, timeout=None, # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. 
Support for using ' @@ -653,6 +655,7 @@ def versions(): return ret labels = { + -2: 'Minion offline', -1: 'Minion requires update', 0: 'Up to date', 1: 'Minion newer than master', @@ -664,12 +667,19 @@ def versions(): master_version = salt.version.__saltstack_version__ for minion in minions: - minion_version = salt.version.SaltStackVersion.parse(minions[minion]) - ver_diff = cmp(minion_version, master_version) + if not minions[minion]: + minion_version = False + ver_diff = -2 + else: + minion_version = salt.version.SaltStackVersion.parse(minions[minion]) + ver_diff = salt.utils.compat.cmp(minion_version, master_version) if ver_diff not in version_status: version_status[ver_diff] = {} - version_status[ver_diff][minion] = minion_version.string + if minion_version: + version_status[ver_diff][minion] = minion_version.string + else: + version_status[ver_diff][minion] = minion_version # Add version of Master to output version_status[2] = master_version.string diff --git a/salt/runners/mattermost.py b/salt/runners/mattermost.py index 2bd3d928c4..4ca4e7125c 100644 --- a/salt/runners/mattermost.py +++ b/salt/runners/mattermost.py @@ -20,7 +20,7 @@ import json import logging # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Import Salt libs # pylint: disable=import-error,no-name-in-module,redefined-builtin diff --git a/salt/runners/nacl.py b/salt/runners/nacl.py index f8ec91841c..2ccd888bef 100644 --- a/salt/runners/nacl.py +++ b/salt/runners/nacl.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- ''' -This runner helps create encrypted passwords that can be included in pillars. +This module helps include encrypted passwords in pillars, grains and salt state files. :depends: libnacl, https://github.com/saltstack/libnacl @@ -8,35 +8,166 @@ This is often useful if you wish to store your pillars in source control or share your pillar data with others that you trust. I don't advise making your pillars public regardless if they are encrypted or not. -The following configurations can be defined in the master config -so your users can create encrypted passwords using the runner nacl: +When generating keys and encrypting passwords use --local when using salt-call for extra +security. Also consider using just the salt runner nacl when encrypting pillar passwords. + +:configuration: The following configuration defaults can be + define (pillar or config files) Avoid storing private keys in pillars! Ensure master does not have `pillar_opts=True`: + + .. code-block:: python + + # cat /etc/salt/master.d/nacl.conf + nacl.config: + # NOTE: `key` and `key_file` have been renamed to `sk`, `sk_file` + # also `box_type` default changed from secretbox to sealedbox. + box_type: sealedbox (default) + sk_file: /etc/salt/pki/master/nacl (default) + pk_file: /etc/salt/pki/master/nacl.pub (default) + sk: None + pk: None + + Usage can override the config defaults: + + .. code-block:: bash + + salt-call nacl.enc sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub + + +The nacl lib uses 32byte keys, these keys are base64 encoded to make your life more simple. +To generate your `sk_file` and `pk_file` use: .. code-block:: bash - cat /etc/salt/master.d/nacl.conf + salt-call --local nacl.keygen sk_file=/etc/salt/pki/master/nacl + # or if you want to work without files. 
+ salt-call --local nacl.keygen + local: + ---------- + pk: + /kfGX7PbWeu099702PBbKWLpG/9p06IQRswkdWHCDk0= + sk: + SVWut5SqNpuPeNzb1b9y6b2eXg2PLIog43GBzp48Sow= + +Now with your keypair, you can encrypt data: + +You have two options, `sealedbox` or `secretbox`. + +SecretBox encrypts data using the secret key `sk`. Sealedbox encrypts using the public key `pk`. + +Sealedbox is recommended because its one-way encryption permits developers to encrypt data for source control but not decrypt it. +Secretbox uses a single key for both encryption and decryption. + +.. code-block:: bash + + salt-call --local nacl.enc asecretpass pk=/kfGX7PbWeu099702PBbKWLpG/9p06IQRswkdWHCDk0= + tqXzeIJnTAM9Xf0mdLcpEdklMbfBGPj2oTKmlgrm3S1DTVVHNnh9h8mU1GKllGq/+cYsk6m5WhGdk58= + +To decrypt the data: + +.. code-block:: bash + + salt-call --local nacl.dec data='tqXzeIJnTAM9Xf0mdLcpEdklMbfBGPj2oTKmlgrm3S1DTVVHNnh9h8mU1GKllGq/+cYsk6m5WhGdk58=' \ + sk='SVWut5SqNpuPeNzb1b9y6b2eXg2PLIog43GBzp48Sow=' + +When the keys are defined in the master config you can use them from the nacl runner +without extra parameters: + +.. code-block:: python + + # cat /etc/salt/master.d/nacl.conf nacl.config: - key: 'cKEzd4kXsbeCE7/nLTIqXwnUiD1ulg4NoeeYcCFpd9k=' - keyfile: /root/.nacl - -Now with the config in the master you can use the runner nacl like: + sk_file: /etc/salt/pki/master/nacl + pk: 'cTIqXwnUiD1ulg4kXsbeCE7/NoeKEzd4nLeYcCFpd9k=' .. code-block:: bash - salt-run nacl.enc 'data' + salt-run nacl.enc 'asecretpass' + salt-run nacl.dec 'tqXzeIJnTAM9Xf0mdLcpEdklMbfBGPj2oTKmlgrm3S1DTVVHNnh9h8mU1GKllGq/+cYsk6m5WhGdk58=' + +.. code-block:: yaml + # a salt developer's minion could have pillar data that includes a nacl public key + nacl.config: + pk: '/kfGX7PbWeu099702PBbKWLpG/9p06IQRswkdWHCDk0=' + +The developer can then use a less-secure system to encrypt data. + +.. code-block:: bash + + salt-call --local nacl.enc apassword + + +Pillar files can include protected data that the salt master decrypts: + +.. code-block:: jinja + + pillarexample: + user: root + password1: {{salt.nacl.dec('DRB7Q6/X5gGSRCTpZyxS6hlbWj0llUA+uaVyvou3vJ4=')|json}} + cert_key: {{salt.nacl.dec_file('/srv/salt/certs/example.com/key.nacl')|json}} + cert_key2: {{salt.nacl.dec_file('salt:///certs/example.com/key.nacl')|json}} + +Larger files like certificates can be encrypted with: + +.. code-block:: bash + + salt-call nacl.enc_file /tmp/cert.crt out=/tmp/cert.nacl + # or more advanced + cert=$(cat /tmp/cert.crt) + salt-call --out=newline_values_only nacl.enc_pub data="$cert" > /tmp/cert.nacl + +In pillars rendered with jinja be sure to include `|json` so line breaks are encoded: + +.. code-block:: jinja + + cert: "{{salt.nacl.dec('S2uogToXkgENz9...085KYt')|json}}" + +In states rendered with jinja it is also good practice to include `|json`: + +.. code-block:: jinja + + {{sls}} private key: + file.managed: + - name: /etc/ssl/private/cert.key + - mode: 700 + - contents: "{{pillar['pillarexample']['cert_key']|json}}" + + +Optional small program to encrypt data without needing salt modules. + +.. code-block:: python + + #!/bin/python3 + import sys, base64, libnacl.sealed + pk = base64.b64decode('YOURPUBKEY') + b = libnacl.sealed.SealedBox(pk) + data = sys.stdin.buffer.read() + print(base64.b64encode(b.encrypt(data)).decode()) + +..
code-block:: bash + + echo 'apassword' | nacl_enc.py + ''' +# Import Python libs from __future__ import absolute_import import base64 import os + +# Import Salt libs import salt.utils.files +import salt.utils.platform +import salt.utils.win_functions +import salt.utils.win_dacl import salt.syspaths REQ_ERROR = None try: import libnacl.secret -except ImportError as e: - REQ_ERROR = 'libnacl import error, perhaps missing python libnacl package' + import libnacl.sealed +except (ImportError, OSError) as e: + REQ_ERROR = 'libnacl import error, perhaps missing python libnacl package or it needs to be updated.' __virtualname__ = 'nacl' @@ -50,91 +181,294 @@ def _get_config(**kwargs): Return configuration ''' config = { - 'key': None, - 'keyfile': None, + 'box_type': 'sealedbox', + 'sk': None, + 'sk_file': '/etc/salt/pki/master/nacl', + 'pk': None, + 'pk_file': '/etc/salt/pki/master/nacl.pub', } config_key = '{0}.config'.format(__virtualname__) - config.update(__opts__.get(config_key, {})) - for k in set(config) & set(kwargs): + try: + config.update(__salt__['config.get'](config_key, {})) + except (NameError, KeyError) as e: + # likely using salt-run so fall back to __opts__ + config.update(__opts__.get(config_key, {})) + # pylint: disable=C0201 + for k in set(config.keys()) & set(kwargs.keys()): config[k] = kwargs[k] return config -def _get_key(rstrip_newline=True, **kwargs): +def _get_sk(**kwargs): ''' - Return key + Return sk ''' config = _get_config(**kwargs) - key = config['key'] - keyfile = config['keyfile'] - if not key and keyfile: - if not os.path.isfile(keyfile): - raise Exception('file not found: {0}'.format(keyfile)) - with salt.utils.files.fopen(keyfile, 'rb') as keyf: - key = keyf.read() + key = config['sk'] + sk_file = config['sk_file'] + if not key and sk_file: + with salt.utils.files.fopen(sk_file, 'rb') as keyf: + key = str(keyf.read()).rstrip('\n') if key is None: - raise Exception('no key found') - key = str(key) - if rstrip_newline: - key = key.rstrip('\n') - return key + raise Exception('no key or sk_file found') + return base64.b64decode(key) -def keygen(keyfile=None): +def _get_pk(**kwargs): ''' - Use libnacl to generate a private key + Return pk + ''' + config = _get_config(**kwargs) + pubkey = config['pk'] + pk_file = config['pk_file'] + if not pubkey and pk_file: + with salt.utils.files.fopen(pk_file, 'rb') as keyf: + pubkey = str(keyf.read()).rstrip('\n') + if pubkey is None: + raise Exception('no pubkey or pk_file found') + pubkey = str(pubkey) + return base64.b64decode(pubkey) + + +def keygen(sk_file=None, pk_file=None): + ''' + Use libnacl to generate a keypair. + + If no `sk_file` is defined, return a keypair. + + If only the `sk_file` is defined, `pk_file` will use the same name with a `.pub` suffix. + + If the `sk_file` already exists but `pk_file` does not, the `pk_file` will be generated + using the `sk_file`. CLI Examples: ..
code-block:: bash - salt-run nacl.keygen - salt-run nacl.keygen keyfile=/root/.nacl - salt-run --out=newline_values_only nacl.keygen > /root/.nacl + salt-call nacl.keygen + salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl + salt-call nacl.keygen sk_file=/etc/salt/pki/master/nacl pk_file=/etc/salt/pki/master/nacl.pub + salt-call --local nacl.keygen ''' - b = libnacl.secret.SecretBox() - key = b.sk - key = base64.b64encode(key) - if keyfile: - if os.path.isfile(keyfile): - raise Exception('file already found: {0}'.format(keyfile)) - with salt.utils.files.fopen(keyfile, 'w') as keyf: - keyf.write(key) - return 'saved: {0}'.format(keyfile) - return key + if sk_file is None: + kp = libnacl.public.SecretKey() + return {'sk': base64.b64encode(kp.sk), 'pk': base64.b64encode(kp.pk)} + + if pk_file is None: + pk_file = '{0}.pub'.format(sk_file) + + if sk_file and pk_file is None: + if not os.path.isfile(sk_file): + kp = libnacl.public.SecretKey() + with salt.utils.files.fopen(sk_file, 'w') as keyf: + keyf.write(base64.b64encode(kp.sk)) + if salt.utils.platform.is_windows(): + cur_user = salt.utils.win_functions.get_current_user() + salt.utils.win_dacl.set_owner(sk_file, cur_user) + salt.utils.win_dacl.set_permissions(sk_file, cur_user, 'full_control', 'grant', reset_perms=True, protected=True) + else: + # chmod 0600 file + os.chmod(sk_file, 1536) + return 'saved sk_file: {0}'.format(sk_file) + else: + raise Exception('sk_file:{0} already exists.'.format(sk_file)) + + if sk_file is None and pk_file: + raise Exception('sk_file: Must be set in order to generate a public key.') + + if os.path.isfile(sk_file) and os.path.isfile(pk_file): + raise Exception('sk_file:{0} and pk_file:{1} already exist.'.format(sk_file, pk_file)) + + if os.path.isfile(sk_file) and not os.path.isfile(pk_file): + # generate pk using the sk + with salt.utils.files.fopen(sk_file, 'rb') as keyf: + sk = str(keyf.read()).rstrip('\n') + sk = base64.b64decode(sk) + kp = libnacl.public.SecretKey(sk) + with salt.utils.files.fopen(pk_file, 'w') as keyf: + keyf.write(base64.b64encode(kp.pk)) + return 'saved pk_file: {0}'.format(pk_file) + + kp = libnacl.public.SecretKey() + with salt.utils.files.fopen(sk_file, 'w') as keyf: + keyf.write(base64.b64encode(kp.sk)) + if salt.utils.platform.is_windows(): + cur_user = salt.utils.win_functions.get_current_user() + salt.utils.win_dacl.set_owner(sk_file, cur_user) + salt.utils.win_dacl.set_permissions(sk_file, cur_user, 'full_control', 'grant', reset_perms=True, protected=True) + else: + # chmod 0600 file + os.chmod(sk_file, 1536) + with salt.utils.files.fopen(pk_file, 'w') as keyf: + keyf.write(base64.b64encode(kp.pk)) + return 'saved sk_file:{0} pk_file: {1}'.format(sk_file, pk_file) def enc(data, **kwargs): ''' - Takes a key generated from `nacl.keygen` and encrypt some data. + Alias to `{box_type}_encrypt` + + box_type: secretbox, sealedbox (default) + ''' + box_type = _get_config(**kwargs)['box_type'] + if box_type == 'sealedbox': + return sealedbox_encrypt(data, **kwargs) + if box_type == 'secretbox': + return secretbox_encrypt(data, **kwargs) + return sealedbox_encrypt(data, **kwargs) + + +def enc_file(name, out=None, **kwargs): + ''' + This is a helper function to encrypt a file and return its contents. + + You can provide an optional output file using `out`. + + `name` can be a local file or, when not using `salt-run`, a URL like `salt://`, `https://`, etc. CLI Examples: ..
code-block:: bash - salt-run nacl.enc datatoenc - salt-run nacl.enc datatoenc keyfile=/root/.nacl - salt-run nacl.enc datatoenc key='cKEzd4kXsbeCE7/nLTIqXwnUiD1ulg4NoeeYcCFpd9k=' + salt-run nacl.enc_file name=/tmp/id_rsa + salt-call nacl.enc_file name=salt://crt/mycert out=/tmp/cert + salt-run nacl.enc_file name=/tmp/id_rsa box_type=secretbox \ + sk_file=/etc/salt/pki/master/nacl.pub ''' - key = _get_key(**kwargs) - sk = base64.b64decode(key) - b = libnacl.secret.SecretBox(sk) - return base64.b64encode(b.encrypt(data)) + try: + data = __salt__['cp.get_file_str'](name) + except Exception as e: + # likely using salt-run so fall back to local filesystem + with salt.utils.files.fopen(name, 'rb') as f: + data = f.read() + d = enc(data, **kwargs) + if out: + if os.path.isfile(out): + raise Exception('file:{0} already exists.'.format(out)) + with salt.utils.files.fopen(out, 'wb') as f: + f.write(d) + return 'Wrote: {0}'.format(out) + return d def dec(data, **kwargs): ''' - Takes a key generated from `nacl.keygen` and decrypt some data. + Alias to `{box_type}_decrypt` + + box_type: secretbox, sealedbox (default) + ''' + box_type = _get_config(**kwargs)['box_type'] + if box_type == 'sealedbox': + return sealedbox_decrypt(data, **kwargs) + if box_type == 'secretbox': + return secretbox_decrypt(data, **kwargs) + return sealedbox_decrypt(data, **kwargs) + + +def dec_file(name, out=None, **kwargs): + ''' + This is a helper function to decrypt a file and return its contents. + + You can provide an optional output file using `out`. + + `name` can be a local file or, when not using `salt-run`, a URL like `salt://`, `https://`, etc. CLI Examples: .. code-block:: bash - salt-run nacl.dec pEXHQM6cuaF7A= - salt-run nacl.dec data='pEXHQM6cuaF7A=' keyfile=/root/.nacl - salt-run nacl.dec data='pEXHQM6cuaF7A=' key='cKEzd4kXsbeCE7/nLTIqXwnUiD1ulg4NoeeYcCFpd9k=' + salt-run nacl.dec_file name=/tmp/id_rsa.nacl + salt-call nacl.dec_file name=salt://crt/mycert.nacl out=/tmp/id_rsa + salt-run nacl.dec_file name=/tmp/id_rsa.nacl box_type=secretbox \ + sk_file=/etc/salt/pki/master/nacl.pub ''' - key = _get_key(**kwargs) - sk = base64.b64decode(key) - b = libnacl.secret.SecretBox(key=sk) + try: + data = __salt__['cp.get_file_str'](name) + except Exception as e: + # likely using salt-run so fall back to local filesystem + with salt.utils.files.fopen(name, 'rb') as f: + data = f.read() + d = dec(data, **kwargs) + if out: + if os.path.isfile(out): + raise Exception('file:{0} already exists.'.format(out)) + with salt.utils.files.fopen(out, 'wb') as f: + f.write(d) + return 'Wrote: {0}'.format(out) + return d + + +def sealedbox_encrypt(data, **kwargs): + ''' + Encrypt data using a public key generated from `nacl.keygen`. + The encrypted data can only be decrypted with the corresponding secret key, using `nacl.sealedbox_decrypt`. + + CLI Examples: + + .. code-block:: bash + + salt-run nacl.sealedbox_encrypt datatoenc + salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub + salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ=' + ''' + pk = _get_pk(**kwargs) + b = libnacl.sealed.SealedBox(pk) + return base64.b64encode(b.encrypt(data)) + + +def sealedbox_decrypt(data, **kwargs): + ''' + Decrypt data that was encrypted with a public key via `nacl.sealedbox_encrypt`, using the corresponding secret key. + + CLI Examples: + + ..
code-block:: bash + + salt-call nacl.sealedbox_decrypt pEXHQM6cuaF7A= + salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl + salt-call --local nacl.sealedbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo=' + ''' + if data is None: + return None + sk = _get_sk(**kwargs) + keypair = libnacl.public.SecretKey(sk) + b = libnacl.sealed.SealedBox(keypair) + return b.decrypt(base64.b64decode(data)) + + +def secretbox_encrypt(data, **kwargs): + ''' + Encrypt data using a secret key generated from `nacl.keygen`. + The same secret key can be used to decrypt the data using `nacl.secretbox_decrypt`. + + CLI Examples: + + .. code-block:: bash + + salt-run nacl.secretbox_encrypt datatoenc + salt-call --local nacl.secretbox_encrypt datatoenc sk_file=/etc/salt/pki/master/nacl + salt-call --local nacl.secretbox_encrypt datatoenc sk='YmFkcGFzcwo=' + ''' + sk = _get_sk(**kwargs) + b = libnacl.secret.SecretBox(sk) + return base64.b64encode(b.encrypt(data)) + + +def secretbox_decrypt(data, **kwargs): + ''' + Decrypt data that was encrypted using `nacl.secretbox_encrypt` using the secret key + that was generated from `nacl.keygen`. + + CLI Examples: + + .. code-block:: bash + + salt-call nacl.secretbox_decrypt pEXHQM6cuaF7A= + salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk_file=/etc/salt/pki/master/nacl + salt-call --local nacl.secretbox_decrypt data='pEXHQM6cuaF7A=' sk='YmFkcGFzcwo=' + ''' + if data is None: + return None + key = _get_sk(**kwargs) + b = libnacl.secret.SecretBox(key=key) return b.decrypt(base64.b64decode(data)) diff --git a/salt/runners/pkg.py b/salt/runners/pkg.py index 3e1f06ca33..cbb918726e 100644 --- a/salt/runners/pkg.py +++ b/salt/runners/pkg.py @@ -13,7 +13,7 @@ import salt.output import salt.minion # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def _get_returner(returner_types): diff --git a/salt/runners/queue.py b/salt/runners/queue.py index 10e9c99a77..01bad4b4e7 100644 --- a/salt/runners/queue.py +++ b/salt/runners/queue.py @@ -68,7 +68,7 @@ from __future__ import absolute_import # Import salt libs import salt.loader -import salt.ext.six as six +from salt.ext import six from salt.utils.event import get_event, tagify from salt.exceptions import SaltInvocationError diff --git a/salt/runners/saltutil.py b/salt/runners/saltutil.py index 555de1b16b..45b94454de 100644 --- a/salt/runners/saltutil.py +++ b/salt/runners/saltutil.py @@ -38,6 +38,7 @@ def sync_all(saltenv='base', extmod_whitelist=None, extmod_blacklist=None): ''' log.debug('Syncing all') ret = {} + ret['clouds'] = sync_clouds(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist) ret['modules'] = sync_modules(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist) ret['states'] = sync_states(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist) ret['grains'] = sync_grains(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist) diff --git a/salt/runners/smartos_vmadm.py b/salt/runners/smartos_vmadm.py index 9faeca5f58..15f8cec6f8 100644 --- a/salt/runners/smartos_vmadm.py +++ b/salt/runners/smartos_vmadm.py @@ -12,7 +12,7 @@ from salt.exceptions import SaltClientError from salt.utils.odict import OrderedDict # Import 3rd party libs -import salt.ext.six as six +from salt.ext import six # Function aliases __func_alias__ = { diff --git a/salt/runners/spacewalk.py b/salt/runners/spacewalk.py index 
ff2ef3803f..cb9495934a 100644 --- a/salt/runners/spacewalk.py +++ b/salt/runners/spacewalk.py @@ -36,7 +36,7 @@ import atexit import logging # Import third party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/runners/ssh.py b/salt/runners/ssh.py index e26b4579e3..a35e924555 100644 --- a/salt/runners/ssh.py +++ b/salt/runners/ssh.py @@ -10,6 +10,7 @@ from __future__ import absolute_import # Import Salt Libs import salt.client.ssh.client +import salt.utils.versions def cmd(tgt, @@ -34,7 +35,7 @@ def cmd(tgt, # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' diff --git a/salt/runners/survey.py b/salt/runners/survey.py index 077e638230..3fd2b1014f 100644 --- a/salt/runners/survey.py +++ b/salt/runners/survey.py @@ -20,7 +20,7 @@ import salt.client from salt.exceptions import SaltClientError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range diff --git a/salt/runners/test.py b/salt/runners/test.py index b21bbe3c49..0d56edf17f 100644 --- a/salt/runners/test.py +++ b/salt/runners/test.py @@ -6,7 +6,7 @@ from __future__ import absolute_import from __future__ import print_function # Import python libs import time -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range diff --git a/salt/runners/venafiapi.py b/salt/runners/venafiapi.py index b30ae4d716..887b3e5bd0 100644 --- a/salt/runners/venafiapi.py +++ b/salt/runners/venafiapi.py @@ -38,7 +38,7 @@ import json import salt.syspaths as syspaths import salt.cache import salt.utils.files -import salt.ext.six as six +from salt.ext import six from salt.exceptions import CommandExecutionError __virtualname__ = 'venafi' diff --git a/salt/runners/virt.py b/salt/runners/virt.py index 7212455229..6a9822d5b7 100644 --- a/salt/runners/virt.py +++ b/salt/runners/virt.py @@ -16,7 +16,7 @@ import salt.key from salt.exceptions import SaltClientError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/runners/winrepo.py b/salt/runners/winrepo.py index 62f8b80696..01c209d382 100644 --- a/salt/runners/winrepo.py +++ b/salt/runners/winrepo.py @@ -11,7 +11,7 @@ from __future__ import absolute_import, print_function import os # Import third party libs -import salt.ext.six as six +from salt.ext import six try: import msgpack except ImportError: diff --git a/salt/scripts.py b/salt/scripts.py index 20104bf31c..acef4987f9 100644 --- a/salt/scripts.py +++ b/salt/scripts.py @@ -24,7 +24,7 @@ import salt.defaults.exitcodes # pylint: disable=unused-import log = logging.getLogger(__name__) -def _handle_interrupt(exc, original_exc, hardfail=False, trace=''): +def _handle_interrupt(exc, original_exc, hardfail=False, trace=u''): ''' if hardfailing: If we got the original stacktrace, log it @@ -50,16 +50,16 @@ def _handle_signals(client, signum, sigframe): hardcrash = False if signum == signal.SIGINT: - exit_msg = '\nExiting gracefully on Ctrl-c' + exit_msg = u'\nExiting gracefully on Ctrl-c' try: - jid = client.local_client.pub_data['jid'] + jid = client.local_client.pub_data[u'jid'] exit_msg += ( - '\n' - 'This job\'s jid is: {0}\n' - 'The minions may not have all finished 
running and any remaining ' - 'minions will return upon completion. To look up the return data ' - 'for this job later, run the following command:\n\n' - 'salt-run jobs.lookup_jid {0}'.format(jid) + u'\n' + u'This job\'s jid is: {0}\n' + u'The minions may not have all finished running and any remaining ' + u'minions will return upon completion. To look up the return data ' + u'for this job later, run the following command:\n\n' + u'salt-run jobs.lookup_jid {0}'.format(jid) ) except (AttributeError, KeyError): pass @@ -68,7 +68,7 @@ def _handle_signals(client, signum, sigframe): _handle_interrupt( SystemExit(exit_msg), - Exception('\nExiting with hard crash on Ctrl-c'), + Exception(u'\nExiting with hard crash on Ctrl-c'), hardcrash, trace=trace) @@ -96,12 +96,12 @@ def minion_process(): ''' Start a minion process ''' - import salt.utils + import salt.utils.platform import salt.cli.daemons # salt_minion spawns this function in a new process - salt.utils.appendproctitle('KeepAlive') + salt.utils.appendproctitle(u'KeepAlive') def handle_hup(manager, sig, frame): manager.minion.reload() @@ -117,15 +117,15 @@ def minion_process(): time.sleep(5) try: # check pid alive (Unix only trick!) - if os.getuid() == 0 and not salt.utils.is_windows(): + if os.getuid() == 0 and not salt.utils.platform.is_windows(): os.kill(parent_pid, 0) except OSError as exc: # forcibly exit, regular sys.exit raises an exception-- which # isn't sufficient in a thread - log.error('Minion process encountered exception: {0}'.format(exc)) + log.error(u'Minion process encountered exception: %s', exc) os._exit(salt.defaults.exitcodes.EX_GENERIC) - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),)) thread.start() @@ -137,13 +137,13 @@ def minion_process(): try: minion.start() except (SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc: - log.warning('Fatal functionality error caught by minion handler:\n', exc_info=True) - log.warning('** Restarting minion **') + log.warning(u'Fatal functionality error caught by minion handler:\n', exc_info=True) + log.warning(u'** Restarting minion **') delay = 60 - if minion is not None and hasattr(minion, 'config'): - delay = minion.config.get('random_reauth_delay', 60) + if minion is not None and hasattr(minion, u'config'): + delay = minion.config.get(u'random_reauth_delay', 60) delay = randint(1, delay) - log.info('waiting random_reauth_delay {0}s'.format(delay)) + log.info(u'waiting random_reauth_delay %ss', delay) time.sleep(delay) sys.exit(salt.defaults.exitcodes.SALT_KEEPALIVE) @@ -155,21 +155,22 @@ def salt_minion(): ''' import signal + import salt.utils.platform import salt.utils.process salt.utils.process.notify_systemd() import salt.cli.daemons import multiprocessing - if '' in sys.path: - sys.path.remove('') + if u'' in sys.path: + sys.path.remove(u'') - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): minion = salt.cli.daemons.Minion() minion.start() return - if '--disable-keepalive' in sys.argv: - sys.argv.remove('--disable-keepalive') + if u'--disable-keepalive' in sys.argv: + sys.argv.remove(u'--disable-keepalive') minion = salt.cli.daemons.Minion() minion.start() return @@ -230,6 +231,7 @@ def proxy_minion_process(queue): Start a proxy minion process ''' import salt.cli.daemons + import salt.utils.platform # salt_minion spawns this function in a new process def suicide_when_without_parent(parent_pid): @@ -249,7 +251,7 @@ def 
proxy_minion_process(queue): # isn't sufficient in a thread os._exit(999) - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),)) thread.start() @@ -260,7 +262,7 @@ def proxy_minion_process(queue): proxyminion = salt.cli.daemons.ProxyMinion() proxyminion.start() except (Exception, SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc: - log.error('Proxy Minion failed to start: ', exc_info=True) + log.error(u'Proxy Minion failed to start: ', exc_info=True) restart = True # status is superfluous since the process will be restarted status = salt.defaults.exitcodes.SALT_KEEPALIVE @@ -269,13 +271,13 @@ def proxy_minion_process(queue): status = exc.code if restart is True: - log.warning('** Restarting proxy minion **') + log.warning(u'** Restarting proxy minion **') delay = 60 if proxyminion is not None: - if hasattr(proxyminion, 'config'): - delay = proxyminion.config.get('random_reauth_delay', 60) + if hasattr(proxyminion, u'config'): + delay = proxyminion.config.get(u'random_reauth_delay', 60) random_delay = randint(1, delay) - log.info('Sleeping random_reauth_delay of {0} seconds'.format(random_delay)) + log.info(u'Sleeping random_reauth_delay of %s seconds', random_delay) # preform delay after minion resources have been cleaned queue.put(random_delay) else: @@ -288,17 +290,18 @@ def salt_proxy(): Start a proxy minion. ''' import salt.cli.daemons + import salt.utils.platform import multiprocessing - if '' in sys.path: - sys.path.remove('') + if u'' in sys.path: + sys.path.remove(u'') - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): proxyminion = salt.cli.daemons.ProxyMinion() proxyminion.start() return - if '--disable-keepalive' in sys.argv: - sys.argv.remove('--disable-keepalive') + if u'--disable-keepalive' in sys.argv: + sys.argv.remove(u'--disable-keepalive') proxyminion = salt.cli.daemons.ProxyMinion() proxyminion.start() return @@ -364,7 +367,7 @@ def salt_key(): _install_signal_handlers(client) client.run() except Exception as err: - sys.stderr.write("Error: {0}\n".format(err)) + sys.stderr.write(u"Error: {0}\n".format(err)) def salt_cp(): @@ -384,8 +387,8 @@ def salt_call(): salt minion to run. ''' import salt.cli.call - if '' in sys.path: - sys.path.remove('') + if u'' in sys.path: + sys.path.remove(u'') client = salt.cli.call.SaltCall() _install_signal_handlers(client) client.run() @@ -396,8 +399,8 @@ def salt_run(): Execute a salt convenience routine. ''' import salt.cli.run - if '' in sys.path: - sys.path.remove('') + if u'' in sys.path: + sys.path.remove(u'') client = salt.cli.run.SaltRun() _install_signal_handlers(client) client.run() @@ -408,8 +411,8 @@ def salt_ssh(): Execute the salt-ssh system ''' import salt.cli.ssh - if '' in sys.path: - sys.path.remove('') + if u'' in sys.path: + sys.path.remove(u'') try: client = salt.cli.ssh.SaltSSH() _install_signal_handlers(client) @@ -440,11 +443,11 @@ def salt_cloud(): import salt.cloud.cli except ImportError as e: # No salt cloud on Windows - log.error("Error importing salt cloud {0}".format(e)) - print('salt-cloud is not available in this system') + log.error(u'Error importing salt cloud: %s', e) + print(u'salt-cloud is not available in this system') sys.exit(salt.defaults.exitcodes.EX_UNAVAILABLE) - if '' in sys.path: - sys.path.remove('') + if u'' in sys.path: + sys.path.remove(u'') client = salt.cloud.cli.SaltCloud() _install_signal_handlers(client) @@ -469,8 +472,8 @@ def salt_main(): master. 
''' import salt.cli.salt - if '' in sys.path: - sys.path.remove('') + if u'' in sys.path: + sys.path.remove(u'') client = salt.cli.salt.SaltCMD() _install_signal_handlers(client) client.run() diff --git a/salt/sdb/sqlite3.py b/salt/sdb/sqlite3.py index a5a094746f..e0b85c65e8 100644 --- a/salt/sdb/sqlite3.py +++ b/salt/sdb/sqlite3.py @@ -54,7 +54,7 @@ except ImportError: HAS_SQLITE3 = False # Import salt libs -import salt.ext.six as six +from salt.ext import six # Import third party libs import msgpack diff --git a/salt/serializers/configparser.py b/salt/serializers/configparser.py index 3df65e93f1..787ce88f3a 100644 --- a/salt/serializers/configparser.py +++ b/salt/serializers/configparser.py @@ -12,7 +12,7 @@ from __future__ import absolute_import # Import Salt Libs -import salt.ext.six as six +from salt.ext import six import salt.ext.six.moves.configparser as configparser # pylint: disable=E0611 from salt.serializers import DeserializationError, SerializationError diff --git a/salt/serializers/json.py b/salt/serializers/json.py index d0f42efab9..024842a43c 100644 --- a/salt/serializers/json.py +++ b/salt/serializers/json.py @@ -15,9 +15,12 @@ try: except ImportError: import json -from salt.ext.six import string_types +# Import Salt libs from salt.serializers import DeserializationError, SerializationError +# Import 3rd-party libs +from salt.ext import six + __all__ = ['deserialize', 'serialize', 'available'] available = True @@ -32,7 +35,7 @@ def deserialize(stream_or_string, **options): ''' try: - if not isinstance(stream_or_string, (bytes, string_types)): + if not isinstance(stream_or_string, (bytes, six.string_types)): return json.load(stream_or_string, **options) if isinstance(stream_or_string, bytes): diff --git a/salt/serializers/msgpack.py b/salt/serializers/msgpack.py index 553ab622fa..6085f3525a 100644 --- a/salt/serializers/msgpack.py +++ b/salt/serializers/msgpack.py @@ -16,7 +16,7 @@ from salt.log import setup_console_logger from salt.serializers import DeserializationError, SerializationError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/serializers/yaml.py b/salt/serializers/yaml.py index 74004c2208..2fad384d1b 100644 --- a/salt/serializers/yaml.py +++ b/salt/serializers/yaml.py @@ -17,7 +17,7 @@ from yaml.constructor import ConstructorError from yaml.scanner import ScannerError from salt.serializers import DeserializationError, SerializationError -import salt.ext.six as six +from salt.ext import six from salt.utils.odict import OrderedDict __all__ = ['deserialize', 'serialize', 'available'] diff --git a/salt/serializers/yamlex.py b/salt/serializers/yamlex.py index 65fbf58e48..6e1c22b39c 100644 --- a/salt/serializers/yamlex.py +++ b/salt/serializers/yamlex.py @@ -119,7 +119,7 @@ import yaml from yaml.nodes import MappingNode from yaml.constructor import ConstructorError from yaml.scanner import ScannerError -import salt.ext.six as six +from salt.ext import six __all__ = ['deserialize', 'serialize', 'available'] diff --git a/salt/spm/__init__.py b/salt/spm/__init__.py index bf336eaf19..ea19b6ee0e 100644 --- a/salt/spm/__init__.py +++ b/salt/spm/__init__.py @@ -25,7 +25,7 @@ import salt.cache import salt.utils.files import salt.utils.http as http import salt.syspaths as syspaths -import salt.ext.six as six +from salt.ext import six from salt.ext.six import string_types from salt.ext.six.moves import input from salt.ext.six.moves import filter @@ -358,6 +358,7 @@ class SPMClient(object): # 
First we download everything, then we install for package in dl_list: + out_file = dl_list[package]['dest_file'] # Kick off the install self._install_indv_pkg(package, out_file) return diff --git a/salt/spm/pkgfiles/local.py b/salt/spm/pkgfiles/local.py index 54e79e4383..2d7f8ae3d4 100644 --- a/salt/spm/pkgfiles/local.py +++ b/salt/spm/pkgfiles/local.py @@ -13,8 +13,8 @@ import logging # Import Salt libs import salt.syspaths -import salt.utils import salt.utils.files +import salt.utils.stringutils # Get logging started log = logging.getLogger(__name__) @@ -185,7 +185,7 @@ def hash_file(path, hashobj, conn=None): return '' with salt.utils.files.fopen(path, 'r') as f: - hashobj.update(salt.utils.to_bytes(f.read())) + hashobj.update(salt.utils.stringutils.to_bytes(f.read())) return hashobj.hexdigest() diff --git a/salt/state.py b/salt/state.py index 7840f5ae3c..cbdd8078e9 100644 --- a/salt/state.py +++ b/salt/state.py @@ -37,10 +37,11 @@ import salt.utils.crypt import salt.utils.dictupdate import salt.utils.event import salt.utils.files +import salt.utils.immutabletypes as immutabletypes import salt.utils.url +import salt.utils.platform import salt.utils.process import salt.syspaths as syspaths -from salt.utils import immutabletypes from salt.template import compile_template, compile_template_str from salt.exceptions import ( SaltException, @@ -54,7 +55,7 @@ import salt.utils.yamlloader as yamlloader # Import third party libs # pylint: disable=import-error,no-name-in-module,redefined-builtin -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import map, range, reload_module # pylint: enable=import-error,no-name-in-module,redefined-builtin import msgpack @@ -65,57 +66,58 @@ log = logging.getLogger(__name__) # These are keywords passed to state module functions which are to be used # by salt in this state module and not on the actual state module function STATE_REQUISITE_KEYWORDS = frozenset([ - 'onchanges', - 'onfail', - 'onfail_stop', - 'prereq', - 'prerequired', - 'watch', - 'require', - 'listen', + u'onchanges', + u'onfail', + u'onfail_stop', + u'prereq', + u'prerequired', + u'watch', + u'require', + u'listen', ]) STATE_REQUISITE_IN_KEYWORDS = frozenset([ - 'onchanges_in', - 'onfail_in', - 'prereq_in', - 'watch_in', - 'require_in', - 'listen_in', + u'onchanges_in', + u'onfail_in', + u'prereq_in', + u'watch_in', + u'require_in', + u'listen_in', ]) STATE_RUNTIME_KEYWORDS = frozenset([ - 'fun', - 'state', - 'check_cmd', - 'failhard', - 'onlyif', - 'unless', - 'retry', - 'order', - 'parallel', - 'prereq', - 'prereq_in', - 'prerequired', - 'reload_modules', - 'reload_grains', - 'reload_pillar', - 'runas', - 'fire_event', - 'saltenv', - 'use', - 'use_in', - '__env__', - '__sls__', - '__id__', - '__orchestration_jid__', - '__pub_user', - '__pub_arg', - '__pub_jid', - '__pub_fun', - '__pub_tgt', - '__pub_ret', - '__pub_pid', - '__pub_tgt_type', - '__prereq__', + u'fun', + u'state', + u'check_cmd', + u'failhard', + u'onlyif', + u'unless', + u'retry', + u'order', + u'parallel', + u'prereq', + u'prereq_in', + u'prerequired', + u'reload_modules', + u'reload_grains', + u'reload_pillar', + u'runas', + u'runas_password', + u'fire_event', + u'saltenv', + u'use', + u'use_in', + u'__env__', + u'__sls__', + u'__id__', + u'__orchestration_jid__', + u'__pub_user', + u'__pub_arg', + u'__pub_jid', + u'__pub_fun', + u'__pub_tgt', + u'__pub_ret', + u'__pub_pid', + u'__pub_tgt_type', + u'__prereq__', ]) STATE_INTERNAL_KEYWORDS = 
STATE_REQUISITE_KEYWORDS.union(STATE_REQUISITE_IN_KEYWORDS).union(STATE_RUNTIME_KEYWORDS) @@ -132,26 +134,26 @@ def split_low_tag(tag): ''' Take a low tag and split it back into the low dict that it came from ''' - state, id_, name, fun = tag.split('_|-') + state, id_, name, fun = tag.split(u'_|-') - return {'state': state, - '__id__': id_, - 'name': name, - 'fun': fun} + return {u'state': state, + u'__id__': id_, + u'name': name, + u'fun': fun} def _gen_tag(low): ''' Generate the running dict tag string from the low data structure ''' - return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low) + return u'{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low) def _l_tag(name, id_): - low = {'name': 'listen_{0}'.format(name), - '__id__': 'listen_{0}'.format(id_), - 'state': 'Listen_Error', - 'fun': 'Listen_Error'} + low = {u'name': u'listen_{0}'.format(name), + u'__id__': u'listen_{0}'.format(id_), + u'state': u'Listen_Error', + u'fun': u'Listen_Error'} return _gen_tag(low) @@ -160,8 +162,8 @@ def trim_req(req): Trim any function off of a requisite ''' reqfirst = next(iter(req)) - if '.' in reqfirst: - return {reqfirst.split('.')[0]: req[reqfirst]} + if u'.' in reqfirst: + return {reqfirst.split(u'.')[0]: req[reqfirst]} return req @@ -193,9 +195,9 @@ def find_name(name, state, high): if name in high: ext_id.append((name, state)) # if we are requiring an entire SLS, then we need to add ourselves to everything in that SLS - elif state == 'sls': + elif state == u'sls': for nid, item in six.iteritems(high): - if item['__sls__'] == name: + if item[u'__sls__'] == name: ext_id.append((nid, next(iter(item)))) # otherwise we are requiring a single state, lets find it else: @@ -219,9 +221,9 @@ def find_sls_ids(sls, high): ''' ret = [] for nid, item in six.iteritems(high): - if item['__sls__'] == sls: + if item[u'__sls__'] == sls: for st_ in item: - if st_.startswith('__'): + if st_.startswith(u'__'): continue ret.append((nid, st_)) return ret @@ -231,38 +233,38 @@ def format_log(ret): ''' Format the state into a log message ''' - msg = '' + msg = u'' if isinstance(ret, dict): # Looks like the ret may be a valid state return - if 'changes' in ret: + if u'changes' in ret: # Yep, looks like a valid state return - chg = ret['changes'] + chg = ret[u'changes'] if not chg: - if ret['comment']: - msg = ret['comment'] + if ret[u'comment']: + msg = ret[u'comment'] else: - msg = 'No changes made for {0[name]}'.format(ret) + msg = u'No changes made for {0[name]}'.format(ret) elif isinstance(chg, dict): - if 'diff' in chg: - if isinstance(chg['diff'], six.string_types): - msg = 'File changed:\n{0}'.format(chg['diff']) + if u'diff' in chg: + if isinstance(chg[u'diff'], six.string_types): + msg = u'File changed:\n{0}'.format(chg[u'diff']) if all([isinstance(x, dict) for x in six.itervalues(chg)]): - if all([('old' in x and 'new' in x) + if all([(u'old' in x and u'new' in x) for x in six.itervalues(chg)]): - msg = 'Made the following changes:\n' + msg = u'Made the following changes:\n' for pkg in chg: - old = chg[pkg]['old'] + old = chg[pkg][u'old'] if not old and old not in (False, None): - old = 'absent' - new = chg[pkg]['new'] + old = u'absent' + new = chg[pkg][u'new'] if not new and new not in (False, None): - new = 'absent' + new = u'absent' # This must be able to handle unicode as some package names contain # non-ascii characters like "Français" or "Español". See Issue #33605. 
msg += u'\'{0}\' changed from \'{1}\' to \'{2}\'\n'.format(pkg, old, new) if not msg: - msg = str(ret['changes']) - if ret['result'] is True or ret['result'] is None: + msg = str(ret[u'changes']) + if ret[u'result'] is True or ret[u'result'] is None: log.info(msg) else: log.error(msg) @@ -294,14 +296,14 @@ def mock_ret(cdata): ''' # As this is expanded it should be sent into the execution module # layer or it should be turned into a standalone loader system - if cdata['args']: - name = cdata['args'][0] + if cdata[u'args']: + name = cdata[u'args'][0] else: - name = cdata['kwargs']['name'] - return {'name': name, - 'comment': 'Not called, mocked', - 'changes': {}, - 'result': True} + name = cdata[u'kwargs'][u'name'] + return {u'name': name, + u'comment': u'Not called, mocked', + u'changes': {}, + u'result': True} class StateError(Exception): @@ -325,9 +327,9 @@ class Compiler(object): ''' high = compile_template(template, self.rend, - self.opts['renderer'], - self.opts['renderer_blacklist'], - self.opts['renderer_whitelist'], + self.opts[u'renderer'], + self.opts[u'renderer_blacklist'], + self.opts[u'renderer_whitelist'], **kwargs) if not high: return high @@ -341,11 +343,11 @@ class Compiler(object): if not isinstance(high[name], dict): if isinstance(high[name], six.string_types): # Is this is a short state? It needs to be padded! - if '.' in high[name]: - comps = high[name].split('.') + if u'.' in high[name]: + comps = high[name].split(u'.') if len(comps) >= 2: # Merge the comps - comps[1] = '.'.join(comps[1:len(comps)]) + comps[1] = u'.'.join(comps[1:len(comps)]) high[name] = { # '__sls__': template, # '__env__': None, @@ -355,15 +357,15 @@ class Compiler(object): continue skeys = set() for key in sorted(high[name]): - if key.startswith('_'): + if key.startswith(u'_'): continue if not isinstance(high[name][key], list): continue - if '.' in key: - comps = key.split('.') + if u'.' in key: + comps = key.split(u'.') if len(comps) >= 2: # Merge the comps - comps[1] = '.'.join(comps[1:len(comps)]) + comps[1] = u'.'.join(comps[1:len(comps)]) # Salt doesn't support state files such as: # # /etc/redis/redis.conf: @@ -388,49 +390,49 @@ class Compiler(object): ''' errors = [] if not isinstance(high, dict): - errors.append('High data is not a dictionary and is invalid') + errors.append(u'High data is not a dictionary and is invalid') reqs = OrderedDict() for name, body in six.iteritems(high): - if name.startswith('__'): + if name.startswith(u'__'): continue if not isinstance(name, six.string_types): errors.append( - 'ID \'{0}\' in SLS \'{1}\' is not formed as a string, but ' - 'is a {2}'.format( + u'ID \'{0}\' in SLS \'{1}\' is not formed as a string, but ' + u'is a {2}'.format( name, - body['__sls__'], + body[u'__sls__'], type(name).__name__ ) ) if not isinstance(body, dict): - err = ('The type {0} in {1} is not formatted as a dictionary' + err = (u'The type {0} in {1} is not formatted as a dictionary' .format(name, body)) errors.append(err) continue for state in body: - if state.startswith('__'): + if state.startswith(u'__'): continue if not isinstance(body[state], list): errors.append( - 'State \'{0}\' in SLS \'{1}\' is not formed as a list' - .format(name, body['__sls__']) + u'State \'{0}\' in SLS \'{1}\' is not formed as a list' + .format(name, body[u'__sls__']) ) else: fun = 0 - if '.' in state: + if u'.' 
in state: fun += 1 for arg in body[state]: if isinstance(arg, six.string_types): fun += 1 - if ' ' in arg.strip(): - errors.append(('The function "{0}" in state ' - '"{1}" in SLS "{2}" has ' - 'whitespace, a function with whitespace is ' - 'not supported, perhaps this is an argument ' - 'that is missing a ":"').format( + if u' ' in arg.strip(): + errors.append((u'The function "{0}" in state ' + u'"{1}" in SLS "{2}" has ' + u'whitespace, a function with whitespace is ' + u'not supported, perhaps this is an argument ' + u'that is missing a ":"').format( arg, name, - body['__sls__'])) + body[u'__sls__'])) elif isinstance(arg, dict): # The arg is a dict, if the arg is require or # watch, it must be a list. @@ -438,52 +440,52 @@ class Compiler(object): # Add the requires to the reqs dict and check them # all for recursive requisites. argfirst = next(iter(arg)) - if argfirst in ('require', 'watch', 'prereq', 'onchanges'): + if argfirst in (u'require', u'watch', u'prereq', u'onchanges'): if not isinstance(arg[argfirst], list): - errors.append(('The {0}' - ' statement in state \'{1}\' in SLS \'{2}\' ' - 'needs to be formed as a list').format( + errors.append((u'The {0}' + u' statement in state \'{1}\' in SLS \'{2}\' ' + u'needs to be formed as a list').format( argfirst, name, - body['__sls__'] + body[u'__sls__'] )) # It is a list, verify that the members of the # list are all single key dicts. else: - reqs[name] = {'state': state} + reqs[name] = {u'state': state} for req in arg[argfirst]: if isinstance(req, six.string_types): - req = {'id': req} + req = {u'id': req} if not isinstance(req, dict): - err = ('Requisite declaration {0}' - ' in SLS {1} is not formed as a' - ' single key dictionary').format( + err = (u'Requisite declaration {0}' + u' in SLS {1} is not formed as a' + u' single key dictionary').format( req, - body['__sls__']) + body[u'__sls__']) errors.append(err) continue req_key = next(iter(req)) req_val = req[req_key] - if '.' in req_key: + if u'.' in req_key: errors.append(( - 'Invalid requisite type \'{0}\' ' - 'in state \'{1}\', in SLS ' - '\'{2}\'. Requisite types must ' - 'not contain dots, did you ' - 'mean \'{3}\'?'.format( + u'Invalid requisite type \'{0}\' ' + u'in state \'{1}\', in SLS ' + u'\'{2}\'. 
Requisite types must ' + u'not contain dots, did you ' + u'mean \'{3}\'?'.format( req_key, name, - body['__sls__'], - req_key[:req_key.find('.')] + body[u'__sls__'], + req_key[:req_key.find(u'.')] ) )) if not ishashable(req_val): errors.append(( - 'Illegal requisite "{0}", ' - 'is SLS {1}\n' + u'Illegal requisite "{0}", ' + u'is SLS {1}\n' ).format( str(req_val), - body['__sls__'])) + body[u'__sls__'])) continue # Check for global recursive requisites @@ -494,12 +496,12 @@ class Compiler(object): if req_val in reqs: if name in reqs[req_val]: if reqs[req_val][name] == state: - if reqs[req_val]['state'] == reqs[name][req_val]: - err = ('A recursive ' - 'requisite was found, SLS ' - '"{0}" ID "{1}" ID "{2}"' + if reqs[req_val][u'state'] == reqs[name][req_val]: + err = (u'A recursive ' + u'requisite was found, SLS ' + u'"{0}" ID "{1}" ID "{2}"' ).format( - body['__sls__'], + body[u'__sls__'], name, req_val ) @@ -507,20 +509,20 @@ class Compiler(object): # Make sure that there is only one key in the # dict if len(list(arg)) != 1: - errors.append(('Multiple dictionaries ' - 'defined in argument of state \'{0}\' in SLS' - ' \'{1}\'').format( + errors.append((u'Multiple dictionaries ' + u'defined in argument of state \'{0}\' in SLS' + u' \'{1}\'').format( name, - body['__sls__'])) + body[u'__sls__'])) if not fun: - if state == 'require' or state == 'watch': + if state == u'require' or state == u'watch': continue - errors.append(('No function declared in state \'{0}\' in' - ' SLS \'{1}\'').format(state, body['__sls__'])) + errors.append((u'No function declared in state \'{0}\' in' + u' SLS \'{1}\'').format(state, body[u'__sls__'])) elif fun > 1: errors.append( - 'Too many functions declared in state \'{0}\' in ' - 'SLS \'{1}\''.format(state, body['__sls__']) + u'Too many functions declared in state \'{0}\' in ' + u'SLS \'{1}\''.format(state, body[u'__sls__']) ) return errors @@ -531,31 +533,31 @@ class Compiler(object): ''' cap = 1 for chunk in chunks: - if 'order' in chunk: - if not isinstance(chunk['order'], int): + if u'order' in chunk: + if not isinstance(chunk[u'order'], int): continue - chunk_order = chunk['order'] + chunk_order = chunk[u'order'] if chunk_order > cap - 1 and chunk_order > 0: cap = chunk_order + 100 for chunk in chunks: - if 'order' not in chunk: - chunk['order'] = cap + if u'order' not in chunk: + chunk[u'order'] = cap continue - if not isinstance(chunk['order'], (int, float)): - if chunk['order'] == 'last': - chunk['order'] = cap + 1000000 - elif chunk['order'] == 'first': - chunk['order'] = 0 + if not isinstance(chunk[u'order'], (int, float)): + if chunk[u'order'] == u'last': + chunk[u'order'] = cap + 1000000 + elif chunk[u'order'] == u'first': + chunk[u'order'] = 0 else: - chunk['order'] = cap - if 'name_order' in chunk: - chunk['order'] = chunk['order'] + chunk.pop('name_order') / 10000.0 - if chunk['order'] < 0: - chunk['order'] = cap + 1000000 + chunk['order'] - chunk['name'] = sdecode(chunk['name']) - chunks.sort(key=lambda chunk: (chunk['order'], '{0[state]}{0[name]}{0[fun]}'.format(chunk))) + chunk[u'order'] = cap + if u'name_order' in chunk: + chunk[u'order'] = chunk[u'order'] + chunk.pop(u'name_order') / 10000.0 + if chunk[u'order'] < 0: + chunk[u'order'] = cap + 1000000 + chunk[u'order'] + chunk[u'name'] = sdecode(chunk[u'name']) + chunks.sort(key=lambda chunk: (chunk[u'order'], u'{0[state]}{0[name]}{0[fun]}'.format(chunk))) return chunks def compile_high_data(self, high): @@ -565,27 +567,27 @@ class Compiler(object): ''' chunks = [] for name, body in 
six.iteritems(high): - if name.startswith('__'): + if name.startswith(u'__'): continue for state, run in six.iteritems(body): funcs = set() names = [] - if state.startswith('__'): + if state.startswith(u'__'): continue - chunk = {'state': state, - 'name': name} - if '__sls__' in body: - chunk['__sls__'] = body['__sls__'] - if '__env__' in body: - chunk['__env__'] = body['__env__'] - chunk['__id__'] = name + chunk = {u'state': state, + u'name': name} + if u'__sls__' in body: + chunk[u'__sls__'] = body[u'__sls__'] + if u'__env__' in body: + chunk[u'__env__'] = body[u'__env__'] + chunk[u'__id__'] = name for arg in run: if isinstance(arg, six.string_types): funcs.add(arg) continue if isinstance(arg, dict): for key, val in six.iteritems(arg): - if key == 'names': + if key == u'names': for _name in val: if _name not in names: names.append(_name) @@ -598,19 +600,19 @@ class Compiler(object): live = copy.deepcopy(chunk) if isinstance(entry, dict): low_name = next(six.iterkeys(entry)) - live['name'] = low_name + live[u'name'] = low_name list(map(live.update, entry[low_name])) else: - live['name'] = entry - live['name_order'] = name_order + live[u'name'] = entry + live[u'name_order'] = name_order name_order = name_order + 1 for fun in funcs: - live['fun'] = fun + live[u'fun'] = fun chunks.append(live) else: live = copy.deepcopy(chunk) for fun in funcs: - live['fun'] = fun + live[u'fun'] = fun chunks.append(live) chunks = self.order_chunks(chunks) return chunks @@ -620,13 +622,13 @@ class Compiler(object): Read in the __exclude__ list and remove all excluded objects from the high data ''' - if '__exclude__' not in high: + if u'__exclude__' not in high: return high ex_sls = set() ex_id = set() - exclude = high.pop('__exclude__') + exclude = high.pop(u'__exclude__') for exc in exclude: - if isinstance(exc, str): + if isinstance(exc, six.string_types): # The exclude statement is a string, assume it is an sls ex_sls.add(exc) if isinstance(exc, dict): @@ -634,17 +636,17 @@ class Compiler(object): if len(exc) != 1: continue key = next(six.iterkeys(exc)) - if key == 'sls': - ex_sls.add(exc['sls']) - elif key == 'id': - ex_id.add(exc['id']) + if key == u'sls': + ex_sls.add(exc[u'sls']) + elif key == u'id': + ex_id.add(exc[u'id']) # Now the excludes have been simplified, use them if ex_sls: # There are sls excludes, find the associtaed ids for name, body in six.iteritems(high): - if name.startswith('__'): + if name.startswith(u'__'): continue - if body.get('__sls__', '') in ex_sls: + if body.get(u'__sls__', u'') in ex_sls: ex_id.add(name) for id_ in ex_id: if id_ in high: @@ -665,11 +667,11 @@ class State(object): proxy=None, context=None, mocked=False, - loader='states', + loader=u'states', initial_pillar=None): self.states_loader = loader - if 'grains' not in opts: - opts['grains'] = salt.loader.grains(opts) + if u'grains' not in opts: + opts[u'grains'] = salt.loader.grains(opts) self.opts = opts self.proxy = proxy self._pillar_override = pillar_override @@ -680,16 +682,16 @@ class State(object): pillar_enc = str(pillar_enc).lower() self._pillar_enc = pillar_enc if initial_pillar is not None: - self.opts['pillar'] = initial_pillar + self.opts[u'pillar'] = initial_pillar if self._pillar_override: - self.opts['pillar'] = salt.utils.dictupdate.merge( - self.opts['pillar'], + self.opts[u'pillar'] = salt.utils.dictupdate.merge( + self.opts[u'pillar'], self._pillar_override, - self.opts.get('pillar_source_merging_strategy', 'smart'), - self.opts.get('renderer', 'yaml'), - self.opts.get('pillar_merge_lists', 
False)) + self.opts.get(u'pillar_source_merging_strategy', u'smart'), + self.opts.get(u'renderer', u'yaml'), + self.opts.get(u'pillar_merge_lists', False)) else: - self.opts['pillar'] = self._gather_pillar() + self.opts[u'pillar'] = self._gather_pillar() self.state_con = context or {} self.load_modules() self.active = set() @@ -712,11 +714,11 @@ class State(object): self._pillar_override, self._pillar_enc, translate_newlines=True, - renderers=getattr(self, 'rend', None), + renderers=getattr(self, u'rend', None), opts=self.opts, - valid_rend=self.opts['decrypt_pillar_renderers']) + valid_rend=self.opts[u'decrypt_pillar_renderers']) except Exception as exc: - log.error('Failed to decrypt pillar override: %s', exc) + log.error(u'Failed to decrypt pillar override: %s', exc) if isinstance(self._pillar_override, six.string_types): # This can happen if an entire pillar dictionary was passed as @@ -728,20 +730,20 @@ class State(object): self._pillar_override, Loader=yamlloader.SaltYamlSafeLoader) except Exception as exc: - log.error('Failed to load CLI pillar override') + log.error(u'Failed to load CLI pillar override') log.exception(exc) if not isinstance(self._pillar_override, dict): - log.error('Pillar override was not passed as a dictionary') + log.error(u'Pillar override was not passed as a dictionary') self._pillar_override = None pillar = salt.pillar.get_pillar( self.opts, - self.opts['grains'], - self.opts['id'], - self.opts['environment'], + self.opts[u'grains'], + self.opts[u'id'], + self.opts[u'environment'], pillar_override=self._pillar_override, - pillarenv=self.opts.get('pillarenv')) + pillarenv=self.opts.get(u'pillarenv')) return pillar.compile_pillar() def _mod_init(self, low): @@ -752,86 +754,86 @@ class State(object): ''' # ensure that the module is loaded try: - self.states['{0}.{1}'.format(low['state'], low['fun'])] # pylint: disable=W0106 + self.states[u'{0}.{1}'.format(low[u'state'], low[u'fun'])] # pylint: disable=W0106 except KeyError: return - minit = '{0}.mod_init'.format(low['state']) - if low['state'] not in self.mod_init: + minit = u'{0}.mod_init'.format(low[u'state']) + if low[u'state'] not in self.mod_init: if minit in self.states._dict: mret = self.states[minit](low) if not mret: return - self.mod_init.add(low['state']) + self.mod_init.add(low[u'state']) def _mod_aggregate(self, low, running, chunks): ''' Execute the aggregation systems to runtime modify the low chunk ''' - agg_opt = self.functions['config.option']('state_aggregate') - if 'aggregate' in low: - agg_opt = low['aggregate'] + agg_opt = self.functions[u'config.option'](u'state_aggregate') + if u'aggregate' in low: + agg_opt = low[u'aggregate'] if agg_opt is True: - agg_opt = [low['state']] + agg_opt = [low[u'state']] elif not isinstance(agg_opt, list): return low - if low['state'] in agg_opt and not low.get('__agg__'): - agg_fun = '{0}.mod_aggregate'.format(low['state']) + if low[u'state'] in agg_opt and not low.get(u'__agg__'): + agg_fun = u'{0}.mod_aggregate'.format(low[u'state']) if agg_fun in self.states: try: low = self.states[agg_fun](low, chunks, running) - low['__agg__'] = True + low[u'__agg__'] = True except TypeError: - log.error('Failed to execute aggregate for state {0}'.format(low['state'])) + log.error(u'Failed to execute aggregate for state %s', low[u'state']) return low def _run_check(self, low_data): ''' Check that unless doesn't return 0, and that onlyif returns a 0. 
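As a rough illustration (a hypothetical helper, not part of the patch), the retcode handling that follows can be summarised as: every ``onlyif`` command must exit 0 for the state to run, while any ``unless`` command exiting 0 causes the state to be skipped.

.. code-block:: python

    # illustrative sketch only; mirrors the cmd.retcode checks in _run_check
    def should_run(onlyif_rc=None, unless_rc=None):
        if onlyif_rc is not None and onlyif_rc != 0:
            return False  # onlyif failed, skip this state
        if unless_rc is not None and unless_rc == 0:
            return False  # unless succeeded, skip this state
        return True       # otherwise run the state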
''' - ret = {'result': False} + ret = {u'result': False} cmd_opts = {} - if 'shell' in self.opts['grains']: - cmd_opts['shell'] = self.opts['grains'].get('shell') - if 'onlyif' in low_data: - if not isinstance(low_data['onlyif'], list): - low_data_onlyif = [low_data['onlyif']] + if u'shell' in self.opts[u'grains']: + cmd_opts[u'shell'] = self.opts[u'grains'].get(u'shell') + if u'onlyif' in low_data: + if not isinstance(low_data[u'onlyif'], list): + low_data_onlyif = [low_data[u'onlyif']] else: - low_data_onlyif = low_data['onlyif'] + low_data_onlyif = low_data[u'onlyif'] for entry in low_data_onlyif: if not isinstance(entry, six.string_types): - ret.update({'comment': 'onlyif execution failed, bad type passed', 'result': False}) + ret.update({u'comment': u'onlyif execution failed, bad type passed', u'result': False}) return ret - cmd = self.functions['cmd.retcode']( + cmd = self.functions[u'cmd.retcode']( entry, ignore_retcode=True, python_shell=True, **cmd_opts) - log.debug('Last command return code: {0}'.format(cmd)) - if cmd != 0 and ret['result'] is False: - ret.update({'comment': 'onlyif execution failed', - 'skip_watch': True, - 'result': True}) + log.debug(u'Last command return code: %s', cmd) + if cmd != 0 and ret[u'result'] is False: + ret.update({u'comment': u'onlyif execution failed', + u'skip_watch': True, + u'result': True}) return ret elif cmd == 0: - ret.update({'comment': 'onlyif execution succeeded', 'result': False}) + ret.update({u'comment': u'onlyif execution succeeded', u'result': False}) return ret - if 'unless' in low_data: - if not isinstance(low_data['unless'], list): - low_data_unless = [low_data['unless']] + if u'unless' in low_data: + if not isinstance(low_data[u'unless'], list): + low_data_unless = [low_data[u'unless']] else: - low_data_unless = low_data['unless'] + low_data_unless = low_data[u'unless'] for entry in low_data_unless: if not isinstance(entry, six.string_types): - ret.update({'comment': 'unless execution failed, bad type passed', 'result': False}) + ret.update({u'comment': u'unless execution failed, bad type passed', u'result': False}) return ret - cmd = self.functions['cmd.retcode']( + cmd = self.functions[u'cmd.retcode']( entry, ignore_retcode=True, python_shell=True, **cmd_opts) - log.debug('Last command return code: {0}'.format(cmd)) - if cmd == 0 and ret['result'] is False: - ret.update({'comment': 'unless execution succeeded', - 'skip_watch': True, - 'result': True}) + log.debug(u'Last command return code: %s', cmd) + if cmd == 0 and ret[u'result'] is False: + ret.update({u'comment': u'unless execution succeeded', + u'skip_watch': True, + u'result': True}) elif cmd != 0: - ret.update({'comment': 'unless execution failed', 'result': False}) + ret.update({u'comment': u'unless execution failed', u'result': False}) return ret # No reason to stop, return ret @@ -841,18 +843,18 @@ class State(object): ''' Alter the way a successful state run is determined ''' - ret = {'result': False} + ret = {u'result': False} cmd_opts = {} - if 'shell' in self.opts['grains']: - cmd_opts['shell'] = self.opts['grains'].get('shell') - for entry in low_data['check_cmd']: - cmd = self.functions['cmd.retcode']( + if u'shell' in self.opts[u'grains']: + cmd_opts[u'shell'] = self.opts[u'grains'].get(u'shell') + for entry in low_data[u'check_cmd']: + cmd = self.functions[u'cmd.retcode']( entry, ignore_retcode=True, python_shell=True, **cmd_opts) - log.debug('Last command return code: {0}'.format(cmd)) - if cmd == 0 and ret['result'] is False: - ret.update({'comment': 
'check_cmd determined the state succeeded', 'result': True}) + log.debug(u'Last command return code: %s', cmd) + if cmd == 0 and ret[u'result'] is False: + ret.update({u'comment': u'check_cmd determined the state succeeded', u'result': True}) elif cmd != 0: - ret.update({'comment': 'check_cmd determined the state failed', 'result': False}) + ret.update({u'comment': u'check_cmd determined the state failed', u'result': False}) return ret return ret @@ -866,7 +868,7 @@ class State(object): ''' Read the state loader value and loadup the correct states subsystem ''' - if self.states_loader == 'thorium': + if self.states_loader == u'thorium': self.states = salt.loader.thorium(self.opts, self.functions, {}) # TODO: Add runners, proxy? else: self.states = salt.loader.states(self.opts, self.functions, self.utils, @@ -876,17 +878,17 @@ class State(object): ''' Load the modules into the state ''' - log.info('Loading fresh modules for state activity') + log.info(u'Loading fresh modules for state activity') self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, self.state_con, utils=self.utils, proxy=self.proxy) if isinstance(data, dict): - if data.get('provider', False): - if isinstance(data['provider'], str): - providers = [{data['state']: data['provider']}] - elif isinstance(data['provider'], list): - providers = data['provider'] + if data.get(u'provider', False): + if isinstance(data[u'provider'], six.string_types): + providers = [{data[u'state']: data[u'provider']}] + elif isinstance(data[u'provider'], list): + providers = data[u'provider'] else: providers = {} for provider in providers: @@ -896,9 +898,9 @@ class State(object): self.functions) if funcs: for func in funcs: - f_key = '{0}{1}'.format( + f_key = u'{0}{1}'.format( mod, - func[func.rindex('.'):] + func[func.rindex(u'.'):] ) self.functions[f_key] = funcs[func] self.serializers = salt.loader.serializers(self.opts) @@ -910,20 +912,20 @@ class State(object): ''' Refresh all the modules ''' - log.debug('Refreshing modules...') - if self.opts['grains'].get('os') != 'MacOS': + log.debug(u'Refreshing modules...') + if self.opts[u'grains'].get(u'os') != u'MacOS': # In case a package has been installed into the current python # process 'site-packages', the 'site' module needs to be reloaded in # order for the newly installed package to be importable. try: reload_module(site) except RuntimeError: - log.error('Error encountered during module reload. Modules were not reloaded.') + log.error(u'Error encountered during module reload. Modules were not reloaded.') except TypeError: - log.error('Error encountered during module reload. Modules were not reloaded.') + log.error(u'Error encountered during module reload. Modules were not reloaded.') self.load_modules() - if not self.opts.get('local', False) and self.opts.get('multiprocessing', True): - self.functions['saltutil.refresh_modules']() + if not self.opts.get(u'local', False) and self.opts.get(u'multiprocessing', True): + self.functions[u'saltutil.refresh_modules']() def check_refresh(self, data, ret): ''' @@ -934,37 +936,37 @@ class State(object): function is recurse, since that can lay down anything. 
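
Aside on the hunks above: _run_check gates a state on its onlyif/unless commands by calling cmd.retcode, and _run_check_cmd does the same for check_cmd. A rough standalone model of the onlyif/unless contract (illustrative names only, not Salt's API; `retcode` stands in for cmd.retcode):

    def should_run(low, retcode):
        for cmd in low.get('onlyif', []):
            if retcode(cmd) != 0:
                return False               # any onlyif command failing skips the state
        unless = low.get('unless', [])
        if unless and all(retcode(cmd) == 0 for cmd in unless):
            return False                   # every unless command succeeding skips the state
        return True

    # e.g. should_run({'onlyif': ['test -f /etc/hosts']}, retcode=lambda c: 0) returns True
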
''' _reload_modules = False - if data.get('reload_grains', False): - log.debug('Refreshing grains...') - self.opts['grains'] = salt.loader.grains(self.opts) + if data.get(u'reload_grains', False): + log.debug(u'Refreshing grains...') + self.opts[u'grains'] = salt.loader.grains(self.opts) _reload_modules = True - if data.get('reload_pillar', False): - log.debug('Refreshing pillar...') - self.opts['pillar'] = self._gather_pillar() + if data.get(u'reload_pillar', False): + log.debug(u'Refreshing pillar...') + self.opts[u'pillar'] = self._gather_pillar() _reload_modules = True - if not ret['changes']: - if data.get('force_reload_modules', False): + if not ret[u'changes']: + if data.get(u'force_reload_modules', False): self.module_refresh() return - if data.get('reload_modules', False) or _reload_modules: + if data.get(u'reload_modules', False) or _reload_modules: # User explicitly requests a reload self.module_refresh() return - if data['state'] == 'file': - if data['fun'] == 'managed': - if data['name'].endswith( - ('.py', '.pyx', '.pyo', '.pyc', '.so')): + if data[u'state'] == u'file': + if data[u'fun'] == u'managed': + if data[u'name'].endswith( + (u'.py', u'.pyx', u'.pyo', u'.pyc', u'.so')): self.module_refresh() - elif data['fun'] == 'recurse': + elif data[u'fun'] == u'recurse': self.module_refresh() - elif data['fun'] == 'symlink': - if 'bin' in data['name']: + elif data[u'fun'] == u'symlink': + if u'bin' in data[u'name']: self.module_refresh() - elif data['state'] in ('pkg', 'ports'): + elif data[u'state'] in (u'pkg', u'ports'): self.module_refresh() def verify_ret(self, ret): @@ -973,54 +975,54 @@ class State(object): ''' if not isinstance(ret, dict): raise SaltException( - 'Malformed state return, return must be a dict' + u'Malformed state return, return must be a dict' ) bad = [] - for val in ['name', 'result', 'changes', 'comment']: + for val in [u'name', u'result', u'changes', u'comment']: if val not in ret: bad.append(val) if bad: raise SaltException( - 'The following keys were not present in the state ' - 'return: {0}'.format(','.join(bad))) + u'The following keys were not present in the state ' + u'return: {0}'.format(u','.join(bad))) def verify_data(self, data): ''' Verify the data, return an error statement if something is wrong ''' errors = [] - if 'state' not in data: - errors.append('Missing "state" data') - if 'fun' not in data: - errors.append('Missing "fun" data') - if 'name' not in data: - errors.append('Missing "name" data') - if data['name'] and not isinstance(data['name'], six.string_types): + if u'state' not in data: + errors.append(u'Missing "state" data') + if u'fun' not in data: + errors.append(u'Missing "fun" data') + if u'name' not in data: + errors.append(u'Missing "name" data') + if data[u'name'] and not isinstance(data[u'name'], six.string_types): errors.append( - 'ID \'{0}\' {1}is not formed as a string, but is a {2}'.format( - data['name'], - 'in SLS \'{0}\' '.format(data['__sls__']) - if '__sls__' in data else '', - type(data['name']).__name__ + u'ID \'{0}\' {1}is not formed as a string, but is a {2}'.format( + data[u'name'], + u'in SLS \'{0}\' '.format(data[u'__sls__']) + if u'__sls__' in data else u'', + type(data[u'name']).__name__ ) ) if errors: return errors - full = data['state'] + '.' + data['fun'] + full = data[u'state'] + u'.' 
+ data[u'fun'] if full not in self.states: - if '__sls__' in data: + if u'__sls__' in data: errors.append( - 'State \'{0}\' was not found in SLS \'{1}\''.format( + u'State \'{0}\' was not found in SLS \'{1}\''.format( full, - data['__sls__'] + data[u'__sls__'] ) ) reason = self.states.missing_fun_string(full) if reason: - errors.append('Reason: {0}'.format(reason)) + errors.append(u'Reason: {0}'.format(reason)) else: errors.append( - 'Specified state \'{0}\' was not found'.format( + u'Specified state \'{0}\' was not found'.format( full ) ) @@ -1036,41 +1038,41 @@ class State(object): for ind in range(arglen - deflen): if aspec.args[ind] not in data: errors.append( - 'Missing parameter {0} for state {1}'.format( + u'Missing parameter {0} for state {1}'.format( aspec.args[ind], full ) ) # If this chunk has a recursive require, then it will cause a # recursive loop when executing, check for it - reqdec = '' - if 'require' in data: - reqdec = 'require' - if 'watch' in data: + reqdec = u'' + if u'require' in data: + reqdec = u'require' + if u'watch' in data: # Check to see if the service has a mod_watch function, if it does # not, then just require # to just require extend the require statement with the contents # of watch so that the mod_watch function is not called and the # requisite capability is still used - if '{0}.mod_watch'.format(data['state']) not in self.states: - if 'require' in data: - data['require'].extend(data.pop('watch')) + if u'{0}.mod_watch'.format(data[u'state']) not in self.states: + if u'require' in data: + data[u'require'].extend(data.pop(u'watch')) else: - data['require'] = data.pop('watch') - reqdec = 'require' + data[u'require'] = data.pop(u'watch') + reqdec = u'require' else: - reqdec = 'watch' + reqdec = u'watch' if reqdec: for req in data[reqdec]: reqfirst = next(iter(req)) - if data['state'] == reqfirst: - if (fnmatch.fnmatch(data['name'], req[reqfirst]) - or fnmatch.fnmatch(data['__id__'], req[reqfirst])): - err = ('Recursive require detected in SLS {0} for' - ' require {1} in ID {2}').format( - data['__sls__'], + if data[u'state'] == reqfirst: + if (fnmatch.fnmatch(data[u'name'], req[reqfirst]) + or fnmatch.fnmatch(data[u'__id__'], req[reqfirst])): + err = (u'Recursive require detected in SLS {0} for' + u' require {1} in ID {2}').format( + data[u'__sls__'], req, - data['__id__']) + data[u'__id__']) errors.append(err) return errors @@ -1080,57 +1082,57 @@ class State(object): ''' errors = [] if not isinstance(high, dict): - errors.append('High data is not a dictionary and is invalid') + errors.append(u'High data is not a dictionary and is invalid') reqs = OrderedDict() for name, body in six.iteritems(high): try: - if name.startswith('__'): + if name.startswith(u'__'): continue except AttributeError: pass if not isinstance(name, six.string_types): errors.append( - 'ID \'{0}\' in SLS \'{1}\' is not formed as a string, but ' - 'is a {2}. It may need to be quoted.'.format( - name, body['__sls__'], type(name).__name__) + u'ID \'{0}\' in SLS \'{1}\' is not formed as a string, but ' + u'is a {2}. 
It may need to be quoted.'.format( + name, body[u'__sls__'], type(name).__name__) ) if not isinstance(body, dict): - err = ('The type {0} in {1} is not formatted as a dictionary' + err = (u'The type {0} in {1} is not formatted as a dictionary' .format(name, body)) errors.append(err) continue for state in body: - if state.startswith('__'): + if state.startswith(u'__'): continue if body[state] is None: errors.append( - 'ID \'{0}\' in SLS \'{1}\' contains a short declaration ' - '({2}) with a trailing colon. When not passing any ' - 'arguments to a state, the colon must be omitted.' - .format(name, body['__sls__'], state) + u'ID \'{0}\' in SLS \'{1}\' contains a short declaration ' + u'({2}) with a trailing colon. When not passing any ' + u'arguments to a state, the colon must be omitted.' + .format(name, body[u'__sls__'], state) ) continue if not isinstance(body[state], list): errors.append( - 'State \'{0}\' in SLS \'{1}\' is not formed as a list' - .format(name, body['__sls__']) + u'State \'{0}\' in SLS \'{1}\' is not formed as a list' + .format(name, body[u'__sls__']) ) else: fun = 0 - if '.' in state: + if u'.' in state: fun += 1 for arg in body[state]: if isinstance(arg, six.string_types): fun += 1 - if ' ' in arg.strip(): - errors.append(('The function "{0}" in state ' - '"{1}" in SLS "{2}" has ' - 'whitespace, a function with whitespace is ' - 'not supported, perhaps this is an argument ' - 'that is missing a ":"').format( + if u' ' in arg.strip(): + errors.append((u'The function "{0}" in state ' + u'"{1}" in SLS "{2}" has ' + u'whitespace, a function with whitespace is ' + u'not supported, perhaps this is an argument ' + u'that is missing a ":"').format( arg, name, - body['__sls__'])) + body[u'__sls__'])) elif isinstance(arg, dict): # The arg is a dict, if the arg is require or # watch, it must be a list. @@ -1138,22 +1140,22 @@ class State(object): # Add the requires to the reqs dict and check them # all for recursive requisites. argfirst = next(iter(arg)) - if argfirst == 'names': + if argfirst == u'names': if not isinstance(arg[argfirst], list): errors.append( - 'The \'names\' argument in state ' - '\'{0}\' in SLS \'{1}\' needs to be ' - 'formed as a list' - .format(name, body['__sls__']) + u'The \'names\' argument in state ' + u'\'{0}\' in SLS \'{1}\' needs to be ' + u'formed as a list' + .format(name, body[u'__sls__']) ) - if argfirst in ('require', 'watch', 'prereq', 'onchanges'): + if argfirst in (u'require', u'watch', u'prereq', u'onchanges'): if not isinstance(arg[argfirst], list): errors.append( - 'The {0} statement in state \'{1}\' in ' - 'SLS \'{2}\' needs to be formed as a ' - 'list'.format(argfirst, + u'The {0} statement in state \'{1}\' in ' + u'SLS \'{2}\' needs to be formed as a ' + u'list'.format(argfirst, name, - body['__sls__']) + body[u'__sls__']) ) # It is a list, verify that the members of the # list are all single key dicts. @@ -1161,34 +1163,34 @@ class State(object): reqs[name] = OrderedDict(state=state) for req in arg[argfirst]: if isinstance(req, six.string_types): - req = {'id': req} + req = {u'id': req} if not isinstance(req, dict): - err = ('Requisite declaration {0}' - ' in SLS {1} is not formed as a' - ' single key dictionary').format( + err = (u'Requisite declaration {0}' + u' in SLS {1} is not formed as a' + u' single key dictionary').format( req, - body['__sls__']) + body[u'__sls__']) errors.append(err) continue req_key = next(iter(req)) req_val = req[req_key] - if '.' in req_key: + if u'.' 
in req_key: errors.append( - 'Invalid requisite type \'{0}\' ' - 'in state \'{1}\', in SLS ' - '\'{2}\'. Requisite types must ' - 'not contain dots, did you ' - 'mean \'{3}\'?'.format( + u'Invalid requisite type \'{0}\' ' + u'in state \'{1}\', in SLS ' + u'\'{2}\'. Requisite types must ' + u'not contain dots, did you ' + u'mean \'{3}\'?'.format( req_key, name, - body['__sls__'], - req_key[:req_key.find('.')] + body[u'__sls__'], + req_key[:req_key.find(u'.')] ) ) if not ishashable(req_val): errors.append(( - 'Illegal requisite "{0}", ' - 'please check your syntax.\n' + u'Illegal requisite "{0}", ' + u'please check your syntax.\n' ).format(req_val)) continue @@ -1200,12 +1202,12 @@ class State(object): if req_val in reqs: if name in reqs[req_val]: if reqs[req_val][name] == state: - if reqs[req_val]['state'] == reqs[name][req_val]: - err = ('A recursive ' - 'requisite was found, SLS ' - '"{0}" ID "{1}" ID "{2}"' + if reqs[req_val][u'state'] == reqs[name][req_val]: + err = (u'A recursive ' + u'requisite was found, SLS ' + u'"{0}" ID "{1}" ID "{2}"' ).format( - body['__sls__'], + body[u'__sls__'], name, req_val ) @@ -1214,21 +1216,21 @@ class State(object): # dict if len(list(arg)) != 1: errors.append( - 'Multiple dictionaries defined in ' - 'argument of state \'{0}\' in SLS \'{1}\'' - .format(name, body['__sls__']) + u'Multiple dictionaries defined in ' + u'argument of state \'{0}\' in SLS \'{1}\'' + .format(name, body[u'__sls__']) ) if not fun: - if state == 'require' or state == 'watch': + if state == u'require' or state == u'watch': continue errors.append( - 'No function declared in state \'{0}\' in SLS \'{1}\'' - .format(state, body['__sls__']) + u'No function declared in state \'{0}\' in SLS \'{1}\'' + .format(state, body[u'__sls__']) ) elif fun > 1: errors.append( - 'Too many functions declared in state \'{0}\' in ' - 'SLS \'{1}\''.format(state, body['__sls__']) + u'Too many functions declared in state \'{0}\' in ' + u'SLS \'{1}\''.format(state, body[u'__sls__']) ) return errors @@ -1248,30 +1250,30 @@ class State(object): ''' cap = 1 for chunk in chunks: - if 'order' in chunk: - if not isinstance(chunk['order'], int): + if u'order' in chunk: + if not isinstance(chunk[u'order'], int): continue - chunk_order = chunk['order'] + chunk_order = chunk[u'order'] if chunk_order > cap - 1 and chunk_order > 0: cap = chunk_order + 100 for chunk in chunks: - if 'order' not in chunk: - chunk['order'] = cap + if u'order' not in chunk: + chunk[u'order'] = cap continue - if not isinstance(chunk['order'], (int, float)): - if chunk['order'] == 'last': - chunk['order'] = cap + 1000000 - elif chunk['order'] == 'first': - chunk['order'] = 0 + if not isinstance(chunk[u'order'], (int, float)): + if chunk[u'order'] == u'last': + chunk[u'order'] = cap + 1000000 + elif chunk[u'order'] == u'first': + chunk[u'order'] = 0 else: - chunk['order'] = cap - if 'name_order' in chunk: - chunk['order'] = chunk['order'] + chunk.pop('name_order') / 10000.0 - if chunk['order'] < 0: - chunk['order'] = cap + 1000000 + chunk['order'] - chunks.sort(key=lambda chunk: (chunk['order'], '{0[state]}{0[name]}{0[fun]}'.format(chunk))) + chunk[u'order'] = cap + if u'name_order' in chunk: + chunk[u'order'] = chunk[u'order'] + chunk.pop(u'name_order') / 10000.0 + if chunk[u'order'] < 0: + chunk[u'order'] = cap + 1000000 + chunk[u'order'] + chunks.sort(key=lambda chunk: (chunk[u'order'], u'{0[state]}{0[name]}{0[fun]}'.format(chunk))) return chunks def compile_high_data(self, high, orchestration_jid=None): @@ -1281,36 +1283,36 @@ class 
State(object): ''' chunks = [] for name, body in six.iteritems(high): - if name.startswith('__'): + if name.startswith(u'__'): continue for state, run in six.iteritems(body): funcs = set() names = [] - if state.startswith('__'): + if state.startswith(u'__'): continue - chunk = {'state': state, - 'name': name} + chunk = {u'state': state, + u'name': name} if orchestration_jid is not None: - chunk['__orchestration_jid__'] = orchestration_jid - if '__sls__' in body: - chunk['__sls__'] = body['__sls__'] - if '__env__' in body: - chunk['__env__'] = body['__env__'] - chunk['__id__'] = name + chunk[u'__orchestration_jid__'] = orchestration_jid + if u'__sls__' in body: + chunk[u'__sls__'] = body[u'__sls__'] + if u'__env__' in body: + chunk[u'__env__'] = body[u'__env__'] + chunk[u'__id__'] = name for arg in run: if isinstance(arg, six.string_types): funcs.add(arg) continue if isinstance(arg, dict): for key, val in six.iteritems(arg): - if key == 'names': + if key == u'names': for _name in val: if _name not in names: names.append(_name) - elif key == 'state': + elif key == u'state': # Don't pass down a state override continue - elif (key == 'name' and + elif (key == u'name' and not isinstance(val, six.string_types)): # Invalid name, fall back to ID chunk[key] = name @@ -1322,19 +1324,19 @@ class State(object): live = copy.deepcopy(chunk) if isinstance(entry, dict): low_name = next(six.iterkeys(entry)) - live['name'] = low_name + live[u'name'] = low_name list(map(live.update, entry[low_name])) else: - live['name'] = entry - live['name_order'] = name_order + live[u'name'] = entry + live[u'name_order'] = name_order name_order += 1 for fun in funcs: - live['fun'] = fun + live[u'fun'] = fun chunks.append(live) else: live = copy.deepcopy(chunk) for fun in funcs: - live['fun'] = fun + live[u'fun'] = fun chunks.append(live) chunks = self.order_chunks(chunks) return chunks @@ -1344,35 +1346,35 @@ class State(object): Pull the extend data and add it to the respective high data ''' errors = [] - if '__extend__' not in high: + if u'__extend__' not in high: return high, errors - ext = high.pop('__extend__') + ext = high.pop(u'__extend__') for ext_chunk in ext: for name, body in six.iteritems(ext_chunk): if name not in high: state_type = next( - x for x in body if not x.startswith('__') + x for x in body if not x.startswith(u'__') ) # Check for a matching 'name' override in high data ids = find_name(name, state_type, high) if len(ids) != 1: errors.append( - 'Cannot extend ID \'{0}\' in \'{1}:{2}\'. It is not ' - 'part of the high state.\n' - 'This is likely due to a missing include statement ' - 'or an incorrectly typed ID.\nEnsure that a ' - 'state with an ID of \'{0}\' is available\nin ' - 'environment \'{1}\' and to SLS \'{2}\''.format( + u'Cannot extend ID \'{0}\' in \'{1}:{2}\'. 
It is not ' + u'part of the high state.\n' + u'This is likely due to a missing include statement ' + u'or an incorrectly typed ID.\nEnsure that a ' + u'state with an ID of \'{0}\' is available\nin ' + u'environment \'{1}\' and to SLS \'{2}\''.format( name, - body.get('__env__', 'base'), - body.get('__sls__', 'base')) + body.get(u'__env__', u'base'), + body.get(u'__sls__', u'base')) ) continue else: name = ids[0][0] for state, run in six.iteritems(body): - if state.startswith('__'): + if state.startswith(u'__'): continue if state not in high[name]: high[name][state] = run @@ -1399,8 +1401,8 @@ class State(object): else: high[name][state][hind] = arg update = True - if (argfirst == 'name' and - next(iter(high[name][state][hind])) == 'names'): + if (argfirst == u'name' and + next(iter(high[name][state][hind])) == u'names'): # If names are overwritten by name use the name high[name][state][hind] = arg if not update: @@ -1412,13 +1414,13 @@ class State(object): Read in the __exclude__ list and remove all excluded objects from the high data ''' - if '__exclude__' not in high: + if u'__exclude__' not in high: return high ex_sls = set() ex_id = set() - exclude = high.pop('__exclude__') + exclude = high.pop(u'__exclude__') for exc in exclude: - if isinstance(exc, str): + if isinstance(exc, six.string_types): # The exclude statement is a string, assume it is an sls ex_sls.add(exc) if isinstance(exc, dict): @@ -1426,17 +1428,17 @@ class State(object): if len(exc) != 1: continue key = next(six.iterkeys(exc)) - if key == 'sls': - ex_sls.add(exc['sls']) - elif key == 'id': - ex_id.add(exc['id']) + if key == u'sls': + ex_sls.add(exc[u'sls']) + elif key == u'id': + ex_id.add(exc[u'id']) # Now the excludes have been simplified, use them if ex_sls: # There are sls excludes, find the associated ids for name, body in six.iteritems(high): - if name.startswith('__'): + if name.startswith(u'__'): continue - sls = body.get('__sls__', '') + sls = body.get(u'__sls__', u'') if not sls: continue for ex_ in ex_sls: @@ -1452,22 +1454,22 @@ class State(object): Extend the data reference with requisite_in arguments ''' req_in = set([ - 'require_in', - 'watch_in', - 'onfail_in', - 'onchanges_in', - 'use', - 'use_in', - 'prereq', - 'prereq_in', + u'require_in', + u'watch_in', + u'onfail_in', + u'onchanges_in', + u'use', + u'use_in', + u'prereq', + u'prereq_in', ]) req_in_all = req_in.union( set([ - 'require', - 'watch', - 'onfail', - 'onfail_stop', - 'onchanges', + u'require', + u'watch', + u'onfail', + u'onfail_stop', + u'onchanges', ])) extend = {} errors = [] @@ -1475,7 +1477,7 @@ class State(object): if not isinstance(body, dict): continue for state, run in six.iteritems(body): - if state.startswith('__'): + if state.startswith(u'__'): continue for arg in run: if isinstance(arg, dict): @@ -1489,7 +1491,7 @@ class State(object): key = next(iter(arg)) if key not in req_in: continue - rkey = key.split('_')[0] + rkey = key.split(u'_')[0] items = arg[key] if isinstance(items, dict): # Formatted as a single req_in @@ -1499,24 +1501,24 @@ class State(object): found = False if name not in extend: extend[name] = OrderedDict() - if '.' in _state: + if u'.' in _state: errors.append( - 'Invalid requisite in {0}: {1} for ' - '{2}, in SLS \'{3}\'. Requisites must ' - 'not contain dots, did you mean \'{4}\'?' + u'Invalid requisite in {0}: {1} for ' + u'{2}, in SLS \'{3}\'. Requisites must ' + u'not contain dots, did you mean \'{4}\'?' 
.format( rkey, _state, name, - body['__sls__'], - _state[:_state.find('.')] + body[u'__sls__'], + _state[:_state.find(u'.')] ) ) - _state = _state.split(".")[0] + _state = _state.split(u'.')[0] if _state not in extend[name]: extend[name][_state] = [] - extend[name]['__env__'] = body['__env__'] - extend[name]['__sls__'] = body['__sls__'] + extend[name][u'__env__'] = body[u'__env__'] + extend[name][u'__sls__'] = body[u'__sls__'] for ind in range(len(extend[name][_state])): if next(iter( extend[name][_state][ind])) == rkey: @@ -1543,37 +1545,37 @@ class State(object): continue pstate = next(iter(ind)) pname = ind[pstate] - if pstate == 'sls': + if pstate == u'sls': # Expand hinges here hinges = find_sls_ids(pname, high) else: hinges.append((pname, pstate)) - if '.' in pstate: + if u'.' in pstate: errors.append( - 'Invalid requisite in {0}: {1} for ' - '{2}, in SLS \'{3}\'. Requisites must ' - 'not contain dots, did you mean \'{4}\'?' + u'Invalid requisite in {0}: {1} for ' + u'{2}, in SLS \'{3}\'. Requisites must ' + u'not contain dots, did you mean \'{4}\'?' .format( rkey, pstate, pname, - body['__sls__'], - pstate[:pstate.find('.')] + body[u'__sls__'], + pstate[:pstate.find(u'.')] ) ) - pstate = pstate.split(".")[0] + pstate = pstate.split(u".")[0] for tup in hinges: name, _state = tup - if key == 'prereq_in': + if key == u'prereq_in': # Add prerequired to origin if id_ not in extend: extend[id_] = OrderedDict() if state not in extend[id_]: extend[id_][state] = [] extend[id_][state].append( - {'prerequired': [{_state: name}]} + {u'prerequired': [{_state: name}]} ) - if key == 'prereq': + if key == u'prereq': # Add prerequired to prereqs ext_ids = find_name(name, _state, high) for ext_id, _req_state in ext_ids: @@ -1582,10 +1584,10 @@ class State(object): if _req_state not in extend[ext_id]: extend[ext_id][_req_state] = [] extend[ext_id][_req_state].append( - {'prerequired': [{state: id_}]} + {u'prerequired': [{state: id_}]} ) continue - if key == 'use_in': + if key == u'use_in': # Add the running states args to the # use_in states ext_ids = find_name(name, _state, high) @@ -1606,13 +1608,13 @@ class State(object): if next(iter(arg)) in ignore_args: continue # Don't use name or names - if next(six.iterkeys(arg)) == 'name': + if next(six.iterkeys(arg)) == u'name': continue - if next(six.iterkeys(arg)) == 'names': + if next(six.iterkeys(arg)) == u'names': continue extend[ext_id][_req_state].append(arg) continue - if key == 'use': + if key == u'use': # Add the use state's args to the # running state ext_ids = find_name(name, _state, high) @@ -1633,9 +1635,9 @@ class State(object): if next(iter(arg)) in ignore_args: continue # Don't use name or names - if next(six.iterkeys(arg)) == 'name': + if next(six.iterkeys(arg)) == u'name': continue - if next(six.iterkeys(arg)) == 'names': + if next(six.iterkeys(arg)) == u'names': continue extend[id_][state].append(arg) continue @@ -1644,8 +1646,8 @@ class State(object): extend[name] = OrderedDict() if _state not in extend[name]: extend[name][_state] = [] - extend[name]['__env__'] = body['__env__'] - extend[name]['__sls__'] = body['__sls__'] + extend[name][u'__env__'] = body[u'__env__'] + extend[name][u'__sls__'] = body[u'__sls__'] for ind in range(len(extend[name][_state])): if next(iter( extend[name][_state][ind])) == rkey: @@ -1660,9 +1662,9 @@ class State(object): extend[name][_state].append( {rkey: [{state: id_}]} ) - high['__extend__'] = [] + high[u'__extend__'] = [] for key, val in six.iteritems(extend): - high['__extend__'].append({key: val}) + 
high[u'__extend__'].append({key: val}) req_in_high, req_in_errors = self.reconcile_extend(high) errors.extend(req_in_errors) return req_in_high, errors @@ -1673,8 +1675,8 @@ class State(object): ''' tag = _gen_tag(low) try: - ret = self.states[cdata['full']](*cdata['args'], - **cdata['kwargs']) + ret = self.states[cdata[u'full']](*cdata[u'args'], + **cdata[u'kwargs']) except Exception: trb = traceback.format_exc() # There are a number of possibilities to not have the cdata @@ -1682,20 +1684,20 @@ class State(object): # enough to not raise another KeyError as the name is easily # guessable and fallback in all cases to present the real # exception to the user - if len(cdata['args']) > 0: - name = cdata['args'][0] - elif 'name' in cdata['kwargs']: - name = cdata['kwargs']['name'] + if len(cdata[u'args']) > 0: + name = cdata[u'args'][0] + elif u'name' in cdata[u'kwargs']: + name = cdata[u'kwargs'][u'name'] else: - name = low.get('name', low.get('__id__')) + name = low.get(u'name', low.get(u'__id__')) ret = { - 'result': False, - 'name': name, - 'changes': {}, - 'comment': 'An exception occurred in this state: {0}'.format( + u'result': False, + u'name': name, + u'changes': {}, + u'comment': u'An exception occurred in this state: {0}'.format( trb) } - troot = os.path.join(self.opts['cachedir'], self.jid) + troot = os.path.join(self.opts[u'cachedir'], self.jid) tfile = os.path.join(troot, tag) if not os.path.isdir(troot): try: @@ -1704,7 +1706,7 @@ class State(object): # Looks like the directory was created between the check # and the attempt, we are safe to pass pass - with salt.utils.files.fopen(tfile, 'wb+') as fp_: + with salt.utils.files.fopen(tfile, u'wb+') as fp_: fp_.write(msgpack.dumps(ret)) def call_parallel(self, cdata, low): @@ -1715,11 +1717,11 @@ class State(object): target=self._call_parallel_target, args=(cdata, low)) proc.start() - ret = {'name': cdata['args'][0], - 'result': None, - 'changes': {}, - 'comment': 'Started in a seperate process', - 'proc': proc} + ret = {u'name': cdata[u'args'][0], + u'result': None, + u'changes': {}, + u'comment': u'Started in a seperate process', + u'proc': proc} return ret def call(self, low, chunks=None, running=None, retries=1): @@ -1729,49 +1731,53 @@ class State(object): ''' utc_start_time = datetime.datetime.utcnow() local_start_time = utc_start_time - (datetime.datetime.utcnow() - datetime.datetime.now()) - log.info('Running state [{0}] at time {1}'.format( - low['name'].strip() if isinstance(low['name'], str) - else low['name'], - local_start_time.time().isoformat()) + log.info(u'Running state [%s] at time %s', + low[u'name'].strip() if isinstance(low[u'name'], six.string_types) + else low[u'name'], + local_start_time.time().isoformat() ) errors = self.verify_data(low) if errors: ret = { - 'result': False, - 'name': low['name'], - 'changes': {}, - 'comment': '', + u'result': False, + u'name': low[u'name'], + u'changes': {}, + u'comment': u'', } for err in errors: - ret['comment'] += '{0}\n'.format(err) - ret['__run_num__'] = self.__run_num + ret[u'comment'] += u'{0}\n'.format(err) + ret[u'__run_num__'] = self.__run_num self.__run_num += 1 format_log(ret) self.check_refresh(low, ret) return ret else: - ret = {'result': False, 'name': low['name'], 'changes': {}} + ret = {u'result': False, u'name': low[u'name'], u'changes': {}} - self.state_con['runas'] = low.get('runas', None) + self.state_con[u'runas'] = low.get(u'runas', None) - if not low.get('__prereq__'): + if low[u'state'] == u'cmd' and u'password' in low: + 
self.state_con[u'runas_password'] = low[u'password'] + else: + self.state_con[u'runas_password'] = low.get(u'runas_password', None) + + if not low.get(u'__prereq__'): log.info( - 'Executing state {0}.{1} for [{2}]'.format( - low['state'], - low['fun'], - low['name'].strip() if isinstance(low['name'], str) - else low['name'] - ) + u'Executing state %s.%s for [%s]', + low[u'state'], + low[u'fun'], + low[u'name'].strip() if isinstance(low[u'name'], six.string_types) + else low[u'name'] ) - if 'provider' in low: + if u'provider' in low: self.load_modules(low) - state_func_name = '{0[state]}.{0[fun]}'.format(low) + state_func_name = u'{0[state]}.{0[fun]}'.format(low) cdata = salt.utils.format_call( self.states[state_func_name], low, - initial_ret={'full': state_func_name}, + initial_ret={u'full': state_func_name}, expected_extra_kws=STATE_INTERNAL_KEYWORDS ) inject_globals = { @@ -1779,18 +1785,18 @@ class State(object): # the current state dictionaries. # We pass deep copies here because we don't want any misbehaving # state module to change these at runtime. - '__low__': immutabletypes.freeze(low), - '__running__': immutabletypes.freeze(running) if running else {}, - '__instance_id__': self.instance_id, - '__lowstate__': immutabletypes.freeze(chunks) if chunks else {} + u'__low__': immutabletypes.freeze(low), + u'__running__': immutabletypes.freeze(running) if running else {}, + u'__instance_id__': self.instance_id, + u'__lowstate__': immutabletypes.freeze(chunks) if chunks else {} } if self.inject_globals: inject_globals.update(self.inject_globals) - if low.get('__prereq__'): - test = sys.modules[self.states[cdata['full']].__module__].__opts__['test'] - sys.modules[self.states[cdata['full']].__module__].__opts__['test'] = True + if low.get(u'__prereq__'): + test = sys.modules[self.states[cdata[u'full']].__module__].__opts__[u'test'] + sys.modules[self.states[cdata[u'full']].__module__].__opts__[u'test'] = True try: # Let's get a reference to the salt environment to use within this # state call. @@ -1800,45 +1806,45 @@ class State(object): # that's not found in cdata, we look for what we're being passed in # the original data, namely, the special dunder __env__. If that's # not found we default to 'base' - if ('unless' in low and '{0[state]}.mod_run_check'.format(low) not in self.states) or \ - ('onlyif' in low and '{0[state]}.mod_run_check'.format(low) not in self.states): + if (u'unless' in low and u'{0[state]}.mod_run_check'.format(low) not in self.states) or \ + (u'onlyif' in low and u'{0[state]}.mod_run_check'.format(low) not in self.states): ret.update(self._run_check(low)) - if 'saltenv' in low: - inject_globals['__env__'] = str(low['saltenv']) - elif isinstance(cdata['kwargs'].get('env', None), six.string_types): + if u'saltenv' in low: + inject_globals[u'__env__'] = six.text_type(low[u'saltenv']) + elif isinstance(cdata[u'kwargs'].get(u'env', None), six.string_types): # User is using a deprecated env setting which was parsed by # format_call. 
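
Aside on the saltenv handling in this hunk: before calling the state function, State.call picks the environment to inject as __env__. A rough illustration of that precedence, with plain dicts standing in for low and cdata['kwargs'] (illustrative sketch, not Salt's API):

    def resolve_env(low, cdata_kwargs):
        # saltenv wins, then the deprecated string-typed 'env' kwarg, then __env__, else 'base'
        if 'saltenv' in low:
            return str(low['saltenv'])
        env = cdata_kwargs.get('env')
        if isinstance(env, str):
            return str(env)
        if '__env__' in low:
            return str(low['__env__'])
        return 'base'
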
# We check for a string type since module functions which # allow setting the OS environ also make use of the "env" # keyword argument, which is not a string - inject_globals['__env__'] = str(cdata['kwargs']['env']) - elif '__env__' in low: + inject_globals[u'__env__'] = six.text_type(cdata[u'kwargs'][u'env']) + elif u'__env__' in low: # The user is passing an alternative environment using __env__ # which is also not the appropriate choice, still, handle it - inject_globals['__env__'] = str(low['__env__']) + inject_globals[u'__env__'] = six.text_type(low[u'__env__']) else: # Let's use the default environment - inject_globals['__env__'] = 'base' + inject_globals[u'__env__'] = u'base' - if '__orchestration_jid__' in low: - inject_globals['__orchestration_jid__'] = \ - low['__orchestration_jid__'] + if u'__orchestration_jid__' in low: + inject_globals[u'__orchestration_jid__'] = \ + low[u'__orchestration_jid__'] - if 'result' not in ret or ret['result'] is False: + if u'result' not in ret or ret[u'result'] is False: self.states.inject_globals = inject_globals if self.mocked: ret = mock_ret(cdata) else: # Execute the state function - if not low.get('__prereq__') and low.get('parallel'): + if not low.get(u'__prereq__') and low.get(u'parallel'): # run the state call in parallel, but only if not in a prereq ret = self.call_parallel(cdata, low) else: - ret = self.states[cdata['full']](*cdata['args'], - **cdata['kwargs']) + ret = self.states[cdata[u'full']](*cdata[u'args'], + **cdata[u'kwargs']) self.states.inject_globals = {} - if 'check_cmd' in low and '{0[state]}.mod_run_check_cmd'.format(low) not in self.states: + if u'check_cmd' in low and u'{0[state]}.mod_run_check_cmd'.format(low) not in self.states: ret.update(self._run_check_cmd(low)) self.verify_ret(ret) except Exception: @@ -1848,37 +1854,40 @@ class State(object): # enough to not raise another KeyError as the name is easily # guessable and fallback in all cases to present the real # exception to the user - if len(cdata['args']) > 0: - name = cdata['args'][0] - elif 'name' in cdata['kwargs']: - name = cdata['kwargs']['name'] + if len(cdata[u'args']) > 0: + name = cdata[u'args'][0] + elif u'name' in cdata[u'kwargs']: + name = cdata[u'kwargs'][u'name'] else: - name = low.get('name', low.get('__id__')) + name = low.get(u'name', low.get(u'__id__')) ret = { - 'result': False, - 'name': name, - 'changes': {}, - 'comment': 'An exception occurred in this state: {0}'.format( + u'result': False, + u'name': name, + u'changes': {}, + u'comment': u'An exception occurred in this state: {0}'.format( trb) } finally: - if low.get('__prereq__'): - sys.modules[self.states[cdata['full']].__module__].__opts__[ - 'test'] = test + if low.get(u'__prereq__'): + sys.modules[self.states[cdata[u'full']].__module__].__opts__[ + u'test'] = test + + self.state_con.pop('runas') + self.state_con.pop('runas_password') # If format_call got any warnings, let's show them to the user - if 'warnings' in cdata: - ret.setdefault('warnings', []).extend(cdata['warnings']) + if u'warnings' in cdata: + ret.setdefault(u'warnings', []).extend(cdata[u'warnings']) - if 'provider' in low: + if u'provider' in low: self.load_modules() - if low.get('__prereq__'): - low['__prereq__'] = False + if low.get(u'__prereq__'): + low[u'__prereq__'] = False return ret - ret['__sls__'] = low.get('__sls__') - ret['__run_num__'] = self.__run_num + ret[u'__sls__'] = low.get(u'__sls__') + ret[u'__run_num__'] = self.__run_num self.__run_num += 1 format_log(ret) self.check_refresh(low, ret) @@ -1886,56 
+1895,57 @@ class State(object): timezone_delta = datetime.datetime.utcnow() - datetime.datetime.now() local_finish_time = utc_finish_time - timezone_delta local_start_time = utc_start_time - timezone_delta - ret['start_time'] = local_start_time.time().isoformat() + ret[u'start_time'] = local_start_time.time().isoformat() delta = (utc_finish_time - utc_start_time) # duration in milliseconds.microseconds duration = (delta.seconds * 1000000 + delta.microseconds)/1000.0 - ret['duration'] = duration - ret['__id__'] = low['__id__'] + ret[u'duration'] = duration + ret[u'__id__'] = low[u'__id__'] log.info( - 'Completed state [{0}] at time {1} duration_in_ms={2}'.format( - low['name'].strip() if isinstance(low['name'], str) - else low['name'], - local_finish_time.time().isoformat(), - duration - ) + u'Completed state [%s] at time %s (duration_in_ms=%s)', + low[u'name'].strip() if isinstance(low[u'name'], six.string_types) + else low[u'name'], + local_finish_time.time().isoformat(), + duration ) - if 'retry' in low: - low['retry'] = self.verify_retry_data(low['retry']) - if not sys.modules[self.states[cdata['full']].__module__].__opts__['test']: - if low['retry']['until'] != ret['result']: - if low['retry']['attempts'] > retries: - interval = low['retry']['interval'] - if low['retry']['splay'] != 0: - interval = interval + random.randint(0, low['retry']['splay']) - log.info(('State result does not match retry until value' - ', state will be re-run in {0} seconds'.format(interval))) - self.functions['test.sleep'](interval) + if u'retry' in low: + low[u'retry'] = self.verify_retry_data(low[u'retry']) + if not sys.modules[self.states[cdata[u'full']].__module__].__opts__[u'test']: + if low[u'retry'][u'until'] != ret[u'result']: + if low[u'retry'][u'attempts'] > retries: + interval = low[u'retry'][u'interval'] + if low[u'retry'][u'splay'] != 0: + interval = interval + random.randint(0, low[u'retry'][u'splay']) + log.info( + u'State result does not match retry until value, ' + u'state will be re-run in %s seconds', interval + ) + self.functions[u'test.sleep'](interval) retry_ret = self.call(low, chunks, running, retries=retries+1) orig_ret = ret ret = retry_ret - ret['comment'] = '\n'.join( + ret[u'comment'] = u'\n'.join( [( - 'Attempt {0}: Returned a result of "{1}", ' - 'with the following comment: "{2}"'.format( + u'Attempt {0}: Returned a result of "{1}", ' + u'with the following comment: "{2}"'.format( retries, - orig_ret['result'], - orig_ret['comment']) + orig_ret[u'result'], + orig_ret[u'comment']) ), - '' if not ret['comment'] else ret['comment']]) - ret['duration'] = ret['duration'] + orig_ret['duration'] + (interval * 1000) + u'' if not ret[u'comment'] else ret[u'comment']]) + ret[u'duration'] = ret[u'duration'] + orig_ret[u'duration'] + (interval * 1000) if retries == 1: - ret['start_time'] = orig_ret['start_time'] + ret[u'start_time'] = orig_ret[u'start_time'] else: - ret['comment'] = ' '.join( - ['' if not ret['comment'] else ret['comment'], - ('The state would be retried every {1} seconds ' - '(with a splay of up to {3} seconds) ' - 'a maximum of {0} times or until a result of {2} ' - 'is returned').format(low['retry']['attempts'], - low['retry']['interval'], - low['retry']['until'], - low['retry']['splay'])]) + ret[u'comment'] = u' '.join( + [u'' if not ret[u'comment'] else ret[u'comment'], + (u'The state would be retried every {1} seconds ' + u'(with a splay of up to {3} seconds) ' + u'a maximum of {0} times or until a result of {2} ' + u'is returned').format(low[u'retry'][u'attempts'], 
+ low[u'retry'][u'interval'], + low[u'retry'][u'until'], + low[u'retry'][u'splay'])]) return ret def verify_retry_data(self, retry_data): @@ -1943,16 +1953,16 @@ class State(object): verifies the specified retry data ''' retry_defaults = { - 'until': True, - 'attempts': 2, - 'splay': 0, - 'interval': 30, + u'until': True, + u'attempts': 2, + u'splay': 0, + u'interval': 30, } expected_data = { - 'until': bool, - 'attempts': int, - 'interval': int, - 'splay': int, + u'until': bool, + u'attempts': int, + u'interval': int, + u'splay': int, } validated_retry_data = {} if isinstance(retry_data, dict): @@ -1961,15 +1971,17 @@ class State(object): if isinstance(retry_data[expected_key], value_type): validated_retry_data[expected_key] = retry_data[expected_key] else: - log.warning(('An invalid value was passed for the retry {0}, ' - 'using default value {1}'.format(expected_key, - retry_defaults[expected_key]))) + log.warning( + u'An invalid value was passed for the retry %s, ' + u'using default value \'%s\'', + expected_key, retry_defaults[expected_key] + ) validated_retry_data[expected_key] = retry_defaults[expected_key] else: validated_retry_data[expected_key] = retry_defaults[expected_key] else: - log.warning(('State is set to retry, but a valid dict for retry ' - 'configuration was not found. Using retry defaults')) + log.warning((u'State is set to retry, but a valid dict for retry ' + u'configuration was not found. Using retry defaults')) validated_retry_data = retry_defaults return validated_retry_data @@ -1979,31 +1991,31 @@ class State(object): ''' # Check for any disabled states disabled = {} - if 'state_runs_disabled' in self.opts['grains']: + if u'state_runs_disabled' in self.opts[u'grains']: for low in chunks[:]: - state_ = '{0}.{1}'.format(low['state'], low['fun']) - for pat in self.opts['grains']['state_runs_disabled']: + state_ = u'{0}.{1}'.format(low[u'state'], low[u'fun']) + for pat in self.opts[u'grains'][u'state_runs_disabled']: if fnmatch.fnmatch(state_, pat): comment = ( - 'The state function "{0}" is currently disabled by "{1}", ' - 'to re-enable, run state.enable {1}.' + u'The state function "{0}" is currently disabled by "{1}", ' + u'to re-enable, run state.enable {1}.' 
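
Aside on the retry handling above: verify_retry_data normalises the retry option against typed defaults, falling back (with a warning) when a value is missing or of the wrong type. A minimal standalone sketch of that validation, assuming the defaults shown in the hunk (until=True, attempts=2, splay=0, interval=30):

    RETRY_DEFAULTS = {'until': True, 'attempts': 2, 'splay': 0, 'interval': 30}
    RETRY_TYPES = {'until': bool, 'attempts': int, 'splay': int, 'interval': int}

    def validate_retry(retry_data):
        # Non-dict input falls back entirely to the defaults.
        if not isinstance(retry_data, dict):
            return dict(RETRY_DEFAULTS)
        validated = {}
        for key, default in RETRY_DEFAULTS.items():
            value = retry_data.get(key, default)
            validated[key] = value if isinstance(value, RETRY_TYPES[key]) else default
        return validated
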
).format( state_, pat, ) _tag = _gen_tag(low) - disabled[_tag] = {'changes': {}, - 'result': False, - 'comment': comment, - '__run_num__': self.__run_num, - '__sls__': low['__sls__']} + disabled[_tag] = {u'changes': {}, + u'result': False, + u'comment': comment, + u'__run_num__': self.__run_num, + u'__sls__': low[u'__sls__']} self.__run_num += 1 chunks.remove(low) break running = {} for low in chunks: - if '__FAILHARD__' in running: - running.pop('__FAILHARD__') + if u'__FAILHARD__' in running: + running.pop(u'__FAILHARD__') return running tag = _gen_tag(low) if tag not in running: @@ -2023,12 +2035,12 @@ class State(object): Check if the low data chunk should send a failhard signal ''' tag = _gen_tag(low) - if self.opts.get('test', False): + if self.opts.get(u'test', False): return False - if (low.get('failhard', False) or self.opts['failhard']) and tag in running: - if running[tag]['result'] is None: + if (low.get(u'failhard', False) or self.opts[u'failhard']) and tag in running: + if running[tag][u'result'] is None: return False - return not running[tag]['result'] + return not running[tag][u'result'] return False def reconcile_procs(self, running): @@ -2037,25 +2049,25 @@ class State(object): ''' retset = set() for tag in running: - proc = running[tag].get('proc') + proc = running[tag].get(u'proc') if proc: if not proc.is_alive(): - ret_cache = os.path.join(self.opts['cachedir'], self.jid, tag) + ret_cache = os.path.join(self.opts[u'cachedir'], self.jid, tag) if not os.path.isfile(ret_cache): - ret = {'result': False, - 'comment': 'Parallel process failed to return', - 'name': running[tag]['name'], - 'changes': {}} + ret = {u'result': False, + u'comment': u'Parallel process failed to return', + u'name': running[tag][u'name'], + u'changes': {}} try: - with salt.utils.files.fopen(ret_cache, 'rb') as fp_: + with salt.utils.files.fopen(ret_cache, u'rb') as fp_: ret = msgpack.loads(fp_.read()) except (OSError, IOError): - ret = {'result': False, - 'comment': 'Parallel cache failure', - 'name': running[tag]['name'], - 'changes': {}} + ret = {u'result': False, + u'comment': u'Parallel cache failure', + u'name': running[tag][u'name'], + u'changes': {}} running[tag].update(ret) - running[tag].pop('proc') + running[tag].pop(u'proc') else: retset.add(False) return False not in retset @@ -2067,40 +2079,40 @@ class State(object): ''' present = False # If mod_watch is not available make it a require - if 'watch' in low: - if '{0}.mod_watch'.format(low['state']) not in self.states: - if 'require' in low: - low['require'].extend(low.pop('watch')) + if u'watch' in low: + if u'{0}.mod_watch'.format(low[u'state']) not in self.states: + if u'require' in low: + low[u'require'].extend(low.pop(u'watch')) else: - low['require'] = low.pop('watch') + low[u'require'] = low.pop(u'watch') else: present = True - if 'require' in low: + if u'require' in low: present = True - if 'prerequired' in low: + if u'prerequired' in low: present = True - if 'prereq' in low: + if u'prereq' in low: present = True - if 'onfail' in low: + if u'onfail' in low: present = True - if 'onchanges' in low: + if u'onchanges' in low: present = True if not present: - return 'met', () + return u'met', () self.reconcile_procs(running) reqs = { - 'require': [], - 'watch': [], - 'prereq': [], - 'onfail': [], - 'onchanges': []} + u'require': [], + u'watch': [], + u'prereq': [], + u'onfail': [], + u'onchanges': []} if pre: - reqs['prerequired'] = [] + reqs[u'prerequired'] = [] for r_state in reqs: if r_state in low and low[r_state] is not None: for 
req in low[r_state]: if isinstance(req, six.string_types): - req = {'id': req} + req = {u'id': req} req = trim_req(req) found = False for chunk in chunks: @@ -2108,87 +2120,87 @@ class State(object): req_val = req[req_key] if req_val is None: continue - if req_key == 'sls': + if req_key == u'sls': # Allow requisite tracking of entire sls files - if fnmatch.fnmatch(chunk['__sls__'], req_val): + if fnmatch.fnmatch(chunk[u'__sls__'], req_val): found = True reqs[r_state].append(chunk) continue try: - if (fnmatch.fnmatch(chunk['name'], req_val) or - fnmatch.fnmatch(chunk['__id__'], req_val)): - if req_key == 'id' or chunk['state'] == req_key: + if (fnmatch.fnmatch(chunk[u'name'], req_val) or + fnmatch.fnmatch(chunk[u'__id__'], req_val)): + if req_key == u'id' or chunk[u'state'] == req_key: found = True reqs[r_state].append(chunk) except KeyError as exc: raise SaltRenderError( - 'Could not locate requisite of [{0}] present in state with name [{1}]'.format( - req_key, chunk['name'])) + u'Could not locate requisite of [{0}] present in state with name [{1}]'.format( + req_key, chunk[u'name'])) except TypeError: # On Python 2, the above req_val, being an OrderedDict, will raise a KeyError, # however on Python 3 it will raise a TypeError # This was found when running tests.unit.test_state.StateCompilerTestCase.test_render_error_on_invalid_requisite raise SaltRenderError( - 'Could not locate requisite of [{0}] present in state with name [{1}]'.format( - req_key, chunk['name'])) + u'Could not locate requisite of [{0}] present in state with name [{1}]'.format( + req_key, chunk[u'name'])) if not found: - return 'unmet', () + return u'unmet', () fun_stats = set() for r_state, chunks in six.iteritems(reqs): - if r_state == 'prereq': + if r_state == u'prereq': run_dict = self.pre else: run_dict = running for chunk in chunks: tag = _gen_tag(chunk) if tag not in run_dict: - fun_stats.add('unmet') + fun_stats.add(u'unmet') continue - if run_dict[tag].get('proc'): + if run_dict[tag].get(u'proc'): # Run in parallel, first wait for a touch and then recheck time.sleep(0.01) return self.check_requisite(low, running, chunks, pre) - if r_state == 'onfail': - if run_dict[tag]['result'] is True: - fun_stats.add('onfail') # At least one state is OK + if r_state == u'onfail': + if run_dict[tag][u'result'] is True: + fun_stats.add(u'onfail') # At least one state is OK continue else: - if run_dict[tag]['result'] is False: - fun_stats.add('fail') + if run_dict[tag][u'result'] is False: + fun_stats.add(u'fail') continue - if r_state == 'onchanges': - if not run_dict[tag]['changes']: - fun_stats.add('onchanges') + if r_state == u'onchanges': + if not run_dict[tag][u'changes']: + fun_stats.add(u'onchanges') else: - fun_stats.add('onchangesmet') + fun_stats.add(u'onchangesmet') continue - if r_state == 'watch' and run_dict[tag]['changes']: - fun_stats.add('change') + if r_state == u'watch' and run_dict[tag][u'changes']: + fun_stats.add(u'change') continue - if r_state == 'prereq' and run_dict[tag]['result'] is None: - fun_stats.add('premet') - if r_state == 'prereq' and not run_dict[tag]['result'] is None: - fun_stats.add('pre') + if r_state == u'prereq' and run_dict[tag][u'result'] is None: + fun_stats.add(u'premet') + if r_state == u'prereq' and not run_dict[tag][u'result'] is None: + fun_stats.add(u'pre') else: - fun_stats.add('met') + fun_stats.add(u'met') - if 'unmet' in fun_stats: - status = 'unmet' - elif 'fail' in fun_stats: - status = 'fail' - elif 'pre' in fun_stats: - if 'premet' in fun_stats: - status = 'met' + if 
u'unmet' in fun_stats: + status = u'unmet' + elif u'fail' in fun_stats: + status = u'fail' + elif u'pre' in fun_stats: + if u'premet' in fun_stats: + status = u'met' else: - status = 'pre' - elif 'onfail' in fun_stats and 'met' not in fun_stats: - status = 'onfail' # all onfail states are OK - elif 'onchanges' in fun_stats and 'onchangesmet' not in fun_stats: - status = 'onchanges' - elif 'change' in fun_stats: - status = 'change' + status = u'pre' + elif u'onfail' in fun_stats and u'met' not in fun_stats: + status = u'onfail' # all onfail states are OK + elif u'onchanges' in fun_stats and u'onchangesmet' not in fun_stats: + status = u'onchanges' + elif u'change' in fun_stats: + status = u'change' else: - status = 'met' + status = u'met' return status, reqs @@ -2207,28 +2219,28 @@ class State(object): chunk is evaluated an event will be set up to the master with the results. ''' - if not self.opts.get('local') and (self.opts.get('state_events', True) or fire_event): - if not self.opts.get('master_uri'): + if not self.opts.get(u'local') and (self.opts.get(u'state_events', True) or fire_event): + if not self.opts.get(u'master_uri'): ev_func = lambda ret, tag, preload=None: salt.utils.event.get_master_event( - self.opts, self.opts['sock_dir'], listen=False).fire_event(ret, tag) + self.opts, self.opts[u'sock_dir'], listen=False).fire_event(ret, tag) else: - ev_func = self.functions['event.fire_master'] + ev_func = self.functions[u'event.fire_master'] - ret = {'ret': chunk_ret} + ret = {u'ret': chunk_ret} if fire_event is True: tag = salt.utils.event.tagify( - [self.jid, self.opts['id'], str(chunk_ret['name'])], 'state_result' + [self.jid, self.opts[u'id'], str(chunk_ret[u'name'])], u'state_result' ) elif isinstance(fire_event, six.string_types): tag = salt.utils.event.tagify( - [self.jid, self.opts['id'], str(fire_event)], 'state_result' + [self.jid, self.opts[u'id'], str(fire_event)], u'state_result' ) else: tag = salt.utils.event.tagify( - [self.jid, 'prog', self.opts['id'], str(chunk_ret['__run_num__'])], 'job' + [self.jid, u'prog', self.opts[u'id'], str(chunk_ret[u'__run_num__'])], u'job' ) - ret['len'] = length - preload = {'jid': self.jid} + ret[u'len'] = length + preload = {u'jid': self.jid} ev_func(ret, tag, preload=preload) def call_chunk(self, low, running, chunks): @@ -2239,15 +2251,15 @@ class State(object): low = self._mod_aggregate(low, running, chunks) self._mod_init(low) tag = _gen_tag(low) - if not low.get('prerequired'): + if not low.get(u'prerequired'): self.active.add(tag) - requisites = ['require', 'watch', 'prereq', 'onfail', 'onchanges'] - if not low.get('__prereq__'): - requisites.append('prerequired') + requisites = [u'require', u'watch', u'prereq', u'onfail', u'onchanges'] + if not low.get(u'__prereq__'): + requisites.append(u'prerequired') status, reqs = self.check_requisite(low, running, chunks, pre=True) else: status, reqs = self.check_requisite(low, running, chunks) - if status == 'unmet': + if status == u'unmet': lost = {} reqs = [] for requisite in requisites: @@ -2256,7 +2268,7 @@ class State(object): continue for req in low[requisite]: if isinstance(req, six.string_types): - req = {'id': req} + req = {u'id': req} req = trim_req(req) found = False req_key = next(iter(req)) @@ -2264,44 +2276,46 @@ class State(object): for chunk in chunks: if req_val is None: continue - if req_key == 'sls': + if req_key == u'sls': # Allow requisite tracking of entire sls files - if fnmatch.fnmatch(chunk['__sls__'], req_val): - if requisite == 'prereq': - chunk['__prereq__'] = 
True + if fnmatch.fnmatch(chunk[u'__sls__'], req_val): + if requisite == u'prereq': + chunk[u'__prereq__'] = True reqs.append(chunk) found = True continue - if (fnmatch.fnmatch(chunk['name'], req_val) or - fnmatch.fnmatch(chunk['__id__'], req_val)): - if req_key == 'id' or chunk['state'] == req_key: - if requisite == 'prereq': - chunk['__prereq__'] = True - elif requisite == 'prerequired': - chunk['__prerequired__'] = True + if (fnmatch.fnmatch(chunk[u'name'], req_val) or + fnmatch.fnmatch(chunk[u'__id__'], req_val)): + if req_key == u'id' or chunk[u'state'] == req_key: + if requisite == u'prereq': + chunk[u'__prereq__'] = True + elif requisite == u'prerequired': + chunk[u'__prerequired__'] = True reqs.append(chunk) found = True if not found: lost[requisite].append(req) - if lost['require'] or lost['watch'] or lost['prereq'] or lost['onfail'] or lost['onchanges'] or lost.get('prerequired'): - comment = 'The following requisites were not found:\n' + if lost[u'require'] or lost[u'watch'] or lost[u'prereq'] \ + or lost[u'onfail'] or lost[u'onchanges'] \ + or lost.get(u'prerequired'): + comment = u'The following requisites were not found:\n' for requisite, lreqs in six.iteritems(lost): if not lreqs: continue comment += \ - '{0}{1}:\n'.format(' ' * 19, requisite) + u'{0}{1}:\n'.format(u' ' * 19, requisite) for lreq in lreqs: req_key = next(iter(lreq)) req_val = lreq[req_key] comment += \ - '{0}{1}: {2}\n'.format(' ' * 23, req_key, req_val) - running[tag] = {'changes': {}, - 'result': False, - 'comment': comment, - '__run_num__': self.__run_num, - '__sls__': low['__sls__']} + u'{0}{1}: {2}\n'.format(u' ' * 23, req_key, req_val) + running[tag] = {u'changes': {}, + u'result': False, + u'comment': comment, + u'__run_num__': self.__run_num, + u'__sls__': low[u'__sls__']} self.__run_num += 1 - self.event(running[tag], len(chunks), fire_event=low.get('fire_event')) + self.event(running[tag], len(chunks), fire_event=low.get(u'fire_event')) return running for chunk in reqs: # Check to see if the chunk has been run, only run it if @@ -2309,52 +2323,52 @@ class State(object): ctag = _gen_tag(chunk) if ctag not in running: if ctag in self.active: - if chunk.get('__prerequired__'): + if chunk.get(u'__prerequired__'): # Prereq recusive, run this chunk with prereq on if tag not in self.pre: - low['__prereq__'] = True + low[u'__prereq__'] = True self.pre[ctag] = self.call(low, chunks, running) return running else: return running elif ctag not in running: - log.error('Recursive requisite found') + log.error(u'Recursive requisite found') running[tag] = { - 'changes': {}, - 'result': False, - 'comment': 'Recursive requisite found', - '__run_num__': self.__run_num, - '__sls__': low['__sls__']} + u'changes': {}, + u'result': False, + u'comment': u'Recursive requisite found', + u'__run_num__': self.__run_num, + u'__sls__': low[u'__sls__']} self.__run_num += 1 - self.event(running[tag], len(chunks), fire_event=low.get('fire_event')) + self.event(running[tag], len(chunks), fire_event=low.get(u'fire_event')) return running running = self.call_chunk(chunk, running, chunks) if self.check_failhard(chunk, running): - running['__FAILHARD__'] = True + running[u'__FAILHARD__'] = True return running - if low.get('__prereq__'): + if low.get(u'__prereq__'): status, reqs = self.check_requisite(low, running, chunks) self.pre[tag] = self.call(low, chunks, running) - if not self.pre[tag]['changes'] and status == 'change': - self.pre[tag]['changes'] = {'watch': 'watch'} - self.pre[tag]['result'] = None + if not 
self.pre[tag][u'changes'] and status == u'change': + self.pre[tag][u'changes'] = {u'watch': u'watch'} + self.pre[tag][u'result'] = None else: running = self.call_chunk(low, running, chunks) if self.check_failhard(chunk, running): - running['__FAILHARD__'] = True + running[u'__FAILHARD__'] = True return running - elif status == 'met': - if low.get('__prereq__'): + elif status == u'met': + if low.get(u'__prereq__'): self.pre[tag] = self.call(low, chunks, running) else: running[tag] = self.call(low, chunks, running) - elif status == 'fail': + elif status == u'fail': # if the requisite that failed was due to a prereq on this low state # show the normal error if tag in self.pre: running[tag] = self.pre[tag] - running[tag]['__run_num__'] = self.__run_num - running[tag]['__sls__'] = low['__sls__'] + running[tag][u'__run_num__'] = self.__run_num + running[tag][u'__sls__'] = low[u'__sls__'] # otherwise the failure was due to a requisite down the chain else: # determine what the requisite failures where, and return @@ -2370,62 +2384,62 @@ class State(object): if req_ret is None: continue # If the result was False (not None) it was a failure - if req_ret['result'] is False: + if req_ret[u'result'] is False: # use SLS.ID for the key-- so its easier to find - key = '{sls}.{_id}'.format(sls=req_low['__sls__'], - _id=req_low['__id__']) + key = u'{sls}.{_id}'.format(sls=req_low[u'__sls__'], + _id=req_low[u'__id__']) failed_requisites.add(key) - _cmt = 'One or more requisite failed: {0}'.format( - ', '.join(str(i) for i in failed_requisites) + _cmt = u'One or more requisite failed: {0}'.format( + u', '.join(str(i) for i in failed_requisites) ) running[tag] = { - 'changes': {}, - 'result': False, - 'comment': _cmt, - '__run_num__': self.__run_num, - '__sls__': low['__sls__'] + u'changes': {}, + u'result': False, + u'comment': _cmt, + u'__run_num__': self.__run_num, + u'__sls__': low[u'__sls__'] } self.__run_num += 1 - elif status == 'change' and not low.get('__prereq__'): + elif status == u'change' and not low.get(u'__prereq__'): ret = self.call(low, chunks, running) - if not ret['changes'] and not ret.get('skip_watch', False): + if not ret[u'changes'] and not ret.get(u'skip_watch', False): low = low.copy() - low['sfun'] = low['fun'] - low['fun'] = 'mod_watch' - low['__reqs__'] = reqs + low[u'sfun'] = low[u'fun'] + low[u'fun'] = u'mod_watch' + low[u'__reqs__'] = reqs ret = self.call(low, chunks, running) running[tag] = ret - elif status == 'pre': - pre_ret = {'changes': {}, - 'result': True, - 'comment': 'No changes detected', - '__run_num__': self.__run_num, - '__sls__': low['__sls__']} + elif status == u'pre': + pre_ret = {u'changes': {}, + u'result': True, + u'comment': u'No changes detected', + u'__run_num__': self.__run_num, + u'__sls__': low[u'__sls__']} running[tag] = pre_ret self.pre[tag] = pre_ret self.__run_num += 1 - elif status == 'onfail': - running[tag] = {'changes': {}, - 'result': True, - 'comment': 'State was not run because onfail req did not change', - '__run_num__': self.__run_num, - '__sls__': low['__sls__']} + elif status == u'onfail': + running[tag] = {u'changes': {}, + u'result': True, + u'comment': u'State was not run because onfail req did not change', + u'__run_num__': self.__run_num, + u'__sls__': low[u'__sls__']} self.__run_num += 1 - elif status == 'onchanges': - running[tag] = {'changes': {}, - 'result': True, - 'comment': 'State was not run because none of the onchanges reqs changed', - '__run_num__': self.__run_num, - '__sls__': low['__sls__']} + elif status == u'onchanges': 
+ running[tag] = {u'changes': {}, + u'result': True, + u'comment': u'State was not run because none of the onchanges reqs changed', + u'__run_num__': self.__run_num, + u'__sls__': low[u'__sls__']} self.__run_num += 1 else: - if low.get('__prereq__'): + if low.get(u'__prereq__'): self.pre[tag] = self.call(low, chunks, running) else: running[tag] = self.call(low, chunks, running) if tag in running: - self.event(running[tag], len(chunks), fire_event=low.get('fire_event')) + self.event(running[tag], len(chunks), fire_event=low.get(u'fire_event')) return running def call_listen(self, chunks, running): @@ -2435,14 +2449,14 @@ class State(object): listeners = [] crefs = {} for chunk in chunks: - crefs[(chunk['state'], chunk['name'])] = chunk - crefs[(chunk['state'], chunk['__id__'])] = chunk - if 'listen' in chunk: - listeners.append({(chunk['state'], chunk['__id__']): chunk['listen']}) - if 'listen_in' in chunk: - for l_in in chunk['listen_in']: + crefs[(chunk[u'state'], chunk[u'name'])] = chunk + crefs[(chunk[u'state'], chunk[u'__id__'])] = chunk + if u'listen' in chunk: + listeners.append({(chunk[u'state'], chunk[u'__id__']): chunk[u'listen']}) + if u'listen_in' in chunk: + for l_in in chunk[u'listen_in']: for key, val in six.iteritems(l_in): - listeners.append({(key, val): [{chunk['state']: chunk['__id__']}]}) + listeners.append({(key, val): [{chunk[u'state']: chunk[u'__id__']}]}) mod_watchers = [] errors = {} for l_dict in listeners: @@ -2454,30 +2468,30 @@ class State(object): if (lkey, lval) not in crefs: rerror = {_l_tag(lkey, lval): { - 'comment': 'Referenced state {0}: {1} does not exist'.format(lkey, lval), - 'name': 'listen_{0}:{1}'.format(lkey, lval), - 'result': False, - 'changes': {} + u'comment': u'Referenced state {0}: {1} does not exist'.format(lkey, lval), + u'name': u'listen_{0}:{1}'.format(lkey, lval), + u'result': False, + u'changes': {} }} errors.update(rerror) continue to_tag = _gen_tag(crefs[(lkey, lval)]) if to_tag not in running: continue - if running[to_tag]['changes']: + if running[to_tag][u'changes']: if key not in crefs: rerror = {_l_tag(key[0], key[1]): - {'comment': 'Referenced state {0}: {1} does not exist'.format(key[0], key[1]), - 'name': 'listen_{0}:{1}'.format(key[0], key[1]), - 'result': False, - 'changes': {}}} + {u'comment': u'Referenced state {0}: {1} does not exist'.format(key[0], key[1]), + u'name': u'listen_{0}:{1}'.format(key[0], key[1]), + u'result': False, + u'changes': {}}} errors.update(rerror) continue chunk = crefs[key] low = chunk.copy() - low['sfun'] = chunk['fun'] - low['fun'] = 'mod_watch' - low['__id__'] = 'listener_{0}'.format(low['__id__']) + low[u'sfun'] = chunk[u'fun'] + low[u'fun'] = u'mod_watch' + low[u'__id__'] = u'listener_{0}'.format(low[u'__id__']) for req in STATE_REQUISITE_KEYWORDS: if req in low: low.pop(req) @@ -2485,7 +2499,7 @@ class State(object): ret = self.call_chunks(mod_watchers) running.update(ret) for err in errors: - errors[err]['__run_num__'] = self.__run_num + errors[err][u'__run_num__'] = self.__run_num self.__run_num += 1 running.update(errors) return running @@ -2519,18 +2533,14 @@ class State(object): def _cleanup_accumulator_data(): accum_data_path = os.path.join( - salt.utils.get_accumulator_dir(self.opts['cachedir']), + salt.utils.get_accumulator_dir(self.opts[u'cachedir']), self.instance_id ) try: os.remove(accum_data_path) - log.debug('Deleted accumulator data file {0}'.format( - accum_data_path) - ) + log.debug(u'Deleted accumulator data file %s', accum_data_path) except OSError: - log.debug('File {0} 
does not exist, no need to cleanup.'.format( - accum_data_path) - ) + log.debug(u'File %s does not exist, no need to cleanup', accum_data_path) _cleanup_accumulator_data() return ret @@ -2542,16 +2552,16 @@ class State(object): if not isinstance(high, dict): errors.append( - 'Template {0} does not render to a dictionary'.format(template) + u'Template {0} does not render to a dictionary'.format(template) ) return high, errors - invalid_items = ('include', 'exclude', 'extends') + invalid_items = (u'include', u'exclude', u'extends') for item in invalid_items: if item in high: errors.append( - 'The \'{0}\' declaration found on \'{1}\' is invalid when ' - 'rendering single templates'.format(item, template) + u'The \'{0}\' declaration found on \'{1}\' is invalid when ' + u'rendering single templates'.format(item, template) ) return high, errors @@ -2559,8 +2569,8 @@ class State(object): if not isinstance(high[name], dict): if isinstance(high[name], six.string_types): # Is this is a short state, it needs to be padded - if '.' in high[name]: - comps = high[name].split('.') + if u'.' in high[name]: + comps = high[name].split(u'.') high[name] = { # '__sls__': template, # '__env__': None, @@ -2569,27 +2579,27 @@ class State(object): continue errors.append( - 'ID {0} in template {1} is not a dictionary'.format( + u'ID {0} in template {1} is not a dictionary'.format( name, template ) ) continue skeys = set() for key in sorted(high[name]): - if key.startswith('_'): + if key.startswith(u'_'): continue if high[name][key] is None: errors.append( - 'ID \'{0}\' in template {1} contains a short ' - 'declaration ({2}) with a trailing colon. When not ' - 'passing any arguments to a state, the colon must be ' - 'omitted.'.format(name, template, key) + u'ID \'{0}\' in template {1} contains a short ' + u'declaration ({2}) with a trailing colon. When not ' + u'passing any arguments to a state, the colon must be ' + u'omitted.'.format(name, template, key) ) continue if not isinstance(high[name][key], list): continue - if '.' in key: - comps = key.split('.') + if u'.' 
in key: + comps = key.split(u'.') # Salt doesn't support state files such as: # # /etc/redis/redis.conf: @@ -2601,8 +2611,8 @@ class State(object): # - regex: ^requirepass if comps[0] in skeys: errors.append( - 'ID \'{0}\' in template \'{1}\' contains multiple ' - 'state declarations of the same type' + u'ID \'{0}\' in template \'{1}\' contains multiple ' + u'state declarations of the same type' .format(name, template) ) continue @@ -2620,9 +2630,9 @@ class State(object): ''' high = compile_template(template, self.rend, - self.opts['renderer'], - self.opts['renderer_blacklist'], - self.opts['renderer_whitelist']) + self.opts[u'renderer'], + self.opts[u'renderer_blacklist'], + self.opts[u'renderer_whitelist']) if not high: return high high, errors = self.render_template(high, template) @@ -2636,12 +2646,12 @@ class State(object): ''' high = compile_template_str(template, self.rend, - self.opts['renderer'], - self.opts['renderer_blacklist'], - self.opts['renderer_whitelist']) + self.opts[u'renderer'], + self.opts[u'renderer_blacklist'], + self.opts[u'renderer_whitelist']) if not high: return high - high, errors = self.render_template(high, '') + high, errors = self.render_template(high, u'') if errors: return errors return self.call_high(high) @@ -2680,51 +2690,51 @@ class BaseHighState(object): # If the state is intended to be applied locally, then the local opts # should have all of the needed data, otherwise overwrite the local # data items with data from the master - if 'local_state' in opts: - if opts['local_state']: + if u'local_state' in opts: + if opts[u'local_state']: return opts mopts = self.client.master_opts() if not isinstance(mopts, dict): # An error happened on the master - opts['renderer'] = 'yaml_jinja' - opts['failhard'] = False - opts['state_top'] = salt.utils.url.create('top.sls') - opts['nodegroups'] = {} - opts['file_roots'] = {'base': [syspaths.BASE_FILE_ROOTS_DIR]} + opts[u'renderer'] = u'yaml_jinja' + opts[u'failhard'] = False + opts[u'state_top'] = salt.utils.url.create(u'top.sls') + opts[u'nodegroups'] = {} + opts[u'file_roots'] = {u'base': [syspaths.BASE_FILE_ROOTS_DIR]} else: - opts['renderer'] = mopts['renderer'] - opts['failhard'] = mopts.get('failhard', False) - if mopts['state_top'].startswith('salt://'): - opts['state_top'] = mopts['state_top'] - elif mopts['state_top'].startswith('/'): - opts['state_top'] = salt.utils.url.create(mopts['state_top'][1:]) + opts[u'renderer'] = mopts[u'renderer'] + opts[u'failhard'] = mopts.get(u'failhard', False) + if mopts[u'state_top'].startswith(u'salt://'): + opts[u'state_top'] = mopts[u'state_top'] + elif mopts[u'state_top'].startswith(u'/'): + opts[u'state_top'] = salt.utils.url.create(mopts[u'state_top'][1:]) else: - opts['state_top'] = salt.utils.url.create(mopts['state_top']) - opts['state_top_saltenv'] = mopts.get('state_top_saltenv', None) - opts['nodegroups'] = mopts.get('nodegroups', {}) - opts['state_auto_order'] = mopts.get( - 'state_auto_order', - opts['state_auto_order']) - opts['file_roots'] = mopts['file_roots'] - opts['top_file_merging_strategy'] = mopts.get('top_file_merging_strategy', - opts.get('top_file_merging_strategy')) - opts['env_order'] = mopts.get('env_order', opts.get('env_order', [])) - opts['default_top'] = mopts.get('default_top', opts.get('default_top')) - opts['state_events'] = mopts.get('state_events') - opts['state_aggregate'] = mopts.get('state_aggregate', opts.get('state_aggregate', False)) - opts['jinja_lstrip_blocks'] = mopts.get('jinja_lstrip_blocks', False) - 
opts['jinja_trim_blocks'] = mopts.get('jinja_trim_blocks', False) + opts[u'state_top'] = salt.utils.url.create(mopts[u'state_top']) + opts[u'state_top_saltenv'] = mopts.get(u'state_top_saltenv', None) + opts[u'nodegroups'] = mopts.get(u'nodegroups', {}) + opts[u'state_auto_order'] = mopts.get( + u'state_auto_order', + opts[u'state_auto_order']) + opts[u'file_roots'] = mopts[u'file_roots'] + opts[u'top_file_merging_strategy'] = mopts.get(u'top_file_merging_strategy', + opts.get(u'top_file_merging_strategy')) + opts[u'env_order'] = mopts.get(u'env_order', opts.get(u'env_order', [])) + opts[u'default_top'] = mopts.get(u'default_top', opts.get(u'default_top')) + opts[u'state_events'] = mopts.get(u'state_events') + opts[u'state_aggregate'] = mopts.get(u'state_aggregate', opts.get(u'state_aggregate', False)) + opts[u'jinja_lstrip_blocks'] = mopts.get(u'jinja_lstrip_blocks', False) + opts[u'jinja_trim_blocks'] = mopts.get(u'jinja_trim_blocks', False) return opts def _get_envs(self): ''' Pull the file server environments out of the master options ''' - envs = ['base'] - if 'file_roots' in self.opts: - envs.extend([x for x in list(self.opts['file_roots']) + envs = [u'base'] + if u'file_roots' in self.opts: + envs.extend([x for x in list(self.opts[u'file_roots']) if x not in envs]) - env_order = self.opts.get('env_order', []) + env_order = self.opts.get(u'env_order', []) # Remove duplicates while preserving the order members = set() env_order = [env for env in env_order if not (env in members or members.add(env))] @@ -2747,37 +2757,37 @@ class BaseHighState(object): done = DefaultOrderedDict(list) found = 0 # did we find any contents in the top files? # Gather initial top files - merging_strategy = self.opts['top_file_merging_strategy'] - if merging_strategy == 'same' and not self.opts['environment']: - if not self.opts['default_top']: + merging_strategy = self.opts[u'top_file_merging_strategy'] + if merging_strategy == u'same' and not self.opts[u'environment']: + if not self.opts[u'default_top']: raise SaltRenderError( - 'top_file_merging_strategy set to \'same\', but no ' - 'default_top configuration option was set' + u'top_file_merging_strategy set to \'same\', but no ' + u'default_top configuration option was set' ) - if self.opts['environment']: + if self.opts[u'environment']: contents = self.client.cache_file( - self.opts['state_top'], - self.opts['environment'] + self.opts[u'state_top'], + self.opts[u'environment'] ) if contents: found = 1 - tops[self.opts['environment']] = [ + tops[self.opts[u'environment']] = [ compile_template( contents, self.state.rend, - self.state.opts['renderer'], - self.state.opts['renderer_blacklist'], - self.state.opts['renderer_whitelist'], - saltenv=self.opts['environment'] + self.state.opts[u'renderer'], + self.state.opts[u'renderer_blacklist'], + self.state.opts[u'renderer_whitelist'], + saltenv=self.opts[u'environment'] ) ] else: - tops[self.opts['environment']] = [{}] + tops[self.opts[u'environment']] = [{}] else: found = 0 - state_top_saltenv = self.opts.get('state_top_saltenv', False) + state_top_saltenv = self.opts.get(u'state_top_saltenv', False) if state_top_saltenv \ and not isinstance(state_top_saltenv, six.string_types): state_top_saltenv = str(state_top_saltenv) @@ -2785,7 +2795,7 @@ class BaseHighState(object): for saltenv in [state_top_saltenv] if state_top_saltenv \ else self._get_envs(): contents = self.client.cache_file( - self.opts['state_top'], + self.opts[u'state_top'], saltenv ) if contents: @@ -2794,42 +2804,42 @@ class 
BaseHighState(object): compile_template( contents, self.state.rend, - self.state.opts['renderer'], - self.state.opts['renderer_blacklist'], - self.state.opts['renderer_whitelist'], + self.state.opts[u'renderer'], + self.state.opts[u'renderer_blacklist'], + self.state.opts[u'renderer_whitelist'], saltenv=saltenv ) ) else: tops[saltenv].append({}) - log.debug('No contents loaded for env: {0}'.format(saltenv)) + log.debug(u'No contents loaded for saltenv \'%s\'', saltenv) - if found > 1 and merging_strategy == 'merge' and not self.opts.get('env_order', None): + if found > 1 and merging_strategy == u'merge' and not self.opts.get(u'env_order', None): log.warning( - 'top_file_merging_strategy is set to \'%s\' and ' - 'multiple top files were found. Merging order is not ' - 'deterministic, it may be desirable to either set ' - 'top_file_merging_strategy to \'same\' or use the ' - '\'env_order\' configuration parameter to specify the ' - 'merging order.', merging_strategy + u'top_file_merging_strategy is set to \'%s\' and ' + u'multiple top files were found. Merging order is not ' + u'deterministic, it may be desirable to either set ' + u'top_file_merging_strategy to \'same\' or use the ' + u'\'env_order\' configuration parameter to specify the ' + u'merging order.', merging_strategy ) if found == 0: log.debug( - 'No contents found in top file. If this is not expected, ' - 'verify that the \'file_roots\' specified in \'etc/master\' ' - 'are accessible. The \'file_roots\' configuration is: %s', - repr(self.state.opts['file_roots']) + u'No contents found in top file. If this is not expected, ' + u'verify that the \'file_roots\' specified in \'etc/master\' ' + u'are accessible. The \'file_roots\' configuration is: %s', + repr(self.state.opts[u'file_roots']) ) # Search initial top files for includes for saltenv, ctops in six.iteritems(tops): for ctop in ctops: - if 'include' not in ctop: + if u'include' not in ctop: continue - for sls in ctop['include']: + for sls in ctop[u'include']: include[saltenv].append(sls) - ctop.pop('include') + ctop.pop(u'include') # Go through the includes and pull out the extra tops and add them while include: pops = [] @@ -2846,11 +2856,11 @@ class BaseHighState(object): self.client.get_state( sls, saltenv - ).get('dest', False), + ).get(u'dest', False), self.state.rend, - self.state.opts['renderer'], - self.state.opts['renderer_blacklist'], - self.state.opts['renderer_whitelist'], + self.state.opts[u'renderer'], + self.state.opts[u'renderer_blacklist'], + self.state.opts[u'renderer_whitelist'], saltenv ) ) @@ -2864,18 +2874,18 @@ class BaseHighState(object): ''' Cleanly merge the top files ''' - merging_strategy = self.opts['top_file_merging_strategy'] + merging_strategy = self.opts[u'top_file_merging_strategy'] try: - merge_attr = '_merge_tops_{0}'.format(merging_strategy) + merge_attr = u'_merge_tops_{0}'.format(merging_strategy) merge_func = getattr(self, merge_attr) - if not hasattr(merge_func, '__call__'): - msg = '\'{0}\' is not callable'.format(merge_attr) + if not hasattr(merge_func, u'__call__'): + msg = u'\'{0}\' is not callable'.format(merge_attr) log.error(msg) raise TypeError(msg) except (AttributeError, TypeError): log.warning( - 'Invalid top_file_merging_strategy \'%s\', falling back to ' - '\'merge\'', merging_strategy + u'Invalid top_file_merging_strategy \'%s\', falling back to ' + u'\'merge\'', merging_strategy ) merge_func = self._merge_tops_merge return merge_func(tops) @@ -2891,44 +2901,44 @@ class BaseHighState(object): top = 
DefaultOrderedDict(OrderedDict) # Check base env first as it is authoritative - base_tops = tops.pop('base', DefaultOrderedDict(OrderedDict)) + base_tops = tops.pop(u'base', DefaultOrderedDict(OrderedDict)) for ctop in base_tops: for saltenv, targets in six.iteritems(ctop): - if saltenv == 'include': + if saltenv == u'include': continue try: for tgt in targets: top[saltenv][tgt] = ctop[saltenv][tgt] except TypeError: - raise SaltRenderError('Unable to render top file. No targets found.') + raise SaltRenderError(u'Unable to render top file. No targets found.') for cenv, ctops in six.iteritems(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): - if saltenv == 'include': + if saltenv == u'include': continue elif saltenv != cenv: log.debug( - 'Section for saltenv \'%s\' in the \'%s\' ' - 'saltenv\'s top file will be ignored, as the ' - 'top_file_merging_strategy is set to \'merge\' ' - 'and the saltenvs do not match', + u'Section for saltenv \'%s\' in the \'%s\' ' + u'saltenv\'s top file will be ignored, as the ' + u'top_file_merging_strategy is set to \'merge\' ' + u'and the saltenvs do not match', saltenv, cenv ) continue elif saltenv in top: log.debug( - 'Section for saltenv \'%s\' in the \'%s\' ' - 'saltenv\'s top file will be ignored, as this ' - 'saltenv was already defined in the \'base\' top ' - 'file', saltenv, cenv + u'Section for saltenv \'%s\' in the \'%s\' ' + u'saltenv\'s top file will be ignored, as this ' + u'saltenv was already defined in the \'base\' top ' + u'file', saltenv, cenv ) continue try: for tgt in targets: top[saltenv][tgt] = ctop[saltenv][tgt] except TypeError: - raise SaltRenderError('Unable to render top file. No targets found.') + raise SaltRenderError(u'Unable to render top file. No targets found.') return top def _merge_tops_same(self, tops): @@ -2941,14 +2951,14 @@ class BaseHighState(object): for cenv, ctops in six.iteritems(tops): if all([x == {} for x in ctops]): # No top file found in this env, check the default_top - default_top = self.opts['default_top'] + default_top = self.opts[u'default_top'] fallback_tops = tops.get(default_top, []) if all([x == {} for x in fallback_tops]): # Nothing in the fallback top file log.error( - 'The \'%s\' saltenv has no top file, and the fallback ' - 'saltenv specified by default_top (%s) also has no ' - 'top file', cenv, default_top + u'The \'%s\' saltenv has no top file, and the fallback ' + u'saltenv specified by default_top (%s) also has no ' + u'top file', cenv, default_top ) continue @@ -2957,17 +2967,17 @@ class BaseHighState(object): if saltenv != cenv: continue log.debug( - 'The \'%s\' saltenv has no top file, using the ' - 'default_top saltenv (%s)', cenv, default_top + u'The \'%s\' saltenv has no top file, using the ' + u'default_top saltenv (%s)', cenv, default_top ) for tgt in targets: top[saltenv][tgt] = ctop[saltenv][tgt] break else: log.error( - 'The \'%s\' saltenv has no top file, and no ' - 'matches were found in the top file for the ' - 'default_top saltenv (%s)', cenv, default_top + u'The \'%s\' saltenv has no top file, and no ' + u'matches were found in the top file for the ' + u'default_top saltenv (%s)', cenv, default_top ) continue @@ -2975,14 +2985,14 @@ class BaseHighState(object): else: for ctop in ctops: for saltenv, targets in six.iteritems(ctop): - if saltenv == 'include': + if saltenv == u'include': continue elif saltenv != cenv: log.debug( - 'Section for saltenv \'%s\' in the \'%s\' ' - 'saltenv\'s top file will be ignored, as the ' - 'top_file_merging_strategy is set to 
\'same\' ' - 'and the saltenvs do not match', + u'Section for saltenv \'%s\' in the \'%s\' ' + u'saltenv\'s top file will be ignored, as the ' + u'top_file_merging_strategy is set to \'same\' ' + u'and the saltenvs do not match', saltenv, cenv ) continue @@ -2991,7 +3001,7 @@ class BaseHighState(object): for tgt in targets: top[saltenv][tgt] = ctop[saltenv][tgt] except TypeError: - raise SaltRenderError('Unable to render top file. No targets found.') + raise SaltRenderError(u'Unable to render top file. No targets found.') return top def _merge_tops_merge_all(self, tops): @@ -3012,7 +3022,7 @@ class BaseHighState(object): for ctops in six.itervalues(tops): for ctop in ctops: for saltenv, targets in six.iteritems(ctop): - if saltenv == 'include': + if saltenv == u'include': continue try: for tgt in targets: @@ -3029,7 +3039,7 @@ class BaseHighState(object): merged.extend([x for x in m_states2 if x not in merged]) top[saltenv][tgt] = merged except TypeError: - raise SaltRenderError('Unable to render top file. No targets found.') + raise SaltRenderError(u'Unable to render top file. No targets found.') return top def verify_tops(self, tops): @@ -3038,28 +3048,28 @@ class BaseHighState(object): ''' errors = [] if not isinstance(tops, dict): - errors.append('Top data was not formed as a dict') + errors.append(u'Top data was not formed as a dict') # No further checks will work, bail out return errors for saltenv, matches in six.iteritems(tops): - if saltenv == 'include': + if saltenv == u'include': continue if not isinstance(saltenv, six.string_types): errors.append( - 'Environment {0} in top file is not formed as a ' - 'string'.format(saltenv) + u'Environment {0} in top file is not formed as a ' + u'string'.format(saltenv) ) - if saltenv == '': - errors.append('Empty saltenv statement in top file') + if saltenv == u'': + errors.append(u'Empty saltenv statement in top file') if not isinstance(matches, dict): errors.append( - 'The top file matches for saltenv {0} are not ' - 'formatted as a dict'.format(saltenv) + u'The top file matches for saltenv {0} are not ' + u'formatted as a dict'.format(saltenv) ) for slsmods in six.itervalues(matches): if not isinstance(slsmods, list): - errors.append('Malformed topfile (state declarations not ' - 'formed as a list)') + errors.append(u'Malformed topfile (state declarations not ' + u'formed as a list)') continue for slsmod in slsmods: if isinstance(slsmod, dict): @@ -3067,8 +3077,8 @@ class BaseHighState(object): for val in six.itervalues(slsmod): if not val: errors.append( - 'Improperly formatted top file matcher ' - 'in saltenv {0}: {1} file'.format( + u'Improperly formatted top file matcher ' + u'in saltenv {0}: {1} file'.format( slsmod, val ) @@ -3077,8 +3087,8 @@ class BaseHighState(object): # This is a sls module if not slsmod: errors.append( - 'Environment {0} contains an empty sls ' - 'index'.format(saltenv) + u'Environment {0} contains an empty sls ' + u'index'.format(saltenv) ) return errors @@ -3090,7 +3100,7 @@ class BaseHighState(object): try: tops = self.get_tops() except SaltRenderError as err: - log.error('Unable to render top file: ' + str(err.error)) + log.error(u'Unable to render top file: ' + str(err.error)) return {} return self.merge_tops(tops) @@ -3105,8 +3115,8 @@ class BaseHighState(object): matches = {} # pylint: disable=cell-var-from-loop for saltenv, body in six.iteritems(top): - if self.opts['environment']: - if saltenv != self.opts['environment']: + if self.opts[u'environment']: + if saltenv != self.opts[u'environment']: 
continue for match, data in six.iteritems(body): def _filter_matches(_match, _data, _opts): @@ -3120,8 +3130,8 @@ class BaseHighState(object): if saltenv not in matches: matches[saltenv] = [] for item in _data: - if 'subfilter' in item: - _tmpdata = item.pop('subfilter') + if u'subfilter' in item: + _tmpdata = item.pop(u'subfilter') for match, data in six.iteritems(_tmpdata): _filter_matches(match, data, _opts) if isinstance(item, six.string_types): @@ -3133,11 +3143,11 @@ class BaseHighState(object): if env_key not in matches: matches[env_key] = [] matches[env_key].append(inc_sls) - _filter_matches(match, data, self.opts['nodegroups']) + _filter_matches(match, data, self.opts[u'nodegroups']) ext_matches = self._master_tops() for saltenv in ext_matches: top_file_matches = matches.get(saltenv, []) - if self.opts['master_tops_first']: + if self.opts[u'master_tops_first']: first = ext_matches[saltenv] second = top_file_matches else: @@ -3160,13 +3170,13 @@ class BaseHighState(object): If autoload_dynamic_modules is True then automatically load the dynamic modules ''' - if not self.opts['autoload_dynamic_modules']: + if not self.opts[u'autoload_dynamic_modules']: return - syncd = self.state.functions['saltutil.sync_all'](list(matches), + syncd = self.state.functions[u'saltutil.sync_all'](list(matches), refresh=False) - if syncd['grains']: - self.opts['grains'] = salt.loader.grains(self.opts) - self.state.opts['pillar'] = self.state._gather_pillar() + if syncd[u'grains']: + self.opts[u'grains'] = salt.loader.grains(self.opts) + self.state.opts[u'pillar'] = self.state._gather_pillar() self.state.module_refresh() def render_state(self, sls, saltenv, mods, matches, local=False): @@ -3176,39 +3186,39 @@ class BaseHighState(object): errors = [] if not local: state_data = self.client.get_state(sls, saltenv) - fn_ = state_data.get('dest', False) + fn_ = state_data.get(u'dest', False) else: fn_ = sls if not os.path.isfile(fn_): errors.append( - 'Specified SLS {0} on local filesystem cannot ' - 'be found.'.format(sls) + u'Specified SLS {0} on local filesystem cannot ' + u'be found.'.format(sls) ) if not fn_: errors.append( - 'Specified SLS {0} in saltenv {1} is not ' - 'available on the salt master or through a configured ' - 'fileserver'.format(sls, saltenv) + u'Specified SLS {0} in saltenv {1} is not ' + u'available on the salt master or through a configured ' + u'fileserver'.format(sls, saltenv) ) state = None try: state = compile_template(fn_, self.state.rend, - self.state.opts['renderer'], - self.state.opts['renderer_blacklist'], - self.state.opts['renderer_whitelist'], + self.state.opts[u'renderer'], + self.state.opts[u'renderer_blacklist'], + self.state.opts[u'renderer_whitelist'], saltenv, sls, rendered_sls=mods ) except SaltRenderError as exc: - msg = 'Rendering SLS \'{0}:{1}\' failed: {2}'.format( + msg = u'Rendering SLS \'{0}:{1}\' failed: {2}'.format( saltenv, sls, exc ) log.critical(msg) errors.append(msg) except Exception as exc: - msg = 'Rendering SLS {0} failed, render error: {1}'.format( + msg = u'Rendering SLS {0} failed, render error: {1}'.format( sls, exc ) log.critical( @@ -3216,25 +3226,25 @@ class BaseHighState(object): # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) - errors.append('{0}\n{1}'.format(msg, traceback.format_exc())) + errors.append(u'{0}\n{1}'.format(msg, traceback.format_exc())) try: - mods.add('{0}:{1}'.format(saltenv, sls)) + mods.add(u'{0}:{1}'.format(saltenv, sls)) except AttributeError: pass if state: if not 
isinstance(state, dict): errors.append( - 'SLS {0} does not render to a dictionary'.format(sls) + u'SLS {0} does not render to a dictionary'.format(sls) ) else: include = [] - if 'include' in state: - if not isinstance(state['include'], list): - err = ('Include Declaration in SLS {0} is not formed ' - 'as a list'.format(sls)) + if u'include' in state: + if not isinstance(state[u'include'], list): + err = (u'Include Declaration in SLS {0} is not formed ' + u'as a list'.format(sls)) errors.append(err) else: - include = state.pop('include') + include = state.pop(u'include') self._handle_extend(state, sls, saltenv, errors) self._handle_exclude(state, sls, saltenv, errors) @@ -3245,7 +3255,7 @@ class BaseHighState(object): # 'sls.to.include' <- same as {: 'sls.to.include'} # {: 'sls.to.include'} # {'_xenv': 'sls.to.resolve'} - xenv_key = '_xenv' + xenv_key = u'_xenv' if isinstance(inc_sls, dict): env_key, inc_sls = inc_sls.popitem() @@ -3253,37 +3263,37 @@ class BaseHighState(object): env_key = saltenv if env_key not in self.avail: - msg = ('Nonexistent saltenv \'{0}\' found in include ' - 'of \'{1}\' within SLS \'{2}:{3}\'' + msg = (u'Nonexistent saltenv \'{0}\' found in include ' + u'of \'{1}\' within SLS \'{2}:{3}\'' .format(env_key, inc_sls, saltenv, sls)) log.error(msg) errors.append(msg) continue - if inc_sls.startswith('.'): - match = re.match(r'^(\.+)(.*)$', inc_sls) + if inc_sls.startswith(u'.'): + match = re.match(r'^(\.+)(.*)$', inc_sls) # future lint: disable=non-unicode-string if match: levels, include = match.groups() else: - msg = ('Badly formatted include {0} found in include ' - 'in SLS \'{2}:{3}\'' + msg = (u'Badly formatted include {0} found in include ' + u'in SLS \'{2}:{3}\'' .format(inc_sls, saltenv, sls)) log.error(msg) errors.append(msg) continue level_count = len(levels) - p_comps = sls.split('.') - if state_data.get('source', '').endswith('/init.sls'): - p_comps.append('init') + p_comps = sls.split(u'.') + if state_data.get(u'source', u'').endswith(u'/init.sls'): + p_comps.append(u'init') if level_count > len(p_comps): - msg = ('Attempted relative include of \'{0}\' ' - 'within SLS \'{1}:{2}\' ' - 'goes beyond top level package ' + msg = (u'Attempted relative include of \'{0}\' ' + u'within SLS \'{1}:{2}\' ' + u'goes beyond top level package ' .format(inc_sls, saltenv, sls)) log.error(msg) errors.append(msg) continue - inc_sls = '.'.join(p_comps[:-level_count] + [include]) + inc_sls = u'.'.join(p_comps[:-level_count] + [include]) if env_key != xenv_key: if matches is None: @@ -3315,7 +3325,7 @@ class BaseHighState(object): for sls_target in sls_targets: r_env = resolved_envs[0] if len(resolved_envs) == 1 else saltenv - mod_tgt = '{0}:{1}'.format(r_env, sls_target) + mod_tgt = u'{0}:{1}'.format(r_env, sls_target) if mod_tgt not in mods: nstate, err = self.render_state( sls_target, @@ -3329,25 +3339,25 @@ class BaseHighState(object): if err: errors.extend(err) else: - msg = '' + msg = u'' if not resolved_envs: - msg = ('Unknown include: Specified SLS {0}: {1} is not available on the salt ' - 'master in saltenv(s): {2} ' + msg = (u'Unknown include: Specified SLS {0}: {1} is not available on the salt ' + u'master in saltenv(s): {2} ' ).format(env_key, inc_sls, - ', '.join(matches) if env_key == xenv_key else env_key) + u', '.join(matches) if env_key == xenv_key else env_key) elif len(resolved_envs) > 1: - msg = ('Ambiguous include: Specified SLS {0}: {1} is available on the salt master ' - 'in multiple available saltenvs: {2}' + msg = (u'Ambiguous include: Specified SLS 
{0}: {1} is available on the salt master ' + u'in multiple available saltenvs: {2}' ).format(env_key, inc_sls, - ', '.join(resolved_envs)) + u', '.join(resolved_envs)) log.critical(msg) errors.append(msg) try: self._handle_iorder(state) except TypeError: - log.critical('Could not render SLS {0}. Syntax error detected.'.format(sls)) + log.critical(u'Could not render SLS %s. Syntax error detected.', sls) else: state = {} return state, errors @@ -3356,7 +3366,7 @@ class BaseHighState(object): ''' Take a state and apply the iorder system ''' - if self.opts['state_auto_order']: + if self.opts[u'state_auto_order']: for name in state: for s_dec in state[name]: if not isinstance(s_dec, six.string_types): @@ -3371,20 +3381,20 @@ class BaseHighState(object): continue found = False - if s_dec.startswith('_'): + if s_dec.startswith(u'_'): continue for arg in state[name][s_dec]: if isinstance(arg, dict): if len(arg) > 0: - if next(six.iterkeys(arg)) == 'order': + if next(six.iterkeys(arg)) == u'order': found = True if not found: if not isinstance(state[name][s_dec], list): # quite certainly a syntax error, managed elsewhere continue state[name][s_dec].append( - {'order': self.iorder} + {u'order': self.iorder} ) self.iorder += 1 return state @@ -3395,31 +3405,31 @@ class BaseHighState(object): ''' for name in state: if not isinstance(state[name], dict): - if name == '__extend__': + if name == u'__extend__': continue - if name == '__exclude__': + if name == u'__exclude__': continue if isinstance(state[name], six.string_types): # Is this is a short state, it needs to be padded - if '.' in state[name]: - comps = state[name].split('.') - state[name] = {'__sls__': sls, - '__env__': saltenv, + if u'.' in state[name]: + comps = state[name].split(u'.') + state[name] = {u'__sls__': sls, + u'__env__': saltenv, comps[0]: [comps[1]]} continue errors.append( - 'ID {0} in SLS {1} is not a dictionary'.format(name, sls) + u'ID {0} in SLS {1} is not a dictionary'.format(name, sls) ) continue skeys = set() for key in list(state[name]): - if key.startswith('_'): + if key.startswith(u'_'): continue if not isinstance(state[name][key], list): continue - if '.' in key: - comps = key.split('.') + if u'.' 
in key: + comps = key.split(u'.') # Salt doesn't support state files such as: # # /etc/redis/redis.conf: @@ -3432,8 +3442,8 @@ class BaseHighState(object): # - regex: ^requirepass if comps[0] in skeys: errors.append( - 'ID \'{0}\' in SLS \'{1}\' contains multiple state ' - 'declarations of the same type'.format(name, sls) + u'ID \'{0}\' in SLS \'{1}\' contains multiple state ' + u'declarations of the same type'.format(name, sls) ) continue state[name][comps[0]] = state[name].pop(key) @@ -3441,55 +3451,55 @@ class BaseHighState(object): skeys.add(comps[0]) continue skeys.add(key) - if '__sls__' not in state[name]: - state[name]['__sls__'] = sls - if '__env__' not in state[name]: - state[name]['__env__'] = saltenv + if u'__sls__' not in state[name]: + state[name][u'__sls__'] = sls + if u'__env__' not in state[name]: + state[name][u'__env__'] = saltenv def _handle_extend(self, state, sls, saltenv, errors): ''' Take the extend dec out of state and apply to the highstate global dec ''' - if 'extend' in state: - ext = state.pop('extend') + if u'extend' in state: + ext = state.pop(u'extend') if not isinstance(ext, dict): - errors.append(('Extension value in SLS \'{0}\' is not a ' - 'dictionary').format(sls)) + errors.append((u'Extension value in SLS \'{0}\' is not a ' + u'dictionary').format(sls)) return for name in ext: if not isinstance(ext[name], dict): - errors.append(('Extension name \'{0}\' in SLS \'{1}\' is ' - 'not a dictionary' + errors.append((u'Extension name \'{0}\' in SLS \'{1}\' is ' + u'not a dictionary' .format(name, sls))) continue - if '__sls__' not in ext[name]: - ext[name]['__sls__'] = sls - if '__env__' not in ext[name]: - ext[name]['__env__'] = saltenv + if u'__sls__' not in ext[name]: + ext[name][u'__sls__'] = sls + if u'__env__' not in ext[name]: + ext[name][u'__env__'] = saltenv for key in list(ext[name]): - if key.startswith('_'): + if key.startswith(u'_'): continue if not isinstance(ext[name][key], list): continue - if '.' in key: - comps = key.split('.') + if u'.' 
in key: + comps = key.split(u'.') ext[name][comps[0]] = ext[name].pop(key) ext[name][comps[0]].append(comps[1]) - state.setdefault('__extend__', []).append(ext) + state.setdefault(u'__extend__', []).append(ext) def _handle_exclude(self, state, sls, saltenv, errors): ''' Take the exclude dec out of the state and apply it to the highstate global dec ''' - if 'exclude' in state: - exc = state.pop('exclude') + if u'exclude' in state: + exc = state.pop(u'exclude') if not isinstance(exc, list): - err = ('Exclude Declaration in SLS {0} is not formed ' - 'as a list'.format(sls)) + err = (u'Exclude Declaration in SLS {0} is not formed ' + u'as a list'.format(sls)) errors.append(err) - state.setdefault('__exclude__', []).extend(exc) + state.setdefault(u'__exclude__', []).extend(exc) def render_highstate(self, matches): ''' @@ -3506,8 +3516,8 @@ class BaseHighState(object): statefiles = fnmatch.filter(self.avail[saltenv], sls_match) except KeyError: all_errors.extend( - ['No matching salt environment for environment ' - '\'{0}\' found'.format(saltenv)] + [u'No matching salt environment for environment ' + u'\'{0}\' found'.format(saltenv)] ) # if we did not found any sls in the fileserver listing, this # may be because the sls was generated or added later, we can @@ -3517,7 +3527,7 @@ class BaseHighState(object): statefiles = [sls_match] for sls in statefiles: - r_env = '{0}:{1}'.format(saltenv, sls) + r_env = u'{0}:{1}'.format(saltenv, sls) if r_env in mods: continue state, errors = self.render_state( @@ -3525,55 +3535,55 @@ class BaseHighState(object): if state: self.merge_included_states(highstate, state, errors) for i, error in enumerate(errors[:]): - if 'is not available' in error: + if u'is not available' in error: # match SLS foobar in environment - this_sls = 'SLS {0} in saltenv'.format( + this_sls = u'SLS {0} in saltenv'.format( sls_match) if this_sls in error: errors[i] = ( - 'No matching sls found for \'{0}\' ' - 'in env \'{1}\''.format(sls_match, saltenv)) + u'No matching sls found for \'{0}\' ' + u'in env \'{1}\''.format(sls_match, saltenv)) all_errors.extend(errors) self.clean_duplicate_extends(highstate) return highstate, all_errors def clean_duplicate_extends(self, highstate): - if '__extend__' in highstate: + if u'__extend__' in highstate: highext = [] - for items in (six.iteritems(ext) for ext in highstate['__extend__']): + for items in (six.iteritems(ext) for ext in highstate[u'__extend__']): for item in items: if item not in highext: highext.append(item) - highstate['__extend__'] = [{t[0]: t[1]} for t in highext] + highstate[u'__extend__'] = [{t[0]: t[1]} for t in highext] def merge_included_states(self, highstate, state, errors): # The extend members can not be treated as globally unique: - if '__extend__' in state: - highstate.setdefault('__extend__', - []).extend(state.pop('__extend__')) - if '__exclude__' in state: - highstate.setdefault('__exclude__', - []).extend(state.pop('__exclude__')) + if u'__extend__' in state: + highstate.setdefault(u'__extend__', + []).extend(state.pop(u'__extend__')) + if u'__exclude__' in state: + highstate.setdefault(u'__exclude__', + []).extend(state.pop(u'__exclude__')) for id_ in state: if id_ in highstate: if highstate[id_] != state[id_]: errors.append(( - 'Detected conflicting IDs, SLS' - ' IDs need to be globally unique.\n The' - ' conflicting ID is \'{0}\' and is found in SLS' - ' \'{1}:{2}\' and SLS \'{3}:{4}\'').format( + u'Detected conflicting IDs, SLS' + u' IDs need to be globally unique.\n The' + u' conflicting ID is \'{0}\' and is 
found in SLS' + u' \'{1}:{2}\' and SLS \'{3}:{4}\'').format( id_, - highstate[id_]['__env__'], - highstate[id_]['__sls__'], - state[id_]['__env__'], - state[id_]['__sls__']) + highstate[id_][u'__env__'], + highstate[id_][u'__sls__'], + state[id_][u'__env__'], + state[id_][u'__sls__']) ) try: highstate.update(state) except ValueError: errors.append( - 'Error when rendering state with contents: {0}'.format(state) + u'Error when rendering state with contents: {0}'.format(state) ) def _check_pillar(self, force=False): @@ -3583,7 +3593,7 @@ class BaseHighState(object): ''' if force: return True - if '_errors' in self.state.opts['pillar']: + if u'_errors' in self.state.opts[u'pillar']: return False return True @@ -3596,7 +3606,7 @@ class BaseHighState(object): return matches ret_matches = {} if not isinstance(whitelist, list): - whitelist = whitelist.split(',') + whitelist = whitelist.split(u',') for env in matches: for sls in matches[env]: if sls in whitelist: @@ -3604,28 +3614,28 @@ class BaseHighState(object): ret_matches[env].append(sls) return ret_matches - def call_highstate(self, exclude=None, cache=None, cache_name='highstate', + def call_highstate(self, exclude=None, cache=None, cache_name=u'highstate', force=False, whitelist=None, orchestration_jid=None): ''' Run the sequence to execute the salt highstate for this minion ''' # Check that top file exists - tag_name = 'no_|-states_|-states_|-None' + tag_name = u'no_|-states_|-states_|-None' ret = {tag_name: { - 'result': False, - 'comment': 'No states found for this minion', - 'name': 'No States', - 'changes': {}, - '__run_num__': 0, + u'result': False, + u'comment': u'No states found for this minion', + u'name': u'No States', + u'changes': {}, + u'__run_num__': 0, }} cfn = os.path.join( - self.opts['cachedir'], - '{0}.cache.p'.format(cache_name) + self.opts[u'cachedir'], + u'{0}.cache.p'.format(cache_name) ) if cache: if os.path.isfile(cfn): - with salt.utils.files.fopen(cfn, 'rb') as fp_: + with salt.utils.files.fopen(cfn, u'rb') as fp_: high = self.serial.load(fp_) return self.state.call_high(high, orchestration_jid) # File exists so continue @@ -3633,8 +3643,8 @@ class BaseHighState(object): try: top = self.get_top() except SaltRenderError as err: - ret[tag_name]['comment'] = 'Unable to render top file: ' - ret[tag_name]['comment'] += str(err.error) + ret[tag_name][u'comment'] = u'Unable to render top file: ' + ret[tag_name][u'comment'] += str(err.error) return ret except Exception: trb = traceback.format_exc() @@ -3643,23 +3653,23 @@ class BaseHighState(object): err += self.verify_tops(top) matches = self.top_matches(top) if not matches: - msg = 'No Top file or master_tops data matches found.' - ret[tag_name]['comment'] = msg + msg = u'No Top file or master_tops data matches found.' 
+ ret[tag_name][u'comment'] = msg return ret matches = self.matches_whitelist(matches, whitelist) self.load_dynamic(matches) if not self._check_pillar(force): - err += ['Pillar failed to render with the following messages:'] - err += self.state.opts['pillar']['_errors'] + err += [u'Pillar failed to render with the following messages:'] + err += self.state.opts[u'pillar'][u'_errors'] else: high, errors = self.render_highstate(matches) if exclude: - if isinstance(exclude, str): - exclude = exclude.split(',') - if '__exclude__' in high: - high['__exclude__'].extend(exclude) + if isinstance(exclude, six.string_types): + exclude = exclude.split(u',') + if u'__exclude__' in high: + high[u'__exclude__'].extend(exclude) else: - high['__exclude__'] = exclude + high[u'__exclude__'] = exclude err += errors if err: return err @@ -3667,18 +3677,20 @@ class BaseHighState(object): return ret cumask = os.umask(0o77) try: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Make sure cache file isn't read-only - self.state.functions['cmd.run']('attrib -R "{0}"'.format(cfn), output_loglevel='quiet') - with salt.utils.files.fopen(cfn, 'w+b') as fp_: + self.state.functions[u'cmd.run']( + [u'attrib', u'-R', cfn], + python_shell=False, + output_loglevel=u'quiet') + with salt.utils.files.fopen(cfn, u'w+b') as fp_: try: self.serial.dump(high, fp_) except TypeError: # Can't serialize pydsl pass except (IOError, OSError): - msg = 'Unable to write to "state.highstate" cache file {0}' - log.error(msg.format(cfn)) + log.error(u'Unable to write to "state.highstate" cache file %s', cfn) os.umask(cumask) return self.state.call_high(high, orchestration_jid) @@ -3742,23 +3754,23 @@ class BaseHighState(object): for saltenv, states in self.avail.items(): env_usage = { - 'used': [], - 'unused': [], - 'count_all': 0, - 'count_used': 0, - 'count_unused': 0 + u'used': [], + u'unused': [], + u'count_all': 0, + u'count_used': 0, + u'count_unused': 0 } env_matches = matches.get(saltenv) for state in states: - env_usage['count_all'] += 1 + env_usage[u'count_all'] += 1 if state in env_matches: - env_usage['count_used'] += 1 - env_usage['used'].append(state) + env_usage[u'count_used'] += 1 + env_usage[u'used'].append(state) else: - env_usage['count_unused'] += 1 - env_usage['unused'].append(state) + env_usage[u'count_unused'] += 1 + env_usage[u'unused'].append(state) state_usage[saltenv] = env_usage @@ -3783,7 +3795,7 @@ class HighState(BaseHighState): proxy=None, context=None, mocked=False, - loader='states', + loader=u'states', initial_pillar=None): self.opts = opts self.client = salt.fileclient.get_file_client(self.opts) @@ -3841,13 +3853,13 @@ class MasterState(State): ''' Load the modules into the state ''' - log.info('Loading fresh modules for state activity') + log.info(u'Loading fresh modules for state activity') # Load a modified client interface that looks like the interface used # from the minion, but uses remote execution # self.functions = salt.client.FunctionWrapper( self.opts, - self.opts['id'] + self.opts[u'id'] ) # Load the states, but they should not be used in this class apart # from inspection @@ -3865,12 +3877,12 @@ class MasterHighState(HighState): saltenv=None): # Force the fileclient to be local opts = copy.deepcopy(minion_opts) - opts['file_client'] = 'local' - opts['file_roots'] = master_opts['master_roots'] - opts['renderer'] = master_opts['renderer'] - opts['state_top'] = master_opts['state_top'] - opts['id'] = id_ - opts['grains'] = grains + opts[u'file_client'] = u'local' + 
opts[u'file_roots'] = master_opts[u'master_roots'] + opts[u'renderer'] = master_opts[u'renderer'] + opts[u'state_top'] = master_opts[u'state_top'] + opts[u'id'] = id_ + opts[u'grains'] = grains HighState.__init__(self, opts) @@ -3883,15 +3895,15 @@ class RemoteHighState(object): self.grains = grains self.serial = salt.payload.Serial(self.opts) # self.auth = salt.crypt.SAuth(opts) - self.channel = salt.transport.Channel.factory(self.opts['master_uri']) + self.channel = salt.transport.Channel.factory(self.opts[u'master_uri']) def compile_master(self): ''' Return the state data from the master ''' - load = {'grains': self.grains, - 'opts': self.opts, - 'cmd': '_master_state'} + load = {u'grains': self.grains, + u'opts': self.opts, + u'cmd': u'_master_state'} try: return self.channel.send(load, tries=3, timeout=72000) except SaltReqTimeoutError: diff --git a/salt/states/apache_conf.py b/salt/states/apache_conf.py index 17cef65e1e..e4494ab205 100644 --- a/salt/states/apache_conf.py +++ b/salt/states/apache_conf.py @@ -17,17 +17,17 @@ Enable and disable apache confs. - name: security ''' from __future__ import absolute_import -from salt.ext.six import string_types +from salt.ext import six # Import salt libs -import salt.utils +import salt.utils.path def __virtual__(): ''' Only load if a2enconf is available. ''' - return 'apache_conf' if 'apache.a2enconf' in __salt__ and salt.utils.which('a2enconf') else False + return 'apache_conf' if 'apache.a2enconf' in __salt__ and salt.utils.path.which('a2enconf') else False def enabled(name): @@ -49,14 +49,14 @@ def enabled(name): ret['result'] = None return ret status = __salt__['apache.a2enconf'](name)['Status'] - if isinstance(status, string_types) and 'enabled' in status: + if isinstance(status, six.string_types) and 'enabled' in status: ret['result'] = True ret['changes']['old'] = None ret['changes']['new'] = name else: ret['result'] = False ret['comment'] = 'Failed to enable {0} Apache conf'.format(name) - if isinstance(status, string_types): + if isinstance(status, six.string_types): ret['comment'] = ret['comment'] + ' ({0})'.format(status) return ret else: @@ -83,14 +83,14 @@ def disabled(name): ret['result'] = None return ret status = __salt__['apache.a2disconf'](name)['Status'] - if isinstance(status, string_types) and 'disabled' in status: + if isinstance(status, six.string_types) and 'disabled' in status: ret['result'] = True ret['changes']['old'] = name ret['changes']['new'] = None else: ret['result'] = False ret['comment'] = 'Failed to disable {0} Apache conf'.format(name) - if isinstance(status, string_types): + if isinstance(status, six.string_types): ret['comment'] = ret['comment'] + ' ({0})'.format(status) return ret else: diff --git a/salt/states/archive.py b/salt/states/archive.py index 4acb541e27..c2308cbbd0 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -17,13 +17,16 @@ import tarfile from contextlib import closing # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import shlex_quote as _cmd_quote from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module -# Import salt libs +# Import Salt libs import salt.utils +import salt.utils.args import salt.utils.files +import salt.utils.path +import salt.utils.platform import salt.utils.url from salt.exceptions import CommandExecutionError, CommandNotFoundError @@ -621,7 +624,7 @@ def extracted(name, ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} # Remove pub 
kwargs as they're irrelevant here. - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) if not _path_is_abs(name): ret['comment'] = '{0} is not an absolute path'.format(name) @@ -674,7 +677,7 @@ def extracted(name, return ret if user or group: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): ret['comment'] = \ 'User/group ownership cannot be enforced on Windows minions' return ret @@ -1194,7 +1197,7 @@ def extracted(name, return ret if not os.path.isdir(name): - __salt__['file.makedirs'](name, user=user) + __states__['file.directory'](name, user=user, makedirs=True) created_destdir = True log.debug('Extracting {0} to {1}'.format(cached_source, name)) @@ -1218,6 +1221,7 @@ def extracted(name, options=options, trim_output=trim_output, password=password, + extract_perms=extract_perms, **kwargs) elif archive_format == 'rar': try: @@ -1237,7 +1241,7 @@ def extracted(name, if trim_output: files = files[:trim_output] except tarfile.ReadError: - if salt.utils.which('xz'): + if salt.utils.path.which('xz'): if __salt__['cmd.retcode']( ['xz', '-t', cached_source], python_shell=False, @@ -1295,7 +1299,7 @@ def extracted(name, ) return ret else: - if not salt.utils.which('tar'): + if not salt.utils.path.which('tar'): ret['comment'] = ( 'tar command not available, it might not be ' 'installed on minion' diff --git a/salt/states/artifactory.py b/salt/states/artifactory.py index ddcc7a6707..cf6068824a 100644 --- a/salt/states/artifactory.py +++ b/salt/states/artifactory.py @@ -11,10 +11,10 @@ import logging log = logging.getLogger(__name__) -def downloaded(name, artifact, target_dir='/tmp', target_file=None): +def downloaded(name, artifact, target_dir='/tmp', target_file=None, use_literal_group_id=False): ''' Ensures that the artifact from artifactory exists at given location. If it doesn't exist, then - it will be downloaded. It it already exists then the checksum of existing file is checked against checksum + it will be downloaded. If it already exists then the checksum of existing file is checked against checksum in artifactory. If it is different then the step will fail. 
artifact @@ -84,7 +84,7 @@ def downloaded(name, artifact, target_dir='/tmp', target_file=None): 'comment': ''} try: - fetch_result = __fetch_from_artifactory(artifact, target_dir, target_file) + fetch_result = __fetch_from_artifactory(artifact, target_dir, target_file, use_literal_group_id) except Exception as exc: ret['result'] = False ret['comment'] = str(exc) @@ -100,7 +100,7 @@ def downloaded(name, artifact, target_dir='/tmp', target_file=None): return ret -def __fetch_from_artifactory(artifact, target_dir, target_file): +def __fetch_from_artifactory(artifact, target_dir, target_file, use_literal_group_id): if ('latest_snapshot' in artifact and artifact['latest_snapshot']) or artifact['version'] == 'latest_snapshot': fetch_result = __salt__['artifactory.get_latest_snapshot'](artifactory_url=artifact['artifactory_url'], repository=artifact['repository'], @@ -111,7 +111,8 @@ def __fetch_from_artifactory(artifact, target_dir, target_file): target_dir=target_dir, target_file=target_file, username=artifact['username'] if 'username' in artifact else None, - password=artifact['password'] if 'password' in artifact else None) + password=artifact['password'] if 'password' in artifact else None, + use_literal_group_id=use_literal_group_id) elif artifact['version'].endswith('SNAPSHOT'): fetch_result = __salt__['artifactory.get_snapshot'](artifactory_url=artifact['artifactory_url'], repository=artifact['repository'], @@ -123,7 +124,8 @@ def __fetch_from_artifactory(artifact, target_dir, target_file): target_dir=target_dir, target_file=target_file, username=artifact['username'] if 'username' in artifact else None, - password=artifact['password'] if 'password' in artifact else None) + password=artifact['password'] if 'password' in artifact else None, + use_literal_group_id=use_literal_group_id) elif artifact['version'] == 'latest': fetch_result = __salt__['artifactory.get_latest_release'](artifactory_url=artifact['artifactory_url'], repository=artifact['repository'], @@ -134,7 +136,8 @@ def __fetch_from_artifactory(artifact, target_dir, target_file): target_dir=target_dir, target_file=target_file, username=artifact['username'] if 'username' in artifact else None, - password=artifact['password'] if 'password' in artifact else None) + password=artifact['password'] if 'password' in artifact else None, + use_literal_group_id=use_literal_group_id) else: fetch_result = __salt__['artifactory.get_release'](artifactory_url=artifact['artifactory_url'], repository=artifact['repository'], @@ -146,5 +149,6 @@ def __fetch_from_artifactory(artifact, target_dir, target_file): target_dir=target_dir, target_file=target_file, username=artifact['username'] if 'username' in artifact else None, - password=artifact['password'] if 'password' in artifact else None) + password=artifact['password'] if 'password' in artifact else None, + use_literal_group_id=use_literal_group_id) return fetch_result diff --git a/salt/states/augeas.py b/salt/states/augeas.py index b18153c96a..60759a79af 100644 --- a/salt/states/augeas.py +++ b/salt/states/augeas.py @@ -36,7 +36,7 @@ import logging import difflib # Import Salt libs -import salt.utils +import salt.utils.args import salt.utils.files from salt.modules.augeas_cfg import METHOD_MAP @@ -75,7 +75,7 @@ def _check_filepath(changes): error = 'Command {0} is not supported (yet)'.format(cmd) raise ValueError(error) method = METHOD_MAP[cmd] - parts = salt.utils.shlex_split(arg) + parts = salt.utils.args.shlex_split(arg) if method in ['set', 'setm', 'move', 'remove']: filename_ = parts[0] 
else: diff --git a/salt/states/beacon.py b/salt/states/beacon.py index 5b9730c2b9..7398bf757a 100644 --- a/salt/states/beacon.py +++ b/salt/states/beacon.py @@ -10,23 +10,25 @@ Management of the Salt beacons ps: beacon.present: - enable: False - - salt-master: running - - apache2: stopped + - services: + salt-master: running + apache2: stopped sh: - beacon.present: + beacon.present: [] load: beacon.present: - - 1m: - - 0.0 - - 2.0 - - 5m: - - 0.0 - - 1.5 - - 15m: - - 0.1 - - 1.0 + - averages: + 1m: + - 0.0 + - 2.0 + 5m: + - 0.0 + - 1.5 + 15m: + - 0.1 + - 1.0 ''' from __future__ import absolute_import @@ -50,7 +52,7 @@ def present(name, 'comment': []} current_beacons = __salt__['beacons.list'](return_yaml=False) - beacon_data = kwargs + beacon_data = [kwargs] if name in current_beacons: @@ -59,11 +61,11 @@ def present(name, else: if 'test' in __opts__ and __opts__['test']: kwargs['test'] = True - result = __salt__['beacons.modify'](name, beacon_data, **kwargs) + result = __salt__['beacons.modify'](name, beacon_data) ret['comment'].append(result['comment']) ret['changes'] = result['changes'] else: - result = __salt__['beacons.modify'](name, beacon_data, **kwargs) + result = __salt__['beacons.modify'](name, beacon_data) if not result['result']: ret['result'] = result['result'] ret['comment'] = result['comment'] @@ -77,10 +79,10 @@ def present(name, else: if 'test' in __opts__ and __opts__['test']: kwargs['test'] = True - result = __salt__['beacons.add'](name, beacon_data, **kwargs) + result = __salt__['beacons.add'](name, beacon_data) ret['comment'].append(result['comment']) else: - result = __salt__['beacons.add'](name, beacon_data, **kwargs) + result = __salt__['beacons.add'](name, beacon_data) if not result['result']: ret['result'] = result['result'] ret['comment'] = result['comment'] diff --git a/salt/states/bigip.py b/salt/states/bigip.py index 8a575b0d1d..11247b3618 100644 --- a/salt/states/bigip.py +++ b/salt/states/bigip.py @@ -11,7 +11,7 @@ from __future__ import absolute_import import json # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six #set up virtual function @@ -85,7 +85,7 @@ def _check_for_changes(entity_type, ret, existing, modified): if 'generation' in existing['content'].keys(): del existing['content']['generation'] - if cmp(modified['content'], existing['content']) == 0: + if modified['content'] == existing['content']: ret['comment'] = '{entity_type} is currently enforced to the desired state. No changes made.'.format(entity_type=entity_type) else: ret['comment'] = '{entity_type} was enforced to the desired state. Note: Only parameters specified ' \ @@ -94,7 +94,7 @@ def _check_for_changes(entity_type, ret, existing, modified): ret['changes']['new'] = modified['content'] else: - if cmp(modified, existing) == 0: + if modified == existing: ret['comment'] = '{entity_type} is currently enforced to the desired state. No changes made.'.format(entity_type=entity_type) else: ret['comment'] = '{entity_type} was enforced to the desired state. 
Note: Only parameters specified ' \ diff --git a/salt/states/blockdev.py b/salt/states/blockdev.py index 4b0dc5ca81..6866c0c26b 100644 --- a/salt/states/blockdev.py +++ b/salt/states/blockdev.py @@ -29,7 +29,7 @@ import time import logging # Import salt libs -import salt.utils +import salt.utils.path from salt.ext.six.moves import range __virtualname__ = 'blockdev' @@ -150,7 +150,7 @@ def formatted(name, fs_type='ext4', force=False, **kwargs): if current_fs == fs_type: ret['result'] = True return ret - elif not salt.utils.which('mkfs.{0}'.format(fs_type)): + elif not salt.utils.path.which('mkfs.{0}'.format(fs_type)): ret['comment'] = 'Invalid fs_type: {0}'.format(fs_type) ret['result'] = False return ret diff --git a/salt/states/boto3_route53.py b/salt/states/boto3_route53.py index 0a49dcc3fe..d55ca48bb4 100644 --- a/salt/states/boto3_route53.py +++ b/salt/states/boto3_route53.py @@ -70,7 +70,8 @@ import uuid # Import Salt Libs import salt.utils.dictupdate as dictupdate -from salt.utils import SaltInvocationError, exactly_one +from salt.utils import exactly_one +from salt.exceptions import SaltInvocationError import logging log = logging.getLogger(__name__) # pylint: disable=W1699 diff --git a/salt/states/boto3_sns.py b/salt/states/boto3_sns.py index 47f88fc377..720277e538 100644 --- a/salt/states/boto3_sns.py +++ b/salt/states/boto3_sns.py @@ -60,7 +60,7 @@ import re import logging import json import copy -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/states/boto_apigateway.py b/salt/states/boto_apigateway.py index 154fc5e7fd..7288230144 100644 --- a/salt/states/boto_apigateway.py +++ b/salt/states/boto_apigateway.py @@ -56,7 +56,7 @@ import re import json import yaml # Import Salt Libs -import salt.ext.six as six +from salt.ext import six import salt.utils.files from salt.utils.yamlloader import SaltYamlSafeLoader diff --git a/salt/states/boto_asg.py b/salt/states/boto_asg.py index e8260a688d..73305ae44e 100644 --- a/salt/states/boto_asg.py +++ b/salt/states/boto_asg.py @@ -200,7 +200,7 @@ import copy # Import Salt libs import salt.utils.dictupdate as dictupdate -import salt.ext.six as six +from salt.ext import six from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) diff --git a/salt/states/boto_cfn.py b/salt/states/boto_cfn.py index fcc7375464..3db51d7579 100644 --- a/salt/states/boto_cfn.py +++ b/salt/states/boto_cfn.py @@ -37,13 +37,16 @@ Connection module for Amazon Cloud Formation - name: mystack ''' -from __future__ import absolute_import - # Import Python libs +from __future__ import absolute_import import logging import json -# Import 3rd party libs +# Import Salt libs +import salt.utils.compat +from salt.ext import six + +# Import 3rd-party libs try: from salt._compat import ElementTree as ET HAS_ELEMENT_TREE = True @@ -141,10 +144,14 @@ def present(name, template_body=None, template_url=None, parameters=None, notifi stack_policy_body = _get_template(stack_policy_body, name) stack_policy_during_update_body = _get_template(stack_policy_during_update_body, name) + for i in [template_body, stack_policy_body, stack_policy_during_update_body]: + if isinstance(i, dict): + return i + _valid = _validate(template_body, template_url, region, key, keyid, profile) log.debug('Validate is : {0}.'.format(_valid)) if _valid is not True: - code, message = _get_error(_valid) + code, message = _valid ret['result'] = False ret['comment'] = 'Template could not be validated.\n{0} \n{1}'.format(code, message) 
return ret @@ -154,7 +161,7 @@ def present(name, template_body=None, template_url=None, parameters=None, notifi template = template['GetTemplateResponse']['GetTemplateResult']['TemplateBody'].encode('ascii', 'ignore') template = json.loads(template) _template_body = json.loads(template_body) - compare = cmp(template, _template_body) + compare = salt.utils.compat.cmp(template, _template_body) if compare != 0: log.debug('Templates are not the same. Compare value is {0}'.format(compare)) # At this point we should be able to run update safely since we already validated the template @@ -169,7 +176,7 @@ def present(name, template_body=None, template_url=None, parameters=None, notifi stack_policy_during_update_url, stack_policy_body, stack_policy_url, region, key, keyid, profile) - if isinstance(updated, str): + if isinstance(updated, six.string_types): code, message = _get_error(updated) log.debug('Update error is {0} and message is {1}'.format(code, message)) ret['result'] = False @@ -221,7 +228,7 @@ def absent(name, region=None, key=None, keyid=None, profile=None): ret['result'] = None return ret deleted = __salt__['boto_cfn.delete'](name, region, key, keyid, profile) - if isinstance(deleted, str): + if isinstance(deleted, six.string_types): code, message = _get_error(deleted) ret['comment'] = 'Stack {0} could not be deleted.\n{1}\n{2}'.format(name, code, message) ret['result'] = False @@ -250,8 +257,8 @@ def _get_template(template, name): def _validate(template_body=None, template_url=None, region=None, key=None, keyid=None, profile=None): # Validates template. returns true if template syntax is correct. validate = __salt__['boto_cfn.validate_template'](template_body, template_url, region, key, keyid, profile) - log.debug('Validate is result is {0}.'.format(str(validate))) - if isinstance(validate, str): + log.debug('Validate result is {0}.'.format(str(validate))) + if isinstance(validate, six.string_types): code, message = _get_error(validate) log.debug('Validate error is {0} and message is {1}.'.format(code, message)) return code, message diff --git a/salt/states/boto_cloudtrail.py b/salt/states/boto_cloudtrail.py index e79823faf9..589c1d6a5b 100644 --- a/salt/states/boto_cloudtrail.py +++ b/salt/states/boto_cloudtrail.py @@ -59,7 +59,7 @@ import os import os.path # Import Salt Libs -import salt.ext.six as six +from salt.ext import six import salt.utils log = logging.getLogger(__name__) diff --git a/salt/states/boto_cloudwatch_alarm.py b/salt/states/boto_cloudwatch_alarm.py index 19e4825694..a543396335 100644 --- a/salt/states/boto_cloudwatch_alarm.py +++ b/salt/states/boto_cloudwatch_alarm.py @@ -57,7 +57,7 @@ as a passed in dict, or as a string to pull from pillars or minion config: from __future__ import absolute_import # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def __virtual__(): diff --git a/salt/states/boto_cloudwatch_event.py b/salt/states/boto_cloudwatch_event.py index 6a799e776c..3faddf7347 100644 --- a/salt/states/boto_cloudwatch_event.py +++ b/salt/states/boto_cloudwatch_event.py @@ -60,7 +60,7 @@ import os.path import json # Import Salt Libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/states/boto_datapipeline.py b/salt/states/boto_datapipeline.py index 85f97882cd..f29c3bcbc4 100644 --- a/salt/states/boto_datapipeline.py +++ b/salt/states/boto_datapipeline.py @@ -57,7 +57,7 @@ import difflib import json # Import Salt lobs -import salt.ext.six as six +from salt.ext import six from 
salt.ext.six.moves import zip diff --git a/salt/states/boto_dynamodb.py b/salt/states/boto_dynamodb.py index 8ac818c99f..5a2d2f7289 100644 --- a/salt/states/boto_dynamodb.py +++ b/salt/states/boto_dynamodb.py @@ -163,7 +163,7 @@ import logging import copy # Import salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.dictupdate as dictupdate logging.basicConfig( diff --git a/salt/states/boto_ec2.py b/salt/states/boto_ec2.py index 5a543a9e25..1cacd624d9 100644 --- a/salt/states/boto_ec2.py +++ b/salt/states/boto_ec2.py @@ -57,7 +57,7 @@ import logging from time import time, sleep # Import salt libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.utils import salt.utils.dictupdate as dictupdate diff --git a/salt/states/boto_elasticsearch_domain.py b/salt/states/boto_elasticsearch_domain.py index af4537ffd5..2e544a655b 100644 --- a/salt/states/boto_elasticsearch_domain.py +++ b/salt/states/boto_elasticsearch_domain.py @@ -86,7 +86,7 @@ import os.path import json # Import Salt Libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/states/boto_elb.py b/salt/states/boto_elb.py index b93c9dd6eb..0b9357f79d 100644 --- a/salt/states/boto_elb.py +++ b/salt/states/boto_elb.py @@ -245,7 +245,7 @@ import re import salt.utils.dictupdate as dictupdate from salt.utils import exactly_one from salt.exceptions import SaltInvocationError -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/states/boto_elbv2.py b/salt/states/boto_elbv2.py index 6c8f2db44a..4bf0e12bac 100644 --- a/salt/states/boto_elbv2.py +++ b/salt/states/boto_elbv2.py @@ -40,7 +40,9 @@ from __future__ import absolute_import import logging import copy -# Import Salt Libs +# Import 3rd-party libs +from salt.ext import six + log = logging.getLogger(__name__) @@ -83,7 +85,7 @@ def targets_registered(name, targets, region=None, key=None, keyid=None, changes = False newhealth_mock = copy.copy(health) - if isinstance(targets, str): + if isinstance(targets, six.string_types): targets = [targets] for target in targets: @@ -154,7 +156,7 @@ def targets_deregistered(name, targets, region=None, key=None, keyid=None, failure = False changes = False newhealth_mock = copy.copy(health) - if isinstance(targets, str): + if isinstance(targets, six.string_types): targets = [targets] for target in targets: if target not in health or health.get(target) == "draining": diff --git a/salt/states/boto_iam.py b/salt/states/boto_iam.py index a2f980bcdc..b06dcacdb1 100644 --- a/salt/states/boto_iam.py +++ b/salt/states/boto_iam.py @@ -142,7 +142,7 @@ import salt.utils import salt.utils.files import salt.utils.odict as odict import salt.utils.dictupdate as dictupdate -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Import 3rd party libs @@ -274,7 +274,7 @@ def user_absent(name, delete_keys=True, delete_mfa_devices=True, delete_profile= if profile_deleted: ret['comment'] = ' '.join([ret['comment'], 'IAM user {0} login profile is deleted.'.format(name)]) if __opts__['test']: - ret['comment'] = ' '.join([ret['comment'], 'IAM user {0} policies are set to be deleted.'.format(name)]) + ret['comment'] = ' '.join([ret['comment'], 'IAM user {0} managed policies are set to be detached.'.format(name)]) ret['result'] = None else: _ret = 
_user_policies_detached(name, region, key, keyid, profile) @@ -283,6 +283,16 @@ def user_absent(name, delete_keys=True, delete_mfa_devices=True, delete_profile= ret['result'] = _ret['result'] if ret['result'] is False: return ret + if __opts__['test']: + ret['comment'] = ' '.join([ret['comment'], 'IAM user {0} inline policies are set to be deleted.'.format(name)]) + ret['result'] = None + else: + _ret = _user_policies_deleted(name, region, key, keyid, profile) + ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) + if not _ret['result']: + ret['result'] = _ret['result'] + if ret['result'] is False: + return ret # finally, actually delete the user if __opts__['test']: ret['comment'] = ' '.join([ret['comment'], 'IAM user {0} is set to be deleted.'.format(name)]) @@ -350,7 +360,7 @@ def keys_present(name, number, save_dir, region=None, key=None, keyid=None, prof return ret keys = __salt__['boto_iam.get_all_access_keys'](user_name=name, region=region, key=key, keyid=keyid, profile=profile) - if isinstance(keys, str): + if isinstance(keys, six.string_types): log.debug('keys are : false {0}'.format(keys)) error, message = _get_error(keys) ret['comment'] = 'Could not get keys.\n{0}\n{1}'.format(error, message) @@ -369,7 +379,7 @@ def keys_present(name, number, save_dir, region=None, key=None, keyid=None, prof new_keys = {} for i in range(number-len(keys)): created = __salt__['boto_iam.create_access_key'](name, region, key, keyid, profile) - if isinstance(created, str): + if isinstance(created, six.string_types): error, message = _get_error(created) ret['comment'] = 'Could not create keys.\n{0}\n{1}'.format(error, message) ret['result'] = False @@ -436,7 +446,7 @@ def _delete_key(ret, access_key_id, user_name, region=None, key=None, keyid=None keys = __salt__['boto_iam.get_all_access_keys'](user_name=user_name, region=region, key=key, keyid=keyid, profile=profile) log.debug('Keys for user {1} are : {0}.'.format(keys, user_name)) - if isinstance(keys, str): + if isinstance(keys, six.string_types): log.debug('Keys {0} are a string. Something went wrong.'.format(keys)) ret['comment'] = ' '.join([ret['comment'], 'Key {0} could not be deleted.'.format(access_key_id)]) return ret @@ -738,7 +748,49 @@ def _user_policies_detached( newpolicies = [x.get('policy_arn') for x in _list] ret['changes']['new'] = {'managed_policies': newpolicies} msg = '{0} policies detached from user {1}.' - ret['comment'] = msg.format(', '.join(newpolicies), name) + ret['comment'] = msg.format(', '.join(oldpolicies), name) + return ret + + +def _user_policies_deleted( + name, + region=None, + key=None, + keyid=None, + profile=None): + ret = {'result': True, 'comment': '', 'changes': {}} + oldpolicies = __salt__['boto_iam.get_all_user_policies'](user_name=name, + region=region, key=key, keyid=keyid, profile=profile) + if not oldpolicies: + msg = 'No inline policies in user {0}.'.format(name) + ret['comment'] = msg + return ret + if __opts__['test']: + msg = '{0} policies to be deleted from user {1}.' 
+ ret['comment'] = msg.format(', '.join(oldpolicies), name) + ret['result'] = None + return ret + ret['changes']['old'] = {'inline_policies': oldpolicies} + for policy_name in oldpolicies: + policy_deleted = __salt__['boto_iam.delete_user_policy'](name, + policy_name, + region=region, key=key, + keyid=keyid, + profile=profile) + if not policy_deleted: + newpolicies = __salt__['boto_iam.get_all_user_policies'](name, region=region, + key=key, keyid=keyid, + profile=profile) + ret['changes']['new'] = {'inline_policies': newpolicies} + ret['result'] = False + msg = 'Failed to detach {0} from user {1}' + ret['comment'] = msg.format(policy_name, name) + return ret + newpolicies = __salt__['boto_iam.get_all_user_policies'](name, region=region, key=key, + keyid=keyid, profile=profile) + ret['changes']['new'] = {'inline_policies': newpolicies} + msg = '{0} policies deleted from user {1}.' + ret['comment'] = msg.format(', '.join(oldpolicies), name) return ret @@ -790,7 +842,7 @@ def group_absent(name, region=None, key=None, keyid=None, profile=None): ret['comment'] = 'IAM Group {0} does not exist.'.format(name) return ret if __opts__['test']: - ret['comment'] = ' '.join([ret['comment'], 'IAM group {0} policies are set to be deleted.'.format(name)]) + ret['comment'] = ' '.join([ret['comment'], 'IAM group {0} managed policies are set to be detached.'.format(name)]) ret['result'] = None else: _ret = _group_policies_detached(name, region, key, keyid, profile) @@ -799,6 +851,16 @@ def group_absent(name, region=None, key=None, keyid=None, profile=None): ret['result'] = _ret['result'] if ret['result'] is False: return ret + if __opts__['test']: + ret['comment'] = ' '.join([ret['comment'], 'IAM group {0} inline policies are set to be deleted.'.format(name)]) + ret['result'] = None + else: + _ret = _group_policies_deleted(name, region, key, keyid, profile) + ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) + if not _ret['result']: + ret['result'] = _ret['result'] + if ret['result'] is False: + return ret ret['comment'] = ' '.join([ret['comment'], 'IAM group {0} users are set to be removed.'.format(name)]) existing_users = __salt__['boto_iam.get_group_members'](group_name=name, region=region, key=key, keyid=keyid, profile=profile) _ret = _case_group(ret, [], name, existing_users, region, key, keyid, profile) @@ -1152,6 +1214,48 @@ def _group_policies_detached( return ret +def _group_policies_deleted( + name, + region=None, + key=None, + keyid=None, + profile=None): + ret = {'result': True, 'comment': '', 'changes': {}} + oldpolicies = __salt__['boto_iam.get_all_group_policies'](group_name=name, + region=region, key=key, keyid=keyid, profile=profile) + if not oldpolicies: + msg = 'No inline policies in group {0}.'.format(name) + ret['comment'] = msg + return ret + if __opts__['test']: + msg = '{0} policies to be deleted from group {1}.' 
+ ret['comment'] = msg.format(', '.join(oldpolicies), name) + ret['result'] = None + return ret + ret['changes']['old'] = {'inline_policies': oldpolicies} + for policy_name in oldpolicies: + policy_deleted = __salt__['boto_iam.delete_group_policy'](name, + policy_name, + region=region, key=key, + keyid=keyid, + profile=profile) + if not policy_deleted: + newpolicies = __salt__['boto_iam.get_all_group_policies'](name, region=region, + key=key, keyid=keyid, + profile=profile) + ret['changes']['new'] = {'inline_policies': newpolicies} + ret['result'] = False + msg = 'Failed to detach {0} from group {1}' + ret['comment'] = msg.format(policy_name, name) + return ret + newpolicies = __salt__['boto_iam.get_all_group_policies'](name, region=region, key=key, + keyid=keyid, profile=profile) + ret['changes']['new'] = {'inline_policies': newpolicies} + msg = '{0} policies deleted from group {1}.' + ret['comment'] = msg.format(', '.join(oldpolicies), name) + return ret + + def account_policy(name=None, allow_users_to_change_password=None, hard_expiry=None, max_password_age=None, minimum_password_length=None, password_reuse_prevention=None, diff --git a/salt/states/boto_iam_role.py b/salt/states/boto_iam_role.py index 84b0a24343..26faff5e36 100644 --- a/salt/states/boto_iam_role.py +++ b/salt/states/boto_iam_role.py @@ -89,7 +89,7 @@ on the IAM role to be persistent. This functionality was added in 2015.8.0. from __future__ import absolute_import import logging import salt.utils.dictupdate as dictupdate -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/states/boto_lambda.py b/salt/states/boto_lambda.py index 7c24f94ab2..e190a74d59 100644 --- a/salt/states/boto_lambda.py +++ b/salt/states/boto_lambda.py @@ -69,7 +69,7 @@ import hashlib import json # Import Salt Libs -import salt.ext.six as six +from salt.ext import six import salt.utils.dictupdate as dictupdate import salt.utils import salt.utils.files diff --git a/salt/states/boto_route53.py b/salt/states/boto_route53.py index dddbc2705b..930790660d 100644 --- a/salt/states/boto_route53.py +++ b/salt/states/boto_route53.py @@ -76,7 +76,8 @@ from __future__ import absolute_import import json # Import Salt Libs -from salt.utils import SaltInvocationError, exactly_one +from salt.utils import exactly_one +from salt.exceptions import SaltInvocationError import logging log = logging.getLogger(__name__) diff --git a/salt/states/boto_sqs.py b/salt/states/boto_sqs.py index a97b37ba5d..8a33e078ee 100644 --- a/salt/states/boto_sqs.py +++ b/salt/states/boto_sqs.py @@ -58,9 +58,15 @@ passed in as a dict, or as a string to pull from pillars or minion config: key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' from __future__ import absolute_import -import salt.ext.six as six -import logging + +# Import Python libs +import difflib import json +import logging +import yaml + +# Import 3rd-party libs +from salt.ext import six log = logging.getLogger(__name__) @@ -78,7 +84,8 @@ def present( region=None, key=None, keyid=None, - profile=None): + profile=None, +): ''' Ensure the SQS queue exists. @@ -101,68 +108,143 @@ def present( A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' - ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} + comments = [] + ret = {'name': name, 'result': True, 'changes': {}} - is_present = __salt__['boto_sqs.exists'](name, region, key, keyid, profile) + r = __salt__['boto_sqs.exists']( + name, + region=region, + key=key, + keyid=keyid, + profile=profile, + ) + if 'error' in r: + ret['result'] = False + ret['comment'] = '\n'.join(comments + [str(r['error'])]) + return ret - if not is_present: + if r['result']: + comments.append('SQS queue {0} present.'.format(name)) + else: if __opts__['test']: - msg = 'AWS SQS queue {0} is set to be created.'.format(name) - ret['comment'] = msg ret['result'] = None + comments.append('SQS queue {0} is set to be created.'.format(name)) + ret['pchanges'] = {'old': None, 'new': name} + ret['comment'] = '\n'.join(comments) return ret - created = __salt__['boto_sqs.create'](name, region, key, keyid, - profile) - if created: - ret['changes']['old'] = None - ret['changes']['new'] = {'queue': name} - else: + + r = __salt__['boto_sqs.create']( + name, + attributes=attributes, + region=region, + key=key, + keyid=keyid, + profile=profile, + ) + if 'error' in r: ret['result'] = False - ret['comment'] = 'Failed to create {0} AWS queue'.format(name) + comments.append('Failed to create SQS queue {0}: {1}'.format( + name, + str(r['error']), + )) + ret['comment'] = '\n'.join(comments) return ret - else: - ret['comment'] = '{0} present.'.format(name) + + comments.append('SQS queue {0} created.'.format(name)) + ret['changes']['old'] = None + ret['changes']['new'] = name + # Return immediately, as the create call also set all attributes + ret['comment'] = '\n'.join(comments) + return ret + + if not attributes: + ret['comment'] = '\n'.join(comments) + return ret + + r = __salt__['boto_sqs.get_attributes']( + name, + region=region, + key=key, + keyid=keyid, + profile=profile, + ) + if 'error' in r: + ret['result'] = False + comments.append('Failed to get queue attributes: {0}'.format( + str(r['error']), + )) + ret['comment'] = '\n'.join(comments) + return ret + current_attributes = r['result'] + attrs_to_set = {} - _attributes = __salt__['boto_sqs.get_attributes'](name, region, key, keyid, - profile) - if attributes: - for attr, val in six.iteritems(attributes): - _val = _attributes.get(attr, None) - if attr == 'Policy': - # Normalize these guys by brute force.. - if isinstance(_val, six.string_types): - _val = json.loads(_val) - if isinstance(val, six.string_types): - val = json.loads(val) - if _val != val: - log.debug('Policies differ:\n{0}\n{1}'.format(_val, val)) - attrs_to_set[attr] = json.dumps(val, sort_keys=True) - elif str(_val) != str(val): - log.debug('Attributes differ:\n{0}\n{1}'.format(_val, val)) - attrs_to_set[attr] = val - attr_names = ','.join(attrs_to_set) - if attrs_to_set: - if __opts__['test']: - ret['comment'] = 'Attribute(s) {0} to be set on {1}.'.format( - attr_names, name) - ret['result'] = None - return ret - msg = (' Setting {0} attribute(s).'.format(attr_names)) - ret['comment'] = ret['comment'] + msg - if 'new' in ret['changes']: - ret['changes']['new']['attributes_set'] = [] - else: - ret['changes']['new'] = {'attributes_set': []} - for attr, val in six.iteritems(attrs_to_set): - set_attr = __salt__['boto_sqs.set_attributes'](name, {attr: val}, - region, key, keyid, - profile) - if not set_attr: - ret['result'] = False - msg = 'Set attribute {0}.'.format(attr) - ret['changes']['new']['attributes_set'].append(attr) - else: - ret['comment'] = ret['comment'] + ' Attributes set.' 
+ for attr, val in six.iteritems(attributes): + _val = current_attributes.get(attr, None) + if attr == 'Policy': + # Normalize by brute force + if isinstance(_val, six.string_types): + _val = json.loads(_val) + if isinstance(val, six.string_types): + val = json.loads(val) + if _val != val: + log.debug('Policies differ:\n{0}\n{1}'.format(_val, val)) + attrs_to_set[attr] = json.dumps(val, sort_keys=True) + elif str(_val) != str(val): + log.debug('Attributes differ:\n{0}\n{1}'.format(_val, val)) + attrs_to_set[attr] = val + attr_names = ', '.join(attrs_to_set) + + if not attrs_to_set: + comments.append('Queue attributes already set correctly.') + ret['comment'] = '\n'.join(comments) + return ret + + final_attributes = current_attributes.copy() + final_attributes.update(attrs_to_set) + + def _yaml_safe_dump(attrs): + '''Safely dump YAML using a readable flow style''' + dumper_name = 'IndentedSafeOrderedDumper' + dumper = __utils__['yamldumper.get_dumper'](dumper_name) + return yaml.dump( + attrs, + default_flow_style=False, + Dumper=dumper, + ) + attributes_diff = ''.join(difflib.unified_diff( + _yaml_safe_dump(current_attributes).splitlines(True), + _yaml_safe_dump(final_attributes).splitlines(True), + )) + + if __opts__['test']: + ret['result'] = None + comments.append('Attribute(s) {0} set to be updated:'.format( + attr_names, + )) + comments.append(attributes_diff) + ret['pchanges'] = {'attributes': {'diff': attributes_diff}} + ret['comment'] = '\n'.join(comments) + return ret + + r = __salt__['boto_sqs.set_attributes']( + name, + attrs_to_set, + region=region, + key=key, + keyid=keyid, + profile=profile, + ) + if 'error' in r: + ret['result'] = False + comments.append('Failed to set queue attributes: {0}'.format( + str(r['error']), + )) + ret['comment'] = '\n'.join(comments) + return ret + + comments.append('Updated SQS queue attribute(s) {0}.'.format(attr_names)) + ret['changes']['attributes'] = {'diff': attributes_diff} + ret['comment'] = '\n'.join(comments) return ret @@ -171,7 +253,8 @@ def absent( region=None, key=None, keyid=None, - profile=None): + profile=None, +): ''' Ensure the named sqs queue is deleted. 
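
Note (editorial, not part of the patch): the reworked boto_sqs.present hunks above compare desired queue attributes against the current ones, normalizing JSON-valued attributes such as 'Policy' before comparison, and report the pending change as a unified diff of YAML dumps. The following is a minimal, standalone Python 3 sketch of that comparison-and-diff idea. It assumes plain yaml.safe_dump in place of Salt's __utils__['yamldumper.get_dumper'] helper used in the patch, and the sample attribute values are hypothetical.

    import difflib
    import json

    import yaml


    def attribute_diff(current, desired):
        '''Return (attrs_to_set, human-readable diff) for two attribute dicts.'''
        attrs_to_set = {}
        for attr, val in desired.items():
            cur = current.get(attr)
            if attr == 'Policy':
                # Normalize policies by parsing JSON before comparing.
                cur = json.loads(cur) if isinstance(cur, str) else cur
                val = json.loads(val) if isinstance(val, str) else val
                if cur != val:
                    attrs_to_set[attr] = json.dumps(val, sort_keys=True)
            elif str(cur) != str(val):
                attrs_to_set[attr] = val
        final = dict(current, **attrs_to_set)
        # Dump both dicts to YAML and diff the text, which gives the state's
        # changes/pchanges output a readable, line-oriented form.
        diff = ''.join(difflib.unified_diff(
            yaml.safe_dump(current, default_flow_style=False).splitlines(True),
            yaml.safe_dump(final, default_flow_style=False).splitlines(True),
        ))
        return attrs_to_set, diff


    # Hypothetical example data:
    current = {'VisibilityTimeout': '30', 'Policy': '{"Version": "2012-10-17"}'}
    desired = {'VisibilityTimeout': '60', 'Policy': {'Version': '2012-10-17'}}
    to_set, diff_text = attribute_diff(current, desired)
    print(to_set)       # {'VisibilityTimeout': '60'}
    print(diff_text)    # unified diff of the YAML-rendered attribute dicts
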
@@ -193,23 +276,44 @@ def absent( ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} - is_present = __salt__['boto_sqs.exists'](name, region, key, keyid, profile) + r = __salt__['boto_sqs.exists']( + name, + region=region, + key=key, + keyid=keyid, + profile=profile, + ) + if 'error' in r: + ret['result'] = False + ret['comment'] = str(r['error']) + return ret - if is_present: - if __opts__['test']: - ret['comment'] = 'AWS SQS queue {0} is set to be removed.'.format( - name) - ret['result'] = None - return ret - deleted = __salt__['boto_sqs.delete'](name, region, key, keyid, - profile) - if deleted: - ret['changes']['old'] = name - ret['changes']['new'] = None - else: - ret['result'] = False - ret['comment'] = 'Failed to delete {0} sqs queue.'.format(name) - else: - ret['comment'] = '{0} does not exist in {1}.'.format(name, region) + if not r['result']: + ret['comment'] = 'SQS queue {0} does not exist in {1}.'.format( + name, + region + ) + return ret + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'SQS queue {0} is set to be removed.'.format(name) + ret['pchanges'] = {'old': name, 'new': None} + return ret + + r = __salt__['boto_sqs.delete']( + name, + region=region, + key=key, + keyid=keyid, + profile=profile, + ) + if 'error' in r: + ret['result'] = False + ret['comment'] = str(r['error']) + return ret + + ret['comment'] = 'SQS queue {0} was deleted.'.format(name) + ret['changes']['old'] = name + ret['changes']['new'] = None return ret diff --git a/salt/states/boto_vpc.py b/salt/states/boto_vpc.py index b289789670..238431f2bb 100644 --- a/salt/states/boto_vpc.py +++ b/salt/states/boto_vpc.py @@ -147,7 +147,7 @@ from __future__ import absolute_import import logging # Import Salt Libs -import salt.ext.six as six +from salt.ext import six import salt.utils.dictupdate as dictupdate __virtualname__ = 'boto_vpc' diff --git a/salt/states/bower.py b/salt/states/bower.py index ffcbb9c8a3..848f97d926 100644 --- a/salt/states/bower.py +++ b/salt/states/bower.py @@ -36,7 +36,7 @@ from __future__ import absolute_import from salt.exceptions import CommandExecutionError, CommandNotFoundError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def __virtual__(): diff --git a/salt/states/cabal.py b/salt/states/cabal.py index 620b0cd160..8db7860919 100644 --- a/salt/states/cabal.py +++ b/salt/states/cabal.py @@ -26,15 +26,15 @@ pkg.installed state for the package which provides cabal from __future__ import absolute_import from salt.exceptions import CommandExecutionError, CommandNotFoundError -import salt.utils +import salt.utils.path def __virtual__(): ''' Only work when cabal-install is installed. 
''' - return (salt.utils.which('cabal') is not None) and \ - (salt.utils.which('ghc-pkg') is not None) + return (salt.utils.path.which('cabal') is not None) and \ + (salt.utils.path.which('ghc-pkg') is not None) def _parse_pkg_string(pkg): diff --git a/salt/states/chocolatey.py b/salt/states/chocolatey.py index 79c69048ef..d83f9bddd3 100644 --- a/salt/states/chocolatey.py +++ b/salt/states/chocolatey.py @@ -92,7 +92,7 @@ def installed(name, version=None, source=None, force=False, pre_versions=False, # Determine action # Package not installed - if name not in [package.split('|')[0].lower() for package in pre_install.splitlines()]: + if name.lower() not in [package.lower() for package in pre_install.keys()]: if version: ret['changes'] = {name: 'Version {0} will be installed' ''.format(version)} @@ -193,9 +193,13 @@ def uninstalled(name, version=None, uninstall_args=None, override_args=False): pre_uninstall = __salt__['chocolatey.list'](local_only=True) # Determine if package is installed - if name in [package.split('|')[0].lower() for package in pre_uninstall.splitlines()]: - ret['changes'] = {name: '{0} version {1} will be removed' - ''.format(name, pre_uninstall[name][0])} + if name.lower() in [package.lower() for package in pre_uninstall.keys()]: + try: + ret['changes'] = {name: '{0} version {1} will be removed' + ''.format(name, pre_uninstall[name][0])} + except KeyError: + ret['changes'] = {name: '{0} will be removed' + ''.format(name)} else: ret['comment'] = 'The package {0} is not installed'.format(name) return ret diff --git a/salt/states/cisconso.py b/salt/states/cisconso.py index eb26cfaf33..7622a3102d 100644 --- a/salt/states/cisconso.py +++ b/salt/states/cisconso.py @@ -8,6 +8,12 @@ For documentation on setting up the cisconso proxy minion look in the documentat for :mod:`salt.proxy.cisconso `. 
''' +# Import Python libs +from __future__ import absolute_import + +# Import Salt libs +import salt.utils.compat + def __virtual__(): return 'cisconso.set_data_value' in __salt__ @@ -53,7 +59,7 @@ def value_present(name, datastore, path, config): existing = __salt__['cisconso.get_data'](datastore, path) - if cmp(existing, config): + if salt.utils.compat.cmp(existing, config): ret['result'] = True ret['comment'] = 'Config is already set' diff --git a/salt/states/cloud.py b/salt/states/cloud.py index 299bc2f35c..773e589e0d 100644 --- a/salt/states/cloud.py +++ b/salt/states/cloud.py @@ -19,7 +19,7 @@ from __future__ import absolute_import import pprint # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Import Salt Libs import salt.utils.cloud as suc diff --git a/salt/states/cmd.py b/salt/states/cmd.py index 0c847b5a74..7b7ef0aecd 100644 --- a/salt/states/cmd.py +++ b/salt/states/cmd.py @@ -241,6 +241,7 @@ import logging # Import salt libs import salt.utils +import salt.utils.args from salt.exceptions import CommandExecutionError, SaltRenderError from salt.ext.six import string_types @@ -276,7 +277,7 @@ def _reinterpreted_state(state): out = out[idx + 1:] data = {} try: - for item in salt.utils.shlex_split(out): + for item in salt.utils.args.shlex_split(out): key, val = item.split('=') data[key] = val except ValueError: @@ -838,7 +839,7 @@ def run(name, ret['changes'] = cmd_all ret['result'] = not bool(cmd_all['retcode']) - ret['comment'] = 'Command "{0}" run'.format(name) + ret['comment'] = u'Command "{0}" run'.format(name) # Ignore timeout errors if asked (for nohups) and treat cmd as a success if ignore_timeout: diff --git a/salt/states/cron.py b/salt/states/cron.py index 0f788bb395..d936762514 100644 --- a/salt/states/cron.py +++ b/salt/states/cron.py @@ -150,6 +150,13 @@ from salt.modules.cron import ( ) +def __virtual__(): + if 'cron.list_tab' in __salt__: + return True + else: + return (False, 'cron module could not be loaded') + + def _check_cron(user, cmd, minute=None, diff --git a/salt/states/debconfmod.py b/salt/states/debconfmod.py index 67c60a1da9..8eb5961074 100644 --- a/salt/states/debconfmod.py +++ b/salt/states/debconfmod.py @@ -38,7 +38,7 @@ set_file the values in the ``data`` dict must be indented four spaces instead of two. ''' from __future__ import absolute_import -import salt.ext.six as six +from salt.ext import six # Define the module's virtual name diff --git a/salt/states/dellchassis.py b/salt/states/dellchassis.py index 3f6d1a86a9..398cebaddf 100644 --- a/salt/states/dellchassis.py +++ b/salt/states/dellchassis.py @@ -160,7 +160,7 @@ import logging import os # Import Salt lobs -import salt.ext.six as six +from salt.ext import six from salt.exceptions import CommandExecutionError # Get logging started diff --git a/salt/states/docker.py b/salt/states/docker.py index 962887445c..cb50617861 100644 --- a/salt/states/docker.py +++ b/salt/states/docker.py @@ -60,7 +60,8 @@ import copy import logging # Import salt libs -import salt.utils +import salt.utils.args +import salt.utils.versions # Enable proper logging log = logging.getLogger(__name__) # pylint: disable=invalid-name @@ -87,14 +88,14 @@ def running(name, **kwargs): ''' ret = __states__['docker_container.running']( name, - **salt.utils.clean_kwargs(**kwargs) + **salt.utils.args.clean_kwargs(**kwargs) ) msg = ( 'The docker.running state has been renamed to ' 'docker_container.running. To get rid of this warning, update your ' 'SLS to use the new name.' 
) - salt.utils.warn_until('Fluorine', msg) + salt.utils.versions.warn_until('Fluorine', msg) ret.setdefault('warnings', []).append(msg) return ret @@ -106,14 +107,14 @@ def stopped(**kwargs): `. ''' ret = __states__['docker_container.stopped']( - **salt.utils.clean_kwargs(**kwargs) + **salt.utils.args.clean_kwargs(**kwargs) ) msg = ( 'The docker.stopped state has been renamed to ' 'docker_container.stopped. To get rid of this warning, update your ' 'SLS to use the new name.' ) - salt.utils.warn_until('Fluorine', msg) + salt.utils.versions.warn_until('Fluorine', msg) ret.setdefault('warnings', []).append(msg) return ret @@ -126,14 +127,14 @@ def absent(name, **kwargs): ''' ret = __states__['docker_container.absent']( name, - **salt.utils.clean_kwargs(**kwargs) + **salt.utils.args.clean_kwargs(**kwargs) ) msg = ( 'The docker.absent state has been renamed to ' 'docker_container.absent. To get rid of this warning, update your ' 'SLS to use the new name.' ) - salt.utils.warn_until('Fluorine', msg) + salt.utils.versions.warn_until('Fluorine', msg) ret.setdefault('warnings', []).append(msg) return ret @@ -146,14 +147,14 @@ def network_present(name, **kwargs): ''' ret = __states__['docker_network.present']( name, - **salt.utils.clean_kwargs(**kwargs) + **salt.utils.args.clean_kwargs(**kwargs) ) msg = ( 'The docker.network_present state has been renamed to ' 'docker_network.present. To get rid of this warning, update your SLS ' 'to use the new name.' ) - salt.utils.warn_until('Fluorine', msg) + salt.utils.versions.warn_until('Fluorine', msg) ret.setdefault('warnings', []).append(msg) return ret @@ -166,14 +167,14 @@ def network_absent(name, **kwargs): ''' ret = __states__['docker_network.absent']( name, - **salt.utils.clean_kwargs(**kwargs) + **salt.utils.args.clean_kwargs(**kwargs) ) msg = ( 'The docker.network_absent state has been renamed to ' 'docker_network.absent. To get rid of this warning, update your SLS ' 'to use the new name.' ) - salt.utils.warn_until('Fluorine', msg) + salt.utils.versions.warn_until('Fluorine', msg) ret.setdefault('warnings', []).append(msg) return ret @@ -186,14 +187,14 @@ def image_present(name, **kwargs): ''' ret = __states__['docker_image.present']( name, - **salt.utils.clean_kwargs(**kwargs) + **salt.utils.args.clean_kwargs(**kwargs) ) msg = ( 'The docker.image_present state has been renamed to ' 'docker_image.present. To get rid of this warning, update your SLS ' 'to use the new name.' ) - salt.utils.warn_until('Fluorine', msg) + salt.utils.versions.warn_until('Fluorine', msg) ret.setdefault('warnings', []).append(msg) return ret @@ -205,14 +206,14 @@ def image_absent(**kwargs): `. ''' ret = __states__['docker_image.absent']( - **salt.utils.clean_kwargs(**kwargs) + **salt.utils.args.clean_kwargs(**kwargs) ) msg = ( 'The docker.image_absent state has been renamed to ' 'docker_image.absent. To get rid of this warning, update your SLS to ' 'use the new name.' ) - salt.utils.warn_until('Fluorine', msg) + salt.utils.versions.warn_until('Fluorine', msg) ret.setdefault('warnings', []).append(msg) return ret @@ -232,7 +233,7 @@ def volume_present(name, driver=None, driver_opts=None, force=False): 'docker_volume.present. To get rid of this warning, update your SLS ' 'to use the new name.' ) - salt.utils.warn_until('Fluorine', msg) + salt.utils.versions.warn_until('Fluorine', msg) ret.setdefault('warnings', []).append(msg) return ret @@ -249,7 +250,7 @@ def volume_absent(name, driver=None): 'docker_volume.absent. 
To get rid of this warning, update your SLS ' 'to use the new name.' ) - salt.utils.warn_until('Fluorine', msg) + salt.utils.versions.warn_until('Fluorine', msg) ret.setdefault('warnings', []).append(msg) return ret diff --git a/salt/states/docker_container.py b/salt/states/docker_container.py index 1bfc8cf489..3e5a7646dc 100644 --- a/salt/states/docker_container.py +++ b/salt/states/docker_container.py @@ -52,8 +52,9 @@ import logging from salt.exceptions import CommandExecutionError import copy import salt.utils +import salt.utils.args import salt.utils.docker -import salt.ext.six as six +from salt.ext import six # Enable proper logging log = logging.getLogger(__name__) # pylint: disable=invalid-name @@ -145,7 +146,7 @@ def running(name, .. _docker-container-running-skip-translate: skip_translate - This function translates Salt CLI input into the format which + This function translates Salt CLI or SLS input into the format which docker-py_ expects. However, in the event that Salt's translation logic fails (due to potential changes in the Docker Remote API, or to bugs in the translation code), this argument can be used to exert granular @@ -677,24 +678,14 @@ def running(name, - foo2.domain.tld domainname - Set custom DNS search domains. Can be expressed as a comma-separated - list or a YAML list. The below two examples are equivalent: + The domain name to use for the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - - dommainname: domain.tld,domain2.tld - - .. code-block:: yaml - - foo: - docker_container.running: - - image: bar/baz:latest - - dommainname: - - domain.tld - - domain2.tld + - dommainname: domain.tld entrypoint Entrypoint for the container @@ -2068,7 +2059,7 @@ def mod_watch(name, sfun=None, **kwargs): return running(name, **watch_kwargs) if sfun == 'stopped': - return stopped(name, **salt.utils.clean_kwargs(**kwargs)) + return stopped(name, **salt.utils.args.clean_kwargs(**kwargs)) return {'name': name, 'changes': {}, diff --git a/salt/states/docker_network.py b/salt/states/docker_network.py index bcc785cbb1..a58d923aea 100644 --- a/salt/states/docker_network.py +++ b/salt/states/docker_network.py @@ -105,8 +105,20 @@ def present(name, # map containers to container's Ids. containers = [__salt__['docker.inspect_container'](c)['Id'] for c in containers] networks = __salt__['docker.networks'](names=[name]) + log.trace( + 'docker_network.present: current networks: {0}'.format(networks) + ) + + # networks will contain all Docker networks which partially match 'name'. + # We need to loop through to find the matching network, if there is one. + network = None if networks: - network = networks[0] # we expect network's name to be unique + for network_iter in networks: + if network_iter['Name'] == name: + network = network_iter + break + + if network is not None: if all(c in network['Containers'] for c in containers): ret['result'] = True ret['comment'] = 'Network \'{0}\' already exists.'.format(name) @@ -173,7 +185,20 @@ def absent(name, driver=None): 'comment': ''} networks = __salt__['docker.networks'](names=[name]) - if not networks: + log.trace( + 'docker_network.absent: current networks: {0}'.format(networks) + ) + + # networks will contain all Docker networks which partially match 'name'. + # We need to loop through to find the matching network, if there is one. 
+ network = None + if networks: + for network_iter in networks: + if network_iter['Name'] == name: + network = network_iter + break + + if network is None: ret['result'] = True ret['comment'] = 'Network \'{0}\' already absent'.format(name) return ret diff --git a/salt/states/drac.py b/salt/states/drac.py index 11ee84abbd..534efacbfb 100644 --- a/salt/states/drac.py +++ b/salt/states/drac.py @@ -39,13 +39,14 @@ Ensure DRAC network is in a consistent state from __future__ import absolute_import import salt.exceptions +import salt.utils.path def __virtual__(): ''' Ensure the racadm command is installed ''' - if salt.utils.which('racadm'): + if salt.utils.path.which('racadm'): return True return False diff --git a/salt/states/environ.py b/salt/states/environ.py index c2bddfcb8a..8c3ea50226 100644 --- a/salt/states/environ.py +++ b/salt/states/environ.py @@ -4,13 +4,15 @@ Support for getting and setting the environment variables of the current salt process. ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import os -# Import salt libs -import salt.utils as utils -import salt.ext.six as six +# Import Salt libs +import salt.utils.platform + +# Import 3rd-party libs +from salt.ext import six def __virtual__(): @@ -117,7 +119,7 @@ def setenv(name, # false_unsets is True. Otherwise we want to set # the value to '' def key_exists(): - if utils.is_windows(): + if salt.utils.platform.is_windows(): permanent_hive = 'HKCU' permanent_key = 'Environment' if permanent == 'HKLM': diff --git a/salt/states/esxi.py b/salt/states/esxi.py index 4a2207259c..12240422e4 100644 --- a/salt/states/esxi.py +++ b/salt/states/esxi.py @@ -97,7 +97,7 @@ from __future__ import absolute_import import logging # Import Salt Libs -import salt.ext.six as six +from salt.ext import six import salt.utils.files from salt.exceptions import CommandExecutionError diff --git a/salt/states/etcd_mod.py b/salt/states/etcd_mod.py index 5d041813aa..7ab5828bc9 100644 --- a/salt/states/etcd_mod.py +++ b/salt/states/etcd_mod.py @@ -143,6 +143,13 @@ def set_(name, value, profile=None): profile Optional, defaults to ``None``. Sets the etcd profile to use which has been defined in the Salt Master config. + + .. code-block:: yaml + + my_etd_config: + etcd.host: 127.0.0.1 + etcd.port: 4001 + ''' created = False @@ -184,6 +191,13 @@ def wait_set(name, value, profile=None): profile The etcd profile to use that has been configured on the Salt Master, this is optional and defaults to ``None``. + + .. code-block:: yaml + + my_etd_config: + etcd.host: 127.0.0.1 + etcd.port: 4001 + ''' return { @@ -194,6 +208,48 @@ def wait_set(name, value, profile=None): } +def directory(name, profile=None): + ''' + Create a directory in etcd. + + name + The etcd directory name, for example: ``/foo/bar/baz``. + profile + Optional, defaults to ``None``. Sets the etcd profile to use which has + been defined in the Salt Master config. + + .. 
code-block:: yaml + + my_etd_config: + etcd.host: 127.0.0.1 + etcd.port: 4001 + ''' + + created = False + + rtn = { + 'name': name, + 'comment': 'Directory exists', + 'result': True, + 'changes': {} + } + + current = __salt__['etcd.get'](name, profile=profile, recurse=True) + if not current: + created = True + + result = __salt__['etcd.set'](name, None, directory=True, profile=profile) + + if result and result != current: + if created: + rtn['comment'] = 'New directory created' + rtn['changes'] = { + name: 'Created' + } + + return rtn + + def rm_(name, recurse=False, profile=None): ''' Deletes a key from etcd. This function is also aliased as ``rm``. @@ -205,6 +261,12 @@ def rm_(name, recurse=False, profile=None): profile Optional, defaults to ``None``. Sets the etcd profile to use which has been defined in the Salt Master config. + + .. code-block:: yaml + + my_etd_config: + etcd.host: 127.0.0.1 + etcd.port: 4001 ''' rtn = { @@ -241,6 +303,12 @@ def wait_rm(name, recurse=False, profile=None): profile Optional, defaults to ``None``. Sets the etcd profile to use which has been defined in the Salt Master config. + + .. code-block:: yaml + + my_etd_config: + etcd.host: 127.0.0.1 + etcd.port: 4001 ''' return { diff --git a/salt/states/file.py b/salt/states/file.py index 9897bc83ef..8aea2cfefa 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -285,19 +285,21 @@ import salt.payload import salt.utils import salt.utils.dictupdate import salt.utils.files +import salt.utils.platform import salt.utils.templates import salt.utils.url +import salt.utils.versions from salt.utils.locales import sdecode from salt.exceptions import CommandExecutionError, SaltInvocationError -if salt.utils.is_windows(): +if salt.utils.platform.is_windows(): import salt.utils.win_dacl import salt.utils.win_functions # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import zip_longest -if salt.utils.is_windows(): +if salt.utils.platform.is_windows(): import pywintypes import win32com.client @@ -1262,7 +1264,7 @@ def symlink( if user is None: user = __opts__['user'] - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Make sure the user exists in Windows # Salt default is 'root' @@ -1436,7 +1438,10 @@ def absent(name): ret['comment'] = 'File {0} is set for removal'.format(name) return ret try: - __salt__['file.remove'](name) + if salt.utils.platform.is_windows(): + __salt__['file.remove'](name, force=True) + else: + __salt__['file.remove'](name) ret['comment'] = 'Removed file {0}'.format(name) ret['changes']['removed'] = name return ret @@ -1450,7 +1455,10 @@ def absent(name): ret['comment'] = 'Directory {0} is set for removal'.format(name) return ret try: - __salt__['file.remove'](name) + if salt.utils.platform.is_windows(): + __salt__['file.remove'](name, force=True) + else: + __salt__['file.remove'](name) ret['comment'] = 'Removed directory {0}'.format(name) ret['changes']['removed'] = name return ret @@ -1479,11 +1487,11 @@ def exists(name): 'result': True, 'comment': ''} if not name: - return _error(ret, 'Must provide name to file.exists') + return _error(ret, u'Must provide name to file.exists') if not os.path.exists(name): - return _error(ret, 'Specified path {0} does not exist'.format(name)) + return _error(ret, u'Specified path {0} does not exist'.format(name)) - ret['comment'] = 'Path {0} exists'.format(name) + ret['comment'] = u'Path {0} exists'.format(name) return ret @@ -1503,11 +1511,11 @@ def missing(name): 'result': True, 
'comment': ''} if not name: - return _error(ret, 'Must provide name to file.missing') + return _error(ret, u'Must provide name to file.missing') if os.path.exists(name): - return _error(ret, 'Specified path {0} exists'.format(name)) + return _error(ret, u'Specified path {0} exists'.format(name)) - ret['comment'] = 'Path {0} is missing'.format(name) + ret['comment'] = u'Path {0} is missing'.format(name) return ret @@ -2067,7 +2075,7 @@ def managed(name, - win_inheritance: False ''' if 'env' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -2083,10 +2091,10 @@ def managed(name, 'name': name, 'result': True} - if mode is not None and salt.utils.is_windows(): + if mode is not None and salt.utils.platform.is_windows(): return _error(ret, 'The \'mode\' option is not supported on Windows') - if attrs is not None and salt.utils.is_windows(): + if attrs is not None and salt.utils.platform.is_windows(): return _error(ret, 'The \'attrs\' option is not supported on Windows') try: @@ -2239,7 +2247,7 @@ def managed(name, if not name: return _error(ret, 'Must provide name to file.managed') user = _test_owner(kwargs, user=user) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # If win_owner not passed, use user if win_owner is None: @@ -2257,8 +2265,8 @@ def managed(name, if not create: if not os.path.isfile(name): # Don't create a file that is not already present - ret['comment'] = ('File {0} is not present and is not set for ' - 'creation').format(name) + ret['comment'] = (u'File {0} is not present and is not set for ' + u'creation').format(name) return ret u_check = _check_user(user, group) if u_check: @@ -2266,10 +2274,10 @@ def managed(name, return _error(ret, u_check) if not os.path.isabs(name): return _error( - ret, 'Specified file {0} is not an absolute path'.format(name)) + ret, u'Specified file {0} is not an absolute path'.format(name)) if os.path.isdir(name): - ret['comment'] = 'Specified target {0} is a directory'.format(name) + ret['comment'] = u'Specified target {0} is a directory'.format(name) ret['result'] = False return ret @@ -2284,7 +2292,7 @@ def managed(name, if not replace and os.path.exists(name): # Check and set the permissions if necessary - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): ret = __salt__['file.check_perms']( name, ret, win_owner, win_perms, win_deny_perms, None, win_inheritance) @@ -2292,10 +2300,10 @@ def managed(name, ret, _ = __salt__['file.check_perms']( name, ret, user, group, mode, attrs, follow_symlinks) if __opts__['test']: - ret['comment'] = 'File {0} not updated'.format(name) + ret['comment'] = u'File {0} not updated'.format(name) elif not ret['changes'] and ret['result']: - ret['comment'] = ('File {0} exists with proper permissions. ' - 'No changes made.'.format(name)) + ret['comment'] = (u'File {0} exists with proper permissions. 
' + u'No changes made.'.format(name)) return ret accum_data, _ = _load_accumulators() @@ -2326,7 +2334,7 @@ def managed(name, **kwargs ) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): ret = __salt__['file.check_perms']( name, ret, win_owner, win_perms, win_deny_perms, None, win_inheritance) @@ -2335,14 +2343,14 @@ def managed(name, ret['result'], ret['comment'] = ret['pchanges'] elif ret['pchanges']: ret['result'] = None - ret['comment'] = 'The file {0} is set to be changed'.format(name) + ret['comment'] = u'The file {0} is set to be changed'.format(name) if show_changes and 'diff' in ret['pchanges']: ret['changes']['diff'] = ret['pchanges']['diff'] if not show_changes: ret['changes']['diff'] = '' else: ret['result'] = True - ret['comment'] = 'The file {0} is in the correct state'.format(name) + ret['comment'] = u'The file {0} is in the correct state'.format(name) return ret @@ -2452,12 +2460,16 @@ def managed(name, if sfn and os.path.isfile(sfn): os.remove(sfn) return ret + + if sfn and os.path.isfile(sfn): + os.remove(sfn) + # Since we generated a new tempfile and we are not returning here # lets change the original sfn to the new tempfile or else we will # get file not found - if sfn and os.path.isfile(sfn): - os.remove(sfn) - sfn = tmp_filename + + sfn = tmp_filename + else: ret = {'changes': {}, 'comment': '', @@ -2770,7 +2782,7 @@ def directory(name, return _error(ret, 'Cannot specify both max_depth and clean') user = _test_owner(kwargs, user=user) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # If win_owner not passed, use user if win_owner is None: @@ -2795,7 +2807,7 @@ def directory(name, dir_mode = salt.utils.normalize_mode(dir_mode) file_mode = salt.utils.normalize_mode(file_mode) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Verify win_owner is valid on the target system try: salt.utils.win_dacl.get_sid(win_owner) @@ -2848,7 +2860,7 @@ def directory(name, 'Specified location {0} exists and is a symlink'.format(name)) # Check directory? 
- if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): presult, pcomment, ret['pchanges'] = _check_directory_win( name, win_owner, win_perms, win_deny_perms, win_inheritance) else: @@ -2867,7 +2879,7 @@ def directory(name, # The parent directory does not exist, create them if makedirs: # Everything's good, create the parent Dirs - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Make sure the drive is mapped before trying to create the # path in windows drive, path = os.path.splitdrive(name) @@ -2883,7 +2895,7 @@ def directory(name, return _error( ret, 'No directory to create {0} in'.format(name)) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): __salt__['file.mkdir'](name, win_owner, win_perms, win_deny_perms, win_inheritance) else: @@ -2897,7 +2909,7 @@ def directory(name, # issue 32707: skip this __salt__['file.check_perms'] call if children_only == True # Check permissions if not children_only: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): ret = __salt__['file.check_perms']( name, ret, win_owner, win_perms, win_deny_perms, None, win_inheritance) else: @@ -2968,7 +2980,7 @@ def directory(name, for fn_ in files: full = os.path.join(root, fn_) try: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): ret = __salt__['file.check_perms']( full, ret, win_owner, win_perms, win_deny_perms, None, win_inheritance) @@ -2983,7 +2995,7 @@ def directory(name, for dir_ in dirs: full = os.path.join(root, dir_) try: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): ret = __salt__['file.check_perms']( full, ret, win_owner, win_perms, win_deny_perms, None, win_inheritance) @@ -3001,14 +3013,14 @@ def directory(name, removed = _clean_dir(name, list(keep), exclude_pat) if removed: ret['changes']['removed'] = removed - ret['comment'] = 'Files cleaned from directory {0}'.format(name) + ret['comment'] = u'Files cleaned from directory {0}'.format(name) # issue 32707: reflect children_only selection in comments if not ret['comment']: if children_only: - ret['comment'] = 'Directory {0}/* updated'.format(name) + ret['comment'] = u'Directory {0}/* updated'.format(name) else: - ret['comment'] = 'Directory {0} updated'.format(name) + ret['comment'] = u'Directory {0} updated'.format(name) if __opts__['test']: ret['comment'] = 'Directory {0} not updated'.format(name) @@ -3017,9 +3029,9 @@ def directory(name, if ret['comment']: orig_comment = ret['comment'] - ret['comment'] = 'Directory {0} is in the correct state'.format(name) + ret['comment'] = u'Directory {0} is in the correct state'.format(name) if orig_comment: - ret['comment'] = '\n'.join([ret['comment'], orig_comment]) + ret['comment'] = u'\n'.join([ret['comment'], orig_comment]) if errors: ret['result'] = False @@ -3196,7 +3208,7 @@ def recurse(name, option is usually not needed except in special circumstances. ''' if 'env' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. 
This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -3207,7 +3219,7 @@ def recurse(name, name = os.path.expanduser(sdecode(name)) user = _test_owner(kwargs, user=user) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): if group is not None: log.warning( 'The group argument for {0} has been ignored as this ' @@ -3231,7 +3243,7 @@ def recurse(name, return ret if any([x is not None for x in (dir_mode, file_mode, sym_mode)]) \ - and salt.utils.is_windows(): + and salt.utils.platform.is_windows(): return _error(ret, 'mode management is not supported on Windows') # Make sure that leading zeros stripped by YAML loader are added back @@ -3285,8 +3297,8 @@ def recurse(name, if x.startswith(srcpath + '/'))): ret['result'] = False ret['comment'] = ( - 'The directory \'{0}\' does not exist on the salt fileserver ' - 'in saltenv \'{1}\''.format(srcpath, senv) + u'The directory \'{0}\' does not exist on the salt fileserver ' + u'in saltenv \'{1}\''.format(srcpath, senv) ) return ret @@ -3322,8 +3334,8 @@ def recurse(name, if clean and os.path.exists(path) and os.path.isdir(path): _ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if __opts__['test']: - _ret['comment'] = 'Replacing directory {0} with a ' \ - 'file'.format(path) + _ret['comment'] = u'Replacing directory {0} with a ' \ + u'file'.format(path) _ret['result'] = None merge_ret(path, _ret) return @@ -3361,7 +3373,7 @@ def recurse(name, if clean and os.path.exists(path) and not os.path.isdir(path): _ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if __opts__['test']: - _ret['comment'] = 'Replacing {0} with a directory'.format(path) + _ret['comment'] = u'Replacing {0} with a directory'.format(path) _ret['result'] = None merge_ret(path, _ret) return @@ -3426,10 +3438,10 @@ def recurse(name, ) for (k, v) in six.iteritems(ret['comment'])).strip() if not ret['comment']: - ret['comment'] = 'Recursively updated {0}'.format(name) + ret['comment'] = u'Recursively updated {0}'.format(name) if not ret['changes'] and ret['result']: - ret['comment'] = 'The directory {0} is in the correct state'.format( + ret['comment'] = u'The directory {0} is in the correct state'.format( name ) @@ -3613,13 +3625,13 @@ def retention_schedule(name, retain, strptime_format=None, timezone=None): # TODO: track and report how much space was / would be reclaimed if __opts__['test']: - ret['comment'] = '{0} backups would have been removed from {1}.\n'.format(len(deletable_files), name) + ret['comment'] = u'{0} backups would have been removed from {1}.\n'.format(len(deletable_files), name) if deletable_files: ret['result'] = None else: for f in deletable_files: __salt__['file.remove'](os.path.join(name, f)) - ret['comment'] = '{0} backups were removed from {1}.\n'.format(len(deletable_files), name) + ret['comment'] = u'{0} backups were removed from {1}.\n'.format(len(deletable_files), name) ret['changes'] = changes return ret @@ -3756,7 +3768,13 @@ def line(name, content=None, match=None, mode=None, location=None, if not name: return _error(ret, 'Must provide name to file.line') - managed(name, create=create, user=user, group=group, mode=file_mode) + managed( + name, + create=create, + user=user, + group=group, + mode=file_mode, + replace=False) check_res, check_msg = _check_file(name) if not check_res: @@ -3804,7 +3822,8 @@ def replace(name, not_found_content=None, backup='.bak', show_changes=True, - ignore_if_missing=False): + ignore_if_missing=False, + backslash_literal=False): r''' Maintain an edit 
in a file. @@ -3818,12 +3837,13 @@ def replace(name, A regular expression, to be matched using Python's :py:func:`~re.search`. - ..note:: + .. note:: + If you need to match a literal string that contains regex special characters, you may want to use salt's custom Jinja filter, ``escape_regex``. - ..code-block:: jinja + .. code-block:: jinja {{ 'http://example.com?foo=bar%20baz' | escape_regex }} @@ -3903,6 +3923,14 @@ def replace(name, state will display an error raised by the execution module. If set to ``True``, the state will simply report no changes. + backslash_literal : False + .. versionadded:: 2016.11.7 + + Interpret backslashes as literal backslashes for the repl and not + escape characters. This will help when using append/prepend so that + the backslashes are not interpreted for the repl on the second run of + the state. + For complex regex patterns, it can be useful to avoid the need for complex quoting and escape sequences by making use of YAML's multiline string syntax. @@ -3951,7 +3979,8 @@ def replace(name, backup=backup, dry_run=__opts__['test'], show_changes=show_changes, - ignore_if_missing=ignore_if_missing) + ignore_if_missing=ignore_if_missing, + backslash_literal=backslash_literal) if changes: ret['pchanges']['diff'] = changes @@ -4311,7 +4340,7 @@ def comment(name, regex, char='#', backup='.bak'): ret['pchanges'][name] = 'updated' if __opts__['test']: - ret['comment'] = 'File {0} is set to be updated'.format(name) + ret['comment'] = u'File {0} is set to be updated'.format(name) ret['result'] = None return ret with salt.utils.files.fopen(name, 'rb') as fp_: @@ -4418,7 +4447,7 @@ def uncomment(name, regex, char='#', backup='.bak'): ret['pchanges'][name] = 'updated' if __opts__['test']: - ret['comment'] = 'File {0} is set to be updated'.format(name) + ret['comment'] = u'File {0} is set to be updated'.format(name) ret['result'] = None return ret @@ -4683,7 +4712,7 @@ def append(name, return _error(ret, 'No text found to append. 
Nothing appended') if __opts__['test']: - ret['comment'] = 'File {0} is set to be updated'.format(name) + ret['comment'] = u'File {0} is set to be updated'.format(name) ret['result'] = None nlines = list(slines) nlines.extend(append_lines) @@ -4696,7 +4725,7 @@ def append(name, '\n'.join(difflib.unified_diff(slines, nlines)) ) else: - ret['comment'] = 'File {0} is in correct state'.format(name) + ret['comment'] = u'File {0} is in correct state'.format(name) ret['result'] = True return ret @@ -4704,7 +4733,7 @@ def append(name, __salt__['file.append'](name, args=append_lines) ret['comment'] = 'Appended {0} lines'.format(len(append_lines)) else: - ret['comment'] = 'File {0} is in correct state'.format(name) + ret['comment'] = u'File {0} is in correct state'.format(name) with salt.utils.files.fopen(name, 'rb') as fp_: nlines = fp_.read() @@ -4871,7 +4900,7 @@ def prepend(name, for line in lines: if __opts__['test']: - ret['comment'] = 'File {0} is set to be updated'.format(name) + ret['comment'] = u'File {0} is set to be updated'.format(name) ret['result'] = None test_lines.append('{0}\n'.format(line)) else: @@ -4890,7 +4919,7 @@ def prepend(name, ) ret['result'] = None else: - ret['comment'] = 'File {0} is in correct state'.format(name) + ret['comment'] = u'File {0} is in correct state'.format(name) ret['result'] = True return ret @@ -4935,7 +4964,7 @@ def prepend(name, if count: ret['comment'] = 'Prepended {0} lines'.format(count) else: - ret['comment'] = 'File {0} is in correct state'.format(name) + ret['comment'] = u'File {0} is in correct state'.format(name) ret['result'] = True return ret @@ -5000,7 +5029,7 @@ def patch(name, hash_ = kwargs.pop('hash', None) if 'env' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. 
This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -5033,7 +5062,7 @@ def patch(name, # get cached file or copy it to cache cached_source_path = __salt__['cp.cache_file'](source, __env__) if not cached_source_path: - ret['comment'] = ('Unable to cache {0} from saltenv \'{1}\'' + ret['comment'] = (u'Unable to cache {0} from saltenv \'{1}\'' .format(source, __env__)) return ret @@ -5047,7 +5076,7 @@ def patch(name, name, cached_source_path, options=options, dry_run=True ) if __opts__['test']: - ret['comment'] = 'File {0} will be patched'.format(name) + ret['comment'] = u'File {0} will be patched'.format(name) ret['result'] = None return ret if ret['changes']['retcode'] != 0: @@ -5126,10 +5155,10 @@ def touch(name, atime=None, mtime=None, makedirs=False): ret['result'] = __salt__['file.touch'](name, atime, mtime) if not extant and ret['result']: - ret['comment'] = 'Created empty file {0}'.format(name) + ret['comment'] = u'Created empty file {0}'.format(name) ret['changes']['new'] = name elif extant and ret['result']: - ret['comment'] = 'Updated times on {0} {1}'.format( + ret['comment'] = u'Updated times on {0} {1}'.format( 'directory' if os.path.isdir(name) else 'file', name ) ret['changes']['touched'] = name @@ -5217,7 +5246,7 @@ def copy( ret = { 'name': name, 'changes': {}, - 'comment': 'Copied "{0}" to "{1}"'.format(source, name), + 'comment': u'Copied "{0}" to "{1}"'.format(source, name), 'result': True} if not name: return _error(ret, 'Must provide name to file.copy') @@ -5239,7 +5268,7 @@ def copy( if user is None: user = __opts__['user'] - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): if group is not None: log.warning( 'The group argument for {0} has been ignored as this is ' @@ -5287,20 +5316,20 @@ def copy( if __opts__['test']: if changed: - ret['comment'] = 'File "{0}" is set to be copied to "{1}"'.format( + ret['comment'] = u'File "{0}" is set to be copied to "{1}"'.format( source, name ) ret['result'] = None else: - ret['comment'] = ('The target file "{0}" exists and will not be ' - 'overwritten'.format(name)) + ret['comment'] = (u'The target file "{0}" exists and will not be ' + u'overwritten'.format(name)) ret['result'] = True return ret if not changed: - ret['comment'] = ('The target file "{0}" exists and will not be ' - 'overwritten'.format(name)) + ret['comment'] = (u'The target file "{0}" exists and will not be ' + u'overwritten'.format(name)) ret['result'] = True return ret @@ -5369,17 +5398,17 @@ def rename(name, source, force=False, makedirs=False): if not os.path.isabs(name): return _error( - ret, 'Specified file {0} is not an absolute path'.format(name)) + ret, u'Specified file {0} is not an absolute path'.format(name)) if not os.path.lexists(source): - ret['comment'] = ('Source file "{0}" has already been moved out of ' - 'place').format(source) + ret['comment'] = (u'Source file "{0}" has already been moved out of ' + u'place').format(source) return ret if os.path.lexists(source) and os.path.lexists(name): if not force: - ret['comment'] = ('The target file "{0}" exists and will not be ' - 'overwritten'.format(name)) + ret['comment'] = (u'The target file "{0}" exists and will not be ' + u'overwritten'.format(name)) ret['result'] = False return ret elif not __opts__['test']: @@ -5389,12 +5418,12 @@ def rename(name, source, force=False, makedirs=False): except (IOError, OSError): return _error( ret, - 'Failed to delete "{0}" in preparation for ' - 'forced move'.format(name) + u'Failed to delete "{0}" in preparation for ' + 
u'forced move'.format(name) ) if __opts__['test']: - ret['comment'] = 'File "{0}" is set to be moved to "{1}"'.format( + ret['comment'] = u'File "{0}" is set to be moved to "{1}"'.format( source, name ) @@ -5409,7 +5438,7 @@ def rename(name, source, force=False, makedirs=False): else: return _error( ret, - 'The target directory {0} is not present'.format(dname)) + u'The target directory {0} is not present'.format(dname)) # All tests pass, move the file into place try: if os.path.islink(source): @@ -5420,9 +5449,9 @@ def rename(name, source, force=False, makedirs=False): shutil.move(source, name) except (IOError, OSError): return _error( - ret, 'Failed to move "{0}" to "{1}"'.format(source, name)) + ret, u'Failed to move "{0}" to "{1}"'.format(source, name)) - ret['comment'] = 'Moved "{0}" to "{1}"'.format(source, name) + ret['comment'] = u'Moved "{0}" to "{1}"'.format(source, name) ret['changes'] = {name: source} return ret @@ -5501,7 +5530,7 @@ def accumulated(name, filename, text, **kwargs): deps = require_in + watch_in if not [x for x in deps if 'file' in x]: ret['result'] = False - ret['comment'] = 'Orphaned accumulator {0} in {1}:{2}'.format( + ret['comment'] = u'Orphaned accumulator {0} in {1}:{2}'.format( name, __low__['__sls__'], __low__['__id__'] @@ -5525,8 +5554,8 @@ def accumulated(name, filename, text, **kwargs): for chunk in text: if chunk not in accum_data[filename][name]: accum_data[filename][name].append(chunk) - ret['comment'] = ('Accumulator {0} for file {1} ' - 'was charged by text'.format(name, filename)) + ret['comment'] = (u'Accumulator {0} for file {1} ' + u'was charged by text'.format(name, filename)) _persist_accummulators(accum_data, accum_deps) return ret @@ -5658,7 +5687,7 @@ def serialize(name, } ''' if 'env' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. 
This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -5687,8 +5716,8 @@ def serialize(name, if not create: if not os.path.isfile(name): # Don't create a file that is not already present - ret['comment'] = ('File {0} is not present and is not set for ' - 'creation').format(name) + ret['comment'] = (u'File {0} is not present and is not set for ' + u'creation').format(name) return ret formatter = kwargs.pop('formatter', 'yaml').lower() @@ -5704,11 +5733,11 @@ def serialize(name, return _error( ret, 'Neither \'dataset\' nor \'dataset_pillar\' was defined') - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): if group is not None: log.warning( - 'The group argument for {0} has been ignored as this ' - 'is a Windows system.'.format(name) + u'The group argument for %s has been ignored as this ' + u'is a Windows system.', name ) group = user @@ -5717,7 +5746,7 @@ def serialize(name, if serializer_name not in __serializers__: return {'changes': {}, - 'comment': '{0} format is not supported'.format( + 'comment': u'{0} format is not supported'.format( formatter.capitalize()), 'name': name, 'result': False @@ -5727,7 +5756,7 @@ def serialize(name, if os.path.isfile(name): if '{0}.deserialize'.format(formatter) not in __serializers__: return {'changes': {}, - 'comment': ('{0} format is not supported for merging' + 'comment': (u'{0} format is not supported for merging' .format(formatter.capitalize())), 'name': name, 'result': False} @@ -5739,7 +5768,7 @@ def serialize(name, merged_data = salt.utils.dictupdate.merge_recurse(existing_data, dataset) if existing_data == merged_data: ret['result'] = True - ret['comment'] = 'The file {0} is in the correct state'.format(name) + ret['comment'] = u'The file {0} is in the correct state'.format(name) return ret dataset = merged_data contents = __serializers__[serializer_name](dataset, **default_serializer_opts.get(serializer_name, {})) @@ -5770,14 +5799,14 @@ def serialize(name, if ret['changes']: ret['result'] = None - ret['comment'] = 'Dataset will be serialized and stored into {0}'.format( + ret['comment'] = u'Dataset will be serialized and stored into {0}'.format( name) if not show_changes: ret['changes']['diff'] = '' else: ret['result'] = True - ret['comment'] = 'The file {0} is in the correct state'.format(name) + ret['comment'] = u'The file {0} is in the correct state'.format(name) return ret return __salt__['file.manage_file'](name=name, @@ -5882,16 +5911,15 @@ def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'): # Check for file existence if __salt__['file.file_exists'](name): ret['comment'] = ( - 'File exists and is not a character device {0}. Cowardly ' - 'refusing to continue'.format(name) + u'File {0} exists and is not a character device. Refusing ' + u'to continue'.format(name) ) # Check if it is a character device elif not __salt__['file.is_chrdev'](name): if __opts__['test']: - ret['comment'] = ( - 'Character device {0} is set to be created' - ).format(name) + ret['comment'] = \ + u'Character device {0} is set to be created'.format(name) ret['result'] = None else: ret = __salt__['file.mknod'](name, @@ -5907,8 +5935,8 @@ def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'): devmaj, devmin = __salt__['file.get_devmm'](name) if (major, minor) != (devmaj, devmin): ret['comment'] = ( - 'Character device {0} exists and has a different ' - 'major/minor {1}/{2}. 
Cowardly refusing to continue' + u'Character device {0} exists and has a different ' + u'major/minor {1}/{2}. Refusing to continue' .format(name, devmaj, devmin) ) # Check the perms @@ -5920,7 +5948,7 @@ def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'): mode)[0] if not ret['changes']: ret['comment'] = ( - 'Character device {0} is in the correct state'.format( + u'Character device {0} is in the correct state'.format( name ) ) @@ -5929,16 +5957,14 @@ def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'): # Check for file existence if __salt__['file.file_exists'](name): ret['comment'] = ( - 'File exists and is not a block device {0}. Cowardly ' - 'refusing to continue'.format(name) + u'File {0} exists and is not a block device. Refusing to ' + u'continue'.format(name) ) # Check if it is a block device elif not __salt__['file.is_blkdev'](name): if __opts__['test']: - ret['comment'] = ( - 'Block device {0} is set to be created' - ).format(name) + ret['comment'] = u'Block device {0} is set to be created'.format(name) ret['result'] = None else: ret = __salt__['file.mknod'](name, @@ -5954,8 +5980,8 @@ def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'): devmaj, devmin = __salt__['file.get_devmm'](name) if (major, minor) != (devmaj, devmin): ret['comment'] = ( - 'Block device {0} exists and has a different major/minor ' - '{1}/{2}. Cowardly refusing to continue'.format( + u'Block device {0} exists and has a different major/minor ' + u'{1}/{2}. Refusing to continue'.format( name, devmaj, devmin ) ) @@ -5968,21 +5994,21 @@ def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'): mode)[0] if not ret['changes']: ret['comment'] = ( - 'Block device {0} is in the correct state'.format(name) + u'Block device {0} is in the correct state'.format(name) ) elif ntype == 'p': # Check for file existence if __salt__['file.file_exists'](name): ret['comment'] = ( - 'File exists and is not a fifo pipe {0}. Cowardly refusing ' - 'to continue'.format(name) + u'File {0} exists and is not a fifo pipe. Refusing to ' + u'continue'.format(name) ) # Check if it is a fifo elif not __salt__['file.is_fifo'](name): if __opts__['test']: - ret['comment'] = 'Fifo pipe {0} is set to be created'.format( + ret['comment'] = u'Fifo pipe {0} is set to be created'.format( name ) ret['result'] = None @@ -6004,7 +6030,7 @@ def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'): mode)[0] if not ret['changes']: ret['comment'] = ( - 'Fifo pipe {0} is in the correct state'.format(name) + u'Fifo pipe {0} is in the correct state'.format(name) ) else: @@ -6227,7 +6253,7 @@ def shortcut( 'changes': {}, 'result': True, 'comment': ''} - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return _error(ret, 'Shortcuts are only supported on Windows') if not name: return _error(ret, 'Must provide name to file.shortcut') @@ -6360,18 +6386,18 @@ def shortcut( else: if _check_shortcut_ownership(name, user): # The shortcut looks good! 
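As a usage illustration for the file.mknod branches changed above (character device, block device and fifo pipe), a hypothetical state might look like the following; the device path and major/minor numbers are illustrative only:

.. code-block:: yaml

    /dev/fuse:
      file.mknod:
        - ntype: c
        - major: 10
        - minor: 229
        - user: root
        - group: root
        - mode: '0666'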
- ret['comment'] = ('Shortcut {0} is present and owned by ' - '{1}'.format(name, user)) + ret['comment'] = (u'Shortcut {0} is present and owned by ' + u'{1}'.format(name, user)) else: if _set_shortcut_ownership(name, user): - ret['comment'] = ('Set ownership of shortcut {0} to ' - '{1}'.format(name, user)) + ret['comment'] = (u'Set ownership of shortcut {0} to ' + u'{1}'.format(name, user)) ret['changes']['ownership'] = '{0}'.format(user) else: ret['result'] = False ret['comment'] += ( - 'Failed to set ownership of shortcut {0} to ' - '{1}'.format(name, user) + u'Failed to set ownership of shortcut {0} to ' + u'{1}'.format(name, user) ) return ret @@ -6390,12 +6416,12 @@ def shortcut( scut.Save() except (AttributeError, pywintypes.com_error) as exc: ret['result'] = False - ret['comment'] = ('Unable to create new shortcut {0} -> ' - '{1}: {2}'.format(name, target, exc)) + ret['comment'] = (u'Unable to create new shortcut {0} -> ' + u'{1}: {2}'.format(name, target, exc)) return ret else: - ret['comment'] = ('Created new shortcut {0} -> ' - '{1}'.format(name, target)) + ret['comment'] = (u'Created new shortcut {0} -> ' + u'{1}'.format(name, target)) ret['changes']['new'] = name if not _check_shortcut_ownership(name, user): diff --git a/salt/states/firewalld.py b/salt/states/firewalld.py index 5ef4f3cd46..7b751547fb 100644 --- a/salt/states/firewalld.py +++ b/salt/states/firewalld.py @@ -82,7 +82,7 @@ import logging # Import Salt Libs from salt.exceptions import CommandExecutionError -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -145,7 +145,7 @@ def __virtual__(): ''' Ensure the firewall-cmd is available ''' - if salt.utils.which('firewall-cmd'): + if salt.utils.path.which('firewall-cmd'): return True return (False, 'firewall-cmd is not available, firewalld is probably not installed.') diff --git a/salt/states/git.py b/salt/states/git.py index d4a3c1a329..39606277f0 100644 --- a/salt/states/git.py +++ b/salt/states/git.py @@ -21,14 +21,15 @@ import re import string # Import salt libs -import salt.utils +import salt.utils.args import salt.utils.files import salt.utils.url +import salt.utils.versions from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -573,11 +574,11 @@ def latest(name, ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, - salt.utils.invalid_kwargs(kwargs, raise_exc=False) + salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not remote: @@ -1139,13 +1140,22 @@ def latest(name, password=password, https_user=https_user, https_pass=https_pass) - comments.append( - 'Remote \'{0}\' changed from {1} to {2}'.format( - remote, - salt.utils.url.redact_http_basic_auth(fetch_url), - redacted_fetch_url + if fetch_url is None: + comments.append( + 'Remote \'{0}\' set to {1}'.format( + remote, + redacted_fetch_url + ) + ) + ret['changes']['new'] = name + ' => ' + remote + else: + comments.append( + 'Remote \'{0}\' changed from {1} to {2}'.format( + remote, + salt.utils.url.redact_http_basic_auth(fetch_url), + redacted_fetch_url + ) ) - ) if remote_rev is not None: if __opts__['test']: @@ -2108,11 +2118,11 @@ def detached(name, ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} ref = kwargs.pop('ref', None) - kwargs = 
salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, - salt.utils.invalid_kwargs(kwargs, raise_exc=False) + salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if ref is not None: @@ -2122,7 +2132,7 @@ def detached(name, 'consistency. Please update your SLS to reflect this.' ) ret.setdefault('warnings', []).append(deprecation_msg) - salt.utils.warn_until('Fluorine', deprecation_msg) + salt.utils.versions.warn_until('Fluorine', deprecation_msg) if not rev: return _fail( @@ -2214,7 +2224,7 @@ def detached(name, # Determine if supplied ref is a hash remote_rev_type = 'ref' - if len(ref) <= 40 \ + if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): rev = rev.lower() remote_rev_type = 'hash' @@ -2420,7 +2430,7 @@ def detached(name, https_pass=https_pass, ignore_retcode=False) - if 'refs/remotes/'+remote+'/'+ref in all_remote_refs: + if 'refs/remotes/'+remote+'/'+rev in all_remote_refs: checkout_commit_id = all_remote_refs['refs/remotes/' + remote + '/' + rev] elif 'refs/tags/' + rev in all_remote_refs: checkout_commit_id = all_remote_refs['refs/tags/' + rev] @@ -2597,13 +2607,13 @@ def config_unset(name, # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) all_ = kwargs.pop('all', False) if kwargs: return _fail( ret, - salt.utils.invalid_kwargs(kwargs, raise_exc=False) + salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: @@ -2848,12 +2858,12 @@ def config_set(name, # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) if kwargs: return _fail( ret, - salt.utils.invalid_kwargs(kwargs, raise_exc=False) + salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: diff --git a/salt/states/github.py b/salt/states/github.py index 734766aede..0526643305 100644 --- a/salt/states/github.py +++ b/salt/states/github.py @@ -22,7 +22,7 @@ import datetime import logging # Import Salt Libs -import salt.ext.six as six +from salt.ext import six from salt.exceptions import CommandExecutionError from salt.ext.six.moves import range diff --git a/salt/states/glusterfs.py b/salt/states/glusterfs.py index d2fbe176c6..cdc3684d94 100644 --- a/salt/states/glusterfs.py +++ b/salt/states/glusterfs.py @@ -120,13 +120,13 @@ def volume_present(name, bricks, stripe=False, replica=False, device_vg=False, .. 
code-block:: yaml myvolume: - glusterfs.created: + glusterfs.volume_present: - bricks: - host1:/srv/gluster/drive1 - host2:/srv/gluster/drive2 Replicated Volume: - glusterfs.created: + glusterfs.volume_present: - name: volume2 - bricks: - host1:/srv/gluster/drive2 diff --git a/salt/states/gnomedesktop.py b/salt/states/gnomedesktop.py index d3713e7073..e02f38166d 100644 --- a/salt/states/gnomedesktop.py +++ b/salt/states/gnomedesktop.py @@ -29,6 +29,9 @@ from __future__ import absolute_import import logging import re +# Import 3rd-party libs +from salt.ext import six + log = logging.getLogger(__name__) @@ -65,7 +68,7 @@ def _do(name, gnome_kwargs, preferences): elif isinstance(value, int): ftype = 'int' - elif isinstance(value, str): + elif isinstance(value, six.string_types): ftype = 'string' else: ftype = 'string' diff --git a/salt/states/grafana4_dashboard.py b/salt/states/grafana4_dashboard.py index 0f3318c3b8..984b96273e 100644 --- a/salt/states/grafana4_dashboard.py +++ b/salt/states/grafana4_dashboard.py @@ -44,7 +44,7 @@ import copy import json # Import Salt libs -import salt.ext.six as six +from salt.ext import six from salt.utils.dictdiffer import DictDiffer diff --git a/salt/states/grafana4_org.py b/salt/states/grafana4_org.py index 08376ba5cd..791e6f61e8 100644 --- a/salt/states/grafana4_org.py +++ b/salt/states/grafana4_org.py @@ -42,11 +42,13 @@ Basic auth setup ''' from __future__ import absolute_import -from salt.ext.six import string_types -from salt.utils import dictupdate +import salt.utils.dictupdate as dictupdate from salt.utils.dictdiffer import deep_diff from requests.exceptions import HTTPError +# Import 3rd-party libs +from salt.ext.six import string_types + def __virtual__(): '''Only load if grafana4 module is available''' diff --git a/salt/states/grafana4_user.py b/salt/states/grafana4_user.py index ce1a46c50b..a8211bac63 100644 --- a/salt/states/grafana4_user.py +++ b/salt/states/grafana4_user.py @@ -37,10 +37,12 @@ Basic auth setup ''' from __future__ import absolute_import -from salt.ext.six import string_types -from salt.utils import dictupdate +import salt.utils.dictupdate as dictupdate from salt.utils.dictdiffer import deep_diff +# Import 3rd-party libs +from salt.ext.six import string_types + def __virtual__(): '''Only load if grafana4 module is available''' diff --git a/salt/states/grafana_dashboard.py b/salt/states/grafana_dashboard.py index bc088ef5b0..6e1ef15aab 100644 --- a/salt/states/grafana_dashboard.py +++ b/salt/states/grafana_dashboard.py @@ -45,7 +45,7 @@ import json import requests # Import Salt libs -import salt.ext.six as six +from salt.ext import six from salt.utils.dictdiffer import DictDiffer diff --git a/salt/states/group.py b/salt/states/group.py index 8218e415d6..8754a9cb06 100644 --- a/salt/states/group.py +++ b/salt/states/group.py @@ -34,7 +34,7 @@ from __future__ import absolute_import import sys # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def _changes(name, diff --git a/salt/states/heat.py b/salt/states/heat.py index 6b1d0c4cd1..caf549f433 100644 --- a/salt/states/heat.py +++ b/salt/states/heat.py @@ -39,7 +39,7 @@ import json import logging # Import third party libs -import salt.ext.six as six +from salt.ext import six import salt.utils.files import salt.exceptions import yaml diff --git a/salt/states/hg.py b/salt/states/hg.py index 0b6425d863..9e5ec190d2 100644 --- a/salt/states/hg.py +++ b/salt/states/hg.py @@ -14,23 +14,20 @@ in ~/.ssh/known_hosts, and the remote host has this host's public 
key. - target: /tmp/example_repo ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import logging import os import shutil -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform from salt.exceptions import CommandExecutionError from salt.states.git import _fail, _neutral_test log = logging.getLogger(__name__) -if salt.utils.is_windows(): - HG_BINARY = "hg.exe" -else: - HG_BINARY = "hg" +HG_BINARY = 'hg.exe' if salt.utils.platform.is_windows() else 'hg' def __virtual__(): diff --git a/salt/states/host.py b/salt/states/host.py index 1c038d36ff..8345168cd5 100644 --- a/salt/states/host.py +++ b/salt/states/host.py @@ -61,7 +61,7 @@ Or delete all existing names for an address: from __future__ import absolute_import import salt.utils.validate.net -import salt.ext.six as six +from salt.ext import six def present(name, ip): # pylint: disable=C0103 diff --git a/salt/states/htpasswd.py b/salt/states/htpasswd.py index 160dcffc77..4bd0cdf8e6 100644 --- a/salt/states/htpasswd.py +++ b/salt/states/htpasswd.py @@ -15,7 +15,7 @@ Support for htpasswd module. Requires the apache2-utils package for Debian-based ''' from __future__ import absolute_import -import salt.utils +import salt.utils.path __virtualname__ = 'webutil' @@ -26,7 +26,7 @@ def __virtual__(): depends on webutil module ''' - return __virtualname__ if salt.utils.which('htpasswd') else False + return __virtualname__ if salt.utils.path.which('htpasswd') else False def user_exists(name, password=None, htpasswd_file=None, options='', diff --git a/salt/states/infoblox.py b/salt/states/infoblox.py deleted file mode 100644 index 35d65fdb20..0000000000 --- a/salt/states/infoblox.py +++ /dev/null @@ -1,239 +0,0 @@ -# -*- coding: utf-8 -*- -''' -states for infoblox stuff - -ensures a record is either present or absent in an Infoblox DNS system - -.. versionadded:: 2016.3.0 -''' -from __future__ import absolute_import - -# Import Python libs -import logging -from salt.ext import six - -log = logging.getLogger(__name__) - - -def __virtual__(): - ''' - make sure the infoblox module is available - ''' - return True if 'infoblox.get_record' in __salt__ else False - - -def present(name, - value, - record_type, - dns_view, - infoblox_server=None, - infoblox_user=None, - infoblox_password=None, - infoblox_api_version='v1.4.2', - sslVerify=True): - ''' - Ensure a record exists - - name - Name of the record - - value - Value of the record - - record_type - record type (host, a, cname, etc) - - dns_view - DNS View - - infoblox_server - infoblox server to connect to (will try pillar if not specified) - - infoblox_user - username to use to connect to infoblox (will try pillar if not specified) - - infoblox_password - password to use to connect to infoblox (will try pillar if not specified) - - verify_ssl - verify SSL certificates - - Example: - - .. 
code-block:: yaml - - some-state: - infoblox.present: - - name: some.dns.record - - value: 10.1.1.3 - - record_type: host - - sslVerify: False - ''' - record_type = record_type.lower() - value_utf8 = six.text_type(value, "utf-8") - ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} - records = __salt__['infoblox.get_record'](name, - record_type, - infoblox_server=infoblox_server, - infoblox_user=infoblox_user, - infoblox_password=infoblox_password, - dns_view=dns_view, - infoblox_api_version=infoblox_api_version, - sslVerify=sslVerify) - if records: - # check records for updates - for record in records: - update_record = False - if record_type == 'cname': - if record['Canonical Name'] != value_utf8: - update_record = True - elif record_type == 'a': - if record['IP Address'] != value_utf8: - update_record = True - elif record_type == 'host': - if record['IP Addresses'] != [value_utf8]: - update_record = True - if update_record: - if __opts__['test']: - ret['result'] = None - ret['comment'] = ' '.join([ret['comment'], - 'DNS {0} record {1} in view {2} will be update'.format(record_type, - name, - dns_view)]) - else: - retval = __salt__['infoblox.update_record'](name, - value, - dns_view, - record_type, - infoblox_server=infoblox_server, - infoblox_user=infoblox_user, - infoblox_password=infoblox_password, - infoblox_api_version=infoblox_api_version, - sslVerify=sslVerify) - if retval: - if 'old' not in ret['changes']: - ret['changes']['old'] = [] - if 'new' not in ret['changes']: - ret['changes']['new'] = [] - ret['changes']['old'].append(record) - ret['changes']['new'].append(__salt__['infoblox.get_record'](name, - record_type, - infoblox_server=infoblox_server, - infoblox_user=infoblox_user, - infoblox_password=infoblox_password, - dns_view=dns_view, - infoblox_api_version=infoblox_api_version, - sslVerify=sslVerify)) - else: - ret['result'] = False - return ret - else: - # no records - if __opts__['test']: - ret['result'] = None - ret['comment'] = ' '.join([ret['comment'], - 'DNS {0} record {1} set to be added to view {2}'.format(record_type, - name, - dns_view)]) - return ret - retval = __salt__['infoblox.add_record'](name, - value, - record_type, - dns_view, - infoblox_server=infoblox_server, - infoblox_user=infoblox_user, - infoblox_password=infoblox_password, - infoblox_api_version='v1.4.2', - sslVerify=sslVerify) - if retval: - ret['result'] = True - ret['changes']['old'] = None - ret['changes']['new'] = __salt__['infoblox.get_record'](name, - record_type, - infoblox_server=infoblox_server, - infoblox_user=infoblox_user, - infoblox_password=infoblox_password, - dns_view=dns_view, - infoblox_api_version=infoblox_api_version, - sslVerify=sslVerify) - return ret - - -def absent(name, - record_type, - dns_view, - infoblox_server=None, - infoblox_user=None, - infoblox_password=None, - infoblox_api_version='v1.4.2', - sslVerify=True): - ''' - Ensure a record does not exists - - name - Name of the record - record_type - record type (host, a, cname, etc) - dns_view - DNS View - infoblox_server - infoblox server to connect to (will try pillar if not specified) - infoblox_user - username to use to connect to infoblox (will try pillar if not specified) - infoblox_password - password to use to connect to infoblox (will try pillar if not specified) - verify_ssl - verify SSL certificates - - Example: - - .. 
code-block:: yaml - - some-state: - infoblox.absent: - - name: some.dns.record - - record_type: host - - dns_view: MyView - - sslVerify: False - ''' - ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} - record = __salt__['infoblox.get_record'](name, - record_type, - infoblox_server=infoblox_server, - infoblox_user=infoblox_user, - infoblox_password=infoblox_password, - dns_view=dns_view, - infoblox_api_version=infoblox_api_version, - sslVerify=sslVerify) - if record: - if __opts__['test']: - ret['result'] = None - ret['comment'] = ' '.join([ret['comment'], - 'DNS {0} record {1} in view {2} will be removed'.format(record_type, - name, - dns_view)]) - else: - retval = __salt__['infoblox.delete_record'](name, - dns_view, - record_type, - infoblox_server=infoblox_server, - infoblox_user=infoblox_user, - infoblox_password=infoblox_password, - infoblox_api_version=infoblox_api_version, - sslVerify=sslVerify) - if retval: - if 'old' not in ret['changes']: - ret['changes']['old'] = [] - ret['changes']['new'] = None - ret['changes']['old'].append(record) - else: - ret['result'] = False - return ret - else: - # record not found - ret['result'] = True - ret['changes']['old'] = None - ret['changes']['new'] = None - ret['comment'] = 'DNS record does not exist' - - return ret diff --git a/salt/states/infoblox_a.py b/salt/states/infoblox_a.py new file mode 100644 index 0000000000..1af4510ced --- /dev/null +++ b/salt/states/infoblox_a.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +''' +Infoblox A record managment. + +functions accept api_opts: + + api_verifyssl: verify SSL [default to True or pillar value] + api_url: server to connect to [default to pillar value] + api_username: [default to pillar value] + api_password: [default to pillar value] +''' + + +def present(name=None, ipv4addr=None, data=None, ensure_data=True, **api_opts): + ''' + Ensure infoblox A record. + + When you wish to update a hostname ensure `name` is set to the hostname + of the current record. You can give a new name in the `data.name`. + + State example: + + .. 
code-block:: yaml + + infoblox_a.present: + - name: example-ha-0.domain.com + - data: + name: example-ha-0.domain.com + ipv4addr: 123.0.31.2 + view: Internal + ''' + ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} + + if not data: + data = {} + if 'name' not in data: + data.update({'name': name}) + if 'ipv4addr' not in data: + data.update({'ipv4addr': ipv4addr}) + + obj = __salt__['infoblox.get_a'](name=name, ipv4addr=ipv4addr, allow_array=False, **api_opts) + if obj is None: + # perhaps the user updated the name + obj = __salt__['infoblox.get_a'](name=data['name'], ipv4addr=data['ipv4addr'], allow_array=False, **api_opts) + if obj: + # warn user that the data was updated and does not match + ret['result'] = False + ret['comment'] = '** please update the name: {0} to equal the updated data name {1}'.format(name, data['name']) + return ret + + if obj: + obj = obj[0] + if not ensure_data: + ret['result'] = True + ret['comment'] = 'infoblox record already created (supplied fields not ensured to match)' + return ret + + diff = __salt__['infoblox.diff_objects'](data, obj) + if not diff: + ret['result'] = True + ret['comment'] = 'supplied fields already updated (note: removing fields might not update)' + return ret + + if diff: + ret['changes'] = {'diff': diff} + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'would attempt to update infoblox record' + return ret + ## TODO: perhaps need to review the output of new_obj + new_obj = __salt__['infoblox.update_object'](obj['_ref'], data=data, **api_opts) + ret['result'] = True + ret['comment'] = 'infoblox record fields updated (note: removing fields might not update)' + return ret + + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'would attempt to create infoblox record {0}'.format(data['name']) + return ret + + new_obj_ref = __salt__['infoblox.create_a'](data=data, **api_opts) + new_obj = __salt__['infoblox.get_a'](name=name, ipv4addr=ipv4addr, allow_array=False, **api_opts) + + ret['result'] = True + ret['comment'] = 'infoblox record created' + ret['changes'] = {'old': 'None', 'new': {'_ref': new_obj_ref, 'data': new_obj}} + return ret + + +def absent(name=None, ipv4addr=None, **api_opts): + ''' + Ensure infoblox A record is removed. + + State example: + + .. code-block:: yaml + + infoblox_a.absent: + - name: example-ha-0.domain.com + + infoblox_a.absent: + - name: + - ipv4addr: 127.0.23.23 + ''' + ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} + obj = __salt__['infoblox.get_a'](name=name, ipv4addr=ipv4addr, allow_array=False, **api_opts) + + if not obj: + ret['result'] = True + ret['comment'] = 'infoblox already removed' + return ret + + if __opts__['test']: + ret['result'] = None + ret['changes'] = {'old': obj, 'new': 'absent'} + return ret + + if __salt__['infoblox.delete_a'](name=name, ipv4addr=ipv4addr, **api_opts): + ret['result'] = True + ret['changes'] = {'old': obj, 'new': 'absent'} + return ret diff --git a/salt/states/infoblox_cname.py b/salt/states/infoblox_cname.py new file mode 100644 index 0000000000..35509e042b --- /dev/null +++ b/salt/states/infoblox_cname.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +''' +Infoblox CNAME managment. 
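The rename behaviour described for the new infoblox_a.present (and mirrored in infoblox_cname.present below) is easiest to see in a state: keep name pointing at the existing record and put the new hostname in data.name. A hypothetical sketch, with made-up hostnames and addresses:

.. code-block:: yaml

    rename_a_record:
      infoblox_a.present:
        - name: old-host.example.com
        - data:
            name: new-host.example.com
            ipv4addr: 203.0.113.10
            view: Internal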
+ +functions accept api_opts: + + api_verifyssl: verify SSL [default to True or pillar value] + api_url: server to connect to [default to pillar value] + api_username: [default to pillar value] + api_password: [default to pillar value] +''' + + +def present(name=None, data=None, ensure_data=True, **api_opts): + ''' + Ensure the CNAME with the given data is present. + + name + CNAME of record + data + raw CNAME api data see: https://INFOBLOX/wapidoc + + State example: + + .. code-block:: yaml + + infoblox_cname.present: + - name: example-ha-0.domain.com + - data: + name: example-ha-0.domain.com + canonical: example.domain.com + zone: example.com + view: Internal + comment: Example comment + + infoblox_cname.present: + - name: example-ha-0.domain.com + - data: + name: example-ha-0.domain.com + canonical: example.domain.com + zone: example.com + view: Internal + comment: Example comment + - api_url: https://INFOBLOX/wapi/v1.2.1 + - api_username: username + - api_password: passwd + ''' + ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} + if not data: + data = {} + if 'name' not in data: + data.update({'name': name}) + + obj = __salt__['infoblox.get_cname'](name=name, **api_opts) + if obj is None: + # perhaps the user updated the name + obj = __salt__['infoblox.get_cname'](name=data['name'], **api_opts) + if obj: + # warn user that the data was updated and does not match + ret['result'] = False + ret['comment'] = '** please update the name: {0} to equal the updated data name {1}'.format(name, data['name']) + return ret + + if obj: + if not ensure_data: + ret['result'] = True + ret['comment'] = 'infoblox record already created (supplied fields not ensured to match)' + return ret + + diff = __salt__['infoblox.diff_objects'](data, obj) + if not diff: + ret['result'] = True + ret['comment'] = 'supplied fields already updated (note: removing fields might not update)' + return ret + + if diff: + ret['changes'] = {'diff': diff} + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'would attempt to update infoblox record' + return ret + new_obj = __salt__['infoblox.update_object'](obj['_ref'], data=data, **api_opts) + ret['result'] = True + ret['comment'] = 'infoblox record fields updated (note: removing fields might not update)' + return ret + + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'would attempt to create infoblox record {0}'.format(data['name']) + return ret + + new_obj_ref = __salt__['infoblox.create_cname'](data=data, **api_opts) + new_obj = __salt__['infoblox.get_cname'](name=name, **api_opts) + + ret['result'] = True + ret['comment'] = 'infoblox record created' + ret['changes'] = {'old': 'None', 'new': {'_ref': new_obj_ref, 'data': new_obj}} + return ret + + +def absent(name=None, canonical=None, **api_opts): + ''' + Ensure the CNAME with the given name or canonical name is removed + ''' + ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} + obj = __salt__['infoblox.get_cname'](name=name, canonical=canonical, **api_opts) + + if not obj: + ret['result'] = True + ret['comment'] = 'infoblox already removed' + return ret + + if __opts__['test']: + ret['result'] = None + ret['changes'] = {'old': obj, 'new': 'absent'} + return ret + + if __salt__['infoblox.delete_cname'](name=name, canonical=canonical, **api_opts): + ret['result'] = True + ret['changes'] = {'old': obj, 'new': 'absent'} + return ret diff --git a/salt/states/infoblox_host_record.py b/salt/states/infoblox_host_record.py new file mode 100644 index 
0000000000..fbf8ab3061 --- /dev/null +++ b/salt/states/infoblox_host_record.py @@ -0,0 +1,163 @@ +# -*- coding: utf-8 -*- +''' +Infoblox host record managment. + +functions accept api_opts: + + api_verifyssl: verify SSL [default to True or pillar value] + api_url: server to connect to [default to pillar value] + api_username: [default to pillar value] + api_password: [default to pillar value] +''' + + +def present(name=None, data=None, ensure_data=True, **api_opts): + ''' + This will ensure that a host with the provided name exists. + This will try to ensure that the state of the host matches the given data + If the host is not found then one will be created. + + When trying to update a hostname ensure `name` is set to the hostname + of the current record. You can give a new name in the `data.name`. + + Avoid race conditions, use func:nextavailableip: + - func:nextavailableip:network/ZG54dfgsrDFEFfsfsLzA:10.0.0.0/8/default + - func:nextavailableip:10.0.0.0/8 + - func:nextavailableip:10.0.0.0/8,externalconfigure_for_dns + - func:nextavailableip:10.0.0.3-10.0.0.10 + + State Example: + + .. code-block:: yaml + + # this would update `original_hostname.example.ca` to changed `data`. + infoblox_host_record.present: + - name: original_hostname.example.ca + - data: {'namhostname.example.cae': 'hostname.example.ca', + 'aliases': ['hostname.math.example.ca'], + 'extattrs': [{'Business Contact': {'value': 'EXAMPLE@example.ca'}}], + 'ipv4addrs': [{'configure_for_dhcp': True, + 'ipv4addr': 'func:nextavailableip:129.97.139.0/24', + 'mac': '00:50:56:84:6e:ae'}], + 'ipv6addrs': [], } + ''' + ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} + if data is None: + data = {} + if 'name' not in data: + data.update({'name': name}) + + obj = __salt__['infoblox.get_host'](name=name, **api_opts) + if obj is None: + # perhaps the user updated the name + obj = __salt__['infoblox.get_host'](name=data['name'], **api_opts) + if obj: + # warn user that the host name was updated and does not match + ret['result'] = False + ret['comment'] = 'please update the name: {0} to equal the updated data name {1}'.format(name, data['name']) + return ret + + if obj: + if not ensure_data: + ret['result'] = True + ret['comment'] = 'infoblox record already created (supplied fields not ensured to match)' + return ret + + obj = __salt__['infoblox.get_host_advanced'](name=name, **api_opts) + diff = __salt__['infoblox.diff_objects'](data, obj) + if not diff: + ret['result'] = True + ret['comment'] = 'supplied fields already updated (note: removing fields might not update)' + return ret + + if diff: + ret['changes'] = {'diff': diff} + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'would attempt to update infoblox record' + return ret + + # replace func:nextavailableip with current ip address if in range + # get list of ipaddresses that are defined. + obj_addrs = [] + if 'ipv4addrs' in obj: + for addr in obj['ipv4addrs']: + if 'ipv4addr' in addr: + obj_addrs.append(addr['ipv4addr']) + if 'ipv6addrs' in obj: + for addr in obj['ipv6addrs']: + if 'ipv6addr' in addr: + obj_addrs.append(addr['ipv6addr']) + + # replace func:nextavailableip: if an ip address is already found in that range. 
+ if 'ipv4addrs' in data: + for addr in data['ipv4addrs']: + if 'ipv4addr' in addr: + addrobj = addr['ipv4addr'] + if addrobj.startswith('func:nextavailableip:'): + found_matches = 0 + for ip in obj_addrs: + if __salt__['infoblox.is_ipaddr_in_ipfunc_range'](ip, addrobj): + addr['ipv4addr'] = ip + found_matches += 1 + if found_matches > 1: + ret['comment'] = 'infoblox record cant updated because ipaddress {0} matches mutiple func:nextavailableip'.format(ip) + ret['result'] = False + return ret + + new_obj = __salt__['infoblox.update_object'](obj['_ref'], data=data, **api_opts) + ret['result'] = True + ret['comment'] = 'infoblox record fields updated (note: removing fields might not update)' + #ret['changes'] = {'diff': diff } + return ret + + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'would attempt to create infoblox record {0}'.format(name) + return ret + + new_obj_ref = __salt__['infoblox.create_host'](data=data, **api_opts) + new_obj = __salt__['infoblox.get_host'](name=name, **api_opts) + + ret['result'] = True + ret['comment'] = 'infoblox record created' + ret['changes'] = {'old': 'None', 'new': {'_ref': new_obj_ref, 'data': new_obj}} + return ret + + +def absent(name=None, ipv4addr=None, mac=None, **api_opts): + ''' + Ensure the host with the given Name ipv4addr or mac is removed. + + State example: + + .. code-block:: yaml + + infoblox_host_record.absent: + - name: hostname.of.record.to.remove + + infoblox_host_record.absent: + - name: + - ipv4addr: 192.168.0.1 + + infoblox_host_record.absent: + - name: + - mac: 12:02:12:31:23:43 + ''' + ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} + obj = __salt__['infoblox.get_host'](name=name, ipv4addr=ipv4addr, mac=mac, **api_opts) + + if not obj: + ret['result'] = True + ret['comment'] = 'infoblox already removed' + return ret + + if __opts__['test']: + ret['result'] = None + ret['changes'] = {'old': obj, 'new': 'absent'} + return ret + + if __salt__['infoblox.delete_host'](name=name, mac=mac, **api_opts): + ret['result'] = True + ret['changes'] = {'old': obj, 'new': 'absent'} + return ret diff --git a/salt/states/infoblox_range.py b/salt/states/infoblox_range.py new file mode 100644 index 0000000000..aa8769b338 --- /dev/null +++ b/salt/states/infoblox_range.py @@ -0,0 +1,188 @@ +# -*- coding: utf-8 -*- +''' +Infoblox host record managment. + +functions accept api_opts: + + api_verifyssl: verify SSL [default to True or pillar value] + api_url: server to connect to [default to pillar value] + api_username: [default to pillar value] + api_password: [default to pillar value] +''' + + +def present(name=None, start_addr=None, end_addr=None, data=None, **api_opts): + ''' + Ensure range record is present. + + infoblox_range.present: + start_addr: '129.97.150.160', + end_addr: '129.97.150.170', + + Verbose state example: + + .. code-block:: yaml + + infoblox_range.present: + data: { + 'always_update_dns': False, + 'authority': False, + 'comment': 'range of IP addresses used for salt.. 
was used for ghost images deployment', + 'ddns_generate_hostname': True, + 'deny_all_clients': False, + 'deny_bootp': False, + 'disable': False, + 'email_list': [], + 'enable_ddns': False, + 'enable_dhcp_thresholds': False, + 'enable_email_warnings': False, + 'enable_ifmap_publishing': False, + 'enable_snmp_warnings': False, + 'end_addr': '129.97.150.169', + 'exclude': [], + 'extattrs': {}, + 'fingerprint_filter_rules': [], + 'high_water_mark': 95, + 'high_water_mark_reset': 85, + 'ignore_dhcp_option_list_request': False, + 'lease_scavenge_time': -1, + 'logic_filter_rules': [], + 'low_water_mark': 0, + 'low_water_mark_reset': 10, + 'mac_filter_rules': [], + 'member': {'_struct': 'dhcpmember', + 'ipv4addr': '129.97.128.9', + 'name': 'cn-dhcp-mc.example.ca'}, + 'ms_options': [], + 'nac_filter_rules': [], + 'name': 'ghost-range', + 'network': '129.97.150.0/24', + 'network_view': 'default', + 'option_filter_rules': [], + 'options': [{'name': 'dhcp-lease-time', + 'num': 51, + 'use_option': False, + 'value': '43200', + 'vendor_class': 'DHCP'}], + 'recycle_leases': True, + 'relay_agent_filter_rules': [], + 'server_association_type': 'MEMBER', + 'start_addr': '129.97.150.160', + 'update_dns_on_lease_renewal': False, + 'use_authority': False, + 'use_bootfile': False, + 'use_bootserver': False, + 'use_ddns_domainname': False, + 'use_ddns_generate_hostname': True, + 'use_deny_bootp': False, + 'use_email_list': False, + 'use_enable_ddns': False, + 'use_enable_dhcp_thresholds': False, + 'use_enable_ifmap_publishing': False, + 'use_ignore_dhcp_option_list_request': False, + 'use_known_clients': False, + 'use_lease_scavenge_time': False, + 'use_nextserver': False, + 'use_options': False, + 'use_recycle_leases': False, + 'use_unknown_clients': False, + 'use_update_dns_on_lease_renewal': False + } + ''' + ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} + + if not data: + data = {} + if 'name' not in data: + data.update({'name': name}) + if 'start_addr' not in data: + data.update({'start_addr': start_addr}) + if 'end_addr' not in data: + data.update({'end_addr': end_addr}) + + obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts) + if obj is None: + obj = __salt__['infoblox.get_ipv4_range'](start_addr=data['start_addr'], end_addr=None, **api_opts) + if obj is None: + obj = __salt__['infoblox.get_ipv4_range'](start_addr=None, end_addr=data['end_addr'], **api_opts) + + if obj: + diff = __salt__['infoblox.diff_objects'](data, obj) + if not diff: + ret['result'] = True + ret['comment'] = 'supplied fields in correct state' + return ret + if diff: + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'would attempt to update record' + return ret + new_obj = __salt__['infoblox.update_object'](obj['_ref'], data=data, **api_opts) + ret['result'] = True + ret['comment'] = 'record fields updated' + ret['changes'] = {'diff': diff} + return ret + + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'would attempt to create record {0}'.format(name) + return ret + + new_obj_ref = __salt__['infoblox.create_ipv4_range'](data, **api_opts) + new_obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts) + + ret['result'] = True + ret['comment'] = 'record created' + ret['changes'] = {'old': 'None', 'new': {'_ref': new_obj_ref, 'data': new_obj}} + return ret + + +def absent(name=None, start_addr=None, end_addr=None, data=None, **api_opts): + ''' + Ensure the range is removed + + Supplying the end of the range is 
optional. + + State example: + + .. code-block:: yaml + + infoblox_range.absent: + - name: 'vlan10' + + infoblox_range.absent: + - name: + - start_addr: 127.0.1.20 + ''' + ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} + + if not data: + data = {} + if 'name' not in data: + data.update({'name': name}) + if 'start_addr' not in data: + data.update({'start_addr': start_addr}) + if 'end_addr' not in data: + data.update({'end_addr': end_addr}) + + obj = __salt__['infoblox.get_ipv4_range'](data['start_addr'], data['end_addr'], **api_opts) + if obj is None: + obj = __salt__['infoblox.get_ipv4_range'](start_addr=data['start_addr'], end_addr=None, **api_opts) + if obj is None: + obj = __salt__['infoblox.get_ipv4_range'](start_addr=None, end_addr=data['end_addr'], **api_opts) + + if not obj: + ret['result'] = True + ret['comment'] = 'already deleted' + return ret + + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'would attempt to delete range' + return ret + + if __salt__['infoblox.delete_object'](objref=obj['_ref']): + ret['result'] = True + ret['changes'] = {'old': 'Found {0} - {1}'.format(start_addr, end_addr), + 'new': 'Removed'} + return ret diff --git a/salt/states/ini_manage.py b/salt/states/ini_manage.py index e22202e0f1..d0ed67e8d3 100644 --- a/salt/states/ini_manage.py +++ b/salt/states/ini_manage.py @@ -14,7 +14,7 @@ Manage ini files from __future__ import absolute_import # Import Salt libs -import salt.ext.six as six +from salt.ext import six __virtualname__ = 'ini' @@ -206,7 +206,7 @@ def sections_present(name, sections=None, separator='='): ret['result'] = False ret['comment'] = "{0}".format(err) return ret - if cmp(dict(sections[section]), cur_section) == 0: + if dict(sections[section]) == cur_section: ret['comment'] += 'Section unchanged {0}.\n'.format(section) continue elif cur_section: diff --git a/salt/states/jboss7.py b/salt/states/jboss7.py index 9c5d2950da..18aa1e449a 100644 --- a/salt/states/jboss7.py +++ b/salt/states/jboss7.py @@ -44,11 +44,11 @@ import re import traceback # Import Salt libs -from salt.utils import dictdiffer +import salt.utils.dictdiffer as dictdiffer from salt.exceptions import CommandExecutionError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/states/jenkins.py b/salt/states/jenkins.py index 0df8492400..3e077f681d 100644 --- a/salt/states/jenkins.py +++ b/salt/states/jenkins.py @@ -13,7 +13,7 @@ import difflib import logging # Import Salt libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import zip import salt.utils.files diff --git a/salt/states/k8s.py b/salt/states/k8s.py index 772ea6e117..da52e36f76 100644 --- a/salt/states/k8s.py +++ b/salt/states/k8s.py @@ -28,7 +28,7 @@ Manage Kubernetes from __future__ import absolute_import # Import salt libs -import salt.utils +import salt.utils.versions __virtualname__ = 'k8s' @@ -74,7 +74,7 @@ def label_present( 'kubernetes.node_label_present. Update your SLS to use the new ' 'function name to get rid of this warning.' ) - salt.utils.warn_until('Fluorine', msg) + salt.utils.versions.warn_until('Fluorine', msg) ret.setdefault('warnings', []).append(msg) return ret @@ -109,7 +109,7 @@ def label_absent( 'kubernetes.node_label_absent. Update your SLS to use the new ' 'function name to get rid of this warning.' 
) - salt.utils.warn_until('Fluorine', msg) + salt.utils.versions.warn_until('Fluorine', msg) ret.setdefault('warnings', []).append(msg) return ret @@ -145,7 +145,7 @@ def label_folder_absent( 'function name to get rid of this warning.' ) - salt.utils.warn_until('Fluorine', msg) + salt.utils.versions.warn_until('Fluorine', msg) ret.setdefault('warnings', []).append(msg) return ret diff --git a/salt/states/kubernetes.py b/salt/states/kubernetes.py index 64cd451532..952b78cbb6 100644 --- a/salt/states/kubernetes.py +++ b/salt/states/kubernetes.py @@ -73,11 +73,17 @@ The kubernetes module is used to manage different kubernetes resources. key1: value1 key2: value2 key3: value3 + +.. versionadded: 2017.7.0 ''' from __future__ import absolute_import import copy import logging + +# Import 3rd-party libs +from salt.ext import six + log = logging.getLogger(__name__) @@ -400,7 +406,7 @@ def namespace_absent(name, **kwargs): if ( res['code'] == 200 or ( - isinstance(res['status'], str) and + isinstance(res['status'], six.string_types) and 'Terminating' in res['status'] ) or ( diff --git a/salt/states/ldap.py b/salt/states/ldap.py index e12ee23a0a..5ea2b96c3b 100644 --- a/salt/states/ldap.py +++ b/salt/states/ldap.py @@ -14,8 +14,9 @@ from __future__ import absolute_import import copy import inspect import logging -import salt.ext.six as six +from salt.ext import six from salt.utils.odict import OrderedDict +from salt.utils.oset import OrderedSet log = logging.getLogger(__name__) @@ -335,16 +336,16 @@ def managed(name, entries, connect_spec=None): changed_old[dn] = o changed_new[dn] = n success_dn_set[dn] = True - except ldap3.LDAPError: - log.exception('failed to %s entry %s', op, dn) - errs.append((op, dn)) + except ldap3.LDAPError as err: + log.exception('failed to %s entry %s (%s)', op, dn, err) + errs.append((op, dn, err)) continue if len(errs): ret['result'] = False ret['comment'] = 'failed to ' \ - + ', '.join((op + ' entry ' + dn - for op, dn in errs)) + + ', '.join((op + ' entry ' + dn + '(' + str(err) + ')' + for op, dn, err in errs)) # set ret['changes']. filter out any unchanged attributes, and # convert the value sets to lists before returning them to the @@ -421,7 +422,7 @@ def _process_entries(l, entries): results = __salt__['ldap3.search'](l, dn, 'base') if len(results) == 1: attrs = results[dn] - olde = dict(((attr, set(attrs[attr])) + olde = dict(((attr, OrderedSet(attrs[attr])) for attr in attrs if len(attrs[attr]))) else: @@ -478,7 +479,7 @@ def _update_entry(entry, status, directives): if len(vals): entry[attr] = vals elif directive == 'delete': - existing_vals = entry.pop(attr, set()) + existing_vals = entry.pop(attr, OrderedSet()) if len(vals): existing_vals -= vals if len(existing_vals): @@ -496,9 +497,16 @@ def _toset(thing): This enables flexibility in what users provide as the list of LDAP entry attribute values. Note that the LDAP spec prohibits - duplicate values in an attribute and that the order is - unspecified, so a set is good for automatically removing - duplicates. + duplicate values in an attribute. + + RFC 2251 states that: + "The order of attribute values within the vals set is undefined and + implementation-dependent, and MUST NOT be relied upon." + However, OpenLDAP have an X-ORDERED that is used in the config schema. + Using sets would mean we can't pass ordered values and therefore can't + manage parts of the OpenLDAP configuration, hence the use of OrderedSet. + + Sets are also good for automatically removing duplicates. None becomes an empty set. 
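A rough, hypothetical illustration of why value order matters here: with OrderedSet, an ldap.managed state can supply an X-ORDERED attribute from the OpenLDAP cn=config schema and rely on the listed order being preserved. The DN, attribute and values below are illustrative only; the exact entries syntax is documented in the ldap.managed docstring:

.. code-block:: yaml

    openldap_backend_tuning:
      ldap.managed:
        - name: ldapi:///
        - entries:
          - olcDatabase={1}hdb,cn=config:
            - replace:
                olcDbConfig:
                  - set_cachesize 0 10485760 0
                  - set_lk_max_objects 1500
                  - set_lk_locks 1500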
Iterables except for strings have their elements added to a new set. Non-None scalars (strings, @@ -507,12 +515,12 @@ def _toset(thing): ''' if thing is None: - return set() + return OrderedSet() if isinstance(thing, six.string_types): - return set((thing,)) + return OrderedSet((thing,)) # convert numbers to strings so that equality checks work # (LDAP stores numbers as strings) try: - return set((str(x) for x in thing)) + return OrderedSet((str(x) for x in thing)) except TypeError: - return set((str(thing),)) + return OrderedSet((str(thing),)) diff --git a/salt/states/linux_acl.py b/salt/states/linux_acl.py index a6a54a7fcd..0f733f31c2 100644 --- a/salt/states/linux_acl.py +++ b/salt/states/linux_acl.py @@ -32,10 +32,13 @@ from __future__ import absolute_import import os # Import salt libs -import salt.utils +import salt.utils.path + +# Impot salt exceptions +from salt.exceptions import CommandExecutionError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six __virtualname__ = 'acl' @@ -44,7 +47,7 @@ def __virtual__(): ''' Ensure getfacl & setfacl exist ''' - if salt.utils.which('getfacl') and salt.utils.which('setfacl'): + if salt.utils.path.which('getfacl') and salt.utils.path.which('setfacl'): return __virtualname__ return False @@ -57,6 +60,7 @@ def present(name, acl_type, acl_name='', perms='', recurse=False): ret = {'name': name, 'result': True, 'changes': {}, + 'pchanges': {}, 'comment': ''} _octal = {'r': 4, 'w': 2, 'x': 1, '-': 0} @@ -99,21 +103,54 @@ def present(name, acl_type, acl_name='', perms='', recurse=False): if user[_search_name]['octal'] == sum([_octal.get(i, i) for i in perms]): ret['comment'] = 'Permissions are in the desired state' else: - ret['comment'] = 'Permissions have been updated' + changes = {'new': {'acl_name': acl_name, + 'acl_type': acl_type, + 'perms': perms}, + 'old': {'acl_name': acl_name, + 'acl_type': acl_type, + 'perms': str(user[_search_name]['octal'])}} if __opts__['test']: - ret['result'] = None + ret.update({'comment': 'Updated permissions will be applied for ' + '{0}: {1} -> {2}'.format( + acl_name, + str(user[_search_name]['octal']), + perms), + 'result': None, 'pchanges': changes}) return ret - - __salt__['acl.modfacl'](acl_type, acl_name, perms, name, recursive=recurse) + try: + __salt__['acl.modfacl'](acl_type, acl_name, perms, name, + recursive=recurse, raise_err=True) + ret.update({'comment': 'Updated permissions for ' + '{0}'.format(acl_name), + 'result': True, 'changes': changes}) + except CommandExecutionError as exc: + ret.update({'comment': 'Error updating permissions for ' + '{0}: {1}'.format(acl_name, exc.strerror), + 'result': False}) else: - ret['comment'] = 'Permissions will be applied' + changes = {'new': {'acl_name': acl_name, + 'acl_type': acl_type, + 'perms': perms}} if __opts__['test']: + ret.update({'comment': 'New permissions will be applied for ' + '{0}: {1}'.format(acl_name, perms), + 'result': None, 'pchanges': changes}) ret['result'] = None return ret - __salt__['acl.modfacl'](acl_type, acl_name, perms, name, recursive=recurse) + try: + __salt__['acl.modfacl'](acl_type, acl_name, perms, name, + recursive=recurse, raise_err=True) + ret.update({'comment': 'Applied new permissions for ' + '{0}'.format(acl_name), + 'result': True, 'changes': changes}) + except CommandExecutionError as exc: + ret.update({'comment': 'Error updating permissions for {0}: ' + '{1}'.format(acl_name, exc.strerror), + 'result': False}) + else: ret['comment'] = 'ACL Type does not exist' ret['result'] = False diff --git 
a/salt/states/logadm.py b/salt/states/logadm.py index 8618509db7..0a1af8f1bd 100644 --- a/salt/states/logadm.py +++ b/salt/states/logadm.py @@ -22,6 +22,7 @@ import logging # Import salt libs import salt.utils +import salt.utils.args log = logging.getLogger(__name__) @@ -60,7 +61,7 @@ def rotate(name, **kwargs): 'comment': ''} ## cleanup kwargs - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) ## inject name as entryname if 'entryname' not in kwargs: diff --git a/salt/states/lvm.py b/salt/states/lvm.py index d3e256080b..ffc9054aab 100644 --- a/salt/states/lvm.py +++ b/salt/states/lvm.py @@ -27,15 +27,15 @@ from __future__ import absolute_import import os # Import salt libs -import salt.utils -import salt.ext.six as six +import salt.utils.path +from salt.ext import six def __virtual__(): ''' Only load the module if lvm is installed ''' - if salt.utils.which('lvm'): + if salt.utils.path.which('lvm'): return 'lvm' return False diff --git a/salt/states/lxc.py b/salt/states/lxc.py index ccb7907093..e05061a6ad 100644 --- a/salt/states/lxc.py +++ b/salt/states/lxc.py @@ -8,7 +8,7 @@ from __future__ import absolute_import __docformat__ = 'restructuredtext en' # Import salt libs -import salt.utils +import salt.utils.versions from salt.exceptions import CommandExecutionError, SaltInvocationError @@ -708,7 +708,7 @@ def edited_conf(name, lxc_conf=None, lxc_conf_unset=None): # Until a reasonable alternative for this state function is created, we need # to keep this function around and cannot officially remove it. Progress of # the new function will be tracked in https://github.com/saltstack/salt/issues/35523 - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'This state is unsuitable for setting parameters that appear more ' 'than once in an LXC config file, or parameters which must appear in ' diff --git a/salt/states/mac_assistive.py b/salt/states/mac_assistive.py index 77ca78093d..8cc5dd4c90 100644 --- a/salt/states/mac_assistive.py +++ b/salt/states/mac_assistive.py @@ -12,12 +12,12 @@ Install, enable and disable assistive access on macOS minions - enabled: True ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import logging -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) @@ -29,7 +29,8 @@ def __virtual__(): ''' Only work on Mac OS ''' - if salt.utils.is_darwin() and _LooseVersion(__grains__['osrelease']) >= _LooseVersion('10.9'): + if salt.utils.platform.is_darwin() \ + and _LooseVersion(__grains__['osrelease']) >= _LooseVersion('10.9'): return True return False diff --git a/salt/states/mac_defaults.py b/salt/states/mac_defaults.py index 0eee644753..be4076e251 100644 --- a/salt/states/mac_defaults.py +++ b/salt/states/mac_defaults.py @@ -5,12 +5,12 @@ Writing/reading defaults from a macOS minion ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import logging -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = 'macdefaults' @@ -20,7 +20,7 @@ def __virtual__(): ''' Only work on Mac OS ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return __virtualname__ return False diff --git a/salt/states/mac_keychain.py b/salt/states/mac_keychain.py index e67a99a403..e54c2d60ef 100644 --- a/salt/states/mac_keychain.py +++ 
b/salt/states/mac_keychain.py @@ -13,12 +13,12 @@ Install certificats to the macOS keychain ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import logging import os -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = 'keychain' @@ -28,7 +28,7 @@ def __virtual__(): ''' Only work on Mac OS ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return __virtualname__ return False diff --git a/salt/states/mac_package.py b/salt/states/mac_package.py index ed7dae0403..4d100e65c6 100644 --- a/salt/states/mac_package.py +++ b/salt/states/mac_package.py @@ -22,15 +22,14 @@ Install any kind of pkg, dmg or app file on macOS: - target: /Applications/Xcode.app - version_check: xcodebuild -version=Xcode 7.1\n.*7B91b ''' - -# Import python libs +# Import Python libs from __future__ import absolute_import import logging import os import re -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform from salt.exceptions import CommandExecutionError log = logging.getLogger(__name__) @@ -41,7 +40,7 @@ def __virtual__(): ''' Only work on Mac OS ''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): return __virtualname__ return False diff --git a/salt/states/makeconf.py b/salt/states/makeconf.py index 0458c4aaa7..aa442ed65a 100644 --- a/salt/states/makeconf.py +++ b/salt/states/makeconf.py @@ -11,6 +11,10 @@ A state module to manage Gentoo's ``make.conf`` file makeconf.present: - value: '-j3' ''' +from __future__ import absolute_import + +# Import 3rd-party libs +from salt.ext import six def __virtual__(): @@ -27,7 +31,7 @@ def _make_set(var): if var is None: return set() if not isinstance(var, list): - if isinstance(var, str): + if isinstance(var, six.string_types): var = var.split() else: var = list(var) diff --git a/salt/states/mdadm.py b/salt/states/mdadm.py index b2e25f07ae..2b1c834087 100644 --- a/salt/states/mdadm.py +++ b/salt/states/mdadm.py @@ -23,10 +23,10 @@ from __future__ import absolute_import import logging # Import salt libs -import salt.utils +import salt.utils.path # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Set up logger log = logging.getLogger(__name__) @@ -41,7 +41,7 @@ def __virtual__(): ''' if __grains__['kernel'] != 'Linux': return False - if not salt.utils.which('mdadm'): + if not salt.utils.path.which('mdadm'): return False return __virtualname__ diff --git a/salt/states/modjk.py b/salt/states/modjk.py index 90cf6cb365..8d138d580b 100644 --- a/salt/states/modjk.py +++ b/salt/states/modjk.py @@ -8,7 +8,7 @@ from __future__ import absolute_import import logging # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/states/modjk_worker.py b/salt/states/modjk_worker.py index f902d99c0a..baa8d3f93d 100644 --- a/salt/states/modjk_worker.py +++ b/salt/states/modjk_worker.py @@ -19,7 +19,7 @@ Mandatory Settings: execution module :mod:`documentation ` ''' from __future__ import absolute_import -import salt.utils +import salt.utils.versions def __virtual__(): @@ -195,7 +195,7 @@ def stop(name, lbn, target, profile='default', tgt_type='glob', expr_form=None): # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. 
if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' @@ -229,7 +229,7 @@ def activate(name, lbn, target, profile='default', tgt_type='glob', expr_form=No # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' @@ -264,7 +264,7 @@ def disable(name, lbn, target, profile='default', tgt_type='glob', expr_form=Non # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' diff --git a/salt/states/module.py b/salt/states/module.py index 31a4355464..202999e7d8 100644 --- a/salt/states/module.py +++ b/salt/states/module.py @@ -262,6 +262,7 @@ def run(**kwargs): missing = [] tests = [] for func in functions: + func = func.split(':')[0] if func not in __salt__: missing.append(func) elif __opts__['test']: @@ -284,8 +285,9 @@ def run(**kwargs): failures = [] success = [] for func in functions: + _func = func.split(':')[0] try: - func_ret = _call_function(func, returner=kwargs.get('returner'), + func_ret = _call_function(_func, returner=kwargs.get('returner'), func_args=kwargs.get(func)) if not _get_result(func_ret, ret['changes'].get('ret', {})): if isinstance(func_ret, dict): @@ -313,22 +315,35 @@ def _call_function(name, returner=None, **kwargs): ''' argspec = salt.utils.args.get_function_argspec(__salt__[name]) func_kw = dict(zip(argspec.args[-len(argspec.defaults or []):], # pylint: disable=incompatible-py3-code - argspec.defaults or [])) - func_args = [] - for funcset in kwargs.get('func_args') or {}: - if isinstance(funcset, dict): - func_kw.update(funcset) + argspec.defaults or [])) + arg_type, na_type, kw_type = [], {}, False + for funcset in reversed(kwargs.get('func_args') or []): + if not isinstance(funcset, dict): + kw_type = True + if kw_type: + if isinstance(funcset, dict): + arg_type += funcset.values() + na_type.update(funcset) + else: + arg_type.append(funcset) else: - func_args.append(funcset) + func_kw.update(funcset) + arg_type.reverse() + _exp_prm = len(argspec.args or []) - len(argspec.defaults or []) + _passed_prm = len(arg_type) missing = [] - for arg in argspec.args: - if arg not in func_kw: - missing.append(arg) + if na_type and _exp_prm > _passed_prm: + for arg in argspec.args: + if arg not in func_kw: + missing.append(arg) if missing: raise SaltInvocationError('Missing arguments: {0}'.format(', '.join(missing))) + elif _exp_prm > _passed_prm: + raise SaltInvocationError('Function expects {0} parameters, got only {1}'.format( + _exp_prm, _passed_prm)) - mret = __salt__[name](*func_args, **func_kw) + mret = __salt__[name](*arg_type, **func_kw) if returner is not None: returners = salt.loader.returners(__opts__, __salt__) if returner in returners: diff --git a/salt/states/mongodb_database.py b/salt/states/mongodb_database.py index 788c5c4e92..be9a66612e 100644 --- a/salt/states/mongodb_database.py +++ b/salt/states/mongodb_database.py @@ -8,7 +8,7 @@ and can be done using mongodb_user.present 
from __future__ import absolute_import -import salt.utils +import salt.utils.versions def absent(name, @@ -47,7 +47,7 @@ def absent(name, 'result': True, 'comment': ''} - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'The \'mongodb_database.absent\' function has been deprecated and will be removed in Salt ' '{version}. Please use \'mongodb.database_absent\' instead.' diff --git a/salt/states/mongodb_user.py b/salt/states/mongodb_user.py index bfa7ae1aaf..4a65cf23b7 100644 --- a/salt/states/mongodb_user.py +++ b/salt/states/mongodb_user.py @@ -9,7 +9,7 @@ Management of Mongodb users from __future__ import absolute_import -import salt.utils +import salt.utils.versions # Define the module's virtual name __virtualname__ = 'mongodb_user' @@ -85,7 +85,7 @@ def present(name, ''' - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'The \'mongodb_user.present\' function has been deprecated and will be removed in Salt ' '{version}. Please use \'mongodb.user_present\' instead.' @@ -189,7 +189,7 @@ def absent(name, The database in which to authenticate ''' - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'The \'mongodb_user.absent\' function has been deprecated and will be removed in Salt ' '{version}. Please use \'mongodb.user_absent\' instead.' diff --git a/salt/states/mount.py b/salt/states/mount.py index 3d9abe4fd3..b44225b92a 100644 --- a/salt/states/mount.py +++ b/salt/states/mount.py @@ -45,7 +45,7 @@ import re from salt.ext.six import string_types import logging -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/states/mysql_query.py b/salt/states/mysql_query.py index b9bf76ba6f..e5a1a046b4 100644 --- a/salt/states/mysql_query.py +++ b/salt/states/mysql_query.py @@ -29,7 +29,7 @@ import os.path import salt.utils.files # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def __virtual__(): diff --git a/salt/states/netconfig.py b/salt/states/netconfig.py index 9318e227b4..1b2ed44d56 100644 --- a/salt/states/netconfig.py +++ b/salt/states/netconfig.py @@ -109,7 +109,7 @@ def managed(name, template_mode='755', saltenv=None, template_engine='jinja', - skip_verify=True, + skip_verify=False, defaults=None, test=False, commit=True, @@ -194,10 +194,12 @@ def managed(name, - :mod:`py` - :mod:`wempy` - skip_verify: True + skip_verify: False If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped, and the ``source_hash`` argument will be ignored. + .. versionchanged:: 2017.7.1 + test: False Dry run? If set to ``True``, will apply the config, discard and return the changes. Default: ``False`` (will commit the changes on the device). 
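A note on the ``skip_verify`` change above: with the default flipped to ``False`` in 2017.7.1, remote template sources (``http://``, ``https://``, ``ftp://``) are hash-verified by default, so states that relied on the old default need either a ``source_hash`` or an explicit opt-out. A minimal SLS sketch, with a hypothetical template URL and hash file:

.. code-block:: yaml

    # Hypothetical example -- the template URL and hash file are placeholders.
    ntp_peers_config:
      netconfig.managed:
        - template_name: https://example.com/templates/ntp_peers.jinja
        - source_hash: https://example.com/templates/ntp_peers.jinja.sha256
        - commit: True

    # Or keep the pre-2017.7.1 behaviour by opting out explicitly:
    ntp_peers_config_unverified:
      netconfig.managed:
        - template_name: https://example.com/templates/ntp_peers.jinja
        - skip_verify: True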
diff --git a/salt/states/netntp.py b/salt/states/netntp.py index a0d8c851ca..06cbdb66fa 100644 --- a/salt/states/netntp.py +++ b/salt/states/netntp.py @@ -28,7 +28,9 @@ Dependencies from __future__ import absolute_import import logging -log = logging.getLogger(__name__) + +# Import 3rd-party libs +from salt.ext import six # import NAPALM utils import salt.utils.napalm @@ -52,6 +54,8 @@ except ImportError: __virtualname__ = 'netntp' +log = logging.getLogger(__name__) + # ---------------------------------------------------------------------------------------------------------------------- # global variables # ---------------------------------------------------------------------------------------------------------------------- @@ -105,7 +109,7 @@ def _check(peers): return False for peer in peers: - if not isinstance(peer, str): + if not isinstance(peer, six.string_types): return False if not HAS_NETADDR: # if does not have this lib installed, will simply try to load what user specified diff --git a/salt/states/network.py b/salt/states/network.py index 1411ea6453..4eb17f59f9 100644 --- a/salt/states/network.py +++ b/salt/states/network.py @@ -255,10 +255,12 @@ all interfaces are ignored unless specified. ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import difflib -import salt.utils + +# Import Salt libs import salt.utils.network +import salt.utils.platform import salt.loader # Set up logging @@ -271,7 +273,7 @@ def __virtual__(): Confine this module to non-Windows systems with the required execution module available. ''' - if not salt.utils.is_windows() and 'ip.get_interface' in __salt__: + if not salt.utils.platform.is_windows() and 'ip.get_interface' in __salt__: return True return False diff --git a/salt/states/netyang.py b/salt/states/netyang.py index e39b09b3b9..a0f8f53bc3 100644 --- a/salt/states/netyang.py +++ b/salt/states/netyang.py @@ -38,7 +38,7 @@ except ImportError: HAS_NAPALM_YANG = False # Import salt modules -from salt.utils import fopen +import salt.utils.files import salt.utils.napalm # ------------------------------------------------------------------------------ @@ -146,7 +146,7 @@ def managed(name, if 'to_dict' not in data: data = {'to_dict': data} data = [data] - with fopen(temp_file, 'w') as file_handle: + with salt.utils.files.fopen(temp_file, 'w') as file_handle: yaml.safe_dump(json.loads(json.dumps(data)), file_handle, encoding='utf-8', allow_unicode=True) device_config = __salt__['napalm_yang.parse'](models, config=True, diff --git a/salt/states/npm.py b/salt/states/npm.py index 4c58d3ae03..a323a5ae43 100644 --- a/salt/states/npm.py +++ b/salt/states/npm.py @@ -24,7 +24,7 @@ from __future__ import absolute_import from salt.exceptions import CommandExecutionError, CommandNotFoundError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def __virtual__(): @@ -296,7 +296,7 @@ def bootstrap(name, user=None, silent=True): return ret # npm.install will return a string if it can't parse a JSON result - if isinstance(call, str): + if isinstance(call, six.string_types): ret['result'] = False ret['changes'] = call ret['comment'] = 'Could not bootstrap directory' diff --git a/salt/states/ntp.py b/salt/states/ntp.py index dbc1583883..58f235911a 100644 --- a/salt/states/ntp.py +++ b/salt/states/ntp.py @@ -17,12 +17,14 @@ This state is used to manage NTP servers. Currently only Windows is supported. 
''' from __future__ import absolute_import -# Import python libs +# Import Python libs import logging -# Import salt libs -from salt.ext.six import string_types -import salt.utils +# Import Salt libs +import salt.utils.platform + +# Import 3rd-party libs +from salt.ext import six log = logging.getLogger(__name__) @@ -32,7 +34,7 @@ def __virtual__(): ''' This only supports Windows ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return False return 'ntp' @@ -41,7 +43,7 @@ def _check_servers(servers): if not isinstance(servers, list): return False for server in servers: - if not isinstance(server, string_types): + if not isinstance(server, six.string_types): return False return True diff --git a/salt/states/panos.py b/salt/states/panos.py new file mode 100644 index 0000000000..ee30325340 --- /dev/null +++ b/salt/states/panos.py @@ -0,0 +1,579 @@ +# -*- coding: utf-8 -*- +''' +A state module to manage Palo Alto network devices. + +:codeauthor: :email:`Spencer Ervin ` +:maturity: new +:depends: none +:platform: unix + + +About +===== +This state module was designed to handle connections to a Palo Alto based +firewall. This module relies on the Palo Alto proxy module to interface with the devices. + +This state module is designed to give extreme flexibility in the control over XPATH values on the PANOS device. It +exposes the core XML API commands and allows state modules to chain complex XPATH commands. + +Below is an example of how to construct a security rule and move to the top of the policy. This will take a config +lock to prevent execution during the operation, then remove the lock. After the XPATH has been deployed, it will +commit to the device. + +.. code-block:: yaml + + panos/takelock: + panos.add_config_lock + panos/service_tcp_22: + panos.set_config: + - xpath: /config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/service + - value: 22 + - commit: False + panos/create_rule1: + panos.set_config: + - xpath: /config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase/security/rules + - value: ' + + trust + untrust + 10.0.0.1 + 10.0.1.1 + tcp-22 + any + allow + no + ' + - commit: False + panos/moveruletop: + panos.move_config: + - xpath: /config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='rule1'] + - where: top + - commit: False + panos/removelock: + panos.remove_config_lock + panos/commit: + panos.commit + +.. seealso:: + :prox:`Palo Alto Proxy Module ` + +''' + +# Import Python Libs +from __future__ import absolute_import +import logging + +log = logging.getLogger(__name__) + + +def __virtual__(): + return 'panos.commit' in __salt__ + + +def _default_ret(name): + ''' + Set the default response values. + + ''' + ret = { + 'name': name, + 'changes': {}, + 'commit': None, + 'result': False, + 'comment': '' + } + return ret + + +def add_config_lock(name): + ''' + Prevent other users from changing configuration until the lock is released. + + name: The name of the module function to execute. + + SLS Example: + + .. code-block:: yaml + + panos/takelock: + panos.add_config_lock + + ''' + ret = _default_ret(name) + + ret.update({ + 'changes': __salt__['panos.add_config_lock'](), + 'result': True + }) + + return ret + + +def clone_config(name, xpath=None, newname=None, commit=False): + ''' + Clone a specific XPATH and set it to a new name. + + name: The name of the module function to execute. 
+ + xpath(str): The XPATH of the configuration API tree to clone. + + newname(str): The new name of the XPATH clone. + + commit(bool): If true the firewall will commit the changes, if false do not commit changes. + + SLS Example: + + .. code-block:: yaml + + panos/clonerule: + panos.clone_config: + - xpath: /config/devices/entry/vsys/entry[@name='vsys1']/rulebase/security/rules&from=/config/devices/ + entry/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='rule1'] + - value: rule2 + - commit: True + + ''' + ret = _default_ret(name) + + if not xpath: + return ret + + if not newname: + return ret + + query = {'type': 'config', + 'action': 'clone', + 'xpath': xpath, + 'newname': newname} + + response = __proxy__['panos.call'](query) + + ret.update({ + 'changes': response, + 'result': True + }) + + if commit is True: + ret.update({ + 'commit': __salt__['panos.commit'](), + 'result': True + }) + + return ret + + +def commit(name): + ''' + Commits the candidate configuration to the running configuration. + + name: The name of the module function to execute. + + SLS Example: + + .. code-block:: yaml + + panos/commit: + panos.commit + + ''' + ret = _default_ret(name) + + ret.update({ + 'commit': __salt__['panos.commit'](), + 'result': True + }) + + return ret + + +def delete_config(name, xpath=None, commit=False): + ''' + Deletes a Palo Alto XPATH to a specific value. + + Use the xpath parameter to specify the location of the object to be deleted. + + name: The name of the module function to execute. + + xpath(str): The XPATH of the configuration API tree to control. + + commit(bool): If true the firewall will commit the changes, if false do not commit changes. + + SLS Example: + + .. code-block:: yaml + + panos/deletegroup: + panos.delete_config: + - xpath: /config/devices/entry/vsys/entry[@name='vsys1']/address-group/entry[@name='test'] + - commit: True + + ''' + ret = _default_ret(name) + + if not xpath: + return ret + + query = {'type': 'config', + 'action': 'delete', + 'xpath': xpath} + + response = __proxy__['panos.call'](query) + + ret.update({ + 'changes': response, + 'result': True + }) + + if commit is True: + ret.update({ + 'commit': __salt__['panos.commit'](), + 'result': True + }) + + return ret + + +def download_software(name, version=None, synch=False, check=False): + ''' + Ensures that a software version is downloaded. + + name: The name of the module function to execute. + + version(str): The software version to check. If this version is not already downloaded, it will attempt to download + the file from Palo Alto. + + synch(bool): If true, after downloading the file it will be synched to its peer. + + check(bool): If true, the PANOS device will first attempt to pull the most recent software inventory list from Palo + Alto. + + SLS Example: + + .. 
code-block:: yaml + + panos/version8.0.0: + panos.download_software: + - version: 8.0.0 + - synch: False + - check: True + + ''' + ret = _default_ret(name) + + if check is True: + __salt__['panos.check_software']() + + versions = __salt__['panos.get_software_info']() + + if 'sw-updates' not in versions \ + or 'versions' not in versions['sw-updates'] \ + or 'entry' not in versions['sw-updates']['versions']: + ret.update({ + 'comment': 'Software version is not found in the local software list.', + 'result': False + }) + return ret + + for entry in versions['sw-updates']['versions']['entry']: + if entry['version'] == version and entry['downloaded'] == "yes": + ret.update({ + 'comment': 'Software version is already downloaded.', + 'result': True + }) + return ret + + ret.update({ + 'changes': __salt__['panos.download_software_version'](version=version, synch=synch) + }) + + versions = __salt__['panos.get_software_info']() + + if 'sw-updates' not in versions \ + or 'versions' not in versions['sw-updates'] \ + or 'entry' not in versions['sw-updates']['versions']: + ret.update({ + 'result': False + }) + return ret + + for entry in versions['sw-updates']['versions']['entry']: + if entry['version'] == version and entry['downloaded'] == "yes": + ret.update({ + 'result': True + }) + return ret + + return ret + + +def edit_config(name, xpath=None, value=None, commit=False): + ''' + Edits a Palo Alto XPATH to a specific value. This will always overwrite the existing value, even if it is not + changed. + + You can replace an existing object hierarchy at a specified location in the configuration with a new value. Use + the xpath parameter to specify the location of the object, including the node to be replaced. + + name: The name of the module function to execute. + + xpath(str): The XPATH of the configuration API tree to control. + + value(str): The XML value to edit. This must be a child to the XPATH. + + commit(bool): If true the firewall will commit the changes, if false do not commit changes. + + SLS Example: + + .. code-block:: yaml + + panos/addressgroup: + panos.edit_config: + - xpath: /config/devices/entry/vsys/entry[@name='vsys1']/address-group/entry[@name='test'] + - value: abcxyz + - commit: True + + ''' + ret = _default_ret(name) + + if not xpath: + return ret + + if not value: + return ret + + query = {'type': 'config', + 'action': 'edit', + 'xpath': xpath, + 'element': value} + + response = __proxy__['panos.call'](query) + + ret.update({ + 'changes': response, + 'result': True + }) + + if commit is True: + ret.update({ + 'commit': __salt__['panos.commit'](), + 'result': True + }) + + return ret + + +def move_config(name, xpath=None, where=None, dst=None, commit=False): + ''' + Moves an XPATH value to a new location. + + Use the xpath parameter to specify the location of the object to be moved, the where parameter to + specify the type of move, and the dst parameter to specify the destination path. + + name: The name of the module function to execute. + + xpath(str): The XPATH of the configuration API tree to move. + + where(str): The type of move to execute. Valid options are after, before, top, bottom. The after and before + options will require the dst option to specify the destination of the action. The top action will move the + XPATH to the top of its structure. The bottom action will move the XPATH to the bottom of its structure. + + dst(str): Optional. Specifies the destination to utilize for a move action. This is ignored for the top + or bottom action.
+ + commit(bool): If true the firewall will commit the changes, if false do not commit changes. + + SLS Example: + + .. code-block:: yaml + + panos/moveruletop: + panos.move_config: + - xpath: /config/devices/entry/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='rule1'] + - where: top + - commit: True + + panos/moveruleafter: + panos.move_config: + - xpath: /config/devices/entry/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='rule1'] + - where: after + - dst: rule2 + - commit: True + + ''' + ret = _default_ret(name) + + if not xpath: + return ret + + if not where: + return ret + + if where == 'after': + query = {'type': 'config', + 'action': 'move', + 'xpath': xpath, + 'where': 'after', + 'dst': dst} + elif where == 'before': + query = {'type': 'config', + 'action': 'move', + 'xpath': xpath, + 'where': 'before', + 'dst': dst} + elif where == 'top': + query = {'type': 'config', + 'action': 'move', + 'xpath': xpath, + 'where': 'top'} + elif where == 'bottom': + query = {'type': 'config', + 'action': 'move', + 'xpath': xpath, + 'where': 'bottom'} + + response = __proxy__['panos.call'](query) + + ret.update({ + 'changes': response, + 'result': True + }) + + if commit is True: + ret.update({ + 'commit': __salt__['panos.commit'](), + 'result': True + }) + + return ret + + +def remove_config_lock(name): + ''' + Release config lock previously held. + + name: The name of the module function to execute. + + SLS Example: + + .. code-block:: yaml + + panos/takelock: + panos.remove_config_lock + + ''' + ret = _default_ret(name) + + ret.update({ + 'changes': __salt__['panos.remove_config_lock'](), + 'result': True + }) + + return ret + + +def rename_config(name, xpath=None, newname=None, commit=False): + ''' + Rename a Palo Alto XPATH to a specific value. This will always rename the value even if a change is not needed. + + name: The name of the module function to execute. + + xpath(str): The XPATH of the configuration API tree to control. + + newname(str): The new name of the XPATH value. + + commit(bool): If true the firewall will commit the changes, if false do not commit changes. + + SLS Example: + + .. code-block:: yaml + + panos/renamegroup: + panos.rename_config: + - xpath: /config/devices/entry/vsys/entry[@name='vsys1']/address/entry[@name='old_address'] + - value: new_address + - commit: True + + ''' + ret = _default_ret(name) + + if not xpath: + return ret + + if not newname: + return ret + + query = {'type': 'config', + 'action': 'rename', + 'xpath': xpath, + 'newname': newname} + + response = __proxy__['panos.call'](query) + + ret.update({ + 'changes': response, + 'result': True + }) + + if commit is True: + ret.update({ + 'commit': __salt__['panos.commit'](), + 'result': True + }) + + return ret + + +def set_config(name, xpath=None, value=None, commit=False): + ''' + Sets a Palo Alto XPATH to a specific value. This will always overwrite the existing value, even if it is not + changed. + + You can add or create a new object at a specified location in the configuration hierarchy. Use the xpath parameter + to specify the location of the object in the configuration + + name: The name of the module function to execute. + + xpath(str): The XPATH of the configuration API tree to control. + + value(str): The XML value to set. This must be a child to the XPATH. + + commit(bool): If true the firewall will commit the changes, if false do not commit changes. + + SLS Example: + + .. 
code-block:: yaml + + panos/hostname: + panos.set_config: + - xpath: /config/devices/entry[@name='localhost.localdomain']/deviceconfig/system + - value: foobar + - commit: True + + ''' + ret = _default_ret(name) + + if not xpath: + return ret + + if not value: + return ret + + query = {'type': 'config', + 'action': 'set', + 'xpath': xpath, + 'element': value} + + response = __proxy__['panos.call'](query) + + ret.update({ + 'changes': response, + 'result': True + }) + + if commit is True: + ret.update({ + 'commit': __salt__['panos.commit'](), + 'result': True + }) + + return ret diff --git a/salt/states/pcs.py b/salt/states/pcs.py index 7815caf21c..9d0815ade2 100644 --- a/salt/states/pcs.py +++ b/salt/states/pcs.py @@ -164,14 +164,16 @@ Create a cluster from scratch: ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import logging import os -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.files -import salt.ext.six as six +import salt.utils.path + +# Import 3rd-party libs +from salt.ext import six log = logging.getLogger(__name__) @@ -180,7 +182,7 @@ def __virtual__(): ''' Only load if pcs package is installed ''' - if salt.utils.which('pcs'): + if salt.utils.path.which('pcs'): return 'pcs' return False diff --git a/salt/states/pip_state.py b/salt/states/pip_state.py index 1fc3817fa6..8460bc8293 100644 --- a/salt/states/pip_state.py +++ b/salt/states/pip_state.py @@ -25,12 +25,12 @@ import re import logging # Import salt libs -import salt.utils +import salt.utils.versions from salt.version import SaltStackVersion as _SaltStackVersion from salt.exceptions import CommandExecutionError, CommandNotFoundError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error try: import pip @@ -94,7 +94,7 @@ def _fulfills_version_spec(version, version_spec): for oper, spec in version_spec: if oper is None: continue - if not salt.utils.compare_versions(ver1=version, oper=oper, ver2=spec): + if not salt.utils.versions.compare(ver1=version, oper=oper, ver2=spec): return False return True @@ -527,7 +527,7 @@ def installed(name, # prepro = lambda pkg: pkg if type(pkg) == str else \ # ' '.join((pkg.items()[0][0], pkg.items()[0][1].replace(',', ';'))) # pkgs = ','.join([prepro(pkg) for pkg in pkgs]) - prepro = lambda pkg: pkg if isinstance(pkg, str) else \ + prepro = lambda pkg: pkg if isinstance(pkg, six.string_types) else \ ' '.join((six.iteritems(pkg)[0][0], six.iteritems(pkg)[0][1])) pkgs = [prepro(pkg) for pkg in pkgs] @@ -538,7 +538,7 @@ def installed(name, if use_wheel: min_version = '1.4' cur_version = __salt__['pip.version'](bin_env) - if not salt.utils.compare_versions(ver1=cur_version, oper='>=', + if not salt.utils.versions.compare(ver1=cur_version, oper='>=', ver2=min_version): ret['result'] = False ret['comment'] = ('The \'use_wheel\' option is only supported in ' @@ -550,7 +550,7 @@ def installed(name, if no_use_wheel: min_version = '1.4' cur_version = __salt__['pip.version'](bin_env) - if not salt.utils.compare_versions(ver1=cur_version, oper='>=', + if not salt.utils.versions.compare(ver1=cur_version, oper='>=', ver2=min_version): ret['result'] = False ret['comment'] = ('The \'no_use_wheel\' option is only supported in ' @@ -568,7 +568,7 @@ def installed(name, repo, version=_SaltStackVersion.from_name('Lithium').formatted_version )) - salt.utils.warn_until('Lithium', msg) + salt.utils.versions.warn_until('Lithium', msg) ret.setdefault('warnings', []).append(msg) name = repo diff 
--git a/salt/states/pkg.py b/salt/states/pkg.py index 7526f27037..103907eab8 100644 --- a/salt/states/pkg.py +++ b/salt/states/pkg.py @@ -80,9 +80,11 @@ import logging import os import re -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils # Can be removed once gen_state_tag is moved import salt.utils.pkg +import salt.utils.platform +import salt.utils.versions from salt.output import nested from salt.utils import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict @@ -92,12 +94,12 @@ from salt.exceptions import ( from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) -if salt.utils.is_windows(): +if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError @@ -105,6 +107,7 @@ if salt.utils.is_windows(): import datetime import errno import time + from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info @@ -172,19 +175,16 @@ def _fulfills_version_spec(versions, oper, desired_version, ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper - if salt.utils.is_freebsd(): + if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: - if oper == '==': - if fnmatch.fnmatch(ver, desired_version): - return True - - elif salt.utils.compare_versions(ver1=ver, - oper=oper, - ver2=desired_version, - cmp_func=cmp_func, - ignore_epoch=ignore_epoch): + if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ + or salt.utils.versions.compare(ver1=ver, + oper=oper, + ver2=desired_version, + cmp_func=cmp_func, + ignore_epoch=ignore_epoch): return True return False @@ -504,7 +504,7 @@ def _find_install_targets(name=None, if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. kwargs['refresh'] = refresh @@ -517,7 +517,7 @@ def _find_install_targets(name=None, 'result': False, 'comment': exc.strerror} - if salt.utils.is_windows() and kwargs.pop('refresh', False): + if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False @@ -541,7 +541,7 @@ def _find_install_targets(name=None, else 'sources')} to_unpurge = _find_unpurge_targets(desired) else: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, @@ -1257,8 +1257,11 @@ def installed( ``NOTE:`` For :mod:`apt `, :mod:`ebuild `, - :mod:`pacman `, :mod:`yumpkg `, - and :mod:`zypper `, version numbers can be specified + :mod:`pacman `, + :mod:`winrepo `, + :mod:`yumpkg `, and + :mod:`zypper `, + version numbers can be specified in the ``pkgs`` argument. For example: .. code-block:: yaml @@ -1434,6 +1437,15 @@ def installed( 'result': True, 'comment': 'No packages to install provided'} + # If just a name (and optionally a version) is passed, just pack them into + # the pkgs argument. 
+ if name and not any((pkgs, sources)): + if version: + pkgs = [{name: version}] + version = None + else: + pkgs = [name] + kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if not isinstance(pkg_verify, list): @@ -1597,10 +1609,10 @@ def installed( failed_hold = None if targets or to_reinstall: force = False - if salt.utils.is_freebsd(): + if salt.utils.platform.is_freebsd(): force = True # Downgrades need to be forced. try: - pkg_ret = __salt__['pkg.install'](name, + pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, force=force, diff --git a/salt/states/ports.py b/salt/states/ports.py index 252c2410a4..1dbf2ed2c1 100644 --- a/salt/states/ports.py +++ b/salt/states/ports.py @@ -28,7 +28,7 @@ from salt.modules.freebsdports import _normalize, _options_file_exists # Needed by imported function _options_file_exists import os # pylint: disable=W0611 -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/states/postgres_schema.py b/salt/states/postgres_schema.py index 0f15756339..4acc52c46c 100644 --- a/salt/states/postgres_schema.py +++ b/salt/states/postgres_schema.py @@ -29,7 +29,7 @@ def __virtual__(): def present(dbname, name, - owner=None, + owner=None, user=None, db_user=None, db_password=None, db_host=None, db_port=None): ''' @@ -41,8 +41,8 @@ def present(dbname, name, name The name of the schema to manage - owner - The database user that will be the owner of the schema + user + system user all operations should be performed on behalf of db_user database username if different from config or default @@ -67,7 +67,8 @@ def present(dbname, name, 'db_user': db_user, 'db_password': db_password, 'db_host': db_host, - 'db_port': db_port + 'db_port': db_port, + 'user': user } # check if schema exists @@ -105,7 +106,7 @@ def present(dbname, name, return ret -def absent(dbname, name, +def absent(dbname, name, user=None, db_user=None, db_password=None, db_host=None, db_port=None): ''' @@ -117,6 +118,9 @@ def absent(dbname, name, name The name of the schema to remove + user + system user all operations should be performed on behalf of + db_user database username if different from config or default @@ -139,7 +143,8 @@ def absent(dbname, name, 'db_user': db_user, 'db_password': db_password, 'db_host': db_host, - 'db_port': db_port + 'db_port': db_port, + 'user': user } # check if schema exists and remove it diff --git a/salt/states/postgres_tablespace.py b/salt/states/postgres_tablespace.py index 661be6812f..634814e47a 100644 --- a/salt/states/postgres_tablespace.py +++ b/salt/states/postgres_tablespace.py @@ -20,7 +20,7 @@ A module used to create and manage PostgreSQL tablespaces. 
from __future__ import absolute_import # Import salt libs -from salt.utils import dictupdate +import salt.utils.dictupdate as dictupdate # Import 3rd-party libs from salt.ext.six import iteritems diff --git a/salt/states/postgres_user.py b/salt/states/postgres_user.py index d9f686871c..aa6cee08d4 100644 --- a/salt/states/postgres_user.py +++ b/salt/states/postgres_user.py @@ -19,7 +19,7 @@ import logging # Salt imports from salt.modules import postgres -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/states/proxy.py b/salt/states/proxy.py index f284d4e5ee..6c128c290e 100644 --- a/salt/states/proxy.py +++ b/salt/states/proxy.py @@ -14,13 +14,12 @@ Setup proxy settings on minions - localhost - 127.0.0.1 ''' - -# Import python libs +# Import Python libs from __future__ import absolute_import import logging -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = 'proxy' @@ -30,7 +29,7 @@ def __virtual__(): ''' Only work on Mac OS and Windows ''' - if salt.utils.is_darwin() or salt.utils.is_windows(): + if salt.utils.platform.is_darwin() or salt.utils.platform.is_windows(): return True return False diff --git a/salt/states/rabbitmq_cluster.py b/salt/states/rabbitmq_cluster.py index e8d0ed3384..22316e99e0 100644 --- a/salt/states/rabbitmq_cluster.py +++ b/salt/states/rabbitmq_cluster.py @@ -19,6 +19,7 @@ import logging # Import salt libs import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -27,7 +28,7 @@ def __virtual__(): ''' Only load if RabbitMQ is installed. ''' - return salt.utils.which('rabbitmqctl') is not None + return salt.utils.path.which('rabbitmqctl') is not None def joined(name, host, user='rabbit', ram_node=None, runas='root'): diff --git a/salt/states/rabbitmq_policy.py b/salt/states/rabbitmq_policy.py index 37c4c8ff77..c438161a31 100644 --- a/salt/states/rabbitmq_policy.py +++ b/salt/states/rabbitmq_policy.py @@ -21,7 +21,7 @@ from __future__ import absolute_import # Import python libs import logging -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -30,7 +30,7 @@ def __virtual__(): ''' Only load if RabbitMQ is installed. ''' - return salt.utils.which('rabbitmqctl') is not None + return salt.utils.path.which('rabbitmqctl') is not None def present(name, diff --git a/salt/states/rabbitmq_user.py b/salt/states/rabbitmq_user.py index a990b5a2d2..b431a229b3 100644 --- a/salt/states/rabbitmq_user.py +++ b/salt/states/rabbitmq_user.py @@ -27,8 +27,8 @@ from __future__ import absolute_import import logging # Import salt libs -import salt.utils -import salt.ext.six as six +import salt.utils.path +from salt.ext import six from salt.exceptions import CommandExecutionError log = logging.getLogger(__name__) @@ -38,7 +38,7 @@ def __virtual__(): ''' Only load if RabbitMQ is installed. ''' - return salt.utils.which('rabbitmqctl') is not None + return salt.utils.path.which('rabbitmqctl') is not None def _check_perms_changes(name, newperms, runas=None, existing=None): @@ -175,7 +175,7 @@ def present(name, if tags is not None: current_tags = _get_current_tags(name, runas=runas) - if isinstance(tags, str): + if isinstance(tags, six.string_types): tags = tags.split() # Diff the tags sets. 
Symmetric difference operator ^ will give us # any element in one set, but not both diff --git a/salt/states/rabbitmq_vhost.py b/salt/states/rabbitmq_vhost.py index cb8332df3b..c35c7810f5 100644 --- a/salt/states/rabbitmq_vhost.py +++ b/salt/states/rabbitmq_vhost.py @@ -20,7 +20,7 @@ from __future__ import absolute_import import logging # Import salt libs -import salt.utils +import salt.utils.path log = logging.getLogger(__name__) @@ -29,7 +29,7 @@ def __virtual__(): ''' Only load if RabbitMQ is installed. ''' - return salt.utils.which('rabbitmqctl') is not None + return salt.utils.path.which('rabbitmqctl') is not None def present(name): diff --git a/salt/states/rbenv.py b/salt/states/rbenv.py index f4889be8a2..50a66e1ef4 100644 --- a/salt/states/rbenv.py +++ b/salt/states/rbenv.py @@ -77,7 +77,7 @@ def _ruby_installed(ret, ruby, user=None): for version in __salt__['rbenv.versions'](user): if version == ruby: ret['result'] = True - ret['comment'] = 'Requested ruby exists.' + ret['comment'] = 'Requested ruby exists' ret['default'] = default == ruby break @@ -97,7 +97,7 @@ def _check_and_install_ruby(ret, ruby, default=False, user=None): ret['default'] = default else: ret['result'] = False - ret['comment'] = 'Could not install ruby.' + ret['comment'] = 'Failed to install ruby' return ret if default: @@ -131,7 +131,11 @@ def installed(name, default=False, user=None): name = re.sub(r'^ruby-', '', name) if __opts__['test']: - ret['comment'] = 'Ruby {0} is set to be installed'.format(name) + ret = _ruby_installed(ret, name, user=user) + if not ret['result']: + ret['comment'] = 'Ruby {0} is set to be installed'.format(name) + else: + ret['comment'] = 'Ruby {0} is already installed'.format(name) return ret rbenv_installed_ret = _check_and_install_rbenv(rbenv_installed_ret, user) @@ -188,16 +192,22 @@ def absent(name, user=None): if name.startswith('ruby-'): name = re.sub(r'^ruby-', '', name) - if __opts__['test']: - ret['comment'] = 'Ruby {0} is set to be uninstalled'.format(name) - return ret - ret = _check_rbenv(ret, user) if ret['result'] is False: ret['result'] = True ret['comment'] = 'Rbenv not installed, {0} not either'.format(name) return ret else: + if __opts__['test']: + ret = _ruby_installed(ret, name, user=user) + if ret['result']: + ret['result'] = None + ret['comment'] = 'Ruby {0} is set to be uninstalled'.format(name) + else: + ret['result'] = True + ret['comment'] = 'Ruby {0} is already uninstalled'.format(name) + return ret + return _check_and_uninstall_ruby(ret, name, user=user) @@ -205,7 +215,6 @@ def _check_and_install_rbenv(ret, user=None): ''' Verify that rbenv is installed, install if unavailable ''' - ret = _check_rbenv(ret, user) if ret['result'] is False: if __salt__['rbenv.install'](user): @@ -216,7 +225,7 @@ def _check_and_install_rbenv(ret, user=None): ret['comment'] = 'Rbenv failed to install' else: ret['result'] = True - ret['comment'] = 'Rbenv already installed' + ret['comment'] = 'Rbenv is already installed' return ret @@ -237,7 +246,13 @@ def install_rbenv(name, user=None): ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} if __opts__['test']: - ret['comment'] = 'Rbenv is set to be installed' + ret = _check_rbenv(ret, user=user) + if ret['result'] is False: + ret['result'] = None + ret['comment'] = 'Rbenv is set to be installed' + else: + ret['result'] = True + ret['comment'] = 'Rbenv is already installed' return ret return _check_and_install_rbenv(ret, user) diff --git a/salt/states/rsync.py b/salt/states/rsync.py index eb72a0d8ea..01d8711f2b 
100644 --- a/salt/states/rsync.py +++ b/salt/states/rsync.py @@ -28,9 +28,10 @@ State to synchronize files and directories with rsync. ''' from __future__ import absolute_import -import salt.utils import os +import salt.utils.path + def __virtual__(): ''' @@ -38,7 +39,7 @@ def __virtual__(): :return: ''' - return salt.utils.which('rsync') and 'rsync' or False + return salt.utils.path.which('rsync') and 'rsync' or False def _get_summary(rsync_out): diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index f891b6f56c..fd7a9139d8 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -30,10 +30,10 @@ import time # Import salt libs import salt.syspaths -import salt.utils +import salt.utils # Can be removed once check_state_result is moved import salt.utils.event -import salt.ext.six as six -from salt.ext.six import string_types +import salt.utils.versions +from salt.ext import six log = logging.getLogger(__name__) @@ -227,7 +227,7 @@ def state(name, # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' @@ -309,7 +309,7 @@ def state(name, if fail_minions is None: fail_minions = () - elif isinstance(fail_minions, string_types): + elif isinstance(fail_minions, six.string_types): fail_minions = [minion.strip() for minion in fail_minions.split(',')] elif not isinstance(fail_minions, list): state_ret.setdefault('warnings', []).append( @@ -466,7 +466,7 @@ def function( 'result': True} if kwarg is None: kwarg = {} - if isinstance(arg, str): + if isinstance(arg, six.string_types): func_ret['warnings'] = ['Please specify \'arg\' as a list, not a string. ' 'Modifying in place, but please update SLS file ' 'to remove this warning.'] @@ -477,7 +477,7 @@ def function( # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. 
Support for using ' @@ -527,7 +527,7 @@ def function( if fail_minions is None: fail_minions = () - elif isinstance(fail_minions, string_types): + elif isinstance(fail_minions, six.string_types): fail_minions = [minion.strip() for minion in fail_minions.split(',')] elif not isinstance(fail_minions, list): func_ret.setdefault('warnings', []).append( diff --git a/salt/states/selinux.py b/salt/states/selinux.py index 93e45bfa2a..8187ea8338 100644 --- a/salt/states/selinux.py +++ b/salt/states/selinux.py @@ -171,8 +171,14 @@ def boolean(name, value, persist=False): name, rvalue) return ret - if __salt__['selinux.setsebool'](name, rvalue, persist): + ret['result'] = __salt__['selinux.setsebool'](name, rvalue, persist) + if ret['result']: ret['comment'] = 'Boolean {0} has been set to {1}'.format(name, rvalue) + ret['changes'].update({'State': {'old': bools[name]['State'], + 'new': rvalue}}) + if persist and not default: + ret['changes'].update({'Default': {'old': bools[name]['Default'], + 'new': rvalue}}) return ret ret['comment'] = 'Failed to set the boolean {0} to {1}'.format(name, rvalue) return ret diff --git a/salt/states/serverdensity_device.py b/salt/states/serverdensity_device.py index a2395a2bf9..a2427e2de8 100644 --- a/salt/states/serverdensity_device.py +++ b/salt/states/serverdensity_device.py @@ -53,7 +53,7 @@ import json import logging # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six import json # TODO: diff --git a/salt/states/service.py b/salt/states/service.py index 895d6b4960..0b0850d9a3 100644 --- a/salt/states/service.py +++ b/salt/states/service.py @@ -57,18 +57,18 @@ service, then set the reload value to True: :ref:`Requisites ` documentation. ''' - # Import Python libs from __future__ import absolute_import import time # Import Salt libs import salt.utils +import salt.utils.platform from salt.utils.args import get_function_argspec as _argspec from salt.exceptions import CommandExecutionError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six SYSTEMD_ONLY = ('no_block', 'unmask', 'unmask_runtime') @@ -432,7 +432,7 @@ def running(name, ret['comment'] = 'Service {0} is set to start'.format(name) return ret - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): if enable is True: ret.update(_enable(name, False, result=False, **kwargs)) @@ -900,7 +900,7 @@ def mod_watch(name, try: result = func(name, **func_kwargs) except CommandExecutionError as exc: - ret['result'] = True + ret['result'] = False ret['comment'] = exc.strerror return ret diff --git a/salt/states/solrcloud.py b/salt/states/solrcloud.py index 7e738d2c6f..6fc969af27 100644 --- a/salt/states/solrcloud.py +++ b/salt/states/solrcloud.py @@ -11,7 +11,7 @@ from __future__ import absolute_import import json # Import 3rd party libs -import salt.ext.six as six +from salt.ext import six def alias(name, collections, **kwargs): diff --git a/salt/states/sqlite3.py b/salt/states/sqlite3.py index 8251e04e71..98fe38a6ef 100644 --- a/salt/states/sqlite3.py +++ b/salt/states/sqlite3.py @@ -96,7 +96,7 @@ can be approximated with sqlite3's module functions and module.run: from __future__ import absolute_import # Import Salt libs -import salt.ext.six as six +from salt.ext import six try: import sqlite3 @@ -160,9 +160,13 @@ def row_absent(name, db, table, where_sql, where_args=None): changes['changes']['old'] = rows[0] else: - cursor = conn.execute("DELETE FROM `" + - table + "` WHERE " + where_sql, - where_args) + if where_args is None: + cursor = 
conn.execute("DELETE FROM `" + + table + "` WHERE " + where_sql) + else: + cursor = conn.execute("DELETE FROM `" + + table + "` WHERE " + where_sql, + where_args) conn.commit() if cursor.rowcount == 1: changes['result'] = True @@ -406,7 +410,7 @@ def table_present(name, db, schema, force=False): if len(tables) == 1: sql = None - if isinstance(schema, str): + if isinstance(schema, six.string_types): sql = schema.strip() else: sql = _get_sql_from_schema(name, schema) @@ -437,7 +441,7 @@ def table_present(name, db, schema, force=False): elif len(tables) == 0: # Create the table sql = None - if isinstance(schema, str): + if isinstance(schema, six.string_types): sql = schema else: sql = _get_sql_from_schema(name, schema) diff --git a/salt/states/ssh_auth.py b/salt/states/ssh_auth.py index 4db27e55aa..aa78786033 100644 --- a/salt/states/ssh_auth.py +++ b/salt/states/ssh_auth.py @@ -52,7 +52,7 @@ import re import sys # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def _present_test(user, name, enc, comment, options, source, config, fingerprint_hash_type): diff --git a/salt/states/ssh_known_hosts.py b/salt/states/ssh_known_hosts.py index 0fe24cb813..1b35732110 100644 --- a/salt/states/ssh_known_hosts.py +++ b/salt/states/ssh_known_hosts.py @@ -20,12 +20,12 @@ Manage the information stored in the known_hosts files. ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import os -# Import salt libs +# Import Salt libs +import salt.utils.platform from salt.exceptions import CommandNotFoundError -import salt.utils # Define the state's virtual name __virtualname__ = 'ssh_known_hosts' @@ -35,7 +35,7 @@ def __virtual__(): ''' Does not work on Windows, requires ssh module functions ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return False, 'ssh_known_hosts: Does not support Windows' return __virtualname__ diff --git a/salt/states/stormpath_account.py b/salt/states/stormpath_account.py deleted file mode 100644 index a23f2b057d..0000000000 --- a/salt/states/stormpath_account.py +++ /dev/null @@ -1,193 +0,0 @@ -# -*- coding: utf-8 -*- -''' -Support for Stormpath. - -.. versionadded:: 2015.8.0 -''' - -# Import python libs -from __future__ import absolute_import -import pprint - - -def __virtual__(): - ''' - Only load if the stormpath module is available in __salt__ - ''' - return 'stormpath.create_account' in __salt__ - - -def present(name, **kwargs): - ''' - Ensure that an account is present and properly configured - - name - The email address associated with the Stormpath account - - directory_id - The ID of a directory which the account belongs to. Required. - - password - Required when creating a new account. If specified, it is advisable to - reference the password in another database using an ``sdb://`` URL. - Will NOT update the password if an account already exists. - - givenName - Required when creating a new account. - - surname - Required when creating a new account. - - username - Optional. Must be unique across the owning directory. If not specified, - the username will default to the email field. - - middleName - Optional. - - status - ``enabled`` accounts are able to login to their assigned applications, - ``disabled`` accounts may not login to applications, ``unverified`` - accounts are disabled and have not verified their email address. - - customData. - Optional. Must be specified as a dict. 
- ''' - # Because __opts__ is not available outside of functions - backend = __opts__.get('backend', False) - if not backend: - backend = 'requests' - - if backend == 'requests': - from requests.exceptions import HTTPError - elif backend == 'urrlib2': - from urllib2 import HTTPError - else: - from tornado.httpclient import HTTPError - - ret = {'name': name, - 'changes': {}, - 'result': None, - 'comment': ''} - info = {} - try: - result = __salt__['stormpath.show_account'](email=name, **kwargs) - if len(result['items']) > 0: - info = result['items'][0] - except HTTPError: - pass - needs_update = {} - if info.get('email', False): - for field in kwargs: - if info.get(field, None) != kwargs[field]: - needs_update[field] = kwargs[field] - del needs_update['directory_id'] - if 'password' in needs_update: - del needs_update['password'] - if len(needs_update.keys()) < 1: - ret['result'] = True - ret['comment'] = 'Stormpath account {0} already exists and is correct'.format(name) - return ret - if __opts__['test']: - if len(needs_update.keys()) < 1: - ret['comment'] = 'Stormpath account {0} needs to be created'.format(name) - else: - if 'password' in needs_update: - needs_update['password'] = '**HIDDEN**' - ret['comment'] = ('Stormpath account {0} needs the following ' - 'fields to be updated: '.format(', '.join(needs_update))) - return ret - if len(needs_update.keys()) < 1: - info = __salt__['stormpath.create_account'](email=name, **kwargs) - comps = info['href'].split('/') - account_id = comps[-1] - ret['changes'] = info - ret['result'] = True - kwargs['password'] = '**HIDDEN**' - ret['comment'] = 'Created account ID {0} ({1}): {2}'.format( - account_id, name, pprint.pformat(kwargs)) - return ret - comps = info['href'].split('/') - account_id = comps[-1] - result = __salt__['stormpath.update_account'](account_id, items=needs_update) - if result.get('href', None): - ret['changes'] = needs_update - ret['result'] = True - if 'password' in needs_update: - needs_update['password'] = '**HIDDEN**' - ret['comment'] = 'Set the following fields for account ID {0} ({1}): {2}'.format( - account_id, name, pprint.pformat(needs_update)) - return ret - else: - ret['result'] = False - ret['comment'] = 'Failed to set the following fields for account ID {0} ({1}): {2}'.format( - account_id, name, pprint.pformat(needs_update)) - return ret - - -def absent(name, directory_id=None): - ''' - Ensure that an account associated with the given email address is absent. - Will search all directories for the account, unless a directory_id is - specified. - - name - The email address of the account to delete. - - directory_id - Optional. The ID of the directory that the account is expected to belong - to. If not specified, then a list of directories will be retrieved, and - each will be scanned for the account. Specifying a directory_id will - therefore cut down on the number of requests to Stormpath, and increase - performance of this state. 
- ''' - # Because __opts__ is not available outside of functions - backend = __opts__.get('backend', False) - if not backend: - backend = 'requests' - - if backend == 'requests': - from requests.exceptions import HTTPError - elif backend == 'urrlib2': - from urllib2 import HTTPError - else: - from tornado.httpclient import HTTPError - - ret = {'name': name, - 'changes': {}, - 'result': None, - 'comment': ''} - info = {} - if directory_id is None: - dirs = __salt__['stormpath.list_directories']() - for dir_ in dirs.get('items', []): - try: - comps = dir_.get('href', '').split('/') - directory_id = comps[-1] - info = __salt__['stormpath.show_account'](email=name, directory_id=directory_id) - if len(info.get('items', [])) > 0: - info = info['items'][0] - break - except HTTPError: - pass - else: - info = __salt__['stormpath.show_account'](email=name, directory_id=directory_id) - info = info['items'][0] - if 'items' in info: - ret['result'] = True - ret['comment'] = 'Stormpath account {0} already absent'.format(name) - return ret - if __opts__['test']: - ret['comment'] = 'Stormpath account {0} needs to be deleted'.format(name) - return ret - comps = info['href'].split('/') - account_id = comps[-1] - if __salt__['stormpath.delete_account'](account_id): - ret['changes'] = {'deleted': account_id} - ret['result'] = True - ret['comment'] = 'Stormpath account {0} was deleted'.format(name) - return ret - else: - ret['result'] = False - ret['comment'] = 'Failed to delete Stormpath account {0}'.format(name) - return ret diff --git a/salt/states/supervisord.py b/salt/states/supervisord.py index c886d3f699..c114b864d7 100644 --- a/salt/states/supervisord.py +++ b/salt/states/supervisord.py @@ -16,7 +16,7 @@ from __future__ import absolute_import # Import python libs import logging -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/states/sysrc.py b/salt/states/sysrc.py index 5390b4f3d3..223bf56db3 100644 --- a/salt/states/sysrc.py +++ b/salt/states/sysrc.py @@ -4,7 +4,7 @@ from __future__ import absolute_import # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # define the module's virtual name __virtualname__ = 'sysrc' diff --git a/salt/states/testinframod.py b/salt/states/testinframod.py index 0350f4d385..a31a72c3e0 100644 --- a/salt/states/testinframod.py +++ b/salt/states/testinframod.py @@ -52,8 +52,12 @@ def _to_snake_case(pascal_case): def _generate_functions(): - for module in modules.__all__: - module_name = _to_snake_case(module) + try: + modules_ = [_to_snake_case(module_) for module_ in modules.__all__] + except AttributeError: + modules_ = [module_ for module_ in modules.modules] + + for module_name in modules_: func_name = 'testinfra.{0}'.format(module_name) __all__.append(module_name) log.debug('Generating state for module %s as function %s', diff --git a/salt/states/trafficserver.py b/salt/states/trafficserver.py index 13b15da7b4..8d0ec4354a 100644 --- a/salt/states/trafficserver.py +++ b/salt/states/trafficserver.py @@ -10,7 +10,7 @@ Control Apache Traffic Server from __future__ import absolute_import # Import Salt libs -import salt.utils +import salt.utils.versions def __virtual__(): @@ -260,7 +260,7 @@ def set_var(name, value): - value: cdn.site.domain.tld ''' - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'The \'set_var\' function has been deprecated and will be removed in Salt ' '{version}. Please use \'trafficserver.config\' instead.' 
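A minimal illustration (not part of the patch) of the string check that the sqlite3 and thorium hunks above switch to when they replace isinstance(x, str) with isinstance(x, six.string_types). The helper name and sample input below are hypothetical; only the six.string_types idiom comes from the diff, and the patch itself imports Salt's vendored copy via "from salt.ext import six".

    # six.string_types is (basestring,) on Python 2 and (str,) on Python 3, so a
    # schema supplied as a unicode literal takes the "already SQL" branch on both
    # interpreters, whereas isinstance(schema, str) misses unicode on Python 2 and
    # the schema would wrongly be re-rendered by _get_sql_from_schema().
    import six  # standalone PyPI package; the patch uses the vendored salt.ext.six

    def schema_to_sql(schema):
        '''Hypothetical helper mirroring the branching in table_present().'''
        if isinstance(schema, six.string_types):
            # already a SQL string, pass it through
            return schema.strip()
        # otherwise treat it as a structured schema (e.g. a list of column defs)
        return ', '.join(schema)

    assert schema_to_sql(u'CREATE TABLE t (a INT)') == u'CREATE TABLE t (a INT)'
    assert schema_to_sql(['a INT', 'b TEXT']) == 'a INT, b TEXT'
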
diff --git a/salt/states/user.py b/salt/states/user.py index 8a731cc2a1..027b587802 100644 --- a/salt/states/user.py +++ b/salt/states/user.py @@ -23,14 +23,14 @@ as either absent or present testuser: user.absent ''' - -# Import python libs +# Import Python libs from __future__ import absolute_import import os import logging -# Import salt libs +# Import Salt libs import salt.utils +import salt.utils.platform from salt.utils.locales import sdecode, sdecode_if_string # Import 3rd-party libs @@ -146,7 +146,7 @@ def _changes(name, change['warndays'] = warndays if expire and lshad['expire'] != expire: change['expire'] = expire - elif 'shadow.info' in __salt__ and salt.utils.is_windows(): + elif 'shadow.info' in __salt__ and salt.utils.platform.is_windows(): if expire and expire is not -1 and salt.utils.date_format(lshad['expire']) != salt.utils.date_format(expire): change['expire'] = expire @@ -633,7 +633,7 @@ def present(name, # Setup params specific to Linux and Windows to be passed to the # add.user function - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): params = {'name': name, 'uid': uid, 'gid': gid, @@ -667,8 +667,8 @@ def present(name, # pwd incorrectly reports presence of home ret['changes']['home'] = '' if 'shadow.info' in __salt__ \ - and not salt.utils.is_windows()\ - and not salt.utils.is_darwin(): + and not salt.utils.platform.is_windows() \ + and not salt.utils.platform.is_darwin(): if password and not empty_password: __salt__['shadow.set_password'](name, password) spost = __salt__['shadow.info'](name) @@ -740,7 +740,7 @@ def present(name, ' {1}'.format(name, expire) ret['result'] = False ret['changes']['expire'] = expire - elif salt.utils.is_windows(): + elif salt.utils.platform.is_windows(): if password and not empty_password: if not __salt__['user.setpassword'](name, password): ret['comment'] = 'User {0} created but failed to set' \ @@ -757,7 +757,7 @@ def present(name, ' {1}'.format(name, expire) ret['result'] = False ret['changes']['expiration_date'] = spost['expire'] - elif salt.utils.is_darwin() and password and not empty_password: + elif salt.utils.platform.is_darwin() and password and not empty_password: if not __salt__['shadow.set_password'](name, password): ret['comment'] = 'User {0} created but failed to set' \ ' password to' \ diff --git a/salt/states/virt.py b/salt/states/virt.py index 260b63bf93..c028667470 100644 --- a/salt/states/virt.py +++ b/salt/states/virt.py @@ -13,10 +13,9 @@ for the generation and signing of certificates for systems running libvirt: ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import fnmatch import os -from salt.ext import six try: import libvirt # pylint: disable=import-error @@ -24,11 +23,14 @@ try: except ImportError: HAS_LIBVIRT = False -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.args import salt.utils.files from salt.exceptions import CommandExecutionError +# Import 3rd-party libs +from salt.ext import six + __virtualname__ = 'virt' @@ -228,7 +230,7 @@ def running(name, **kwargs): 'comment': '{0} is running'.format(name) } - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) cpu = kwargs.pop('cpu', False) mem = kwargs.pop('mem', False) image = kwargs.pop('image', False) @@ -241,7 +243,7 @@ def running(name, **kwargs): ret['changes'][name] = 'Domain started' ret['comment'] = 'Domain {0} started'.format(name) except CommandExecutionError: - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs 
= salt.utils.args.clean_kwargs(**kwargs) __salt__['virt.init'](name, cpu=cpu, mem=mem, image=image, **kwargs) ret['changes'][name] = 'Domain defined and started' ret['comment'] = 'Domain {0} defined and started'.format(name) diff --git a/salt/states/virtualenv_mod.py b/salt/states/virtualenv_mod.py index 81ee1707e8..364699051f 100644 --- a/salt/states/virtualenv_mod.py +++ b/salt/states/virtualenv_mod.py @@ -6,17 +6,20 @@ Setup of Python virtualenv sandboxes. ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import logging import os -# Import salt libs +# Import Salt libs import salt.version -import salt.utils - +import salt.utils # Can be removed once alias_function is moved +import salt.utils.platform +import salt.utils.versions from salt.exceptions import CommandExecutionError, CommandNotFoundError +# Import 3rd-party libs from salt.ext import six + log = logging.getLogger(__name__) # Define the module's virtual name @@ -134,7 +137,7 @@ def managed(name, ret['comment'] = 'Virtualenv was not detected on this system' return ret - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): venv_py = os.path.join(name, 'Scripts', 'python.exe') else: venv_py = os.path.join(name, 'bin', 'python') @@ -225,7 +228,7 @@ def managed(name, if use_wheel: min_version = '1.4' cur_version = __salt__['pip.version'](bin_env=name) - if not salt.utils.compare_versions(ver1=cur_version, oper='>=', + if not salt.utils.versions.compare(ver1=cur_version, oper='>=', ver2=min_version): ret['result'] = False ret['comment'] = ('The \'use_wheel\' option is only supported in ' @@ -236,7 +239,7 @@ def managed(name, if no_use_wheel: min_version = '1.4' cur_version = __salt__['pip.version'](bin_env=name) - if not salt.utils.compare_versions(ver1=cur_version, oper='>=', + if not salt.utils.versions.compare(ver1=cur_version, oper='>=', ver2=min_version): ret['result'] = False ret['comment'] = ('The \'no_use_wheel\' option is only supported ' diff --git a/salt/states/win_certutil.py b/salt/states/win_certutil.py index ea65622b37..94f5ba95d5 100644 --- a/salt/states/win_certutil.py +++ b/salt/states/win_certutil.py @@ -11,13 +11,12 @@ Install certificates to the Windows Certificate Manager certutil.add_store: - store: TrustedPublisher ''' - -# Import python libs +# Import Python libs from __future__ import absolute_import import logging -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = "certutil" @@ -27,7 +26,7 @@ def __virtual__(): ''' Only work on Windows ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return __virtualname__ return False diff --git a/salt/states/win_dism.py b/salt/states/win_dism.py index 4b73d4e711..c7575f1b10 100644 --- a/salt/states/win_dism.py +++ b/salt/states/win_dism.py @@ -15,12 +15,13 @@ Install windows features/capabilties with DISM ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import logging import os -# Import salt libs +# Import Salt libs import salt.utils +import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = "dism" @@ -30,7 +31,7 @@ def __virtual__(): ''' Only work on Windows where the DISM module is available ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return False, 'Module only available on Windows' return __virtualname__ diff --git a/salt/states/win_iis.py b/salt/states/win_iis.py index 614236afc1..69d35e5c4a 100644 --- 
a/salt/states/win_iis.py +++ b/salt/states/win_iis.py @@ -495,6 +495,7 @@ def container_setting(name, container, settings=None): processModel.maxProcesses: 1 processModel.userName: TestUser processModel.password: TestPassword + processModel.identityType: SpecificUser Example of usage for the ``Sites`` container: diff --git a/salt/states/win_lgpo.py b/salt/states/win_lgpo.py index 96e6ff729d..fcedaae010 100644 --- a/salt/states/win_lgpo.py +++ b/salt/states/win_lgpo.py @@ -112,7 +112,7 @@ import json import salt.utils.dictdiffer # Import 3rd party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) __virtualname__ = 'lgpo' diff --git a/salt/states/win_license.py b/salt/states/win_license.py index 34d7a7a0db..3f2238c2f0 100644 --- a/salt/states/win_license.py +++ b/salt/states/win_license.py @@ -10,13 +10,12 @@ Install and activate windows licenses XXXXX-XXXXX-XXXXX-XXXXX-XXXXX: license.activate ''' - -# Import python libs +# Import Python libs from __future__ import absolute_import import logging # Import Salt Libs -import salt.utils +import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = 'license' @@ -26,7 +25,7 @@ def __virtual__(): ''' Only work on Windows ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return __virtualname__ return False diff --git a/salt/states/win_network.py b/salt/states/win_network.py index 63710658a7..c68a5d0a00 100644 --- a/salt/states/win_network.py +++ b/salt/states/win_network.py @@ -61,11 +61,12 @@ default gateway using the ``gateway`` parameter: ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import logging -# Import salt libs +# Import Salt libs import salt.utils +import salt.utils.platform import salt.utils.validate.net from salt.ext.six.moves import range from salt.exceptions import CommandExecutionError @@ -84,7 +85,7 @@ def __virtual__(): Confine this module to Windows systems with the required execution module available. ''' - if salt.utils.is_windows() and 'ip.get_interface' in __salt__: + if salt.utils.platform.is_windows() and 'ip.get_interface' in __salt__: return __virtualname__ return False diff --git a/salt/states/win_servermanager.py b/salt/states/win_servermanager.py index 76f2ecdad2..b50a180f35 100644 --- a/salt/states/win_servermanager.py +++ b/salt/states/win_servermanager.py @@ -12,6 +12,7 @@ from __future__ import absolute_import # Import salt modules import salt.utils +import salt.utils.versions def __virtual__(): @@ -113,7 +114,7 @@ def installed(name, - Web-Service ''' if 'force' in kwargs: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Flourine', 'Parameter \'force\' has been detected in the argument list. This' 'parameter is no longer used and has been replaced by \'recurse\'' diff --git a/salt/states/win_smtp_server.py b/salt/states/win_smtp_server.py index a6591e255e..79a2470c76 100644 --- a/salt/states/win_smtp_server.py +++ b/salt/states/win_smtp_server.py @@ -7,10 +7,10 @@ Module for managing IIS SMTP server configuration on Windows servers. from __future__ import absolute_import # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Import salt libs -import salt.utils +import salt.utils.args _DEFAULT_SERVER = 'SmtpSvc/1' @@ -39,7 +39,7 @@ def _normalize_server_settings(**settings): Convert setting values that has been improperly converted to a dict back to a string. 
''' ret = dict() - settings = salt.utils.clean_kwargs(**settings) + settings = salt.utils.args.clean_kwargs(**settings) for setting in settings: if isinstance(settings[setting], dict): diff --git a/salt/states/win_system.py b/salt/states/win_system.py index 5e11dfbe28..59a6d23fcd 100644 --- a/salt/states/win_system.py +++ b/salt/states/win_system.py @@ -16,14 +16,14 @@ description. This is Erik's computer, don't touch!: system.computer_desc: [] ''' - from __future__ import absolute_import -# Import python libs +# Import Python libs import logging -# Import salt libs +# Import Salt libs import salt.utils +import salt.utils.platform log = logging.getLogger(__name__) @@ -35,7 +35,7 @@ def __virtual__(): ''' This only supports Windows ''' - if salt.utils.is_windows() and 'system.get_computer_desc' in __salt__: + if salt.utils.platform.is_windows() and 'system.get_computer_desc' in __salt__: return __virtualname__ return False diff --git a/salt/states/win_update.py b/salt/states/win_update.py index fca29fe252..327a144932 100644 --- a/salt/states/win_update.py +++ b/salt/states/win_update.py @@ -71,7 +71,7 @@ import logging # Import 3rd-party libs # pylint: disable=import-error -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=redefined-builtin try: import win32com.client @@ -81,8 +81,9 @@ except ImportError: HAS_DEPENDENCIES = False # pylint: enable=import-error -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform +import salt.utils.versions log = logging.getLogger(__name__) @@ -91,7 +92,7 @@ def __virtual__(): ''' Only works on Windows systems ''' - if salt.utils.is_windows() and HAS_DEPENDENCIES: + if salt.utils.platform.is_windows() and HAS_DEPENDENCIES: return True return False @@ -465,7 +466,7 @@ def installed(name, categories=None, skips=None, retries=10): deprecation_msg = 'The \'win_update\' module is deprecated, and will be ' \ 'removed in Salt Fluorine. Please use the \'win_wua\' ' \ 'module instead.' - salt.utils.warn_until('Fluorine', deprecation_msg) + salt.utils.versions.warn_until('Fluorine', deprecation_msg) ret.setdefault('warnings', []).append(deprecation_msg) if not categories: categories = [name] @@ -549,7 +550,7 @@ def downloaded(name, categories=None, skips=None, retries=10): deprecation_msg = 'The \'win_update\' module is deprecated, and will be ' \ 'removed in Salt Fluorine. Please use the \'win_wua\' ' \ 'module instead.' 
- salt.utils.warn_until('Fluorine', deprecation_msg) + salt.utils.versions.warn_until('Fluorine', deprecation_msg) ret.setdefault('warnings', []).append(deprecation_msg) if not categories: diff --git a/salt/states/win_wua.py b/salt/states/win_wua.py index 835dea28ce..ab43b65654 100644 --- a/salt/states/win_wua.py +++ b/salt/states/win_wua.py @@ -56,6 +56,7 @@ import logging # Import Salt libs from salt.ext import six import salt.utils +import salt.utils.platform import salt.utils.win_update log = logging.getLogger(__name__) @@ -67,7 +68,7 @@ def __virtual__(): ''' Only valid on Windows machines ''' - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): return False, 'WUA: Only available on Window systems' if not salt.utils.win_update.HAS_PYWIN32: diff --git a/salt/states/x509.py b/salt/states/x509.py index bcd08972b7..58fbabac34 100644 --- a/salt/states/x509.py +++ b/salt/states/x509.py @@ -166,7 +166,7 @@ import salt.exceptions import salt.utils # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six try: from M2Crypto.RSA import RSAError diff --git a/salt/states/zabbix_usergroup.py b/salt/states/zabbix_usergroup.py index a5d68b1546..292a2aafc9 100644 --- a/salt/states/zabbix_usergroup.py +++ b/salt/states/zabbix_usergroup.py @@ -84,7 +84,7 @@ def present(name, **kwargs): for right in kwargs['rights']: for key in right: right[key] = str(right[key]) - if cmp(sorted(kwargs['rights']), sorted(usergroup['rights'])) != 0: + if sorted(kwargs['rights']) != sorted(usergroup['rights']): update_rights = True else: update_rights = True diff --git a/salt/states/zone.py b/salt/states/zone.py index 22900f89c5..02a324111e 100644 --- a/salt/states/zone.py +++ b/salt/states/zone.py @@ -116,6 +116,7 @@ import logging # Import Salt libs import salt.utils +import salt.utils.args import salt.utils.files import salt.utils.atomicfile from salt.modules.zonecfg import _parse_value, _zonecfg_resource_default_selectors @@ -284,7 +285,7 @@ def resource_present(name, resource_type, resource_selector_property, resource_s 'comment': ''} # sanitize input - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) resource_selector_value = _parse_value(resource_selector_value) for k, v in kwargs.items(): kwargs[k] = _parse_value(kwargs[k]) diff --git a/salt/syspaths.py b/salt/syspaths.py index f39380b170..95efc7246b 100644 --- a/salt/syspaths.py +++ b/salt/syspaths.py @@ -22,6 +22,9 @@ from __future__ import absolute_import import sys import os.path +# Import Salt libs +from salt.utils.locales import sdecode + __PLATFORM = sys.platform.lower() @@ -31,129 +34,129 @@ try: import salt._syspaths as __generated_syspaths # pylint: disable=no-name-in-module except ImportError: import types - __generated_syspaths = types.ModuleType('salt._syspaths') - for key in ('ROOT_DIR', 'CONFIG_DIR', 'CACHE_DIR', 'SOCK_DIR', - 'SRV_ROOT_DIR', 'BASE_FILE_ROOTS_DIR', - 'BASE_PILLAR_ROOTS_DIR', 'BASE_THORIUM_ROOTS_DIR', - 'BASE_MASTER_ROOTS_DIR', 'LOGS_DIR', 'PIDFILE_DIR', - 'SPM_FORMULA_PATH', 'SPM_PILLAR_PATH', 'SPM_REACTOR_PATH', - 'SHARE_DIR'): + __generated_syspaths = types.ModuleType('salt._syspaths') # future lint: disable=non-unicode-string + for key in (u'ROOT_DIR', u'CONFIG_DIR', u'CACHE_DIR', u'SOCK_DIR', + u'SRV_ROOT_DIR', u'BASE_FILE_ROOTS_DIR', + u'BASE_PILLAR_ROOTS_DIR', u'BASE_THORIUM_ROOTS_DIR', + u'BASE_MASTER_ROOTS_DIR', u'LOGS_DIR', u'PIDFILE_DIR', + u'SPM_FORMULA_PATH', u'SPM_PILLAR_PATH', u'SPM_REACTOR_PATH', + u'SHARE_DIR'): 
setattr(__generated_syspaths, key, None) # Let's find out the path of this module -if 'SETUP_DIRNAME' in globals(): +if u'SETUP_DIRNAME' in globals(): # This is from the exec() call in Salt's setup.py - __THIS_FILE = os.path.join(SETUP_DIRNAME, 'salt', 'syspaths.py') # pylint: disable=E0602 + __THIS_FILE = os.path.join(SETUP_DIRNAME, u'salt', u'syspaths.py') # pylint: disable=E0602 else: __THIS_FILE = __file__ # These values are always relative to salt's installation directory INSTALL_DIR = os.path.dirname(os.path.realpath(__THIS_FILE)) -CLOUD_DIR = os.path.join(INSTALL_DIR, 'cloud') -BOOTSTRAP = os.path.join(CLOUD_DIR, 'deploy', 'bootstrap-salt.sh') +CLOUD_DIR = os.path.join(INSTALL_DIR, u'cloud') +BOOTSTRAP = os.path.join(CLOUD_DIR, u'deploy', u'bootstrap-salt.sh') ROOT_DIR = __generated_syspaths.ROOT_DIR if ROOT_DIR is None: # The installation time value was not provided, let's define the default - if __PLATFORM.startswith('win'): - ROOT_DIR = r'c:\salt' + if __PLATFORM.startswith(u'win'): + ROOT_DIR = sdecode(r'c:\salt') # future lint: disable=non-unicode-string else: - ROOT_DIR = '/' + ROOT_DIR = u'/' CONFIG_DIR = __generated_syspaths.CONFIG_DIR if CONFIG_DIR is None: - if __PLATFORM.startswith('win'): - CONFIG_DIR = os.path.join(ROOT_DIR, 'conf') - elif 'freebsd' in __PLATFORM: - CONFIG_DIR = os.path.join(ROOT_DIR, 'usr', 'local', 'etc', 'salt') - elif 'netbsd' in __PLATFORM: - CONFIG_DIR = os.path.join(ROOT_DIR, 'usr', 'pkg', 'etc', 'salt') - elif 'sunos5' in __PLATFORM: - CONFIG_DIR = os.path.join(ROOT_DIR, 'opt', 'local', 'etc', 'salt') + if __PLATFORM.startswith(u'win'): + CONFIG_DIR = os.path.join(ROOT_DIR, u'conf') + elif u'freebsd' in __PLATFORM: + CONFIG_DIR = os.path.join(ROOT_DIR, u'usr', u'local', u'etc', u'salt') + elif u'netbsd' in __PLATFORM: + CONFIG_DIR = os.path.join(ROOT_DIR, u'usr', u'pkg', u'etc', u'salt') + elif u'sunos5' in __PLATFORM: + CONFIG_DIR = os.path.join(ROOT_DIR, u'opt', u'local', u'etc', u'salt') else: - CONFIG_DIR = os.path.join(ROOT_DIR, 'etc', 'salt') + CONFIG_DIR = os.path.join(ROOT_DIR, u'etc', u'salt') SHARE_DIR = __generated_syspaths.SHARE_DIR if SHARE_DIR is None: - if __PLATFORM.startswith('win'): - SHARE_DIR = os.path.join(ROOT_DIR, 'share') - elif 'freebsd' in __PLATFORM: - SHARE_DIR = os.path.join(ROOT_DIR, 'usr', 'local', 'share', 'salt') - elif 'netbsd' in __PLATFORM: - SHARE_DIR = os.path.join(ROOT_DIR, 'usr', 'share', 'salt') - elif 'sunos5' in __PLATFORM: - SHARE_DIR = os.path.join(ROOT_DIR, 'usr', 'share', 'salt') + if __PLATFORM.startswith(u'win'): + SHARE_DIR = os.path.join(ROOT_DIR, u'share') + elif u'freebsd' in __PLATFORM: + SHARE_DIR = os.path.join(ROOT_DIR, u'usr', u'local', u'share', u'salt') + elif u'netbsd' in __PLATFORM: + SHARE_DIR = os.path.join(ROOT_DIR, u'usr', u'share', u'salt') + elif u'sunos5' in __PLATFORM: + SHARE_DIR = os.path.join(ROOT_DIR, u'usr', u'share', u'salt') else: - SHARE_DIR = os.path.join(ROOT_DIR, 'usr', 'share', 'salt') + SHARE_DIR = os.path.join(ROOT_DIR, u'usr', u'share', u'salt') CACHE_DIR = __generated_syspaths.CACHE_DIR if CACHE_DIR is None: - CACHE_DIR = os.path.join(ROOT_DIR, 'var', 'cache', 'salt') + CACHE_DIR = os.path.join(ROOT_DIR, u'var', u'cache', u'salt') SOCK_DIR = __generated_syspaths.SOCK_DIR if SOCK_DIR is None: - SOCK_DIR = os.path.join(ROOT_DIR, 'var', 'run', 'salt') + SOCK_DIR = os.path.join(ROOT_DIR, u'var', u'run', u'salt') SRV_ROOT_DIR = __generated_syspaths.SRV_ROOT_DIR if SRV_ROOT_DIR is None: - SRV_ROOT_DIR = os.path.join(ROOT_DIR, 'srv') + SRV_ROOT_DIR = 
os.path.join(ROOT_DIR, u'srv') BASE_FILE_ROOTS_DIR = __generated_syspaths.BASE_FILE_ROOTS_DIR if BASE_FILE_ROOTS_DIR is None: - BASE_FILE_ROOTS_DIR = os.path.join(SRV_ROOT_DIR, 'salt') + BASE_FILE_ROOTS_DIR = os.path.join(SRV_ROOT_DIR, u'salt') BASE_PILLAR_ROOTS_DIR = __generated_syspaths.BASE_PILLAR_ROOTS_DIR if BASE_PILLAR_ROOTS_DIR is None: - BASE_PILLAR_ROOTS_DIR = os.path.join(SRV_ROOT_DIR, 'pillar') + BASE_PILLAR_ROOTS_DIR = os.path.join(SRV_ROOT_DIR, u'pillar') BASE_THORIUM_ROOTS_DIR = __generated_syspaths.BASE_THORIUM_ROOTS_DIR if BASE_THORIUM_ROOTS_DIR is None: - BASE_THORIUM_ROOTS_DIR = os.path.join(SRV_ROOT_DIR, 'thorium') + BASE_THORIUM_ROOTS_DIR = os.path.join(SRV_ROOT_DIR, u'thorium') BASE_MASTER_ROOTS_DIR = __generated_syspaths.BASE_MASTER_ROOTS_DIR if BASE_MASTER_ROOTS_DIR is None: - BASE_MASTER_ROOTS_DIR = os.path.join(SRV_ROOT_DIR, 'salt-master') + BASE_MASTER_ROOTS_DIR = os.path.join(SRV_ROOT_DIR, u'salt-master') LOGS_DIR = __generated_syspaths.LOGS_DIR if LOGS_DIR is None: - LOGS_DIR = os.path.join(ROOT_DIR, 'var', 'log', 'salt') + LOGS_DIR = os.path.join(ROOT_DIR, u'var', u'log', u'salt') PIDFILE_DIR = __generated_syspaths.PIDFILE_DIR if PIDFILE_DIR is None: - PIDFILE_DIR = os.path.join(ROOT_DIR, 'var', 'run') + PIDFILE_DIR = os.path.join(ROOT_DIR, u'var', u'run') SPM_FORMULA_PATH = __generated_syspaths.SPM_FORMULA_PATH if SPM_FORMULA_PATH is None: - SPM_FORMULA_PATH = os.path.join(SRV_ROOT_DIR, 'spm', 'salt') + SPM_FORMULA_PATH = os.path.join(SRV_ROOT_DIR, u'spm', u'salt') SPM_PILLAR_PATH = __generated_syspaths.SPM_PILLAR_PATH if SPM_PILLAR_PATH is None: - SPM_PILLAR_PATH = os.path.join(SRV_ROOT_DIR, 'spm', 'pillar') + SPM_PILLAR_PATH = os.path.join(SRV_ROOT_DIR, u'spm', u'pillar') SPM_REACTOR_PATH = __generated_syspaths.SPM_REACTOR_PATH if SPM_REACTOR_PATH is None: - SPM_REACTOR_PATH = os.path.join(SRV_ROOT_DIR, 'spm', 'reactor') + SPM_REACTOR_PATH = os.path.join(SRV_ROOT_DIR, u'spm', u'reactor') __all__ = [ - 'ROOT_DIR', - 'SHARE_DIR', - 'CONFIG_DIR', - 'CACHE_DIR', - 'SOCK_DIR', - 'SRV_ROOT_DIR', - 'BASE_FILE_ROOTS_DIR', - 'BASE_PILLAR_ROOTS_DIR', - 'BASE_MASTER_ROOTS_DIR', - 'BASE_THORIUM_ROOTS_DIR', - 'LOGS_DIR', - 'PIDFILE_DIR', - 'INSTALL_DIR', - 'CLOUD_DIR', - 'BOOTSTRAP', - 'SPM_FORMULA_PATH', - 'SPM_PILLAR_PATH', - 'SPM_REACTOR_PATH' + u'ROOT_DIR', + u'SHARE_DIR', + u'CONFIG_DIR', + u'CACHE_DIR', + u'SOCK_DIR', + u'SRV_ROOT_DIR', + u'BASE_FILE_ROOTS_DIR', + u'BASE_PILLAR_ROOTS_DIR', + u'BASE_MASTER_ROOTS_DIR', + u'BASE_THORIUM_ROOTS_DIR', + u'LOGS_DIR', + u'PIDFILE_DIR', + u'INSTALL_DIR', + u'CLOUD_DIR', + u'BOOTSTRAP', + u'SPM_FORMULA_PATH', + u'SPM_PILLAR_PATH', + u'SPM_REACTOR_PATH' ] diff --git a/salt/template.py b/salt/template.py index 0c4498fb5f..7a3d00ebfc 100644 --- a/salt/template.py +++ b/salt/template.py @@ -5,17 +5,20 @@ Manage basic template commands from __future__ import absolute_import -# Import python libs +# Import Python libs import time import os import codecs import logging -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.files +import salt.utils.locales import salt.utils.stringio -import salt.ext.six as six +import salt.utils.versions + +# Import 3rd-party libs +from salt.ext import six from salt.ext.six.moves import StringIO log = logging.getLogger(__name__) @@ -24,7 +27,7 @@ log = logging.getLogger(__name__) # FIXME: we should make the default encoding of a .sls file a configurable # option in the config, and default it to 'utf-8'. # -SLS_ENCODING = 'utf-8' # this one has no BOM. 
+SLS_ENCODING = u'utf-8' # this one has no BOM. SLS_ENCODER = codecs.getencoder(SLS_ENCODING) @@ -33,9 +36,9 @@ def compile_template(template, default, blacklist, whitelist, - saltenv='base', - sls='', - input_data='', + saltenv=u'base', + sls=u'', + input_data=u'', **kwargs): ''' Take the path to a template and return the high data structure @@ -45,29 +48,29 @@ def compile_template(template, # if any error occurs, we return an empty dictionary ret = {} - log.debug('compile template: {0}'.format(template)) + log.debug(u'compile template: %s', template) - if 'env' in kwargs: - salt.utils.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' + if u'env' in kwargs: + salt.utils.versions.warn_until( + u'Oxygen', + u'Parameter \'env\' has been detected in the argument list. This ' + u'parameter is no longer used and has been replaced by \'saltenv\' ' + u'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' ) - kwargs.pop('env') + kwargs.pop(u'env') - if template != ':string:': + if template != u':string:': # Template was specified incorrectly if not isinstance(template, six.string_types): - log.error('Template was specified incorrectly: {0}'.format(template)) + log.error(u'Template was specified incorrectly: %s', template) return ret # Template does not exist if not os.path.isfile(template): - log.error('Template does not exist: {0}'.format(template)) + log.error(u'Template does not exist: %s', template) return ret # Template is an empty file if salt.utils.files.is_empty(template): - log.debug('Template is an empty file: {0}'.format(template)) + log.debug(u'Template is an empty file: %s', template) return ret with codecs.open(template, encoding=SLS_ENCODING) as ifile: @@ -75,13 +78,13 @@ def compile_template(template, input_data = ifile.read() if not input_data.strip(): # Template is nothing but whitespace - log.error('Template is nothing but whitespace: {0}'.format(template)) + log.error(u'Template is nothing but whitespace: %s', template) return ret # Get the list of render funcs in the render pipe line. render_pipe = template_shebang(template, renderers, default, blacklist, whitelist, input_data) - windows_newline = '\r\n' in input_data + windows_newline = u'\r\n' in input_data input_data = StringIO(input_data) for render, argline in render_pipe: @@ -90,15 +93,14 @@ def compile_template(template, render_kwargs = dict(renderers=renderers, tmplpath=template) render_kwargs.update(kwargs) if argline: - render_kwargs['argline'] = argline + render_kwargs[u'argline'] = argline start = time.time() ret = render(input_data, saltenv, sls, **render_kwargs) log.profile( - 'Time (in seconds) to render \'{0}\' using \'{1}\' renderer: {2}'.format( - template, - render.__module__.split('.')[-1], - time.time() - start - ) + u'Time (in seconds) to render \'%s\' using \'%s\' renderer: %s', + template, + render.__module__.split(u'.')[-1], + time.time() - start ) if ret is None: # The file is empty or is being written elsewhere @@ -110,10 +112,11 @@ def compile_template(template, # yaml, mako, or another engine which renders to a data # structure) we don't want to log this. 
if salt.utils.stringio.is_readable(ret): - log.debug('Rendered data from file: {0}:\n{1}'.format( + log.debug( + u'Rendered data from file: %s:\n%s', template, - ret.read())) # pylint: disable=no-member - ret.seek(0) # pylint: disable=no-member + salt.utils.locales.sdecode(ret.read())) # pylint: disable=no-member + ret.seek(0) # pylint: disable=no-member # Preserve newlines from original template if windows_newline: @@ -125,8 +128,8 @@ def compile_template(template, contents = ret if isinstance(contents, six.string_types): - if '\r\n' not in contents: - contents = contents.replace('\n', '\r\n') + if u'\r\n' not in contents: + contents = contents.replace(u'\n', u'\r\n') ret = StringIO(contents) if is_stringio else contents else: if is_stringio: @@ -140,7 +143,7 @@ def compile_template_str(template, renderers, default, blacklist, whitelist): derived from the template. ''' fn_ = salt.utils.files.mkstemp() - with salt.utils.files.fopen(fn_, 'wb') as ofile: + with salt.utils.files.fopen(fn_, u'wb') as ofile: ofile.write(SLS_ENCODER(template)[0]) return compile_template(fn_, renderers, default, blacklist, whitelist) @@ -163,16 +166,16 @@ def template_shebang(template, renderers, default, blacklist, whitelist, input_d #!mako|yaml_odict|stateconf ''' - line = '' + line = u'' # Open up the first line of the sls template - if template == ':string:': + if template == u':string:': line = input_data.split()[0] else: - with salt.utils.files.fopen(template, 'r') as ifile: + with salt.utils.files.fopen(template, u'r') as ifile: line = ifile.readline() # Check if it starts with a shebang and not a path - if line.startswith('#!') and not line.startswith('#!/'): + if line.startswith(u'#!') and not line.startswith(u'#!/'): # pull out the shebang data # If the shebang does not contain recognized/not-blacklisted/whitelisted # renderers, do not fall back to the default renderer @@ -186,18 +189,18 @@ def template_shebang(template, renderers, default, blacklist, whitelist, input_d # OLD_STYLE_RENDERERS = {} -for comb in ('yaml_jinja', - 'yaml_mako', - 'yaml_wempy', - 'json_jinja', - 'json_mako', - 'json_wempy', - 'yamlex_jinja', - 'yamlexyamlex_mako', - 'yamlexyamlex_wempy'): +for comb in (u'yaml_jinja', + u'yaml_mako', + u'yaml_wempy', + u'json_jinja', + u'json_mako', + u'json_wempy', + u'yamlex_jinja', + u'yamlexyamlex_mako', + u'yamlexyamlex_wempy'): - fmt, tmpl = comb.split('_') - OLD_STYLE_RENDERERS[comb] = '{0}|{1}'.format(tmpl, fmt) + fmt, tmpl = comb.split(u'_') + OLD_STYLE_RENDERERS[comb] = u'{0}|{1}'.format(tmpl, fmt) def check_render_pipe_str(pipestr, renderers, blacklist, whitelist): @@ -208,24 +211,25 @@ def check_render_pipe_str(pipestr, renderers, blacklist, whitelist): ''' if pipestr is None: return [] - parts = [r.strip() for r in pipestr.split('|')] + parts = [r.strip() for r in pipestr.split(u'|')] # Note: currently, | is not allowed anywhere in the shebang line except # as pipes between renderers. 
results = [] try: if parts[0] == pipestr and pipestr in OLD_STYLE_RENDERERS: - parts = OLD_STYLE_RENDERERS[pipestr].split('|') + parts = OLD_STYLE_RENDERERS[pipestr].split(u'|') for part in parts: - name, argline = (part + ' ').split(' ', 1) + name, argline = (part + u' ').split(u' ', 1) if whitelist and name not in whitelist or \ blacklist and name in blacklist: log.warning( - 'The renderer "{0}" is disallowed by configuration and ' - 'will be skipped.'.format(name)) + u'The renderer "%s" is disallowed by configuration and ' + u'will be skipped.', name + ) continue results.append((renderers[name], argline.strip())) return results except KeyError: - log.error('The renderer "{0}" is not available'.format(pipestr)) + log.error(u'The renderer "%s" is not available', pipestr) return [] diff --git a/salt/textformat.py b/salt/textformat.py index b50a9f9c98..828b628245 100644 --- a/salt/textformat.py +++ b/salt/textformat.py @@ -3,98 +3,102 @@ ANSI escape code utilities, see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-048.pdf ''' +from __future__ import absolute_import -graph_prefix = '\x1b[' -graph_suffix = 'm' +# Import 3rd-party libs +from salt.ext import six + +graph_prefix = u'\x1b[' +graph_suffix = u'm' codes = { - 'reset': '0', + u'reset': u'0', - 'bold': '1', - 'faint': '2', - 'italic': '3', - 'underline': '4', - 'blink': '5', - 'slow_blink': '5', - 'fast_blink': '6', - 'inverse': '7', - 'conceal': '8', - 'strike': '9', + u'bold': u'1', + u'faint': u'2', + u'italic': u'3', + u'underline': u'4', + u'blink': u'5', + u'slow_blink': u'5', + u'fast_blink': u'6', + u'inverse': u'7', + u'conceal': u'8', + u'strike': u'9', - 'primary_font': '10', - 'reset_font': '10', - 'font_0': '10', - 'font_1': '11', - 'font_2': '12', - 'font_3': '13', - 'font_4': '14', - 'font_5': '15', - 'font_6': '16', - 'font_7': '17', - 'font_8': '18', - 'font_9': '19', - 'fraktur': '20', + u'primary_font': u'10', + u'reset_font': u'10', + u'font_0': u'10', + u'font_1': u'11', + u'font_2': u'12', + u'font_3': u'13', + u'font_4': u'14', + u'font_5': u'15', + u'font_6': u'16', + u'font_7': u'17', + u'font_8': u'18', + u'font_9': u'19', + u'fraktur': u'20', - 'double_underline': '21', - 'end_bold': '21', - 'normal_intensity': '22', - 'end_italic': '23', - 'end_fraktur': '23', - 'end_underline': '24', # single or double - 'end_blink': '25', - 'end_inverse': '27', - 'end_conceal': '28', - 'end_strike': '29', + u'double_underline': u'21', + u'end_bold': u'21', + u'normal_intensity': u'22', + u'end_italic': u'23', + u'end_fraktur': u'23', + u'end_underline': u'24', # single or double + u'end_blink': u'25', + u'end_inverse': u'27', + u'end_conceal': u'28', + u'end_strike': u'29', - 'black': '30', - 'red': '31', - 'green': '32', - 'yellow': '33', - 'blue': '34', - 'magenta': '35', - 'cyan': '36', - 'white': '37', - 'extended': '38', - 'default': '39', + u'black': u'30', + u'red': u'31', + u'green': u'32', + u'yellow': u'33', + u'blue': u'34', + u'magenta': u'35', + u'cyan': u'36', + u'white': u'37', + u'extended': u'38', + u'default': u'39', - 'fg_black': '30', - 'fg_red': '31', - 'fg_green': '32', - 'fg_yellow': '33', - 'fg_blue': '34', - 'fg_magenta': '35', - 'fg_cyan': '36', - 'fg_white': '37', - 'fg_extended': '38', - 'fg_default': '39', + u'fg_black': u'30', + u'fg_red': u'31', + u'fg_green': u'32', + u'fg_yellow': u'33', + u'fg_blue': u'34', + u'fg_magenta': u'35', + u'fg_cyan': u'36', + u'fg_white': u'37', + u'fg_extended': u'38', + u'fg_default': u'39', - 'bg_black': '40', - 'bg_red': '41', - 
'bg_green': '42', - 'bg_yellow': '44', - 'bg_blue': '44', - 'bg_magenta': '45', - 'bg_cyan': '46', - 'bg_white': '47', - 'bg_extended': '48', - 'bg_default': '49', + u'bg_black': u'40', + u'bg_red': u'41', + u'bg_green': u'42', + u'bg_yellow': u'44', + u'bg_blue': u'44', + u'bg_magenta': u'45', + u'bg_cyan': u'46', + u'bg_white': u'47', + u'bg_extended': u'48', + u'bg_default': u'49', - 'frame': '51', - 'encircle': '52', - 'overline': '53', - 'end_frame': '54', - 'end_encircle': '54', - 'end_overline': '55', + u'frame': u'51', + u'encircle': u'52', + u'overline': u'53', + u'end_frame': u'54', + u'end_encircle': u'54', + u'end_overline': u'55', - 'ideogram_underline': '60', - 'right_line': '60', - 'ideogram_double_underline': '61', - 'right_double_line': '61', - 'ideogram_overline': '62', - 'left_line': '62', - 'ideogram_double_overline': '63', - 'left_double_line': '63', - 'ideogram_stress': '64', - 'reset_ideogram': '65' + u'ideogram_underline': u'60', + u'right_line': u'60', + u'ideogram_double_underline': u'61', + u'right_double_line': u'61', + u'ideogram_overline': u'62', + u'left_line': u'62', + u'ideogram_double_overline': u'63', + u'left_double_line': u'63', + u'ideogram_stress': u'64', + u'reset_ideogram': u'65' } @@ -138,10 +142,10 @@ class TextFormat(object): '{0}Can you read this?{1}' ).format(magenta_on_green, TextFormat('reset')) ''' - self.codes = [codes[attr.lower()] for attr in attrs if isinstance(attr, str)] + self.codes = [codes[attr.lower()] for attr in attrs if isinstance(attr, six.string_types)] - if kwargs.get('reset', True): - self.codes[:0] = [codes['reset']] + if kwargs.get(u'reset', True): + self.codes[:0] = [codes[u'reset']] def qualify_int(i): if isinstance(i, int): @@ -151,20 +155,20 @@ class TextFormat(object): if isinstance(t, (list, tuple)) and len(t) == 3: return qualify_int(t[0]), qualify_int(t[1]), qualify_int(t[2]) - if kwargs.get('x', None) is not None: - self.codes.extend((codes['extended'], '5', qualify_int(kwargs['x']))) - elif kwargs.get('rgb', None) is not None: - self.codes.extend((codes['extended'], '2')) - self.codes.extend(*qualify_triple_int(kwargs['rgb'])) + if kwargs.get(u'x', None) is not None: + self.codes.extend((codes[u'extended'], u'5', qualify_int(kwargs[u'x']))) + elif kwargs.get(u'rgb', None) is not None: + self.codes.extend((codes[u'extended'], u'2')) + self.codes.extend(*qualify_triple_int(kwargs[u'rgb'])) - if kwargs.get('bg_x', None) is not None: - self.codes.extend((codes['extended'], '5', qualify_int(kwargs['bg_x']))) - elif kwargs.get('bg_rgb', None) is not None: - self.codes.extend((codes['extended'], '2')) - self.codes.extend(*qualify_triple_int(kwargs['bg_rgb'])) + if kwargs.get(u'bg_x', None) is not None: + self.codes.extend((codes[u'extended'], u'5', qualify_int(kwargs[u'bg_x']))) + elif kwargs.get(u'bg_rgb', None) is not None: + self.codes.extend((codes[u'extended'], u'2')) + self.codes.extend(*qualify_triple_int(kwargs[u'bg_rgb'])) - self.sequence = '%s%s%s' % (graph_prefix, # pylint: disable=E1321 - ';'.join(self.codes), + self.sequence = u'%s%s%s' % (graph_prefix, # pylint: disable=E1321 + u';'.join(self.codes), graph_suffix) def __call__(self, text, reset=True): @@ -179,8 +183,8 @@ class TextFormat(object): green_blink_text = TextFormat('blink', 'green') 'The answer is: {0}'.format(green_blink_text(42)) ''' - end = TextFormat('reset') if reset else '' - return '%s%s%s' % (self.sequence, text, end) # pylint: disable=E1321 + end = TextFormat(u'reset') if reset else u'' + return u'%s%s%s' % (self.sequence, text, end) # 
pylint: disable=E1321 def __str__(self): return self.sequence diff --git a/salt/thorium/__init__.py b/salt/thorium/__init__.py index 72e854a36b..2837c1c7bd 100644 --- a/salt/thorium/__init__.py +++ b/salt/thorium/__init__.py @@ -23,6 +23,9 @@ import salt.loader import salt.payload from salt.exceptions import SaltRenderError +# Import 3rd-party libs +from salt.ext import six + log = logging.getLogger(__name__) @@ -130,7 +133,7 @@ class ThorState(salt.state.HighState): matches = self.matches_whitelist(matches, whitelist) high, errors = self.render_highstate(matches) if exclude: - if isinstance(exclude, str): + if isinstance(exclude, six.string_types): exclude = exclude.split(',') if '__exclude__' in high: high['__exclude__'].extend(exclude) diff --git a/salt/transport/__init__.py b/salt/transport/__init__.py index 2bbaa4a7a0..95290252e4 100644 --- a/salt/transport/__init__.py +++ b/salt/transport/__init__.py @@ -6,7 +6,7 @@ from __future__ import absolute_import import logging # Import third party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range log = logging.getLogger(__name__) diff --git a/salt/transport/frame.py b/salt/transport/frame.py index a4919f4e54..c26eeb68da 100644 --- a/salt/transport/frame.py +++ b/salt/transport/frame.py @@ -5,7 +5,7 @@ Helper functions for transport components to handle message framing # Import python libs from __future__ import absolute_import import msgpack -import salt.ext.six as six +from salt.ext import six def frame_msg(body, header=None, raw_body=False): # pylint: disable=unused-argument diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 46789c3dd5..488acaa0f2 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -24,7 +24,7 @@ from tornado.iostream import IOStream # Import Salt libs import salt.transport.client import salt.transport.frame -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -250,15 +250,16 @@ class IPCClient(object): # FIXME key = str(socket_path) - if key not in loop_instance_map: + client = loop_instance_map.get(key) + if client is None: log.debug('Initializing new IPCClient for path: {0}'.format(key)) - new_client = object.__new__(cls) + client = object.__new__(cls) # FIXME - new_client.__singleton_init__(io_loop=io_loop, socket_path=socket_path) - loop_instance_map[key] = new_client + client.__singleton_init__(io_loop=io_loop, socket_path=socket_path) + loop_instance_map[key] = client else: log.debug('Re-using IPCClient for {0}'.format(key)) - return loop_instance_map[key] + return client def __singleton_init__(self, socket_path, io_loop=None): ''' diff --git a/salt/transport/mixins/auth.py b/salt/transport/mixins/auth.py index 5388506356..609899725f 100644 --- a/salt/transport/mixins/auth.py +++ b/salt/transport/mixins/auth.py @@ -17,10 +17,13 @@ import salt.master import salt.transport.frame import salt.utils.event import salt.utils.files -import salt.ext.six as six +import salt.utils.minions +import salt.utils.stringutils +import salt.utils.verify from salt.utils.cache import CacheCli # Import Third Party Libs +from salt.ext import six import tornado.gen try: from Cryptodome.Cipher import PKCS1_OAEP @@ -125,7 +128,7 @@ class AESReqServerMixin(object): if six.PY2: pret['key'] = cipher.encrypt(key) else: - pret['key'] = cipher.encrypt(salt.utils.to_bytes(key)) + pret['key'] = cipher.encrypt(salt.utils.stringutils.to_bytes(key)) pret[dictkey] = pcrypt.dumps( ret if ret is not False else {} ) @@ -441,9 +444,13 @@ class 
AESReqServerMixin(object): else: # the master has its own signing-keypair, compute the master.pub's # signature and append that to the auth-reply + + # get the key_pass for the signing key + key_pass = salt.utils.sdb.sdb_get(self.opts['signing_key_pass'], self.opts) + log.debug("Signing master public key before sending") pub_sign = salt.crypt.sign_message(self.master_key.get_sign_paths()[1], - ret['pub_key']) + ret['pub_key'], key_pass) ret.update({'pub_sig': binascii.b2a_base64(pub_sign)}) mcipher = PKCS1_OAEP.new(self.master_key.key) diff --git a/salt/transport/raet.py b/salt/transport/raet.py index 1384d61d5a..4b4930d465 100644 --- a/salt/transport/raet.py +++ b/salt/transport/raet.py @@ -9,7 +9,7 @@ import time # Import Salt Libs import logging -from salt.utils import kinds +import salt.utils.kinds as kinds from salt.transport.client import ReqChannel log = logging.getLogger(__name__) diff --git a/salt/transport/tcp.py b/salt/transport/tcp.py index 8aadfef45e..dcfd8ef685 100644 --- a/salt/transport/tcp.py +++ b/salt/transport/tcp.py @@ -20,9 +20,10 @@ import errno # Import Salt Libs import salt.crypt import salt.utils -import salt.utils.verify -import salt.utils.event import salt.utils.async +import salt.utils.event +import salt.utils.platform +import salt.utils.verify import salt.payload import salt.exceptions import salt.transport.frame @@ -30,7 +31,7 @@ import salt.transport.ipc import salt.transport.client import salt.transport.server import salt.transport.mixins.auth -import salt.ext.six as six +from salt.ext import six from salt.exceptions import SaltReqTimeoutError, SaltClientError from salt.transport import iter_transport_opts @@ -55,7 +56,7 @@ try: except ImportError: from Crypto.Cipher import PKCS1_OAEP -if six.PY3 and salt.utils.is_windows(): +if six.PY3 and salt.utils.platform.is_windows(): USE_LOAD_BALANCER = True else: USE_LOAD_BALANCER = False @@ -221,17 +222,18 @@ class AsyncTCPReqChannel(salt.transport.client.ReqChannel): loop_instance_map = cls.instance_map[io_loop] key = cls.__key(opts, **kwargs) - if key not in loop_instance_map: + obj = loop_instance_map.get(key) + if obj is None: log.debug('Initializing new AsyncTCPReqChannel for {0}'.format(key)) # we need to make a local variable for this, as we are going to store # it in a WeakValueDictionary-- which will remove the item if no one # references it-- this forces a reference while we return to the caller - new_obj = object.__new__(cls) - new_obj.__singleton_init__(opts, **kwargs) - loop_instance_map[key] = new_obj + obj = object.__new__(cls) + obj.__singleton_init__(opts, **kwargs) + loop_instance_map[key] = obj else: log.debug('Re-using AsyncTCPReqChannel for {0}'.format(key)) - return loop_instance_map[key] + return obj @classmethod def __key(cls, opts, **kwargs): @@ -568,7 +570,7 @@ class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.tra process_manager.add_process( LoadBalancerServer, args=(self.opts, self.socket_queue) ) - elif not salt.utils.is_windows(): + elif not salt.utils.platform.is_windows(): self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) _set_tcp_keepalive(self._socket, self.opts) @@ -591,7 +593,7 @@ class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.tra io_loop=self.io_loop, ssl_options=self.opts.get('ssl')) else: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) _set_tcp_keepalive(self._socket, self.opts) @@ -1366,7 +1368,7 @@ class TCPPubServerChannel(salt.transport.server.PubServerChannel): do the actual publishing ''' kwargs = {} - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): kwargs['log_queue'] = ( salt.log.setup.get_multiprocessing_logging_queue() ) diff --git a/salt/transport/zeromq.py b/salt/transport/zeromq.py index 67eb54e304..3d35982c31 100644 --- a/salt/transport/zeromq.py +++ b/salt/transport/zeromq.py @@ -21,6 +21,7 @@ import salt.crypt import salt.utils import salt.utils.verify import salt.utils.event +import salt.utils.stringutils import salt.payload import salt.transport.client import salt.transport.server @@ -46,7 +47,7 @@ import tornado.gen import tornado.concurrent # Import third party libs -import salt.ext.six as six +from salt.ext import six try: from Cryptodome.Cipher import PKCS1_OAEP except ImportError: @@ -80,28 +81,19 @@ class AsyncZeroMQReqChannel(salt.transport.client.ReqChannel): loop_instance_map = cls.instance_map[io_loop] key = cls.__key(opts, **kwargs) - if key not in loop_instance_map: + obj = loop_instance_map.get(key) + if obj is None: log.debug('Initializing new AsyncZeroMQReqChannel for {0}'.format(key)) # we need to make a local variable for this, as we are going to store # it in a WeakValueDictionary-- which will remove the item if no one # references it-- this forces a reference while we return to the caller - new_obj = object.__new__(cls) - new_obj.__singleton_init__(opts, **kwargs) - loop_instance_map[key] = new_obj + obj = object.__new__(cls) + obj.__singleton_init__(opts, **kwargs) + loop_instance_map[key] = obj log.trace('Inserted key into loop_instance_map id {0} for key {1} and process {2}'.format(id(loop_instance_map), key, os.getpid())) else: log.debug('Re-using AsyncZeroMQReqChannel for {0}'.format(key)) - try: - return loop_instance_map[key] - except KeyError: - # In iterating over the loop_instance_map, we may have triggered - # garbage collection. Therefore, the key is no longer present in - # the map. Re-gen and add to map. 
- log.debug('Initializing new AsyncZeroMQReqChannel due to GC for {0}'.format(key)) - new_obj = object.__new__(cls) - new_obj.__singleton_init__(opts, **kwargs) - loop_instance_map[key] = new_obj - return loop_instance_map[key] + return obj def __deepcopy__(self, memo): cls = self.__class__ @@ -313,7 +305,7 @@ class AsyncZeroMQPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.t else: self._socket.setsockopt(zmq.SUBSCRIBE, b'') - self._socket.setsockopt(zmq.IDENTITY, salt.utils.to_bytes(self.opts['id'])) + self._socket.setsockopt(zmq.IDENTITY, salt.utils.stringutils.to_bytes(self.opts['id'])) # TODO: cleanup all the socket opts stuff if hasattr(zmq, 'TCP_KEEPALIVE'): diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index eed9761b48..c88f9ea5fa 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -118,10 +118,9 @@ from salt.defaults import DEFAULT_TARGET_DELIM import salt.defaults.exitcodes import salt.log import salt.utils.dictupdate +import salt.utils.versions import salt.version -from salt.utils.decorators import jinja_filter -from salt.utils.decorators import memoize as real_memoize -from salt.utils.versions import LooseVersion as _LooseVersion +from salt.utils.decorators.jinja import jinja_filter from salt.textformat import TextFormat from salt.exceptions import ( CommandExecutionError, SaltClientError, @@ -134,53 +133,6 @@ log = logging.getLogger(__name__) _empty = object() -@jinja_filter('is_hex') -def is_hex(value): - ''' - Returns True if value is a hexidecimal string, otherwise returns False - ''' - try: - int(value, 16) - return True - except (TypeError, ValueError): - return False - - -def safe_rm(tgt): - ''' - Safely remove a file - - .. deprecated:: Oxygen - ''' - warn_until( - 'Neon', - 'Use of \'salt.utils.safe_rm\' detected. This function has been moved to ' - '\'salt.utils.files.safe_rm\' as of Salt Oxygen. This warning will be ' - 'removed in Salt Neon.' - ) - # Late import to avoid circular import. - import salt.utils.files - return salt.utils.files.safe_rm(tgt) - - -@jinja_filter('is_empty') -def is_empty(filename): - ''' - Is a file empty? - - .. deprecated:: Oxygen - ''' - warn_until( - 'Neon', - 'Use of \'salt.utils.is_empty\' detected. This function has been moved to ' - '\'salt.utils.files.is_empty\' as of Salt Oxygen. This warning will be ' - 'removed in Salt Neon.' - ) - # Late import to avoid circular import. - import salt.utils.files - return salt.utils.files.is_empty(filename) - - def get_color_theme(theme): ''' Return the color theme to use @@ -246,7 +198,7 @@ def get_colors(use=True, theme=None): if not use: for color in colors: colors[color] = '' - if isinstance(use, str): + if isinstance(use, six.string_types): # Try to set all of the colors to the passed color if use in colors: for color in colors: @@ -264,6 +216,7 @@ def get_context(template, line, num_lines=5, marker=None): Returns:: string ''' + import salt.utils.stringutils template_lines = template.splitlines() num_template_lines = len(template_lines) @@ -290,11 +243,7 @@ def get_context(template, line, num_lines=5, marker=None): if marker: buf[error_line_in_context] += marker - # warning: jinja content may contain unicode strings - # instead of utf-8. - buf = [to_str(i) if isinstance(i, six.text_type) else i for i in buf] - - return '---\n{0}\n---'.format('\n'.join(buf)) + return u'---\n{0}\n---'.format(u'\n'.join(buf)) def get_user(): @@ -418,8 +367,9 @@ def get_specific_user(): Get a user name for publishing. 
If you find the user is "root" attempt to be more specific ''' + import salt.utils.platform user = get_user() - if is_windows(): + if salt.utils.platform.is_windows(): if _win_current_user_is_admin(): return 'sudo_{0}'.format(user) else: @@ -435,13 +385,14 @@ def get_master_key(key_user, opts, skip_perm_errors=False): # Late import to avoid circular import. import salt.utils.files import salt.utils.verify + import salt.utils.platform if key_user == 'root': if opts.get('user', 'root') != 'root': key_user = opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = opts.get('user', 'root') - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. key_user = key_user.replace('\\', '_') @@ -462,7 +413,7 @@ def get_master_key(key_user, opts, skip_perm_errors=False): def reinit_crypto(): ''' - When a fork arrises, pycrypto needs to reinit + When a fork arises, pycrypto needs to reinit From its doc:: Caveat: For the random number generator to work correctly, @@ -567,88 +518,6 @@ def profile_func(filename=None): return proffunc -def rand_str(size=9999999999, hash_type=None): - ''' - Return a random string - ''' - if not hash_type: - hash_type = 'md5' - hasher = getattr(hashlib, hash_type) - return hasher(to_bytes(str(random.SystemRandom().randint(0, size)))).hexdigest() - - -@jinja_filter('which') -def which(exe=None): - ''' - Python clone of /usr/bin/which - ''' - def _is_executable_file_or_link(exe): - # check for os.X_OK doesn't suffice because directory may executable - return (os.access(exe, os.X_OK) and - (os.path.isfile(exe) or os.path.islink(exe))) - - if exe: - if _is_executable_file_or_link(exe): - # executable in cwd or fullpath - return exe - - ext_list = os.environ.get('PATHEXT', '.EXE').split(';') - - @real_memoize - def _exe_has_ext(): - ''' - Do a case insensitive test if exe has a file extension match in - PATHEXT - ''' - for ext in ext_list: - try: - pattern = r'.*\.' + ext.lstrip('.') + r'$' - re.match(pattern, exe, re.I).groups() - return True - except AttributeError: - continue - return False - - # Enhance POSIX path for the reliability at some environments, when $PATH is changing - # This also keeps order, where 'first came, first win' for cases to find optional alternatives - search_path = os.environ.get('PATH') and os.environ['PATH'].split(os.pathsep) or list() - for default_path in ['/bin', '/sbin', '/usr/bin', '/usr/sbin', '/usr/local/bin']: - if default_path not in search_path: - search_path.append(default_path) - os.environ['PATH'] = os.pathsep.join(search_path) - for path in search_path: - full_path = os.path.join(path, exe) - if _is_executable_file_or_link(full_path): - return full_path - elif is_windows() and not _exe_has_ext(): - # On Windows, check for any extensions in PATHEXT. - # Allows both 'cmd' and 'cmd.exe' to be matched. 
- for ext in ext_list: - # Windows filesystem is case insensitive so we - # safely rely on that behavior - if _is_executable_file_or_link(full_path + ext): - return full_path + ext - log.trace('\'{0}\' could not be found in the following search path: \'{1}\''.format(exe, search_path)) - else: - log.error('No executable was passed to be searched by salt.utils.which()') - - return None - - -def which_bin(exes): - ''' - Scan over some possible executables and return the first one that is found - ''' - if not isinstance(exes, collections.Iterable): - return None - for exe in exes: - path = which(exe) - if not path: - continue - return path - return None - - def activate_profile(test=True): pr = None if test: @@ -663,6 +532,9 @@ def activate_profile(test=True): def output_profile(pr, stats_path='/tmp/stats', stop=False, id_=None): # Late import to avoid circular import. import salt.utils.files + import salt.utils.hashutils + import salt.utils.path + import salt.utils.stringutils if pr is not None and HAS_CPROFILE: try: @@ -671,7 +543,7 @@ def output_profile(pr, stats_path='/tmp/stats', stop=False, id_=None): os.makedirs(stats_path) date = datetime.datetime.now().isoformat() if id_ is None: - id_ = rand_str(size=32) + id_ = salt.utils.hashutils.random_hash(size=32) ficp = os.path.join(stats_path, '{0}.{1}.pstats'.format(id_, date)) fico = os.path.join(stats_path, '{0}.{1}.dot'.format(id_, date)) ficn = os.path.join(stats_path, '{0}.{1}.stats'.format(id_, date)) @@ -681,7 +553,7 @@ def output_profile(pr, stats_path='/tmp/stats', stop=False, id_=None): pstats.Stats(pr, stream=fic).sort_stats('cumulative') log.info('PROFILING: {0} generated'.format(ficp)) log.info('PROFILING (cumulative): {0} generated'.format(ficn)) - pyprof = which('pyprof2calltree') + pyprof = salt.utils.path.which('pyprof2calltree') cmd = [pyprof, '-i', ficp, '-o', fico] if pyprof: failed = False @@ -698,8 +570,8 @@ def output_profile(pr, stats_path='/tmp/stats', stop=False, id_=None): else: log.info('PROFILING (dot): {0} generated'.format(fico)) log.trace('pyprof2calltree output:') - log.trace(to_str(pro.stdout.read()).strip() + - to_str(pro.stderr.read()).strip()) + log.trace(salt.utils.stringutils.to_str(pro.stdout.read()).strip() + + salt.utils.stringutils.to_str(pro.stderr.read()).strip()) else: log.info('You can run {0} for additional stats.'.format(cmd)) finally: @@ -918,10 +790,11 @@ def check_or_die(command): Lazily import `salt.modules.cmdmod` to avoid any sort of circular dependencies. 
''' + import salt.utils.path if command is None: raise CommandNotFoundError('\'None\' is not a valid command.') - if not which(command): + if not salt.utils.path.which(command): raise CommandNotFoundError('\'{0}\' is not in the path'.format(command)) @@ -929,15 +802,16 @@ def backup_minion(path, bkroot): ''' Backup a file on the minion ''' + import salt.utils.platform dname, bname = os.path.split(path) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): src_dir = dname.replace(':', '_') else: src_dir = dname[1:] - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): fstat = os.stat(path) msecs = str(int(time.time() * 1000000))[-6:] - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # ':' is an illegal filesystem path character on Windows stamp = time.strftime('%a_%b_%d_%H-%M-%S_%Y') else: @@ -949,69 +823,11 @@ def backup_minion(path, bkroot): if not os.path.isdir(os.path.dirname(bkpath)): os.makedirs(os.path.dirname(bkpath)) shutil.copyfile(path, bkpath) - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): os.chown(bkpath, fstat.st_uid, fstat.st_gid) os.chmod(bkpath, fstat.st_mode) -@jinja_filter('path_join') -def path_join(*parts, **kwargs): - ''' - This functions tries to solve some issues when joining multiple absolute - paths on both *nix and windows platforms. - - See tests/unit/utils/path_join_test.py for some examples on what's being - talked about here. - - The "use_posixpath" kwarg can be be used to force joining using poxixpath, - which is useful for Salt fileserver paths on Windows masters. - ''' - if six.PY3: - new_parts = [] - for part in parts: - new_parts.append(to_str(part)) - parts = new_parts - - kwargs = salt.utils.clean_kwargs(**kwargs) - use_posixpath = kwargs.pop('use_posixpath', False) - if kwargs: - invalid_kwargs(kwargs) - - pathlib = posixpath if use_posixpath else os.path - - # Normalize path converting any os.sep as needed - parts = [pathlib.normpath(p) for p in parts] - - try: - root = parts.pop(0) - except IndexError: - # No args passed to func - return '' - - if not parts: - ret = root - else: - stripped = [p.lstrip(os.sep) for p in parts] - try: - ret = pathlib.join(root, *stripped) - except UnicodeDecodeError: - # This is probably Python 2 and one of the parts contains unicode - # characters in a bytestring. First try to decode to the system - # encoding. - try: - enc = __salt_system_encoding__ - except NameError: - enc = sys.stdin.encoding or sys.getdefaultencoding() - try: - ret = pathlib.join(root.decode(enc), - *[x.decode(enc) for x in stripped]) - except UnicodeDecodeError: - # Last resort, try UTF-8 - ret = pathlib.join(root.decode('UTF-8'), - *[x.decode('UTF-8') for x in stripped]) - return pathlib.normpath(ret) - - def pem_finger(path=None, key=None, sum_type='sha256'): ''' Pass in either a raw pem string, or the path on disk to the location of a @@ -1048,26 +864,26 @@ def build_whitespace_split_regex(text): Example: - .. code-block:: yaml + .. code-block:: python - >>> import re - >>> from salt.utils import * - >>> regex = build_whitespace_split_regex( - ... """if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then""" - ... ) + >>> import re + >>> import salt.utils + >>> regex = salt.utils.build_whitespace_split_regex( + ... """if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then""" + ... ) - >>> regex - '(?:[\\s]+)?if(?:[\\s]+)?\\[(?:[\\s]+)?\\-z(?:[\\s]+)?\\"\\$debian' - '\\_chroot\\"(?:[\\s]+)?\\](?:[\\s]+)?\\&\\&(?:[\\s]+)?\\[(?:[\\s]+)?' 
- '\\-r(?:[\\s]+)?\\/etc\\/debian\\_chroot(?:[\\s]+)?\\]\\;(?:[\\s]+)?' - 'then(?:[\\s]+)?' - >>> re.search( - ... regex, - ... """if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then""" - ... ) + >>> regex + '(?:[\\s]+)?if(?:[\\s]+)?\\[(?:[\\s]+)?\\-z(?:[\\s]+)?\\"\\$debian' + '\\_chroot\\"(?:[\\s]+)?\\](?:[\\s]+)?\\&\\&(?:[\\s]+)?\\[(?:[\\s]+)?' + '\\-r(?:[\\s]+)?\\/etc\\/debian\\_chroot(?:[\\s]+)?\\]\\;(?:[\\s]+)?' + 'then(?:[\\s]+)?' + >>> re.search( + ... regex, + ... """if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then""" + ... ) - <_sre.SRE_Match object at 0xb70639c0> - >>> + <_sre.SRE_Match object at 0xb70639c0> + >>> ''' def __build_parts(text): @@ -1113,6 +929,9 @@ def format_call(fun, :returns: A dictionary with the function required arguments and keyword arguments. ''' + # Late import to avoid circular import + import salt.utils.versions + import salt.utils.args ret = initial_ret is not None and initial_ret or {} ret['args'] = [] @@ -1179,7 +998,7 @@ def format_call(fun, # We'll be showing errors to the users until Salt Oxygen comes out, after # which, errors will be raised instead. - warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'It\'s time to start raising `SaltInvocationError` instead of ' 'returning warnings', @@ -1233,6 +1052,7 @@ def arg_lookup(fun, aspec=None): Return a dict containing the arguments and default arguments to the function. ''' + import salt.utils.args ret = {'kwargs': {}} if aspec is None: aspec = salt.utils.args.get_function_argspec(fun) @@ -1284,7 +1104,7 @@ def istextfile(fp_, blocksize=512): return float(len(nontext)) / len(block) <= 0.30 -@jinja_filter('is_sorted') +@jinja_filter('sorted_ignorecase') def isorted(to_sort): ''' Sort a list of strings ignoring case. @@ -1304,6 +1124,7 @@ def mysql_to_dict(data, key): ''' Convert MySQL-style output to a python dictionary ''' + import salt.utils.stringutils ret = {} headers = [''] for line in data: @@ -1321,113 +1142,13 @@ def mysql_to_dict(data, key): if field < 1: continue else: - row[headers[field]] = str_to_num(comps[field]) + row[headers[field]] = salt.utils.stringutils.to_num(comps[field]) ret[row[key]] = row else: headers = comps return ret -@jinja_filter('contains_whitespace') -def contains_whitespace(text): - ''' - Returns True if there are any whitespace characters in the string - ''' - return any(x.isspace() for x in text) - - -@jinja_filter('str_to_num') -def str_to_num(text): - ''' - Convert a string to a number. - Returns an integer if the string represents an integer, a floating - point number if the string is a real number, or the string unchanged - otherwise. - ''' - try: - return int(text) - except ValueError: - try: - return float(text) - except ValueError: - return text - - -def fopen(*args, **kwargs): - ''' - Wrapper around open() built-in to set CLOEXEC on the fd. - - This flag specifies that the file descriptor should be closed when an exec - function is invoked; - When a file descriptor is allocated (as with open or dup), this bit is - initially cleared on the new file descriptor, meaning that descriptor will - survive into the new program after exec. - - NB! We still have small race condition between open and fcntl. - - .. deprecated:: Oxygen - ''' - warn_until( - 'Neon', - 'Use of \'salt.utils.fopen\' detected. This function has been moved to ' - '\'salt.utils.files.fopen\' as of Salt Oxygen. This warning will be ' - 'removed in Salt Neon.' - ) - # Late import to avoid circular import. 
- import salt.utils.files - return salt.utils.files.fopen(*args, **kwargs) # pylint: disable=W8470 - - -@contextlib.contextmanager -def flopen(*args, **kwargs): - ''' - Shortcut for fopen with lock and context manager - - .. deprecated:: Oxygen - ''' - warn_until( - 'Neon', - 'Use of \'salt.utils.flopen\' detected. This function has been moved to ' - '\'salt.utils.files.flopen\' as of Salt Oxygen. This warning will be ' - 'removed in Salt Neon.' - ) - # Late import to avoid circular import. - import salt.utils.files - return salt.utils.files.flopen(*args, **kwargs) - - -@contextlib.contextmanager -def fpopen(*args, **kwargs): - ''' - Shortcut for fopen with extra uid, gid and mode options. - - Supported optional Keyword Arguments: - - mode: explicit mode to set. Mode is anything os.chmod - would accept as input for mode. Works only on unix/unix - like systems. - - uid: the uid to set, if not set, or it is None or -1 no changes are - made. Same applies if the path is already owned by this - uid. Must be int. Works only on unix/unix like systems. - - gid: the gid to set, if not set, or it is None or -1 no changes are - made. Same applies if the path is already owned by this - gid. Must be int. Works only on unix/unix like systems. - - .. deprecated:: Oxygen - ''' - warn_until( - 'Neon', - 'Use of \'salt.utils.fpopen\' detected. This function has been moved to ' - '\'salt.utils.files.fpopen\' as of Salt Oxygen. This warning will be ' - 'removed in Salt Neon.' - ) - # Late import to avoid circular import. - import salt.utils.files - return salt.utils.files.fpopen(*args, **kwargs) - - def expr_match(line, expr): ''' Evaluate a line of text against an expression. First try a full-string @@ -1451,6 +1172,24 @@ def expr_match(line, expr): def check_whitelist_blacklist(value, whitelist=None, blacklist=None): ''' Check a whitelist and/or blacklist to see if the value matches it. + + value + The item to check the whitelist and/or blacklist against. + + whitelist + The list of items that are white-listed. If ``value`` is found + in the whitelist, then the function returns ``True``. Otherwise, + it returns ``False``. + + blacklist + The list of items that are black-listed. If ``value`` is found + in the blacklist, then the function returns ``False``. Otherwise, + it returns ``True``. + + If both a whitelist and a blacklist are provided, value membership + in the blacklist will be examined first. If the value is not found + in the blacklist, then the whitelist is checked. If the value isn't + found in the whitelist, the function returns ``False``. ''' if blacklist is not None: if not hasattr(blacklist, '__iter__'): @@ -1646,29 +1385,6 @@ def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DEL return data -def clean_kwargs(**kwargs): - ''' - Return a dict without any of the __pub* keys (or any other keys starting - with a dunder) from the kwargs dict passed into the execution module - functions. These keys are useful for tracking what was used to invoke - the function call, but they may not be desirable to have if passing the - kwargs forward wholesale. 
- ''' - ret = {} - for key, val in six.iteritems(kwargs): - if not key.startswith('__'): - ret[key] = val - return ret - - -@real_memoize -def is_windows(): - ''' - Simple function to return if a host is Windows or not - ''' - return sys.platform.startswith('win') - - def sanitize_win_path_string(winpath): ''' Remove illegal path characters for windows @@ -1676,170 +1392,13 @@ def sanitize_win_path_string(winpath): intab = '<>:|?*' outtab = '_' * len(intab) trantab = ''.maketrans(intab, outtab) if six.PY3 else string.maketrans(intab, outtab) # pylint: disable=no-member - if isinstance(winpath, str): + if isinstance(winpath, six.string_types): winpath = winpath.translate(trantab) elif isinstance(winpath, six.text_type): winpath = winpath.translate(dict((ord(c), u'_') for c in intab)) return winpath -@real_memoize -def is_proxy(): - ''' - Return True if this minion is a proxy minion. - Leverages the fact that is_linux() and is_windows - both return False for proxies. - TODO: Need to extend this for proxies that might run on - other Unices - ''' - import __main__ as main - # This is a hack. If a proxy minion is started by other - # means, e.g. a custom script that creates the minion objects - # then this will fail. - is_proxy = False - try: - # Changed this from 'salt-proxy in main...' to 'proxy in main...' - # to support the testsuite's temp script that is called 'cli_salt_proxy' - if 'proxy' in main.__file__: - is_proxy = True - except AttributeError: - pass - return is_proxy - - -@real_memoize -def is_linux(): - ''' - Simple function to return if a host is Linux or not. - Note for a proxy minion, we need to return something else - ''' - return sys.platform.startswith('linux') - - -@real_memoize -def is_darwin(): - ''' - Simple function to return if a host is Darwin (macOS) or not - ''' - return sys.platform.startswith('darwin') - - -@real_memoize -def is_sunos(): - ''' - Simple function to return if host is SunOS or not - ''' - return sys.platform.startswith('sunos') - - -@real_memoize -def is_smartos(): - ''' - Simple function to return if host is SmartOS (Illumos) or not - ''' - if not is_sunos(): - return False - else: - return os.uname()[3].startswith('joyent_') - - -@real_memoize -def is_smartos_globalzone(): - ''' - Function to return if host is SmartOS (Illumos) global zone or not - ''' - if not is_smartos(): - return False - else: - cmd = ['zonename'] - try: - zonename = subprocess.Popen( - cmd, shell=False, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - except OSError: - return False - if zonename.returncode: - return False - if zonename.stdout.read().strip() == 'global': - return True - - return False - - -@real_memoize -def is_smartos_zone(): - ''' - Function to return if host is SmartOS (Illumos) and not the gz - ''' - if not is_smartos(): - return False - else: - cmd = ['zonename'] - try: - zonename = subprocess.Popen( - cmd, shell=False, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - except OSError: - return False - if zonename.returncode: - return False - if zonename.stdout.read().strip() == 'global': - return False - - return True - - -@real_memoize -def is_freebsd(): - ''' - Simple function to return if host is FreeBSD or not - ''' - return sys.platform.startswith('freebsd') - - -@real_memoize -def is_netbsd(): - ''' - Simple function to return if host is NetBSD or not - ''' - return sys.platform.startswith('netbsd') - - -@real_memoize -def is_openbsd(): - ''' - Simple function to return if host is OpenBSD or not - ''' - return sys.platform.startswith('openbsd') 
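
The helpers removed in this hunk (``is_windows``, ``is_proxy``, ``is_linux``, ``is_darwin``, ``is_sunos``, the SmartOS and BSD checks, and so on) reappear further down in this change as thin deprecation shims that delegate to ``salt.utils.platform`` and warn until Salt Neon. A minimal sketch, outside the patch itself, of how calling code is expected to migrate; the ``print`` lines are illustrative only:

.. code-block:: python

    import salt.utils            # old location, kept only as a deprecation shim
    import salt.utils.platform   # new canonical location of the platform checks

    # Deprecated spelling: still returns the right answer, but the shim
    # emits a DeprecationWarning (via warn_until) until it is removed in Neon.
    if salt.utils.is_windows():
        print('Windows host (deprecated call path)')

    # Preferred spelling after this refactor.
    if salt.utils.platform.is_windows():
        print('Windows host')
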
- - -@real_memoize -def is_aix(): - ''' - Simple function to return if host is AIX or not - ''' - return sys.platform.startswith('aix') - - -def is_fcntl_available(check_sunos=False): - ''' - Simple function to check if the `fcntl` module is available or not. - If `check_sunos` is passed as `True` an additional check to see if host is - SunOS is also made. For additional information see: http://goo.gl/159FF8 - - .. deprecated:: Oxygen - ''' - warn_until( - 'Neon', - 'Use of \'salt.utils.is_fcntl_available\' detected. This function has been moved to ' - '\'salt.utils.files.is_fcntl_available\' as of Salt Oxygen. This warning will be ' - 'removed in Salt Neon.' - ) - # Late import to avoid circular import. - import salt.utils.files - return salt.utils.files.is_fcntl_available(check_sunos) - - def check_include_exclude(path_str, include_pat=None, exclude_pat=None): ''' Check for glob or regexp patterns for include_pat and exclude_pat in the @@ -2123,24 +1682,6 @@ def exactly_one(l): return exactly_n(l) -def rm_rf(path): - ''' - Platform-independent recursive delete. Includes code from - http://stackoverflow.com/a/2656405 - - .. deprecated:: Oxygen - ''' - warn_until( - 'Neon', - 'Use of \'salt.utils.rm_rf\' detected. This function has been moved to ' - '\'salt.utils.files.rm_rf\' as of Salt Oxygen. This warning will be ' - 'removed in Salt Neon.' - ) - # Late import to avoid circular import. - import salt.utils.files - return salt.utils.files.rm_rf(path) - - def option(value, default='', opts=None, pillar=None): ''' Pass in a generic option and receive the value that will be assigned @@ -2161,32 +1702,6 @@ def option(value, default='', opts=None, pillar=None): return default -def parse_docstring(docstring): - ''' - Parse a docstring into its parts. - - Currently only parses dependencies, can be extended to parse whatever is - needed. - - Parses into a dictionary: - { - 'full': full docstring, - 'deps': list of dependencies (empty list if none) - } - - .. deprecated:: Oxygen - ''' - warn_until( - 'Neon', - 'Use of \'salt.utils.parse_docstring\' detected. This function has been moved to ' - '\'salt.utils.doc.parse_docstring\' as of Salt Oxygen. This warning will be ' - 'removed in Salt Neon.' - ) - # Late import to avoid circular import. - import salt.utils.doc - return salt.utils.doc.parse_docstring(docstring) - - def print_cli(msg, retries=10, step=0.01): ''' Wrapper around print() that suppresses tracebacks on broken pipes (i.e. @@ -2417,228 +1932,6 @@ def date_format(date=None, format="%Y-%m-%d"): return date_cast(date).strftime(format) -def warn_until(version, - message, - category=DeprecationWarning, - stacklevel=None, - _version_info_=None, - _dont_call_warnings=False): - ''' - Helper function to raise a warning, by default, a ``DeprecationWarning``, - until the provided ``version``, after which, a ``RuntimeError`` will - be raised to remind the developers to remove the warning because the - target version has been reached. - - :param version: The version info or name after which the warning becomes a - ``RuntimeError``. For example ``(0, 17)`` or ``Hydrogen`` - or an instance of :class:`salt.version.SaltStackVersion`. - :param message: The warning message to be displayed. - :param category: The warning class to be thrown, by default - ``DeprecationWarning`` - :param stacklevel: There should be no need to set the value of - ``stacklevel``. Salt should be able to do the right thing. 
- :param _version_info_: In order to reuse this function for other SaltStack - projects, they need to be able to provide the - version info to compare to. - :param _dont_call_warnings: This parameter is used just to get the - functionality until the actual error is to be - issued. When we're only after the salt version - checks to raise a ``RuntimeError``. - ''' - if not isinstance(version, (tuple, - six.string_types, - salt.version.SaltStackVersion)): - raise RuntimeError( - 'The \'version\' argument should be passed as a tuple, string or ' - 'an instance of \'salt.version.SaltStackVersion\'.' - ) - elif isinstance(version, tuple): - version = salt.version.SaltStackVersion(*version) - elif isinstance(version, six.string_types): - version = salt.version.SaltStackVersion.from_name(version) - - if stacklevel is None: - # Attribute the warning to the calling function, not to warn_until() - stacklevel = 2 - - if _version_info_ is None: - _version_info_ = salt.version.__version_info__ - - _version_ = salt.version.SaltStackVersion(*_version_info_) - - if _version_ >= version: - import inspect - caller = inspect.getframeinfo(sys._getframe(stacklevel - 1)) - raise RuntimeError( - 'The warning triggered on filename \'{filename}\', line number ' - '{lineno}, is supposed to be shown until version ' - '{until_version} is released. Current version is now ' - '{salt_version}. Please remove the warning.'.format( - filename=caller.filename, - lineno=caller.lineno, - until_version=version.formatted_version, - salt_version=_version_.formatted_version - ), - ) - - if _dont_call_warnings is False: - def _formatwarning(message, - category, - filename, - lineno, - line=None): # pylint: disable=W0613 - ''' - Replacement for warnings.formatwarning that disables the echoing of - the 'line' parameter. - ''' - return '{0}:{1}: {2}: {3}\n'.format( - filename, lineno, category.__name__, message - ) - saved = warnings.formatwarning - warnings.formatwarning = _formatwarning - warnings.warn( - message.format(version=version.formatted_version), - category, - stacklevel=stacklevel - ) - warnings.formatwarning = saved - - -def kwargs_warn_until(kwargs, - version, - category=DeprecationWarning, - stacklevel=None, - _version_info_=None, - _dont_call_warnings=False): - ''' - Helper function to raise a warning (by default, a ``DeprecationWarning``) - when unhandled keyword arguments are passed to function, until the - provided ``version_info``, after which, a ``RuntimeError`` will be raised - to remind the developers to remove the ``**kwargs`` because the target - version has been reached. - This function is used to help deprecate unused legacy ``**kwargs`` that - were added to function parameters lists to preserve backwards compatibility - when removing a parameter. See - :ref:`the deprecation development docs ` - for the modern strategy for deprecating a function parameter. - - :param kwargs: The caller's ``**kwargs`` argument value (a ``dict``). - :param version: The version info or name after which the warning becomes a - ``RuntimeError``. For example ``(0, 17)`` or ``Hydrogen`` - or an instance of :class:`salt.version.SaltStackVersion`. - :param category: The warning class to be thrown, by default - ``DeprecationWarning`` - :param stacklevel: There should be no need to set the value of - ``stacklevel``. Salt should be able to do the right thing. - :param _version_info_: In order to reuse this function for other SaltStack - projects, they need to be able to provide the - version info to compare to. 
- :param _dont_call_warnings: This parameter is used just to get the - functionality until the actual error is to be - issued. When we're only after the salt version - checks to raise a ``RuntimeError``. - ''' - if not isinstance(version, (tuple, - six.string_types, - salt.version.SaltStackVersion)): - raise RuntimeError( - 'The \'version\' argument should be passed as a tuple, string or ' - 'an instance of \'salt.version.SaltStackVersion\'.' - ) - elif isinstance(version, tuple): - version = salt.version.SaltStackVersion(*version) - elif isinstance(version, six.string_types): - version = salt.version.SaltStackVersion.from_name(version) - - if stacklevel is None: - # Attribute the warning to the calling function, - # not to kwargs_warn_until() or warn_until() - stacklevel = 3 - - if _version_info_ is None: - _version_info_ = salt.version.__version_info__ - - _version_ = salt.version.SaltStackVersion(*_version_info_) - - if kwargs or _version_.info >= version.info: - arg_names = ', '.join('\'{0}\''.format(key) for key in kwargs) - warn_until( - version, - message='The following parameter(s) have been deprecated and ' - 'will be removed in \'{0}\': {1}.'.format(version.string, - arg_names), - category=category, - stacklevel=stacklevel, - _version_info_=_version_.info, - _dont_call_warnings=_dont_call_warnings - ) - - -def version_cmp(pkg1, pkg2, ignore_epoch=False): - ''' - Compares two version strings using salt.utils.versions.LooseVersion. This is - a fallback for providers which don't have a version comparison utility - built into them. Return -1 if version1 < version2, 0 if version1 == - version2, and 1 if version1 > version2. Return None if there was a problem - making the comparison. - ''' - normalize = lambda x: str(x).split(':', 1)[-1] if ignore_epoch else str(x) - pkg1 = normalize(pkg1) - pkg2 = normalize(pkg2) - - try: - # pylint: disable=no-member - if _LooseVersion(pkg1) < _LooseVersion(pkg2): - return -1 - elif _LooseVersion(pkg1) == _LooseVersion(pkg2): - return 0 - elif _LooseVersion(pkg1) > _LooseVersion(pkg2): - return 1 - except Exception as exc: - log.exception(exc) - return None - - -def compare_versions(ver1='', - oper='==', - ver2='', - cmp_func=None, - ignore_epoch=False): - ''' - Compares two version numbers. Accepts a custom function to perform the - cmp-style version comparison, otherwise uses version_cmp(). - ''' - cmp_map = {'<': (-1,), '<=': (-1, 0), '==': (0,), - '>=': (0, 1), '>': (1,)} - if oper not in ('!=',) and oper not in cmp_map: - log.error('Invalid operator \'%s\' for version comparison', oper) - return False - - if cmp_func is None: - cmp_func = version_cmp - - cmp_result = cmp_func(ver1, ver2, ignore_epoch=ignore_epoch) - if cmp_result is None: - return False - - # Check if integer/long - if not isinstance(cmp_result, numbers.Integral): - log.error('The version comparison function did not return an ' - 'integer/long.') - return False - - if oper == '!=': - return cmp_result not in cmp_map['=='] - else: - # Gracefully handle cmp_result not in (-1, 0, 1). - if cmp_result < -1: - cmp_result = -1 - elif cmp_result > 1: - cmp_result = 1 - - return cmp_result in cmp_map[oper] - - @jinja_filter('compare_dicts') def compare_dicts(old=None, new=None): ''' @@ -2683,6 +1976,7 @@ def argspec_report(functions, module=''): Pass in a functions dict as it is returned from the loader and return the argspec function signatures ''' + import salt.utils.args ret = {} if '*' in module or '.' 
in module: for fun in fnmatch.filter(functions, module): @@ -2786,6 +2080,7 @@ def is_bin_file(path): ''' # Late import to avoid circular import. import salt.utils.files + import salt.utils.stringutils if not os.path.isfile(path): return False @@ -2795,39 +2090,13 @@ def is_bin_file(path): data = fp_.read(2048) if six.PY3: data = data.decode(__salt_system_encoding__) - return is_bin_str(data) + return salt.utils.stringutils.is_binary(data) except UnicodeDecodeError: return True except os.error: return False -def is_bin_str(data): - ''' - Detects if the passed string of data is bin or text - ''' - if '\0' in data: - return True - if not data: - return False - - text_characters = ''.join([chr(x) for x in range(32, 127)] + list('\n\r\t\b')) - # Get the non-text characters (map each character to itself then use the - # 'remove' option to get rid of the text characters.) - if six.PY3: - trans = ''.maketrans('', '', text_characters) - nontext = data.translate(trans) - else: - trans = string.maketrans('', '') # pylint: disable=no-member - nontext = data.translate(trans, text_characters) - - # If more than 30% non-text characters, then - # this is considered a binary file - if len(nontext) / len(data) > 0.30: - return True - return False - - def is_dictlist(data): ''' Returns True if data is a list of one-element dicts (as found in many SLS @@ -3117,41 +2386,6 @@ def chugid_and_umask(runas, umask): os.umask(umask) -@jinja_filter('random_str') -def rand_string(size=32): - key = os.urandom(size) - return key.encode('base64').replace('\n', '') - - -def relpath(path, start='.'): - ''' - Work around Python bug #5117, which is not (and will not be) patched in - Python 2.6 (http://bugs.python.org/issue5117) - ''' - if sys.version_info < (2, 7) and 'posix' in sys.builtin_module_names: - # The below code block is based on posixpath.relpath from Python 2.7, - # which has the fix for this bug. - if not path: - raise ValueError('no path specified') - - start_list = [ - x for x in os.path.abspath(start).split(os.path.sep) if x - ] - path_list = [ - x for x in os.path.abspath(path).split(os.path.sep) if x - ] - - # work out how much of the filepath is shared by start and path. 
- i = len(os.path.commonprefix([start_list, path_list])) - - rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:] - if not rel_list: - return os.path.curdir - return os.path.join(*rel_list) - - return os.path.relpath(path, start=start) - - def human_size_to_bytes(human_size): ''' Convert human-readable units to bytes @@ -3169,59 +2403,6 @@ def human_size_to_bytes(human_size): return size_num * unit_multiplier -def to_str(s, encoding=None): - ''' - Given str, bytes, bytearray, or unicode (py2), return str - ''' - if isinstance(s, str): - return s - if six.PY3: - if isinstance(s, (bytes, bytearray)): - # https://docs.python.org/3/howto/unicode.html#the-unicode-type - # replace error with U+FFFD, REPLACEMENT CHARACTER - return s.decode(encoding or __salt_system_encoding__, "replace") - raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s))) - else: - if isinstance(s, bytearray): - return str(s) - if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable - return s.encode(encoding or __salt_system_encoding__) - raise TypeError('expected str, bytearray, or unicode') - - -@jinja_filter('to_bytes') -def to_bytes(s, encoding=None): - ''' - Given bytes, bytearray, str, or unicode (python 2), return bytes (str for - python 2) - ''' - if six.PY3: - if isinstance(s, bytes): - return s - if isinstance(s, bytearray): - return bytes(s) - if isinstance(s, str): - return s.encode(encoding or __salt_system_encoding__) - raise TypeError('expected bytes, bytearray, or str') - else: - return to_str(s, encoding) - - -def to_unicode(s, encoding=None): - ''' - Given str or unicode, return unicode (str for python 3) - ''' - if not isinstance(s, (bytes, bytearray, six.string_types)): - return s - if six.PY3: - if isinstance(s, (bytes, bytearray)): - return to_str(s, encoding) - else: - if isinstance(s, str): - return s.decode(encoding or __salt_system_encoding__) - return s - - @jinja_filter('is_list') def is_list(value): ''' @@ -3254,37 +2435,6 @@ def is_iter(y, ignore=six.string_types): return False -def invalid_kwargs(invalid_kwargs, raise_exc=True): - ''' - Raise a SaltInvocationError if invalid_kwargs is non-empty - ''' - if invalid_kwargs: - if isinstance(invalid_kwargs, dict): - new_invalid = [ - '{0}={1}'.format(x, y) - for x, y in six.iteritems(invalid_kwargs) - ] - invalid_kwargs = new_invalid - msg = ( - 'The following keyword arguments are not valid: {0}' - .format(', '.join(invalid_kwargs)) - ) - if raise_exc: - raise SaltInvocationError(msg) - else: - return msg - - -def shlex_split(s, **kwargs): - ''' - Only split if variable is a string - ''' - if isinstance(s, six.string_types): - return shlex.split(s, **kwargs) - else: - return s - - def split_input(val): ''' Take an input value and split it into a list, returning the resulting list @@ -3297,41 +2447,6 @@ def split_input(val): return [x.strip() for x in str(val).split(',')] -def str_version_to_evr(verstring): - ''' - Split the package version string into epoch, version and release. - Return this as tuple. - - The epoch is always not empty. The version and the release can be an empty - string if such a component could not be found in the version string. 
- - "2:1.0-1.2" => ('2', '1.0', '1.2) - "1.0" => ('0', '1.0', '') - "" => ('0', '', '') - ''' - if verstring in [None, '']: - return '0', '', '' - - idx_e = verstring.find(':') - if idx_e != -1: - try: - epoch = str(int(verstring[:idx_e])) - except ValueError: - # look, garbage in the epoch field, how fun, kill it - epoch = '0' # this is our fallback, deal - else: - epoch = '0' - idx_r = verstring.find('-') - if idx_r != -1: - version = verstring[idx_e + 1:idx_r] - release = verstring[idx_r + 1:] - else: - version = verstring[idx_e + 1:] - release = '' - - return epoch, version, release - - def simple_types_filter(data): ''' Convert the data list, dictionary into simple types, i.e., int, float, string, @@ -3452,27 +2567,683 @@ def fnmatch_multiple(candidates, pattern): return None -def is_quoted(val): +# +# MOVED FUNCTIONS +# +# These are deprecated and will be removed in Neon. +def to_bytes(s, encoding=None): + ''' + Given bytes, bytearray, str, or unicode (python 2), return bytes (str for + python 2) + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.stringutils + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.to_bytes\' detected. This function has been ' + 'moved to \'salt.utils.stringutils.to_bytes\' as of Salt Oxygen. ' + 'This warning will be removed in Salt Neon.' + ) + return salt.utils.stringutils.to_bytes(s, encoding) + + +def to_str(s, encoding=None): + ''' + Given str, bytes, bytearray, or unicode (py2), return str + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.stringutils + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.to_str\' detected. This function has been moved ' + 'to \'salt.utils.stringutils.to_str\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.stringutils.to_str(s, encoding) + + +def to_unicode(s, encoding=None): + ''' + Given str or unicode, return unicode (str for python 3) + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.stringutils + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.to_unicode\' detected. This function has been ' + 'moved to \'salt.utils.stringutils.to_unicode\' as of Salt Oxygen. ' + 'This warning will be removed in Salt Neon.' + ) + return salt.utils.stringutils.to_unicode(s, encoding) + + +def str_to_num(text): + ''' + Convert a string to a number. + Returns an integer if the string represents an integer, a floating + point number if the string is a real number, or the string unchanged + otherwise. + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.stringutils + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.str_to_num\' detected. This function has been ' + 'moved to \'salt.utils.stringutils.to_num\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.stringutils.to_num(text) + + +def is_quoted(value): ''' Return a single or double quote, if a string is wrapped in extra quotes. Otherwise return an empty string. + + .. deprecated:: Oxygen ''' - ret = '' - if ( - isinstance(val, six.string_types) and val[0] == val[-1] and - val.startswith(('\'', '"')) - ): - ret = val[0] - return ret + # Late import to avoid circular import. 
+ import salt.utils.versions + import salt.utils.stringutils + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_quoted\' detected. This function has been ' + 'moved to \'salt.utils.stringutils.is_quoted\' as of Salt Oxygen. ' + 'This warning will be removed in Salt Neon.' + ) + return salt.utils.stringutils.is_quoted(value) -def dequote(val): +def dequote(value): ''' Remove extra quotes around a string. + + .. deprecated:: Oxygen ''' - if is_quoted(val): - return val[1:-1] - return val + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.stringutils + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.dequote\' detected. This function has been moved ' + 'to \'salt.utils.stringutils.dequote\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.stringutils.dequote(value) + + +def is_hex(value): + ''' + Returns True if value is a hexidecimal string, otherwise returns False + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.stringutils + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_hex\' detected. This function has been moved ' + 'to \'salt.utils.stringutils.is_hex\' as of Salt Oxygen. This warning ' + 'will be removed in Salt Neon.' + ) + return salt.utils.stringutils.is_hex(value) + + +def is_bin_str(data): + ''' + Detects if the passed string of data is binary or text + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.stringutils + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_bin_str\' detected. This function has been ' + 'moved to \'salt.utils.stringutils.is_binary\' as of Salt Oxygen. ' + 'This warning will be removed in Salt Neon.' + ) + return salt.utils.stringutils.is_binary(data) + + +def rand_string(size=32): + ''' + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.stringutils + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.rand_string\' detected. This function has been ' + 'moved to \'salt.utils.stringutils.random\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.stringutils.random(size) + + +def contains_whitespace(text): + ''' + Returns True if there are any whitespace characters in the string + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.stringutils + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.contains_whitespace\' detected. This function ' + 'has been moved to \'salt.utils.stringutils.contains_whitespace\' as ' + 'of Salt Oxygen. This warning will be removed in Salt Neon.' + ) + return salt.utils.stringutils.contains_whitespace(text) + + +def clean_kwargs(**kwargs): + ''' + Return a dict without any of the __pub* keys (or any other keys starting + with a dunder) from the kwargs dict passed into the execution module + functions. These keys are useful for tracking what was used to invoke + the function call, but they may not be desirable to have if passing the + kwargs forward wholesale. + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.args + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.clean_kwargs\' detected. 
This function has been ' + 'moved to \'salt.utils.args.clean_kwargs\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.args.clean_kwargs(**kwargs) + + +def invalid_kwargs(invalid_kwargs, raise_exc=True): + ''' + Raise a SaltInvocationError if invalid_kwargs is non-empty + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.args + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.invalid_kwargs\' detected. This function has ' + 'been moved to \'salt.utils.args.invalid_kwargs\' as of Salt Oxygen. ' + 'This warning will be removed in Salt Neon.' + ) + return salt.utils.args.invalid_kwargs(invalid_kwargs, raise_exc) + + +def shlex_split(s, **kwargs): + ''' + Only split if variable is a string + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.args + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.shlex_split\' detected. This function has been ' + 'moved to \'salt.utils.args.shlex_split\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.args.shlex_split(s, **kwargs) + + +def which(exe=None): + ''' + Python clone of /usr/bin/which + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.path + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.which\' detected. This function has been moved to ' + '\'salt.utils.path.which\' as of Salt Oxygen. This warning will be ' + 'removed in Salt Neon.' + ) + return salt.utils.path.which(exe) + + +def which_bin(exes): + ''' + Scan over some possible executables and return the first one that is found + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.path + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.which_bin\' detected. This function has been ' + 'moved to \'salt.utils.path.which_bin\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.path.which_bin(exes) + + +def path_join(*parts, **kwargs): + ''' + This functions tries to solve some issues when joining multiple absolute + paths on both *nix and windows platforms. + + See tests/unit/utils/test_path.py for some examples on what's being + talked about here. + + The "use_posixpath" kwarg can be be used to force joining using poxixpath, + which is useful for Salt fileserver paths on Windows masters. + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.path + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.path_join\' detected. This function has been ' + 'moved to \'salt.utils.path.join\' as of Salt Oxygen. This warning ' + 'will be removed in Salt Neon.' + ) + return salt.utils.path.join(*parts, **kwargs) + + +def rand_str(size=9999999999, hash_type=None): + ''' + Return a hash of a randomized data from random.SystemRandom() + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.hashutils + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.rand_str\' detected. This function has been ' + 'moved to \'salt.utils.hashutils.random_hash\' as of Salt Oxygen. ' + 'This warning will be removed in Salt Neon.' 
+ ) + return salt.utils.hashutils.random_hash(size, hash_type) + + +def is_windows(): + ''' + Simple function to return if a host is Windows or not + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.platform + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_windows\' detected. This function has been ' + 'moved to \'salt.utils.platform.is_windows\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.platform.is_windows() + + +def is_proxy(): + ''' + Return True if this minion is a proxy minion. + Leverages the fact that is_linux() and is_windows + both return False for proxies. + TODO: Need to extend this for proxies that might run on + other Unices + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.platform + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_proxy\' detected. This function has been ' + 'moved to \'salt.utils.platform.is_proxy\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.platform.is_proxy() + + +def is_linux(): + ''' + Simple function to return if a host is Linux or not. + Note for a proxy minion, we need to return something else + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.platform + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_linux\' detected. This function has been ' + 'moved to \'salt.utils.platform.is_linux\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.platform.is_linux() + + +def is_darwin(): + ''' + Simple function to return if a host is Darwin (macOS) or not + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.platform + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_darwin\' detected. This function has been ' + 'moved to \'salt.utils.platform.is_darwin\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.platform.is_darwin() + + +def is_sunos(): + ''' + Simple function to return if host is SunOS or not + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.platform + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_sunos\' detected. This function has been ' + 'moved to \'salt.utils.platform.is_sunos\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.platform.is_sunos() + + +def is_smartos(): + ''' + Simple function to return if host is SmartOS (Illumos) or not + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.platform + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_smartos\' detected. This function has been ' + 'moved to \'salt.utils.platform.is_smartos\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.platform.is_smartos() + + +def is_smartos_globalzone(): + ''' + Function to return if host is SmartOS (Illumos) global zone or not + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. 
+ import salt.utils.versions + import salt.utils.platform + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_smartos_globalzone\' detected. This function ' + 'has been moved to \'salt.utils.platform.is_smartos_globalzone\' as ' + 'of Salt Oxygen. This warning will be removed in Salt Neon.' + ) + return salt.utils.platform.is_smartos_globalzone() + + +def is_smartos_zone(): + ''' + Function to return if host is SmartOS (Illumos) and not the gz + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.platform + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_smartos_zone\' detected. This function has ' + 'been moved to \'salt.utils.platform.is_smartos_zone\' as of Salt ' + 'Oxygen. This warning will be removed in Salt Neon.' + ) + return salt.utils.platform.is_smartos_zone() + + +def is_freebsd(): + ''' + Simple function to return if host is FreeBSD or not + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.platform + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_freebsd\' detected. This function has been ' + 'moved to \'salt.utils.platform.is_freebsd\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.platform.is_freebsd() + + +def is_netbsd(): + ''' + Simple function to return if host is NetBSD or not + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.platform + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_netbsd\' detected. This function has been ' + 'moved to \'salt.utils.platform.is_netbsd\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.platform.is_netbsd() + + +def is_openbsd(): + ''' + Simple function to return if host is OpenBSD or not + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.platform + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_openbsd\' detected. This function has been ' + 'moved to \'salt.utils.platform.is_openbsd\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.platform.is_openbsd() + + +def is_aix(): + ''' + Simple function to return if host is AIX or not + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.platform + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_aix\' detected. This function has been moved to ' + '\'salt.utils.platform.is_aix\' as of Salt Oxygen. This warning will be ' + 'removed in Salt Neon.' + ) + return salt.utils.platform.is_aix() + + +def safe_rm(tgt): + ''' + Safely remove a file + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.files + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.safe_rm\' detected. This function has been moved to ' + '\'salt.utils.files.safe_rm\' as of Salt Oxygen. This warning will be ' + 'removed in Salt Neon.' + ) + return salt.utils.files.safe_rm(tgt) + + +@jinja_filter('is_empty') +def is_empty(filename): + ''' + Is a file empty? + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. 
+ import salt.utils.versions + import salt.utils.files + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.is_empty\' detected. This function has been moved to ' + '\'salt.utils.files.is_empty\' as of Salt Oxygen. This warning will be ' + 'removed in Salt Neon.' + ) + return salt.utils.files.is_empty(filename) + + +def fopen(*args, **kwargs): + ''' + Wrapper around open() built-in to set CLOEXEC on the fd. + + This flag specifies that the file descriptor should be closed when an exec + function is invoked; + When a file descriptor is allocated (as with open or dup), this bit is + initially cleared on the new file descriptor, meaning that descriptor will + survive into the new program after exec. + + NB! We still have small race condition between open and fcntl. + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.files + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.fopen\' detected. This function has been moved to ' + '\'salt.utils.files.fopen\' as of Salt Oxygen. This warning will be ' + 'removed in Salt Neon.' + ) + return salt.utils.files.fopen(*args, **kwargs) # pylint: disable=W8470 + + +@contextlib.contextmanager +def flopen(*args, **kwargs): + ''' + Shortcut for fopen with lock and context manager + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.files + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.flopen\' detected. This function has been moved to ' + '\'salt.utils.files.flopen\' as of Salt Oxygen. This warning will be ' + 'removed in Salt Neon.' + ) + return salt.utils.files.flopen(*args, **kwargs) + + +@contextlib.contextmanager +def fpopen(*args, **kwargs): + ''' + Shortcut for fopen with extra uid, gid and mode options. + + Supported optional Keyword Arguments: + + mode: explicit mode to set. Mode is anything os.chmod + would accept as input for mode. Works only on unix/unix + like systems. + + uid: the uid to set, if not set, or it is None or -1 no changes are + made. Same applies if the path is already owned by this + uid. Must be int. Works only on unix/unix like systems. + + gid: the gid to set, if not set, or it is None or -1 no changes are + made. Same applies if the path is already owned by this + gid. Must be int. Works only on unix/unix like systems. + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.files + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.fpopen\' detected. This function has been moved to ' + '\'salt.utils.files.fpopen\' as of Salt Oxygen. This warning will be ' + 'removed in Salt Neon.' + ) + return salt.utils.files.fpopen(*args, **kwargs) + + +def rm_rf(path): + ''' + Platform-independent recursive delete. Includes code from + http://stackoverflow.com/a/2656405 + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.files + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.rm_rf\' detected. This function has been moved to ' + '\'salt.utils.files.rm_rf\' as of Salt Oxygen. This warning will be ' + 'removed in Salt Neon.' + ) + return salt.utils.files.rm_rf(path) def mkstemp(*args, **kwargs): @@ -3484,12 +3255,178 @@ def mkstemp(*args, **kwargs): .. deprecated:: Oxygen ''' - warn_until( + # Late import to avoid circular import. 
+ import salt.utils.versions + import salt.utils.files + salt.utils.versions.warn_until( 'Neon', 'Use of \'salt.utils.mkstemp\' detected. This function has been moved to ' '\'salt.utils.files.mkstemp\' as of Salt Oxygen. This warning will be ' 'removed in Salt Neon.' ) - # Late import to avoid circular import. - import salt.utils.files return salt.utils.files.mkstemp(*args, **kwargs) + + +def str_version_to_evr(verstring): + ''' + Split the package version string into epoch, version and release. + Return this as tuple. + + The epoch is always not empty. The version and the release can be an empty + string if such a component could not be found in the version string. + + "2:1.0-1.2" => ('2', '1.0', '1.2) + "1.0" => ('0', '1.0', '') + "" => ('0', '', '') + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.pkg.rpm + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.str_version_to_evr\' detected. This function has ' + 'been moved to \'salt.utils.pkg.rpm.version_to_evr\' as of Salt ' + 'Oxygen. This warning will be removed in Salt Neon.' + ) + return salt.utils.pkg.rpm.version_to_evr(verstring) + + +def parse_docstring(docstring): + ''' + Parse a docstring into its parts. + + Currently only parses dependencies, can be extended to parse whatever is + needed. + + Parses into a dictionary: + { + 'full': full docstring, + 'deps': list of dependencies (empty list if none) + } + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.doc + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.parse_docstring\' detected. This function has ' + 'been moved to \'salt.utils.doc.parse_docstring\' as of Salt Oxygen. ' + 'This warning will be removed in Salt Neon.' + ) + return salt.utils.doc.parse_docstring(docstring) + + +def compare_versions(ver1='', oper='==', ver2='', cmp_func=None, ignore_epoch=False): + ''' + Compares two version numbers. Accepts a custom function to perform the + cmp-style version comparison, otherwise uses version_cmp(). + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.compare_versions\' detected. This function has ' + 'been moved to \'salt.utils.versions.compare\' as of Salt Oxygen. ' + 'This warning will be removed in Salt Neon.' + ) + return salt.utils.versions.compare(ver1=ver1, + oper=oper, + ver2=ver2, + cmp_func=cmp_func, + ignore_epoch=ignore_epoch) + + +def version_cmp(pkg1, pkg2, ignore_epoch=False): + ''' + Compares two version strings using salt.utils.versions.LooseVersion. This + is a fallback for providers which don't have a version comparison utility + built into them. Return -1 if version1 < version2, 0 if version1 == + version2, and 1 if version1 > version2. Return None if there was a problem + making the comparison. + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.version_cmp\' detected. This function has ' + 'been moved to \'salt.utils.versions.version_cmp\' as of Salt Oxygen. ' + 'This warning will be removed in Salt Neon.' 
+ ) + return salt.utils.versions.version_cmp(pkg1, + pkg2, + ignore_epoch=ignore_epoch) + + +def warn_until(version, + message, + category=DeprecationWarning, + stacklevel=None, + _version_info_=None, + _dont_call_warnings=False): + ''' + Helper function to raise a warning, by default, a ``DeprecationWarning``, + until the provided ``version``, after which, a ``RuntimeError`` will + be raised to remind the developers to remove the warning because the + target version has been reached. + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.warn_until\' detected. This function has ' + 'been moved to \'salt.utils.versions.warn_until\' as of Salt ' + 'Oxygen. This warning will be removed in Salt Neon.' + ) + return salt.utils.versions.warn_until(version, + message, + category=category, + stacklevel=stacklevel, + _version_info_=_version_info_, + _dont_call_warnings=_dont_call_warnings) + + +def kwargs_warn_until(kwargs, + version, + category=DeprecationWarning, + stacklevel=None, + _version_info_=None, + _dont_call_warnings=False): + ''' + Helper function to raise a warning (by default, a ``DeprecationWarning``) + when unhandled keyword arguments are passed to function, until the + provided ``version_info``, after which, a ``RuntimeError`` will be raised + to remind the developers to remove the ``**kwargs`` because the target + version has been reached. + This function is used to help deprecate unused legacy ``**kwargs`` that + were added to function parameters lists to preserve backwards compatibility + when removing a parameter. See + :ref:`the deprecation development docs ` + for the modern strategy for deprecating a function parameter. + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.kwargs_warn_until\' detected. This function has ' + 'been moved to \'salt.utils.versions.kwargs_warn_until\' as of Salt ' + 'Oxygen. This warning will be removed in Salt Neon.' + ) + return salt.utils.versions.kwargs_warn_until( + kwargs, + version, + category=category, + stacklevel=stacklevel, + _version_info_=_version_info_, + _dont_call_warnings=_dont_call_warnings) diff --git a/salt/utils/aggregation.py b/salt/utils/aggregation.py index bfd968bef8..b65eebe4ea 100644 --- a/salt/utils/aggregation.py +++ b/salt/utils/aggregation.py @@ -112,7 +112,7 @@ from copy import copy from salt.utils.odict import OrderedDict # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six __all__ = ['aggregate', 'Aggregate', 'Map', 'Scalar', 'Sequence'] diff --git a/salt/utils/args.py b/salt/utils/args.py index c7a49b3a29..8c55f845c6 100644 --- a/salt/utils/args.py +++ b/salt/utils/args.py @@ -6,13 +6,15 @@ from __future__ import absolute_import # Import python libs import re +import shlex import inspect # Import salt libs import salt.utils.jid +from salt.exceptions import SaltInvocationError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six if six.PY3: @@ -21,6 +23,42 @@ else: KWARG_REGEX = re.compile(r'^([^\d\W][\w.-]*)=(?!=)(.*)$') +def clean_kwargs(**kwargs): + ''' + Return a dict without any of the __pub* keys (or any other keys starting + with a dunder) from the kwargs dict passed into the execution module + functions. 
These keys are useful for tracking what was used to invoke + the function call, but they may not be desirable to have if passing the + kwargs forward wholesale. + ''' + ret = {} + for key, val in six.iteritems(kwargs): + if not key.startswith('__'): + ret[key] = val + return ret + + +def invalid_kwargs(invalid_kwargs, raise_exc=True): + ''' + Raise a SaltInvocationError if invalid_kwargs is non-empty + ''' + if invalid_kwargs: + if isinstance(invalid_kwargs, dict): + new_invalid = [ + '{0}={1}'.format(x, y) + for x, y in six.iteritems(invalid_kwargs) + ] + invalid_kwargs = new_invalid + msg = ( + 'The following keyword arguments are not valid: {0}' + .format(', '.join(invalid_kwargs)) + ) + if raise_exc: + raise SaltInvocationError(msg) + else: + return msg + + def condition_input(args, kwargs): ''' Return a single arg structure for the publisher to safely use @@ -220,3 +258,13 @@ def get_function_argspec(func, is_class_method=None): 'Cannot inspect argument list for \'{0}\''.format(func) ) return aspec + + +def shlex_split(s, **kwargs): + ''' + Only split if variable is a string + ''' + if isinstance(s, six.string_types): + return shlex.split(s, **kwargs) + else: + return s diff --git a/salt/utils/atomicfile.py b/salt/utils/atomicfile.py index 2aab01c31a..0d88773d46 100644 --- a/salt/utils/atomicfile.py +++ b/salt/utils/atomicfile.py @@ -13,7 +13,7 @@ import errno import time import random import shutil -import salt.ext.six as six +from salt.ext import six CAN_RENAME_OPEN_FILE = False diff --git a/salt/utils/boto.py b/salt/utils/boto.py index 27f580fec7..ee0a795ee6 100644 --- a/salt/utils/boto.py +++ b/salt/utils/boto.py @@ -43,11 +43,12 @@ from functools import partial from salt.loader import minion_mods # Import salt libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin from salt.exceptions import SaltInvocationError from salt.utils.versions import LooseVersion as _LooseVersion import salt.utils +import salt.utils.stringutils # Import third party libs # pylint: disable=import-error @@ -109,7 +110,7 @@ def _get_profile(service, region, key, keyid, profile): if keyid: hash_string = region + keyid + key if six.PY3: - hash_string = salt.utils.to_bytes(hash_string) + hash_string = salt.utils.stringutils.to_bytes(hash_string) cxkey = label + hashlib.md5(hash_string).hexdigest() else: cxkey = label + region diff --git a/salt/utils/boto3.py b/salt/utils/boto3.py index 1861c4d372..2d0fa12fc1 100644 --- a/salt/utils/boto3.py +++ b/salt/utils/boto3.py @@ -47,6 +47,7 @@ from salt.exceptions import SaltInvocationError from salt.utils.versions import LooseVersion as _LooseVersion from salt.ext import six import salt.utils +import salt.utils.stringutils # Import third party libs # pylint: disable=import-error @@ -131,7 +132,7 @@ def _get_profile(service, region, key, keyid, profile): if keyid: hash_string = region + keyid + key if six.PY3: - hash_string = salt.utils.to_bytes(hash_string) + hash_string = salt.utils.stringutils.to_bytes(hash_string) cxkey = label + hashlib.md5(hash_string).hexdigest() else: cxkey = label + region diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py index 654201e1ec..7203ca346a 100644 --- a/salt/utils/cloud.py +++ b/salt/utils/cloud.py @@ -7,7 +7,6 @@ Utility functions for salt.cloud from __future__ import absolute_import import errno import os -import sys import stat import codecs import shutil @@ -26,16 +25,6 @@ import re import uuid -# Let's import pwd and catch the 
ImportError. We'll raise it if this is not
-# Windows
-try:
-    import pwd
-except ImportError:
-    if not sys.platform.lower().startswith('win'):
-        # We can't use salt.utils.is_windows() from the import a little down
-        # because that will cause issues under windows at install time.
-        raise
-
 try:
     import salt.utils.smb
 
@@ -57,10 +46,12 @@ import salt.client
 import salt.config
 import salt.loader
 import salt.template
-import salt.utils
+import salt.utils  # Can be removed when pem_finger is moved
+import salt.utils.compat
 import salt.utils.event
 import salt.utils.files
-from salt.utils import vt
+import salt.utils.platform
+import salt.utils.vt
 from salt.utils.nb_popen import NonBlockingPopen
 from salt.utils.yamldumper import SafeOrderedDumper
 from salt.utils.validate.path import is_writeable
@@ -76,12 +67,20 @@ from salt.exceptions import (
     SaltCloudPasswordError
 )
 
-# Import third party libs
-import salt.ext.six as six
+# Import 3rd-party libs
+from salt.ext import six
 from salt.ext.six.moves import range  # pylint: disable=import-error,redefined-builtin,W0611
 from jinja2 import Template
 import yaml
 
+# Let's import pwd and catch the ImportError. We'll raise it if this is not
+# Windows. This import has to be below where we import salt.utils.platform!
+try:
+    import pwd
+except ImportError:
+    if not salt.utils.platform.is_windows():
+        raise
+
 try:
     import getpass
 
@@ -312,6 +311,11 @@ def bootstrap(vm_, opts):
         }
     }
 
+    if vm_.get('driver') == 'saltify':
+        saltify_driver = True
+    else:
+        saltify_driver = False
+
     key_filename = salt.config.get_cloud_config_value(
         'key_filename', vm_, opts, search_global=False,
         default=salt.config.get_cloud_config_value(
@@ -347,7 +351,7 @@ def bootstrap(vm_, opts):
 
     ret = {}
 
-    minion_conf = salt.utils.cloud.minion_config(opts, vm_)
+    minion_conf = minion_config(opts, vm_)
     deploy_script_code = os_script(
         salt.config.get_cloud_config_value(
             'os', vm_, opts, default='bootstrap-salt'
@@ -406,10 +410,7 @@ def bootstrap(vm_, opts):
         'tmp_dir': salt.config.get_cloud_config_value(
             'tmp_dir', vm_, opts, default='/tmp/.saltcloud'
         ),
-        'deploy_command': salt.config.get_cloud_config_value(
-            'deploy_command', vm_, opts,
-            default='/tmp/.saltcloud/deploy.sh',
-        ),
+        'vm_': vm_,
         'start_action': opts['start_action'],
         'parallel': opts['parallel'],
         'sock_dir': opts['sock_dir'],
@@ -439,6 +440,9 @@ def bootstrap(vm_, opts):
             'script_env', vm_, opts
         ),
         'minion_conf': minion_conf,
+        'force_minion_config': salt.config.get_cloud_config_value(
+            'force_minion_config', vm_, opts, default=False
+        ),
         'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
         'display_ssh_output': salt.config.get_cloud_config_value(
             'display_ssh_output', vm_, opts, default=True
@@ -452,9 +456,16 @@ def bootstrap(vm_, opts):
         'maxtries': salt.config.get_cloud_config_value(
             'wait_for_passwd_maxtries', vm_, opts, default=15
         ),
+        'preflight_cmds': salt.config.get_cloud_config_value(
+            'preflight_cmds', vm_, opts, default=[]
+        ),
+        'cloud_grains': {'driver': vm_['driver'],
+                         'provider': vm_['provider'],
+                         'profile': vm_['profile']
+                         }
     }
 
-    inline_script_kwargs = deploy_kwargs
+    inline_script_kwargs = deploy_kwargs.copy()  # make a copy at this point
 
     # forward any info about possible ssh gateway to deploy script
    # as some providers need also a 'gateway' configuration
@@ -466,7 +477,7 @@
         deploy_kwargs['make_master'] = True
         deploy_kwargs['master_pub'] = vm_['master_pub']
         deploy_kwargs['master_pem'] = vm_['master_pem']
-        master_conf = salt.utils.cloud.master_config(opts, vm_)
+        master_conf = 
master_config(opts, vm_) deploy_kwargs['master_conf'] = master_conf if master_conf.get('syndic_master', None): @@ -476,6 +487,9 @@ def bootstrap(vm_, opts): 'make_minion', vm_, opts, default=True ) + if saltify_driver: + deploy_kwargs['wait_for_passwd_maxtries'] = 0 # No need to wait/retry with Saltify + win_installer = salt.config.get_cloud_config_value( 'win_installer', vm_, opts ) @@ -484,7 +498,7 @@ def bootstrap(vm_, opts): 'smb_port', vm_, opts, default=445 ) deploy_kwargs['win_installer'] = win_installer - minion = salt.utils.cloud.minion_config(opts, vm_) + minion = minion_config(opts, vm_) deploy_kwargs['master'] = minion['master'] deploy_kwargs['username'] = salt.config.get_cloud_config_value( 'win_username', vm_, opts, default='Administrator' @@ -506,6 +520,8 @@ def bootstrap(vm_, opts): deploy_kwargs['winrm_verify_ssl'] = salt.config.get_cloud_config_value( 'winrm_verify_ssl', vm_, opts, default=True ) + if saltify_driver: + deploy_kwargs['port_timeout'] = 1 # No need to wait/retry with Saltify # Store what was used to the deploy the VM event_kwargs = copy.deepcopy(deploy_kwargs) @@ -813,21 +829,21 @@ def wait_for_winexesvc(host, port, username, password, timeout=900): log.debug('winexe connected...') return True log.debug('Return code was {0}'.format(ret_code)) - time.sleep(1) except socket.error as exc: log.debug('Caught exception in wait_for_winexesvc: {0}'.format(exc)) - time.sleep(1) - if time.time() - start > timeout: - log.error('winexe connection timed out: {0}'.format(timeout)) - return False - log.debug( - 'Retrying winexe connection to host {0} on port {1} ' - '(try {2})'.format( - host, - port, - try_count - ) + + if time.time() - start > timeout: + log.error('winexe connection timed out: {0}'.format(timeout)) + return False + log.debug( + 'Retrying winexe connection to host {0} on port {1} ' + '(try {2})'.format( + host, + port, + try_count ) + ) + time.sleep(1) def wait_for_winrm(host, port, username, password, timeout=900, use_ssl=True, verify=True): @@ -862,19 +878,19 @@ def wait_for_winrm(host, port, username, password, timeout=900, use_ssl=True, ve log.debug('WinRM session connected...') return s log.debug('Return code was {0}'.format(r.status_code)) - time.sleep(1) except WinRMTransportError as exc: log.debug('Caught exception in wait_for_winrm: {0}'.format(exc)) - if time.time() - start > timeout: - log.error('WinRM connection timed out: {0}'.format(timeout)) - return None - log.debug( - 'Retrying WinRM connection to host {0} on port {1} ' - '(try {2})'.format( - host, port, trycount - ) + + if time.time() - start > timeout: + log.error('WinRM connection timed out: {0}'.format(timeout)) + return None + log.debug( + 'Retrying WinRM connection to host {0} on port {1} ' + '(try {2})'.format( + host, port, trycount ) - time.sleep(1) + ) + time.sleep(1) def validate_windows_cred(host, @@ -895,7 +911,7 @@ def validate_windows_cred(host, host ) - for i in xrange(retries): + for i in range(retries): ret_code = win_cmd( cmd, logging_command=logging_cmd @@ -1223,20 +1239,26 @@ def deploy_script(host, sudo_password=None, sudo=False, tty=None, - deploy_command='/tmp/.saltcloud/deploy.sh', + vm_=None, opts=None, tmp_dir='/tmp/.saltcloud', file_map=None, master_sign_pub_file=None, + cloud_grains=None, + force_minion_config=False, **kwargs): ''' Copy a deploy script to a remote server, execute it, and remove it ''' if not isinstance(opts, dict): opts = {} + vm_ = vm_ or {} # if None, default to empty dict + cloud_grains = cloud_grains or {} tmp_dir = 
'{0}-{1}'.format(tmp_dir.rstrip('/'), uuid.uuid4())
-    deploy_command = os.path.join(tmp_dir, 'deploy.sh')
+    deploy_command = salt.config.get_cloud_config_value(
+        'deploy_command', vm_, opts,
+        default=os.path.join(tmp_dir, 'deploy.sh'))
     if key_filename is not None and not os.path.isfile(key_filename):
         raise SaltCloudConfigError(
             'The defined key_filename \'{0}\' does not exist'.format(
@@ -1248,9 +1270,11 @@ def deploy_script(host,
     if 'gateway' in kwargs:
         gateway = kwargs['gateway']
 
-    starttime = time.mktime(time.localtime())
-    log.debug('Deploying {0} at {1}'.format(host, starttime))
-
+    starttime = time.localtime()
+    log.debug('Deploying {0} at {1}'.format(
+        host,
+        time.strftime('%Y-%m-%d %H:%M:%S', starttime))
+    )
     known_hosts_file = kwargs.get('known_hosts_file', '/dev/null')
     hard_timeout = opts.get('hard_timeout', None)
 
@@ -1382,6 +1406,8 @@ def deploy_script(host,
                     salt_config_to_yaml(minion_grains),
                     ssh_kwargs
                 )
+            if cloud_grains and opts.get('enable_cloud_grains', True):
+                minion_conf['grains'] = {'salt-cloud': cloud_grains}
             ssh_file(
                 opts,
                 '{0}/minion'.format(tmp_dir),
@@ -1465,6 +1491,15 @@ def deploy_script(host,
                             'Can\'t set ownership for {0}'.format(
                                 preseed_minion_keys_tempdir))
 
+            # Run any pre-flight commands before running deploy scripts
+            preflight_cmds = kwargs.get('preflight_cmds', [])
+            for command in preflight_cmds:
+                cmd_ret = root_cmd(command, tty, sudo, **ssh_kwargs)
+                if cmd_ret:
+                    raise SaltCloudSystemExit(
+                        'Pre-flight command failed: \'{0}\''.format(command)
+                    )
+
             # The actual deploy script
             if script:
                 # got strange escaping issues with sudoer, going onto a
@@ -1478,7 +1513,8 @@ def deploy_script(host,
                         raise SaltCloudSystemExit(
                             'Can\'t set perms on {0}/deploy.sh'.format(tmp_dir))
 
-            newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
+            time_used = time.mktime(time.localtime()) - time.mktime(starttime)
+            newtimeout = timeout - time_used
             queue = None
             process = None
             # Consider this code experimental. It causes Salt Cloud to wait
@@ -1499,6 +1535,8 @@ def deploy_script(host,
             if script:
                 if 'bootstrap-salt' in script:
                     deploy_command += ' -c \'{0}\''.format(tmp_dir)
+                    if force_minion_config:
+                        deploy_command += ' -F'
                     if make_syndic is True:
                         deploy_command += ' -S'
                     if make_master is True:
@@ -1814,7 +1852,7 @@ def _exec_ssh_cmd(cmd, error_msg=None, allow_failure=False, **kwargs):
     password_retries = kwargs.get('password_retries', 3)
     try:
         stdout, stderr = None, None
-        proc = vt.Terminal(
+        proc = salt.utils.vt.Terminal(
             cmd,
             shell=True,
             log_stdout=True,
@@ -1854,7 +1892,7 @@ def _exec_ssh_cmd(cmd, error_msg=None, allow_failure=False, **kwargs):
                 )
             )
         return proc.exitstatus
-    except vt.TerminalException as err:
+    except salt.utils.vt.TerminalException as err:
         trace = traceback.format_exc()
         log.error(error_msg.format(cmd, err, trace))
     finally:
@@ -2005,7 +2043,7 @@ def sftp_file(dest_path, contents=None, kwargs=None, local_file=None):
     if contents is not None:
         try:
             tmpfd, file_to_upload = tempfile.mkstemp()
-            if isinstance(contents, str):
+            if isinstance(contents, six.string_types):
                 os.write(tmpfd, contents.encode(__salt_system_encoding__))
             else:
                 os.write(tmpfd, contents)
@@ -2768,6 +2806,11 @@ def cache_nodes_ip(opts, base=None):
     Retrieve a list of all nodes from Salt Cloud cache, and any associated IP
     addresses. Returns a dict.
     '''
+    salt.utils.warn_until(
+        'Fluorine',
+        'This function is incomplete and non-functional '
+        'and will be removed in Salt Fluorine.'
+    )
     if base is None:
         base = opts['cachedir']
 
@@ -3045,7 +3088,7 @@ def diff_node_cache(prov_dir, node, new_data, opts):
     # Perform a simple diff between the old and the new data, and if it differs,
     # return both dicts.
     # TODO: Return an actual diff
-    diff = cmp(new_data, cache_data)
+    diff = salt.utils.compat.cmp(new_data, cache_data)
     if diff != 0:
         fire_event(
             'event',
diff --git a/salt/utils/compat.py b/salt/utils/compat.py
index b97820db92..09c5cc2828 100644
--- a/salt/utils/compat.py
+++ b/salt/utils/compat.py
@@ -46,3 +46,15 @@ def deepcopy_bound(name):
     finally:
         copy._deepcopy_dispatch = pre_dispatch
     return ret
+
+
+def cmp(x, y):
+    '''
+    Compatibility helper function to replace the ``cmp`` function from Python 2. The
+    ``cmp`` function is no longer available in Python 3.
+
+    cmp(x, y) -> integer
+
+    Return negative if x<y, zero if x==y, positive if x>y.
+    '''
+    return (x > y) - (x < y)
diff --git a/salt/utils/configcomparer.py b/salt/utils/configcomparer.py
index ffe1723bd5..b7a0f279ff 100644
--- a/salt/utils/configcomparer.py
+++ b/salt/utils/configcomparer.py
@@ -8,7 +8,7 @@ changes in a way that can be easily reported in a state.
 from __future__ import absolute_import
 
 # Import Salt libs
-import salt.ext.six as six
+from salt.ext import six
 
 
 def compare_and_update_config(config, update_config, changes, namespace=''):
diff --git a/salt/utils/context.py b/salt/utils/context.py
index ce1cd9e77e..1073e2df4e 100644
--- a/salt/utils/context.py
+++ b/salt/utils/context.py
@@ -17,7 +17,7 @@ import threading
 import collections
 from contextlib import contextmanager
 
-import salt.ext.six as six
+from salt.ext import six
 
 
 @contextmanager
@@ -205,7 +205,8 @@ class NamespacedDictWrapper(collections.MutableMapping, dict):
         self.pre_keys = pre_keys
         if override_name:
             self.__class__.__module__ = 'salt'
-            self.__class__.__name__ = override_name
+            # __name__ can't be assigned a unicode
+            self.__class__.__name__ = str(override_name)  # future lint: disable=non-unicode-string
         super(NamespacedDictWrapper, self).__init__(self._dict())
 
     def _dict(self):
diff --git a/salt/utils/decorators/__init__.py b/salt/utils/decorators/__init__.py
index 9c6c2a99d8..06276b0020 100644
--- a/salt/utils/decorators/__init__.py
+++ b/salt/utils/decorators/__init__.py
@@ -12,14 +12,12 @@ from functools import wraps
 from collections import defaultdict
 
 # Import salt libs
-import salt.utils
 import salt.utils.args
-from salt.exceptions import CommandNotFoundError, CommandExecutionError, SaltConfigurationError
-from salt.version import SaltStackVersion, __saltstack_version__
+from salt.exceptions import CommandExecutionError, SaltConfigurationError
 from salt.log import LOG_LEVELS
 
 # Import 3rd-party libs
-import salt.ext.six as six
+from salt.ext import six
 
 log = logging.getLogger(__name__)
 
@@ -154,7 +152,7 @@ def timing(function):
     @wraps(function)
     def wrapped(*args, **kwargs):
         start_time = time.time()
-        ret = function(*args, **salt.utils.clean_kwargs(**kwargs))
+        ret = function(*args, **salt.utils.args.clean_kwargs(**kwargs))
         end_time = time.time()
         if function.__module__.startswith('salt.loaded.int.'):
             mod_name = function.__module__[16:]
@@ -171,67 +169,6 @@ def timing(function):
     return wrapped
 
 
-def which(exe):
-    '''
-    Decorator wrapper for salt.utils.which
-    '''
-    def wrapper(function):
-        def wrapped(*args, **kwargs):
-            if salt.utils.which(exe) is None:
-                raise CommandNotFoundError(
-                    'The \'{0}\' binary was not found in $PATH.'.format(exe)
-                )
-            return function(*args, **kwargs)
-        return identical_signature_wrapper(function, wrapped)
-    return wrapper
-
-
-def 
which_bin(exes): - ''' - Decorator wrapper for salt.utils.which_bin - ''' - def wrapper(function): - def wrapped(*args, **kwargs): - if salt.utils.which_bin(exes) is None: - raise CommandNotFoundError( - 'None of provided binaries({0}) was not found ' - 'in $PATH.'.format( - ['\'{0}\''.format(exe) for exe in exes] - ) - ) - return function(*args, **kwargs) - return identical_signature_wrapper(function, wrapped) - return wrapper - - -def identical_signature_wrapper(original_function, wrapped_function): - ''' - Return a function with identical signature as ``original_function``'s which - will call the ``wrapped_function``. - ''' - context = {'__wrapped__': wrapped_function} - function_def = compile( - 'def {0}({1}):\n' - ' return __wrapped__({2})'.format( - # Keep the original function name - original_function.__name__, - # The function signature including defaults, i.e., 'timeout=1' - inspect.formatargspec( - *salt.utils.args.get_function_argspec(original_function) - )[1:-1], - # The function signature without the defaults - inspect.formatargspec( - formatvalue=lambda val: '', - *salt.utils.args.get_function_argspec(original_function) - )[1:-1] - ), - '', - 'exec' - ) - six.exec_(function_def, context) - return wraps(original_function)(context[original_function.__name__]) - - def memoize(func): ''' Memoize aka cache the return output of a function @@ -251,9 +188,8 @@ def memoize(func): str_args.append(str(arg)) else: str_args.append(arg) - args = str_args - args_ = ','.join(list(args) + ['{0}={1}'.format(k, kwargs[k]) for k in sorted(kwargs)]) + args_ = ','.join(list(str_args) + ['{0}={1}'.format(k, kwargs[k]) for k in sorted(kwargs)]) if args_ not in cache: cache[args_] = func(*args, **kwargs) return cache[args_] @@ -278,7 +214,7 @@ class _DeprecationDecorator(object): :param version: Expiration version :return: ''' - + from salt.version import SaltStackVersion, __saltstack_version__ self._globals = globals self._exp_version_name = version self._exp_version = SaltStackVersion.from_name(self._exp_version_name) @@ -542,8 +478,14 @@ class _WithDeprecated(_DeprecationDecorator): f_name=function.__name__)) opts = self._globals.get('__opts__', '{}') - use_deprecated = full_name in opts.get(self.CFG_USE_DEPRECATED, list()) - use_superseded = full_name in opts.get(self.CFG_USE_SUPERSEDED, list()) + pillar = self._globals.get('__pillar__', '{}') + + use_deprecated = (full_name in opts.get(self.CFG_USE_DEPRECATED, list()) or + full_name in pillar.get(self.CFG_USE_DEPRECATED, list())) + + use_superseded = (full_name in opts.get(self.CFG_USE_SUPERSEDED, list()) or + full_name in pillar.get(self.CFG_USE_SUPERSEDED, list())) + if use_deprecated and use_superseded: raise SaltConfigurationError("Function '{0}' is mentioned both in deprecated " "and superseded sections. 
Please remove any of that.".format(full_name)) @@ -565,8 +507,11 @@ class _WithDeprecated(_DeprecationDecorator): f_name=self._orig_f_name) return func_path in self._globals.get('__opts__').get( + self.CFG_USE_DEPRECATED, list()) or func_path in self._globals.get('__pillar__').get( self.CFG_USE_DEPRECATED, list()) or (self._policy == self.OPT_IN and not (func_path in self._globals.get('__opts__', {}).get( + self.CFG_USE_SUPERSEDED, list())) + and not (func_path in self._globals.get('__pillar__', {}).get( self.CFG_USE_SUPERSEDED, list()))), func_path def __call__(self, function): @@ -641,78 +586,3 @@ def ignores_kwargs(*kwarg_names): return fn(*args, **kwargs_filtered) return __ignores_kwargs return _ignores_kwargs - - -class JinjaFilter(object): - ''' - This decorator is used to specify that a function is to be loaded as a - Jinja filter. - ''' - salt_jinja_filters = {} - - def __init__(self, name=None): - ''' - ''' - self.name = name - - def __call__(self, function): - ''' - ''' - name = self.name or function.__name__ - if name not in self.salt_jinja_filters: - log.debug('Marking "{0}" as a jinja filter'.format(name)) - self.salt_jinja_filters[name] = function - return function - - -jinja_filter = JinjaFilter - - -class JinjaTest(object): - ''' - This decorator is used to specify that a function is to be loaded as a - Jinja test. - ''' - salt_jinja_tests = {} - - def __init__(self, name=None): - ''' - ''' - self.name = name - - def __call__(self, function): - ''' - ''' - name = self.name or function.__name__ - if name not in self.salt_jinja_tests: - log.debug('Marking "{0}" as a jinja test'.format(name)) - self.salt_jinja_tests[name] = function - return function - - -jinja_test = JinjaTest - - -class JinjaGlobal(object): - ''' - This decorator is used to specify that a function is to be loaded as a - Jinja global. - ''' - salt_jinja_globals = {} - - def __init__(self, name=None): - ''' - ''' - self.name = name - - def __call__(self, function): - ''' - ''' - name = self.name or function.__name__ - if name not in self.salt_jinja_globals: - log.debug('Marking "{0}" as a jinja global'.format(name)) - self.salt_jinja_globals[name] = function - return function - - -jinja_global = JinjaGlobal diff --git a/salt/utils/decorators/jinja.py b/salt/utils/decorators/jinja.py new file mode 100644 index 0000000000..512eb181a0 --- /dev/null +++ b/salt/utils/decorators/jinja.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +''' +Jinja-specific decorators +''' +from __future__ import absolute_import + +# Import Python libs +import logging + +log = logging.getLogger(__name__) + + +class JinjaFilter(object): + ''' + This decorator is used to specify that a function is to be loaded as a + Jinja filter. + ''' + salt_jinja_filters = {} + + def __init__(self, name=None): + ''' + ''' + self.name = name + + def __call__(self, function): + ''' + ''' + name = self.name or function.__name__ + if name not in self.salt_jinja_filters: + log.debug(u'Marking \'%s\' as a jinja filter', name) + self.salt_jinja_filters[name] = function + return function + + +jinja_filter = JinjaFilter + + +class JinjaTest(object): + ''' + This decorator is used to specify that a function is to be loaded as a + Jinja test. 
+ ''' + salt_jinja_tests = {} + + def __init__(self, name=None): + ''' + ''' + self.name = name + + def __call__(self, function): + ''' + ''' + name = self.name or function.__name__ + if name not in self.salt_jinja_tests: + log.debug('Marking \'%s\' as a jinja test', name) + self.salt_jinja_tests[name] = function + return function + + +jinja_test = JinjaTest + + +class JinjaGlobal(object): + ''' + This decorator is used to specify that a function is to be loaded as a + Jinja global. + ''' + salt_jinja_globals = {} + + def __init__(self, name=None): + ''' + ''' + self.name = name + + def __call__(self, function): + ''' + ''' + name = self.name or function.__name__ + if name not in self.salt_jinja_globals: + log.debug('Marking "{0}" as a jinja global'.format(name)) + self.salt_jinja_globals[name] = function + return function + + +jinja_global = JinjaGlobal diff --git a/salt/utils/decorators/path.py b/salt/utils/decorators/path.py new file mode 100644 index 0000000000..816d75785b --- /dev/null +++ b/salt/utils/decorators/path.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +''' +Decorators for salt.utils.path +''' +from __future__ import absolute_import + +# Import Salt libs +import salt.utils.path +from salt.exceptions import CommandNotFoundError +from salt.utils.decorators.signature import identical_signature_wrapper + + +def which(exe): + ''' + Decorator wrapper for salt.utils.path.which + ''' + def wrapper(function): + def wrapped(*args, **kwargs): + if salt.utils.path.which(exe) is None: + raise CommandNotFoundError( + 'The \'{0}\' binary was not found in $PATH.'.format(exe) + ) + return function(*args, **kwargs) + return identical_signature_wrapper(function, wrapped) + return wrapper + + +def which_bin(exes): + ''' + Decorator wrapper for salt.utils.path.which_bin + ''' + def wrapper(function): + def wrapped(*args, **kwargs): + if salt.utils.path.which_bin(exes) is None: + raise CommandNotFoundError( + 'None of provided binaries({0}) was not found ' + 'in $PATH.'.format( + ['\'{0}\''.format(exe) for exe in exes] + ) + ) + return function(*args, **kwargs) + return identical_signature_wrapper(function, wrapped) + return wrapper diff --git a/salt/utils/decorators/signature.py b/salt/utils/decorators/signature.py new file mode 100644 index 0000000000..a9e7662618 --- /dev/null +++ b/salt/utils/decorators/signature.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- + +# Import Python libs +from __future__ import absolute_import +import inspect +from functools import wraps + +# Import Salt libs +import salt.utils.args + +# Import 3rd-party libs +from salt.ext import six + + +def identical_signature_wrapper(original_function, wrapped_function): + ''' + Return a function with identical signature as ``original_function``'s which + will call the ``wrapped_function``. 
+ ''' + context = {'__wrapped__': wrapped_function} + function_def = compile( + 'def {0}({1}):\n' + ' return __wrapped__({2})'.format( + # Keep the original function name + original_function.__name__, + # The function signature including defaults, i.e., 'timeout=1' + inspect.formatargspec( + *salt.utils.args.get_function_argspec(original_function) + )[1:-1], + # The function signature without the defaults + inspect.formatargspec( + formatvalue=lambda val: '', + *salt.utils.args.get_function_argspec(original_function) + )[1:-1] + ), + '', + 'exec' + ) + six.exec_(function_def, context) + return wraps(original_function)(context[original_function.__name__]) diff --git a/salt/utils/dictupdate.py b/salt/utils/dictupdate.py index 1d744dcc85..6687a411ab 100644 --- a/salt/utils/dictupdate.py +++ b/salt/utils/dictupdate.py @@ -11,7 +11,7 @@ import collections # Import 3rd-party libs import copy import logging -import salt.ext.six as six +from salt.ext import six from salt.serializers.yamlex import merge_recursive as _yamlex_merge_recursive log = logging.getLogger(__name__) diff --git a/salt/utils/dns.py b/salt/utils/dns.py index 2ac7034578..e741b0e20e 100644 --- a/salt/utils/dns.py +++ b/salt/utils/dns.py @@ -11,28 +11,30 @@ dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import -# Python + +# Import Python libs import base64 import binascii import hashlib import itertools +import logging import random -import socket import shlex +import socket import ssl import string -from salt.ext.six.moves import map, zip # pylint: disable=redefined-builtin + +# Import Salt libs +import salt.utils.files +import salt.utils.network +import salt.utils.path +import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict -# Salt -import salt.modules.cmdmod -import salt.utils -import salt.utils.files -import salt.utils.network +# Import 3rd-party libs +from salt.ext.six.moves import map, zip # pylint: disable=redefined-builtin -# Debug & Logging -import logging # Integrations try: @@ -40,10 +42,10 @@ try: HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False -HAS_DIG = salt.utils.which('dig') is not None -HAS_DRILL = salt.utils.which('drill') is not None -HAS_HOST = salt.utils.which('host') is not None -HAS_NSLOOKUP = salt.utils.which('nslookup') is not None +HAS_DIG = salt.utils.path.which('dig') is not None +HAS_DRILL = salt.utils.path.which('drill') is not None +HAS_HOST = salt.utils.path.which('host') is not None +HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all diff --git a/salt/utils/doc.py b/salt/utils/doc.py index 292f46c7fd..f28b2f3c7f 100644 --- a/salt/utils/doc.py +++ b/salt/utils/doc.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import import re -import salt.ext.six as six +from salt.ext import six def strip_rst(docs): diff --git a/salt/utils/docker/__init__.py b/salt/utils/docker/__init__.py index e186d639d7..530066f6ed 100644 --- a/salt/utils/docker/__init__.py +++ b/salt/utils/docker/__init__.py @@ -13,12 +13,13 @@ import os # Import Salt libs import salt.utils +import salt.utils.args import salt.utils.docker.translate from salt.exceptions import CommandExecutionError, SaltInvocationError from salt.utils.args import get_function_argspec as _argspec # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six try: import docker @@ -174,7 +175,7 @@ def translate_input(**kwargs): have their translation skipped. 
Optionally, skip_translate can be set to True to skip *all* translation. ''' - kwargs = salt.utils.clean_kwargs(**kwargs) + kwargs = salt.utils.args.clean_kwargs(**kwargs) invalid = {} collisions = [] diff --git a/salt/utils/docker/translate.py b/salt/utils/docker/translate.py index b6d8717316..9436fa3599 100644 --- a/salt/utils/docker/translate.py +++ b/salt/utils/docker/translate.py @@ -14,7 +14,7 @@ import salt.utils.network from salt.exceptions import SaltInvocationError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range, zip # pylint: disable=import-error,redefined-builtin log = logging.getLogger(__name__) @@ -387,7 +387,7 @@ def dns(val, **kwargs): def domainname(val, **kwargs): # pylint: disable=unused-argument - return _translate_stringlist(val) + return _translate_str(val) def entrypoint(val, **kwargs): # pylint: disable=unused-argument diff --git a/salt/utils/etcd_util.py b/salt/utils/etcd_util.py index d90466bbc8..30b6a4b2cc 100644 --- a/salt/utils/etcd_util.py +++ b/salt/utils/etcd_util.py @@ -56,7 +56,7 @@ from __future__ import absolute_import import logging # Import salt libs -import salt.ext.six as six +from salt.ext import six from salt.exceptions import CommandExecutionError # Import third party libs diff --git a/salt/utils/event.py b/salt/utils/event.py index 0b64ee1833..c7e8b457a3 100644 --- a/salt/utils/event.py +++ b/salt/utils/event.py @@ -59,12 +59,13 @@ import fnmatch import hashlib import logging import datetime +import sys from collections import MutableMapping from multiprocessing.util import Finalize from salt.ext.six.moves import range # Import third party libs -import salt.ext.six as six +from salt.ext import six import tornado.ioloop import tornado.iostream @@ -75,7 +76,9 @@ import salt.utils import salt.utils.async import salt.utils.cache import salt.utils.dicttrim +import salt.utils.platform import salt.utils.process +import salt.utils.stringutils import salt.utils.zeromq import salt.log.setup import salt.defaults.exitcodes @@ -248,7 +251,7 @@ class SaltEvent(object): else: self.opts['sock_dir'] = sock_dir - if salt.utils.is_windows() and 'ipc_mode' not in opts: + if salt.utils.platform.is_windows() and 'ipc_mode' not in opts: self.opts['ipc_mode'] = 'tcp' self.puburi, self.pulluri = self.__load_uri(sock_dir, node) self.pending_tags = [] @@ -299,7 +302,7 @@ class SaltEvent(object): hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. 
- id_hash = hash_type(salt.utils.to_bytes(self.opts['id'])).hexdigest()[:10] + id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] puburi = os.path.join( sock_dir, 'minion_event_{0}_pub.ipc'.format(id_hash) @@ -432,8 +435,8 @@ class SaltEvent(object): mtag, sep, mdata = raw.partition(TAGEND) # split tag from data data = serial.loads(mdata) else: - mtag, sep, mdata = raw.partition(salt.utils.to_bytes(TAGEND)) # split tag from data - mtag = salt.utils.to_str(mtag) + mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data + mtag = salt.utils.stringutils.to_str(mtag) data = serial.loads(mdata, encoding='utf-8') return mtag, data @@ -729,10 +732,10 @@ class SaltEvent(object): event = '{0}{1}{2}'.format(tag, tagend, serialized_data) else: event = b''.join([ - salt.utils.to_bytes(tag), - salt.utils.to_bytes(tagend), + salt.utils.stringutils.to_bytes(tag), + salt.utils.stringutils.to_bytes(tagend), serialized_data]) - msg = salt.utils.to_bytes(event, 'utf-8') + msg = salt.utils.stringutils.to_bytes(event, 'utf-8') if self._run_io_loop_sync: with salt.utils.async.current_ioloop(self.io_loop): try: @@ -949,7 +952,7 @@ class AsyncEventPublisher(object): hash_type = getattr(hashlib, self.opts['hash_type']) # Only use the first 10 chars to keep longer hashes from exceeding the # max socket path length. - id_hash = hash_type(salt.utils.to_bytes(self.opts['id'])).hexdigest()[:10] + id_hash = hash_type(salt.utils.stringutils.to_bytes(self.opts['id'])).hexdigest()[:10] epub_sock_path = os.path.join( self.opts['sock_dir'], 'minion_event_{0}_pub.ipc'.format(id_hash) @@ -1159,6 +1162,16 @@ class EventReturn(salt.utils.process.SignalHandlingMultiprocessingProcess): A dedicated process which listens to the master event bus and queues and forwards events to the specified returner. ''' + def __new__(cls, *args, **kwargs): + if sys.platform.startswith('win'): + # This is required for Windows. On Linux, when a process is + # forked, the module namespace is copied and the current process + # gets all of sys.modules from where the fork happens. This is not + # the case for Windows. 
+ import salt.minion + instance = super(EventReturn, cls).__new__(cls, *args, **kwargs) + return instance + def __init__(self, opts, log_queue=None): ''' Initialize the EventReturn system diff --git a/salt/utils/filebuffer.py b/salt/utils/filebuffer.py index 8d95477078..eefc1a3e03 100644 --- a/salt/utils/filebuffer.py +++ b/salt/utils/filebuffer.py @@ -11,7 +11,7 @@ from __future__ import absolute_import # Import salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.files from salt.exceptions import SaltException diff --git a/salt/utils/files.py b/salt/utils/files.py index a5c1976f2d..3e68944046 100644 --- a/salt/utils/files.py +++ b/salt/utils/files.py @@ -14,15 +14,15 @@ import tempfile import time # Import Salt libs -import salt.utils +import salt.utils # Can be removed when backup_minion is moved +import salt.utils.path +import salt.utils.platform import salt.modules.selinux -import salt.ext.six as six from salt.exceptions import CommandExecutionError, FileLockError, MinionError -from salt.utils.decorators import jinja_filter +from salt.utils.decorators.jinja import jinja_filter # Import 3rd-party libs -from stat import S_IMODE - +from salt.ext import six try: import fcntl HAS_FCNTL = True @@ -78,7 +78,7 @@ def recursive_copy(source, dest): (identical to cp -r on a unix machine) ''' for root, _, files in os.walk(source): - path_from_source = root.replace(source, '').lstrip('/') + path_from_source = root.replace(source, '').lstrip(os.sep) target_directory = os.path.join(dest, path_from_source) if not os.path.exists(target_directory): os.makedirs(target_directory) @@ -117,7 +117,7 @@ def copyfile(source, dest, backup_mode='', cachedir=''): # Get current file stats to they can be replicated after the new file is # moved to the destination path. fstat = None - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): try: fstat = os.stat(dest) except OSError: @@ -127,7 +127,7 @@ def copyfile(source, dest, backup_mode='', cachedir=''): os.chown(dest, fstat.st_uid, fstat.st_gid) os.chmod(dest, fstat.st_mode) # If SELINUX is available run a restorecon on the file - rcon = salt.utils.which('restorecon') + rcon = salt.utils.path.which('restorecon') if rcon: policy = False try: @@ -270,7 +270,7 @@ def set_umask(mask): ''' Temporarily set the umask and restore once the contextmanager exits ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Don't attempt on Windows yield else: @@ -296,7 +296,7 @@ def fopen(*args, **kwargs): ''' binary = None # ensure 'binary' mode is always used on Windows in Python 2 - if ((six.PY2 and salt.utils.is_windows() and 'binary' not in kwargs) or + if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or kwargs.pop('binary', False)): if len(args) > 1: args = list(args) @@ -393,7 +393,7 @@ def fpopen(*args, **kwargs): os.chown(path, uid, gid) if mode is not None: - mode_part = S_IMODE(d_stat.st_mode) + mode_part = stat.S_IMODE(d_stat.st_mode) if mode_part != mode: os.chmod(path, (d_stat.st_mode ^ mode_part) | mode) @@ -426,7 +426,7 @@ def rm_rf(path): Usage : `shutil.rmtree(path, onerror=onerror)` ''' - if salt.utils.is_windows() and not os.access(path, os.W_OK): + if salt.utils.platform.is_windows() and not os.access(path, os.W_OK): # Is the error an access error ? os.chmod(path, stat.S_IWUSR) func(path) @@ -457,6 +457,6 @@ def is_fcntl_available(check_sunos=False): If ``check_sunos`` is passed as ``True`` an additional check to see if host is SunOS is also made. 
For additional information see: http://goo.gl/159FF8 ''' - if check_sunos and salt.utils.is_sunos(): + if check_sunos and salt.utils.platform.is_sunos(): return False return HAS_FCNTL diff --git a/salt/utils/find.py b/salt/utils/find.py index 67c9820d4d..565ee996c3 100644 --- a/salt/utils/find.py +++ b/salt/utils/find.py @@ -102,10 +102,12 @@ except ImportError: pass # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Import salt libs import salt.utils +import salt.utils.args +import salt.utils.stringutils import salt.defaults.exitcodes from salt.utils.filebuffer import BufferedReader @@ -561,8 +563,8 @@ class ExecOption(Option): def execute(self, fullpath, fstat, test=False): try: command = self.command.replace('{}', fullpath) - print(salt.utils.shlex_split(command)) - p = Popen(salt.utils.shlex_split(command), + print(salt.utils.args.shlex_split(command)) + p = Popen(salt.utils.args.shlex_split(command), stdout=PIPE, stderr=PIPE) (out, err) = p.communicate() @@ -570,8 +572,8 @@ class ExecOption(Option): log.error( 'Error running command: {0}\n\n{1}'.format( command, - salt.utils.to_str(err))) - return "{0}:\n{1}\n".format(command, salt.utils.to_str(out)) + salt.utils.stringutils.to_str(err))) + return "{0}:\n{1}\n".format(command, salt.utils.stringutils.to_str(out)) except Exception as e: log.error( diff --git a/salt/utils/fsutils.py b/salt/utils/fsutils.py index 40d31642d2..049ab6c0a2 100644 --- a/salt/utils/fsutils.py +++ b/salt/utils/fsutils.py @@ -17,7 +17,7 @@ import salt.utils.files from salt.exceptions import CommandExecutionError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py index fc200c17f8..fa90b7c879 100644 --- a/salt/utils/gitfs.py +++ b/salt/utils/gitfs.py @@ -23,7 +23,11 @@ from datetime import datetime import salt.utils import salt.utils.files import salt.utils.itertools +import salt.utils.path +import salt.utils.platform +import salt.utils.stringutils import salt.utils.url +import salt.utils.versions import salt.fileserver from salt.config import DEFAULT_MASTER_OPTS as __DEFAULT_MASTER_OPTS from salt.utils.odict import OrderedDict @@ -38,7 +42,7 @@ from salt.utils.event import tagify from salt.utils.versions import LooseVersion as _LooseVersion # Import third party libs -import salt.ext.six as six +from salt.ext import six VALID_PROVIDERS = ('pygit2', 'gitpython') VALID_REF_TYPES = __DEFAULT_MASTER_OPTS['gitfs_ref_types'] @@ -327,7 +331,7 @@ class GitProvider(object): for item in ('env_whitelist', 'env_blacklist'): val = getattr(self, item, None) if val: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Neon', 'The gitfs_{0} config option (and {0} per-remote config ' 'option) have been renamed to gitfs_salt{0} (and ' @@ -370,8 +374,8 @@ class GitProvider(object): else: self.hash = hash_type(self.id).hexdigest() self.cachedir_basename = getattr(self, 'name', self.hash) - self.cachedir = salt.utils.path_join(cache_root, self.cachedir_basename) - self.linkdir = salt.utils.path_join(cache_root, + self.cachedir = salt.utils.path.join(cache_root, self.cachedir_basename) + self.linkdir = salt.utils.path.join(cache_root, 'links', self.cachedir_basename) try: @@ -424,7 +428,7 @@ class GitProvider(object): return ret def _get_lock_file(self, lock_type='update'): - return salt.utils.path_join(self.gitdir, lock_type + '.lk') + return salt.utils.path.join(self.gitdir, lock_type + '.lk') @classmethod def add_conf_overlay(cls, 
name): @@ -505,7 +509,7 @@ class GitProvider(object): # No need to pass an environment to self.root() here since per-saltenv # configuration is a gitfs-only feature and check_root() is not used # for gitfs. - root_dir = salt.utils.path_join(self.cachedir, self.root()).rstrip(os.sep) + root_dir = salt.utils.path.join(self.cachedir, self.root()).rstrip(os.sep) if os.path.isdir(root_dir): return root_dir log.error( @@ -522,7 +526,7 @@ class GitProvider(object): cmd_str = 'git remote prune origin' cmd = subprocess.Popen( shlex.split(cmd_str), - close_fds=not salt.utils.is_windows(), + close_fds=not salt.utils.platform.is_windows(), cwd=os.path.dirname(self.gitdir), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) @@ -616,7 +620,7 @@ class GitProvider(object): cmd_str = 'git config --unset-all remote.origin.fetch' cmd = subprocess.Popen( shlex.split(cmd_str), - close_fds=not salt.utils.is_windows(), + close_fds=not salt.utils.platform.is_windows(), cwd=os.path.dirname(self.gitdir), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) @@ -1033,7 +1037,7 @@ class GitPython(GitProvider): log.error(_INVALID_REPO.format(self.cachedir, self.url, self.role)) return new - self.gitdir = salt.utils.path_join(self.repo.working_dir, '.git') + self.gitdir = salt.utils.path.join(self.repo.working_dir, '.git') if not self.repo.remotes: try: @@ -1046,17 +1050,17 @@ class GitPython(GitProvider): else: new = True - try: - ssl_verify = self.repo.git.config('--get', 'http.sslVerify') - except git.exc.GitCommandError: - ssl_verify = '' - desired_ssl_verify = str(self.ssl_verify).lower() - if ssl_verify != desired_ssl_verify: - self.repo.git.config('http.sslVerify', desired_ssl_verify) + try: + ssl_verify = self.repo.git.config('--get', 'http.sslVerify') + except git.exc.GitCommandError: + ssl_verify = '' + desired_ssl_verify = str(self.ssl_verify).lower() + if ssl_verify != desired_ssl_verify: + self.repo.git.config('http.sslVerify', desired_ssl_verify) - # Ensure that refspecs for the "origin" remote are set up as configured - if hasattr(self, 'refspecs'): - self.configure_refspecs() + # Ensure that refspecs for the "origin" remote are set up as configured + if hasattr(self, 'refspecs'): + self.configure_refspecs() return new @@ -1076,7 +1080,7 @@ class GitPython(GitProvider): relpath = lambda path: os.path.relpath(path, self.root(tgt_env)) else: relpath = lambda path: path - add_mountpoint = lambda path: salt.utils.path_join( + add_mountpoint = lambda path: salt.utils.path.join( self.mountpoint(tgt_env), path, use_posixpath=True) for blob in tree.traverse(): if isinstance(blob, git.Tree): @@ -1149,7 +1153,7 @@ class GitPython(GitProvider): relpath = lambda path: os.path.relpath(path, self.root(tgt_env)) else: relpath = lambda path: path - add_mountpoint = lambda path: salt.utils.path_join( + add_mountpoint = lambda path: salt.utils.path.join( self.mountpoint(tgt_env), path, use_posixpath=True) for file_blob in tree.traverse(): if not isinstance(file_blob, git.Blob): @@ -1192,7 +1196,7 @@ class GitPython(GitProvider): stream.seek(0) link_tgt = stream.read() stream.close() - path = salt.utils.path_join( + path = salt.utils.path.join( os.path.dirname(path), link_tgt, use_posixpath=True) else: blob = file_blob @@ -1511,7 +1515,7 @@ class Pygit2(GitProvider): log.error(_INVALID_REPO.format(self.cachedir, self.url, self.role)) return new - self.gitdir = salt.utils.path_join(self.repo.workdir, '.git') + self.gitdir = salt.utils.path.join(self.repo.workdir, '.git') if not self.repo.remotes: try: @@ -1524,18 +1528,18 @@ 
class Pygit2(GitProvider): else: new = True - try: - ssl_verify = self.repo.config.get_bool('http.sslVerify') - except KeyError: - ssl_verify = None - if ssl_verify != self.ssl_verify: - self.repo.config.set_multivar('http.sslVerify', - '', - str(self.ssl_verify).lower()) + try: + ssl_verify = self.repo.config.get_bool('http.sslVerify') + except KeyError: + ssl_verify = None + if ssl_verify != self.ssl_verify: + self.repo.config.set_multivar('http.sslVerify', + '', + str(self.ssl_verify).lower()) - # Ensure that refspecs for the "origin" remote are set up as configured - if hasattr(self, 'refspecs'): - self.configure_refspecs() + # Ensure that refspecs for the "origin" remote are set up as configured + if hasattr(self, 'refspecs'): + self.configure_refspecs() return new @@ -1556,11 +1560,11 @@ class Pygit2(GitProvider): if not isinstance(blob, pygit2.Tree): continue blobs.append( - salt.utils.path_join(prefix, entry.name, use_posixpath=True) + salt.utils.path.join(prefix, entry.name, use_posixpath=True) ) if len(blob): _traverse( - blob, blobs, salt.utils.path_join( + blob, blobs, salt.utils.path.join( prefix, entry.name, use_posixpath=True) ) @@ -1582,7 +1586,7 @@ class Pygit2(GitProvider): blobs = [] if len(tree): _traverse(tree, blobs, self.root(tgt_env)) - add_mountpoint = lambda path: salt.utils.path_join( + add_mountpoint = lambda path: salt.utils.path.join( self.mountpoint(tgt_env), path, use_posixpath=True) for blob in blobs: ret.add(add_mountpoint(relpath(blob))) @@ -1671,7 +1675,7 @@ class Pygit2(GitProvider): continue obj = self.repo[entry.oid] if isinstance(obj, pygit2.Blob): - repo_path = salt.utils.path_join( + repo_path = salt.utils.path.join( prefix, entry.name, use_posixpath=True) blobs.setdefault('files', []).append(repo_path) if stat.S_ISLNK(tree[entry.name].filemode): @@ -1679,7 +1683,7 @@ class Pygit2(GitProvider): blobs.setdefault('symlinks', {})[repo_path] = link_tgt elif isinstance(obj, pygit2.Tree): _traverse( - obj, blobs, salt.utils.path_join( + obj, blobs, salt.utils.path.join( prefix, entry.name, use_posixpath=True) ) @@ -1705,7 +1709,7 @@ class Pygit2(GitProvider): blobs = {} if len(tree): _traverse(tree, blobs, self.root(tgt_env)) - add_mountpoint = lambda path: salt.utils.path_join( + add_mountpoint = lambda path: salt.utils.path.join( self.mountpoint(tgt_env), path, use_posixpath=True) for repo_path in blobs.get('files', []): files.add(add_mountpoint(relpath(repo_path))) @@ -1738,7 +1742,7 @@ class Pygit2(GitProvider): # the symlink and set path to the location indicated # in the blob data. 
link_tgt = self.repo[entry.oid].data - path = salt.utils.path_join( + path = salt.utils.path.join( os.path.dirname(path), link_tgt, use_posixpath=True) else: blob = self.repo[entry.oid] @@ -1965,12 +1969,12 @@ class GitBase(object): if cache_root is not None: self.cache_root = self.remote_root = cache_root else: - self.cache_root = salt.utils.path_join(self.opts['cachedir'], + self.cache_root = salt.utils.path.join(self.opts['cachedir'], self.role) - self.remote_root = salt.utils.path_join(self.cache_root, 'remotes') - self.env_cache = salt.utils.path_join(self.cache_root, 'envs.p') - self.hash_cachedir = salt.utils.path_join(self.cache_root, 'hash') - self.file_list_cachedir = salt.utils.path_join( + self.remote_root = salt.utils.path.join(self.cache_root, 'remotes') + self.env_cache = salt.utils.path.join(self.cache_root, 'envs.p') + self.hash_cachedir = salt.utils.path.join(self.cache_root, 'hash') + self.file_list_cachedir = salt.utils.path.join( self.opts['cachedir'], 'file_lists', self.role) def init_remotes(self, remotes, per_remote_overrides, @@ -2115,7 +2119,7 @@ class GitBase(object): for item in cachedir_ls: if item in ('hash', 'refs'): continue - path = salt.utils.path_join(self.cache_root, item) + path = salt.utils.path.join(self.cache_root, item) if os.path.isdir(path): to_remove.append(path) failed = [] @@ -2350,7 +2354,7 @@ class GitBase(object): git.__version__ ) ) - if not salt.utils.which('git'): + if not salt.utils.path.which('git'): errors.append( 'The git command line utility is required when using the ' '\'gitpython\' {0}_provider.'.format(self.role) @@ -2414,7 +2418,7 @@ class GitBase(object): pygit2.LIBGIT2_VERSION ) ) - if not salt.utils.which('git'): + if not salt.utils.path.which('git'): errors.append( 'The git command line utility is required when using the ' '\'pygit2\' {0}_provider.'.format(self.role) @@ -2435,7 +2439,7 @@ class GitBase(object): ''' Write the remote_map.txt ''' - remote_map = salt.utils.path_join(self.cache_root, 'remote_map.txt') + remote_map = salt.utils.path.join(self.cache_root, 'remote_map.txt') try: with salt.utils.files.fopen(remote_map, 'w+') as fp_: timestamp = \ @@ -2540,17 +2544,17 @@ class GitFS(GitBase): fnd = {'path': '', 'rel': ''} if os.path.isabs(path) or \ - (not salt.utils.is_hex(tgt_env) and tgt_env not in self.envs()): + (not salt.utils.stringutils.is_hex(tgt_env) and tgt_env not in self.envs()): return fnd - dest = salt.utils.path_join(self.cache_root, 'refs', tgt_env, path) - hashes_glob = salt.utils.path_join(self.hash_cachedir, + dest = salt.utils.path.join(self.cache_root, 'refs', tgt_env, path) + hashes_glob = salt.utils.path.join(self.hash_cachedir, tgt_env, '{0}.hash.*'.format(path)) - blobshadest = salt.utils.path_join(self.hash_cachedir, + blobshadest = salt.utils.path.join(self.hash_cachedir, tgt_env, '{0}.hash.blob_sha1'.format(path)) - lk_fn = salt.utils.path_join(self.hash_cachedir, + lk_fn = salt.utils.path.join(self.hash_cachedir, tgt_env, '{0}.lk'.format(path)) destdir = os.path.dirname(dest) @@ -2576,7 +2580,7 @@ class GitFS(GitBase): continue repo_path = path[len(repo.mountpoint(tgt_env)):].lstrip(os.sep) if repo.root(tgt_env): - repo_path = salt.utils.path_join(repo.root(tgt_env), repo_path) + repo_path = salt.utils.path.join(repo.root(tgt_env), repo_path) blob, blob_hexsha, blob_mode = repo.find_file(repo_path, tgt_env) if blob is None: @@ -2632,7 +2636,7 @@ class GitFS(GitBase): Return a chunk from a file based on the data received ''' if 'env' in load: - salt.utils.warn_until( + 
salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -2672,7 +2676,7 @@ class GitFS(GitBase): Return a file hash, the hash type is set in the master config file ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -2685,7 +2689,7 @@ class GitFS(GitBase): ret = {'hash_type': self.opts['hash_type']} relpath = fnd['rel'] path = fnd['path'] - hashdest = salt.utils.path_join(self.hash_cachedir, + hashdest = salt.utils.path.join(self.hash_cachedir, load['saltenv'], '{0}.hash.{1}'.format(relpath, self.opts['hash_type'])) @@ -2706,7 +2710,7 @@ class GitFS(GitBase): Return a dict containing the file lists for files and dirs ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -2724,11 +2728,11 @@ class GitFS(GitBase): ) ) return [] - list_cache = salt.utils.path_join( + list_cache = salt.utils.path.join( self.file_list_cachedir, '{0}.p'.format(load['saltenv'].replace(os.path.sep, '_|-')) ) - w_lock = salt.utils.path_join( + w_lock = salt.utils.path.join( self.file_list_cachedir, '.{0}.w'.format(load['saltenv'].replace(os.path.sep, '_|-')) ) @@ -2740,7 +2744,7 @@ class GitFS(GitBase): return cache_match if refresh_cache: ret = {'files': set(), 'symlinks': {}, 'dirs': set()} - if salt.utils.is_hex(load['saltenv']) \ + if salt.utils.stringutils.is_hex(load['saltenv']) \ or load['saltenv'] in self.envs(): for repo in self.remotes: repo_files, repo_symlinks = repo.file_list(load['saltenv']) @@ -2780,7 +2784,7 @@ class GitFS(GitBase): Return a dict of all symlinks based on a given path in the repo ''' if 'env' in load: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. 
This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -2788,7 +2792,7 @@ class GitFS(GitBase): ) load.pop('env') - if not salt.utils.is_hex(load['saltenv']) \ + if not salt.utils.stringutils.is_hex(load['saltenv']) \ and load['saltenv'] not in self.envs(): return {} if 'prefix' in load: @@ -2835,7 +2839,7 @@ class GitPillar(GitBase): ''' Ensure that the mountpoint is linked to the passed cachedir ''' - lcachelink = salt.utils.path_join(repo.linkdir, repo._mountpoint) + lcachelink = salt.utils.path.join(repo.linkdir, repo._mountpoint) if not os.path.islink(lcachelink): ldirname = os.path.dirname(lcachelink) try: diff --git a/salt/utils/github.py b/salt/utils/github.py index a7de81d30f..6db18c72e2 100644 --- a/salt/utils/github.py +++ b/salt/utils/github.py @@ -10,7 +10,7 @@ import salt.utils.http import logging # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/salt/utils/gzip_util.py b/salt/utils/gzip_util.py index 202fca88ca..56be313552 100644 --- a/salt/utils/gzip_util.py +++ b/salt/utils/gzip_util.py @@ -14,7 +14,7 @@ import gzip import salt.utils.files # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six import BytesIO diff --git a/salt/utils/hashutils.py b/salt/utils/hashutils.py index 9e99af1100..341058e80f 100644 --- a/salt/utils/hashutils.py +++ b/salt/utils/hashutils.py @@ -8,12 +8,13 @@ from __future__ import absolute_import import base64 import hashlib import hmac +import random # Import Salt libs -import salt.ext.six as six -import salt.utils +from salt.ext import six +import salt.utils.stringutils -from salt.utils.decorators import jinja_filter +from salt.utils.decorators.jinja import jinja_filter @jinja_filter('base64_encode') @@ -25,9 +26,9 @@ def base64_b64encode(instr): newline ('\\n') characters in the encoded output. ''' if six.PY3: - b = salt.utils.to_bytes(instr) + b = salt.utils.stringutils.to_bytes(instr) b64 = base64.b64encode(b) - return salt.utils.to_str(b64) + return salt.utils.stringutils.to_str(b64) return base64.b64encode(instr) @@ -37,10 +38,10 @@ def base64_b64decode(instr): Decode a base64-encoded string using the "modern" Python interface. ''' if six.PY3: - b = salt.utils.to_bytes(instr) + b = salt.utils.stringutils.to_bytes(instr) data = base64.b64decode(b) try: - return salt.utils.to_str(data) + return salt.utils.stringutils.to_str(data) except UnicodeDecodeError: return data return base64.b64decode(instr) @@ -55,9 +56,9 @@ def base64_encodestring(instr): at the end of the encoded string. ''' if six.PY3: - b = salt.utils.to_bytes(instr) + b = salt.utils.stringutils.to_bytes(instr) b64 = base64.encodebytes(b) - return salt.utils.to_str(b64) + return salt.utils.stringutils.to_str(b64) return base64.encodestring(instr) @@ -67,10 +68,10 @@ def base64_decodestring(instr): ''' if six.PY3: - b = salt.utils.to_bytes(instr) + b = salt.utils.stringutils.to_bytes(instr) data = base64.decodebytes(b) try: - return salt.utils.to_str(data) + return salt.utils.stringutils.to_str(data) except UnicodeDecodeError: return data return base64.decodestring(instr) @@ -82,7 +83,7 @@ def md5_digest(instr): Generate an md5 hash of a given string. ''' if six.PY3: - b = salt.utils.to_bytes(instr) + b = salt.utils.stringutils.to_bytes(instr) return hashlib.md5(b).hexdigest() return hashlib.md5(instr).hexdigest() @@ -93,7 +94,7 @@ def sha256_digest(instr): Generate an sha256 hash of a given string. 
''' if six.PY3: - b = salt.utils.to_bytes(instr) + b = salt.utils.stringutils.to_bytes(instr) return hashlib.sha256(b).hexdigest() return hashlib.sha256(instr).hexdigest() @@ -104,7 +105,7 @@ def sha512_digest(instr): Generate an sha512 hash of a given string ''' if six.PY3: - b = salt.utils.to_bytes(instr) + b = salt.utils.stringutils.to_bytes(instr) return hashlib.sha512(b).hexdigest() return hashlib.sha512(instr).hexdigest() @@ -116,9 +117,9 @@ def hmac_signature(string, shared_secret, challenge_hmac): Returns a boolean if the verification succeeded or failed. ''' if six.PY3: - msg = salt.utils.to_bytes(string) - key = salt.utils.to_bytes(shared_secret) - challenge = salt.utils.to_bytes(challenge_hmac) + msg = salt.utils.stringutils.to_bytes(string) + key = salt.utils.stringutils.to_bytes(shared_secret) + challenge = salt.utils.stringutils.to_bytes(challenge_hmac) else: msg = string key = shared_secret @@ -126,3 +127,15 @@ def hmac_signature(string, shared_secret, challenge_hmac): hmac_hash = hmac.new(key, msg, hashlib.sha256) valid_hmac = base64.b64encode(hmac_hash.digest()) return valid_hmac == challenge + + +@jinja_filter('rand_str') # Remove this for Neon +@jinja_filter('random_hash') +def random_hash(size=9999999999, hash_type=None): + ''' + Return a hash of a randomized data from random.SystemRandom() + ''' + if not hash_type: + hash_type = 'md5' + hasher = getattr(hashlib, hash_type) + return hasher(salt.utils.stringutils.to_bytes(str(random.SystemRandom().randint(0, size)))).hexdigest() diff --git a/salt/utils/http.py b/salt/utils/http.py index 59cb8c96ff..f0b6f0c690 100644 --- a/salt/utils/http.py +++ b/salt/utils/http.py @@ -36,20 +36,22 @@ except ImportError: HAS_MATCHHOSTNAME = False # Import salt libs -import salt.utils -import salt.utils.xmlutil as xml +import salt.config +import salt.loader +import salt.syspaths +import salt.utils # Can be removed once refresh_dns is moved import salt.utils.args import salt.utils.files -import salt.loader -import salt.config +import salt.utils.platform +import salt.utils.stringutils import salt.version +import salt.utils.xmlutil as xml from salt._compat import ElementTree as ET from salt.template import compile_template -from salt.utils.decorators import jinja_filter -from salt import syspaths +from salt.utils.decorators.jinja import jinja_filter # Import 3rd party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error,no-name-in-module import salt.ext.six.moves.http_client import salt.ext.six.moves.http_cookiejar @@ -147,11 +149,11 @@ def query(url, if opts is None: if node == 'master': opts = salt.config.master_config( - os.path.join(syspaths.CONFIG_DIR, 'master') + os.path.join(salt.syspaths.CONFIG_DIR, 'master') ) elif node == 'minion': opts = salt.config.minion_config( - os.path.join(syspaths.CONFIG_DIR, 'minion') + os.path.join(salt.syspaths.CONFIG_DIR, 'minion') ) else: opts = {} @@ -224,9 +226,9 @@ def query(url, header_list = [] if cookie_jar is None: - cookie_jar = os.path.join(opts.get('cachedir', syspaths.CACHE_DIR), 'cookies.txt') + cookie_jar = os.path.join(opts.get('cachedir', salt.syspaths.CACHE_DIR), 'cookies.txt') if session_cookie_jar is None: - session_cookie_jar = os.path.join(opts.get('cachedir', syspaths.CACHE_DIR), 'cookies.session.p') + session_cookie_jar = os.path.join(opts.get('cachedir', salt.syspaths.CACHE_DIR), 'cookies.session.p') if persist_session is True and HAS_MSGPACK: # TODO: This is hackish; it will overwrite the session cookie jar with @@ -462,8 +464,8 @@ def 
query(url, # We want to use curl_http if we have a proxy defined if proxy_host and proxy_port: if HAS_CURL_HTTPCLIENT is False: - ret['error'] = ('proxy_host and proxy_port has been set. This requires pycurl, but the ' - 'pycurl library does not seem to be installed') + ret['error'] = ('proxy_host and proxy_port has been set. This requires pycurl and tornado, ' + 'but the libraries does not seem to be installed') log.error(ret['error']) return ret @@ -602,7 +604,7 @@ def query(url, return ret if decode_type == 'json': - ret['dict'] = json.loads(salt.utils.to_str(result_text)) + ret['dict'] = json.loads(salt.utils.stringutils.to_str(result_text)) elif decode_type == 'xml': ret['dict'] = [] items = ET.fromstring(result_text) @@ -639,7 +641,7 @@ def get_ca_bundle(opts=None): if opts_bundle is not None and os.path.exists(opts_bundle): return opts_bundle - file_roots = opts.get('file_roots', {'base': [syspaths.SRV_ROOT_DIR]}) + file_roots = opts.get('file_roots', {'base': [salt.syspaths.SRV_ROOT_DIR]}) # Please do not change the order without good reason @@ -667,7 +669,7 @@ def get_ca_bundle(opts=None): if os.path.exists(path): return path - if salt.utils.is_windows() and HAS_CERTIFI: + if salt.utils.platform.is_windows() and HAS_CERTIFI: return certifi.where() return None @@ -775,7 +777,8 @@ def _render(template, render, renderer, template_dict, opts): blacklist = opts.get('renderer_blacklist') whitelist = opts.get('renderer_whitelist') ret = compile_template(template, rend, renderer, blacklist, whitelist, **template_dict) - ret = ret.read() + if salt.utils.stringio.is_readable(ret): + ret = ret.read() if str(ret).startswith('#!') and not str(ret).startswith('#!/'): ret = str(ret).split('\n', 1)[1] return ret diff --git a/salt/utils/iam.py b/salt/utils/iam.py index a148fb4f04..2e7986c69b 100644 --- a/salt/utils/iam.py +++ b/salt/utils/iam.py @@ -11,7 +11,7 @@ import logging import time import pprint from salt.ext.six.moves import range -import salt.ext.six as six +from salt.ext import six try: import requests diff --git a/salt/utils/jinja.py b/salt/utils/jinja.py index fc96c7889d..38896e2b81 100644 --- a/salt/utils/jinja.py +++ b/salt/utils/jinja.py @@ -8,18 +8,18 @@ from __future__ import absolute_import import collections import json import logging +import os.path import pipes import pprint import re import uuid from functools import wraps -from os import path from xml.dom import minidom from xml.etree.ElementTree import Element, SubElement, tostring # Import third party libs import jinja2 -import salt.ext.six as six +from salt.ext import six import yaml from jinja2 import BaseLoader, Markup, TemplateNotFound, nodes from jinja2.environment import TemplateModule @@ -27,11 +27,10 @@ from jinja2.exceptions import TemplateRuntimeError from jinja2.ext import Extension # Import salt libs -import salt import salt.fileclient import salt.utils.files import salt.utils.url -from salt.utils.decorators import jinja_filter, jinja_test, jinja_global +from salt.utils.decorators.jinja import jinja_filter, jinja_test, jinja_global from salt.utils.odict import OrderedDict from salt.exceptions import TemplateError @@ -76,7 +75,7 @@ class SaltCacheLoader(BaseLoader): else: self.searchpath = opts['file_roots'][saltenv] else: - self.searchpath = [path.join(opts['cachedir'], 'files', saltenv)] + self.searchpath = [os.path.join(opts['cachedir'], 'files', saltenv)] log.debug('Jinja search path: %s', self.searchpath) self._file_client = None self.cached = [] @@ -118,7 +117,7 @@ class SaltCacheLoader(BaseLoader): 
self.check_cache(template) if environment and template: - tpldir = path.dirname(template).replace('\\', '/') + tpldir = os.path.dirname(template).replace('\\', '/') tpldata = { 'tplfile': template, 'tpldir': '.' if tpldir == '' else tpldir, @@ -128,15 +127,15 @@ class SaltCacheLoader(BaseLoader): # pylint: disable=cell-var-from-loop for spath in self.searchpath: - filepath = path.join(spath, template) + filepath = os.path.join(spath, template) try: with salt.utils.files.fopen(filepath, 'rb') as ifile: contents = ifile.read().decode(self.encoding) - mtime = path.getmtime(filepath) + mtime = os.path.getmtime(filepath) def uptodate(): try: - return path.getmtime(filepath) == mtime + return os.path.getmtime(filepath) == mtime except OSError: return False return contents, filepath, uptodate diff --git a/salt/utils/locales.py b/salt/utils/locales.py index b245e7aae8..c114a985ae 100644 --- a/salt/utils/locales.py +++ b/salt/utils/locales.py @@ -3,14 +3,17 @@ the locale utils used by salt ''' +# Import Python libs from __future__ import absolute_import - import sys -import salt.utils -import salt.ext.six as six +# Import Salt libs +import salt.utils.stringutils from salt.utils.decorators import memoize as real_memoize +# Import 3rd-party libs +from salt.ext import six + @real_memoize def get_encodings(): @@ -42,7 +45,7 @@ def sdecode(string_): encodings = get_encodings() for encoding in encodings: try: - decoded = salt.utils.to_unicode(string_, encoding) + decoded = salt.utils.stringutils.to_unicode(string_, encoding) if isinstance(decoded, six.string_types): # Make sure unicode string ops work u' ' + decoded # pylint: disable=W0104 diff --git a/salt/utils/mac_utils.py b/salt/utils/mac_utils.py index 399afb6ba6..baf6ca16c0 100644 --- a/salt/utils/mac_utils.py +++ b/salt/utils/mac_utils.py @@ -13,6 +13,9 @@ import time # Import Salt Libs import salt.utils +import salt.utils.args +import salt.utils.platform +import salt.utils.stringutils import salt.utils.timed_subprocess import salt.grains.extra from salt.ext import six @@ -34,7 +37,7 @@ def __virtual__(): ''' Load only on Mac OS ''' - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): return (False, 'The mac_utils utility could not be loaded: ' 'utility only works on MacOS systems.') @@ -51,7 +54,7 @@ def _run_all(cmd): ''' if not isinstance(cmd, list): - cmd = salt.utils.shlex_split(cmd, posix=False) + cmd = salt.utils.args.shlex_split(cmd, posix=False) for idx, item in enumerate(cmd): if not isinstance(cmd[idx], six.string_types): @@ -95,9 +98,9 @@ def _run_all(cmd): out, err = proc.stdout, proc.stderr if out is not None: - out = salt.utils.to_str(out).rstrip() + out = salt.utils.stringutils.to_str(out).rstrip() if err is not None: - err = salt.utils.to_str(err).rstrip() + err = salt.utils.stringutils.to_str(err).rstrip() ret['pid'] = proc.process.pid ret['retcode'] = proc.process.returncode @@ -190,7 +193,7 @@ def validate_enabled(enabled): :return: "on" or "off" or errors :rtype: str ''' - if isinstance(enabled, str): + if isinstance(enabled, six.string_types): if enabled.lower() not in ['on', 'off', 'yes', 'no']: msg = '\nMac Power: Invalid String Value for Enabled.\n' \ 'String values must be \'on\' or \'off\'/\'yes\' or \'no\'.\n' \ diff --git a/salt/utils/master.py b/salt/utils/master.py index c48137ce4c..51c635bb52 100644 --- a/salt/utils/master.py +++ b/salt/utils/master.py @@ -22,6 +22,8 @@ import salt.pillar import salt.utils import salt.utils.atomicfile import salt.utils.minions +import salt.utils.verify +import 
salt.utils.versions import salt.payload from salt.exceptions import SaltException import salt.config @@ -29,7 +31,7 @@ from salt.utils.cache import CacheCli as cache_cli from salt.utils.process import MultiprocessingProcess # Import third party libs -import salt.ext.six as six +from salt.ext import six try: import zmq HAS_ZMQ = True @@ -78,7 +80,7 @@ class MasterPillarUtil(object): # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. Support for using ' @@ -610,7 +612,7 @@ class ConnectedCache(MultiprocessingProcess): log.debug('ConCache Received request: {0}'.format(msg)) # requests to the minion list are send as str's - if isinstance(msg, str): + if isinstance(msg, six.string_types): if msg == 'minions': # Send reply back to client reply = serial.dumps(self.minions) @@ -642,7 +644,7 @@ class ConnectedCache(MultiprocessingProcess): data = new_c_data[0] - if isinstance(data, str): + if isinstance(data, six.string_types): if data not in self.minions: log.debug('ConCache Adding minion {0} to cache'.format(new_c_data[0])) self.minions.append(data) diff --git a/salt/utils/minion.py b/salt/utils/minion.py index 1f9591fa0b..67ae2f09b1 100644 --- a/salt/utils/minion.py +++ b/salt/utils/minion.py @@ -10,13 +10,13 @@ import logging import threading # Import Salt Libs -import salt.utils -import salt.utils.files -import salt.utils.process import salt.payload +import salt.utils.files +import salt.utils.platform +import salt.utils.process # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -131,7 +131,7 @@ def _check_cmdline(data): For non-Linux systems we punt and just return True ''' - if not salt.utils.is_linux(): + if not salt.utils.platform.is_linux(): return True pid = data.get('pid') if not pid: diff --git a/salt/utils/minions.py b/salt/utils/minions.py index 588e473f12..eeeb5ad369 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -16,11 +16,12 @@ import salt.payload import salt.utils import salt.utils.files import salt.utils.network +import salt.utils.versions from salt.defaults import DEFAULT_TARGET_DELIM from salt.exceptions import CommandExecutionError, SaltCacheError import salt.auth.ldap import salt.cache -import salt.ext.six as six +from salt.ext import six # Import 3rd-party libs if six.PY3: @@ -681,7 +682,7 @@ class CkMinions(object): # remember to remove the expr_form argument from this function when # performing the cleanup on this deprecation. if expr_form is not None: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'the target type should be passed using the \'tgt_type\' ' 'argument instead of \'expr_form\'. 
Support for using ' @@ -706,7 +707,7 @@ class CkMinions(object): functions ''' vals = [] - if isinstance(fun, str): + if isinstance(fun, six.string_types): fun = [fun] for func in fun: try: diff --git a/salt/utils/msazure.py b/salt/utils/msazure.py index 7488f3077c..ec72617331 100644 --- a/salt/utils/msazure.py +++ b/salt/utils/msazure.py @@ -19,7 +19,7 @@ except ImportError: pass # Import salt libs -import salt.ext.six as six +from salt.ext import six from salt.exceptions import SaltSystemExit log = logging.getLogger(__name__) diff --git a/salt/utils/napalm.py b/salt/utils/napalm.py index 36914be042..10a3bf75ee 100644 --- a/salt/utils/napalm.py +++ b/salt/utils/napalm.py @@ -14,18 +14,18 @@ Utils for the NAPALM modules and proxy. .. versionadded:: 2017.7.0 ''' - +# Import Python libs from __future__ import absolute_import - import traceback import logging import importlib from functools import wraps -log = logging.getLogger(__file__) -import salt.utils +# Import Salt libs +import salt.utils.platform -# Import third party lib +# Import 3rd-party libs +from salt.ext import six try: # will try to import NAPALM # https://github.com/napalm-automation/napalm @@ -45,14 +45,14 @@ try: except ImportError: HAS_CONN_CLOSED_EXC_CLASS = False -from salt.ext import six as six +log = logging.getLogger(__file__) def is_proxy(opts): ''' Is this a NAPALM proxy? ''' - return salt.utils.is_proxy() and opts.get('proxy', {}).get('proxytype') == 'napalm' + return salt.utils.platform.is_proxy() and opts.get('proxy', {}).get('proxytype') == 'napalm' def is_always_alive(opts): @@ -73,7 +73,7 @@ def is_minion(opts): ''' Is this a NAPALM straight minion? ''' - return not salt.utils.is_proxy() and 'napalm' in opts + return not salt.utils.platform.is_proxy() and 'napalm' in opts def virtual(opts, virtualname, filename): @@ -243,6 +243,11 @@ def get_device_opts(opts, salt_obj=None): network_device = {} # by default, look in the proxy config details device_dict = opts.get('proxy', {}) or opts.get('napalm', {}) + if opts.get('proxy') or opts.get('napalm'): + opts['multiprocessing'] = device_dict.get('multiprocessing', False) + # Most NAPALM drivers are SSH-based, so multiprocessing should default to False. + # But the user can be allows to have a different value for the multiprocessing, which will + # override the opts. 
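For illustration, a minimal sketch of how the multiprocessing override above resolves, using plain dicts in place of the real proxy opts (all values shown are hypothetical):

    # Hypothetical proxy opts; only the keys used by the logic above matter here.
    opts = {'proxy': {'proxytype': 'napalm', 'driver': 'ios', 'multiprocessing': True}}
    device_dict = opts.get('proxy', {}) or opts.get('napalm', {})
    if opts.get('proxy') or opts.get('napalm'):
        # SSH-based NAPALM drivers default to multiprocessing=False unless the
        # proxy/napalm config explicitly overrides it.
        opts['multiprocessing'] = device_dict.get('multiprocessing', False)
    print(opts['multiprocessing'])  # True, because the proxy config set it
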
if salt_obj and not device_dict: # get the connection details from the opts device_dict = salt_obj['config.merge']('napalm') @@ -349,11 +354,11 @@ def proxy_napalm_wrap(func): # the execution modules will make use of this variable from now on # previously they were accessing the device properties through the __proxy__ object always_alive = opts.get('proxy', {}).get('always_alive', True) - if salt.utils.is_proxy() and always_alive: + if salt.utils.platform.is_proxy() and always_alive: # if it is running in a proxy and it's using the default always alive behaviour, # will get the cached copy of the network device wrapped_global_namespace['napalm_device'] = proxy['napalm.get_device']() - elif salt.utils.is_proxy() and not always_alive: + elif salt.utils.platform.is_proxy() and not always_alive: # if still proxy, but the user does not want the SSH session always alive # get a new device instance # which establishes a new connection diff --git a/salt/utils/network.py b/salt/utils/network.py index 82fd140959..77289d21b6 100644 --- a/salt/utils/network.py +++ b/salt/utils/network.py @@ -16,7 +16,7 @@ import subprocess from string import ascii_letters, digits # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: @@ -26,13 +26,16 @@ except ImportError: pass # Import salt libs -import salt.utils +import salt.utils.args import salt.utils.files +import salt.utils.path +import salt.utils.platform +import salt.utils.stringutils from salt._compat import ipaddress -from salt.utils.decorators import jinja_filter +from salt.utils.decorators.jinja import jinja_filter # inet_pton does not exist in Windows, this is a workaround -if salt.utils.is_windows(): +if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) @@ -144,7 +147,7 @@ def _generate_minion_id(): hosts.extend(hst) # include public and private ipaddresses - return hosts.extend([addr for addr in salt.utils.network.ip_addrs() + return hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback]) @@ -207,6 +210,21 @@ def ip_to_host(ip): # pylint: enable=C0103 +def is_reachable_host(entity_name): + ''' + Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc). + :param hostname: + :return: + ''' + try: + assert type(socket.getaddrinfo(entity_name, 0, 0, 0, 0)) == list + ret = True + except socket.gaierror: + ret = False + + return ret + + def is_ip(ip): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
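A standalone sketch of what the new is_reachable_host() helper does (a name that resolves via getaddrinfo counts as reachable); the hostnames below are only examples:

    import socket

    def is_reachable_host(entity_name):
        # Same idea as the helper added to salt/utils/network.py above.
        try:
            socket.getaddrinfo(entity_name, 0, 0, 0, 0)
            return True
        except socket.gaierror:
            return False

    print(is_reachable_host('localhost'))             # normally True
    print(is_reachable_host('no-such-host.invalid'))  # False
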
@@ -681,7 +699,7 @@ def _interfaces_ifconfig(out): piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') - if salt.utils.is_sunos(): + if salt.utils.platform.is_sunos(): pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)') pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*') @@ -708,7 +726,7 @@ def _interfaces_ifconfig(out): iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) - if salt.utils.is_sunos(): + if salt.utils.platform.is_sunos(): expand_mac = [] for chunk in data['hwaddr'].split(':'): expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else '{0}'.format(chunk)) @@ -740,11 +758,11 @@ def _interfaces_ifconfig(out): mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) - if not salt.utils.is_sunos(): + if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf - if not salt.utils.is_sunos() \ + if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) @@ -773,8 +791,8 @@ def linux_interfaces(): Obtain interface information for *NIX/BSD variants ''' ifaces = dict() - ip_path = salt.utils.which('ip') - ifconfig_path = None if ip_path else salt.utils.which('ifconfig') + ip_path = salt.utils.path.which('ip') + ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), @@ -789,15 +807,15 @@ def linux_interfaces(): stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( - salt.utils.to_str(cmd1), - salt.utils.to_str(cmd2))) + salt.utils.stringutils.to_str(cmd1), + salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] - ifaces = _interfaces_ifconfig(salt.utils.to_str(cmd)) + ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces @@ -900,7 +918,7 @@ def interfaces(): ''' Return a dictionary of information about all the interfaces on the minion ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return win_interfaces() else: return linux_interfaces() @@ -991,7 +1009,7 @@ def hw_addr(iface): Added support for AIX ''' - if salt.utils.is_aix(): + if salt.utils.platform.is_aix(): return _hw_addr_aix iface_info, error = _get_iface_info(iface) @@ -1257,17 +1275,17 @@ def _remotes_on(port, which_end): ret.add(iret[sl]['remote_addr']) if not proc_available: # Fallback to use OS specific tools - if salt.utils.is_sunos(): + if salt.utils.platform.is_sunos(): return _sunos_remotes_on(port, which_end) - if salt.utils.is_freebsd(): + if salt.utils.platform.is_freebsd(): return _freebsd_remotes_on(port, which_end) - if salt.utils.is_netbsd(): + if salt.utils.platform.is_netbsd(): return _netbsd_remotes_on(port, which_end) - if salt.utils.is_openbsd(): + if salt.utils.platform.is_openbsd(): return _openbsd_remotes_on(port, which_end) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return _windows_remotes_on(port, which_end) - if salt.utils.is_aix(): + if salt.utils.platform.is_aix(): return _aix_remotes_on(port, which_end) return 
_linux_remotes_on(port, which_end) @@ -1314,7 +1332,7 @@ def _sunos_remotes_on(port, which_end): log.error('Failed netstat') raise - lines = salt.utils.to_str(data).split('\n') + lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue @@ -1354,13 +1372,13 @@ def _freebsd_remotes_on(port, which_end): remotes = set() try: - cmd = salt.utils.shlex_split('sockstat -4 -c -p {0}'.format(port)) + cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = {0}'.format(ex.returncode)) raise - lines = salt.utils.to_str(data).split('\n') + lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() @@ -1414,13 +1432,13 @@ def _netbsd_remotes_on(port, which_end): remotes = set() try: - cmd = salt.utils.shlex_split('sockstat -4 -c -n -p {0}'.format(port)) + cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = {0}'.format(ex.returncode)) raise - lines = salt.utils.to_str(data).split('\n') + lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() @@ -1509,7 +1527,7 @@ def _windows_remotes_on(port, which_end): log.error('Failed netstat') raise - lines = salt.utils.to_str(data).split('\n') + lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue @@ -1556,7 +1574,7 @@ def _linux_remotes_on(port, which_end): log.error('Failed "lsof" with returncode = {0}'.format(ex.returncode)) raise - lines = salt.utils.to_str(data).split('\n') + lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: @@ -1615,7 +1633,7 @@ def _aix_remotes_on(port, which_end): log.error('Failed netstat') raise - lines = salt.utils.to_str(data).split('\n') + lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue diff --git a/salt/utils/odict.py b/salt/utils/odict.py index 7838256232..86b8d2b4c1 100644 --- a/salt/utils/odict.py +++ b/salt/utils/odict.py @@ -25,7 +25,7 @@ from __future__ import absolute_import from collections import Callable # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six try: # pylint: disable=E0611,minimum-python-version diff --git a/salt/utils/openstack/neutron.py b/salt/utils/openstack/neutron.py index 671d9216af..54fd2ec57a 100644 --- a/salt/utils/openstack/neutron.py +++ b/salt/utils/openstack/neutron.py @@ -9,7 +9,7 @@ from __future__ import absolute_import, with_statement import logging # Import third party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error HAS_NEUTRON = False try: diff --git a/salt/utils/openstack/nova.py b/salt/utils/openstack/nova.py index 261190d89e..e1ea7b405d 100644 --- a/salt/utils/openstack/nova.py +++ b/salt/utils/openstack/nova.py @@ -10,7 +10,7 @@ import logging import time # Import third party libs -import salt.ext.six as six +from salt.ext import six HAS_NOVA = False # pylint: disable=import-error try: diff --git a/salt/utils/oset.py b/salt/utils/oset.py new file mode 100644 index 0000000000..677181f307 --- /dev/null +++ b/salt/utils/oset.py @@ -0,0 
+1,204 @@ +# -*- coding: utf-8 -*- +""" + +Available at repository https://github.com/LuminosoInsight/ordered-set + + salt.utils.oset + ~~~~~~~~~~~~~~~~ + +An OrderedSet is a custom MutableSet that remembers its order, so that every +entry has an index that can be looked up. + +Based on a recipe originally posted to ActiveState Recipes by Raymond Hettiger, +and released under the MIT license. + +Rob Speer's changes are as follows: + + - changed the content from a doubly-linked list to a regular Python list. + Seriously, who wants O(1) deletes but O(N) lookups by index? + - add() returns the index of the added item + - index() just returns the index of an item + - added a __getstate__ and __setstate__ so it can be pickled + - added __getitem__ +""" +from __future__ import absolute_import +import collections + +SLICE_ALL = slice(None) +__version__ = '2.0.1' + + +def is_iterable(obj): + """ + Are we being asked to look up a list of things, instead of a single thing? + We check for the `__iter__` attribute so that this can cover types that + don't have to be known by this module, such as NumPy arrays. + + Strings, however, should be considered as atomic values to look up, not + iterables. The same goes for tuples, since they are immutable and therefore + valid entries. + + We don't need to check for the Python 2 `unicode` type, because it doesn't + have an `__iter__` attribute anyway. + """ + return hasattr(obj, '__iter__') and not isinstance(obj, str) and not isinstance(obj, tuple) + + +class OrderedSet(collections.MutableSet): + """ + An OrderedSet is a custom MutableSet that remembers its order, so that + every entry has an index that can be looked up. + """ + def __init__(self, iterable=None): + self.items = [] + self.map = {} + if iterable is not None: + self |= iterable + + def __len__(self): + return len(self.items) + + def __getitem__(self, index): + """ + Get the item at a given index. + + If `index` is a slice, you will get back that slice of items. If it's + the slice [:], exactly the same object is returned. (If you want an + independent copy of an OrderedSet, use `OrderedSet.copy()`.) + + If `index` is an iterable, you'll get the OrderedSet of items + corresponding to those indices. This is similar to NumPy's + "fancy indexing". + """ + if index == SLICE_ALL: + return self + elif hasattr(index, '__index__') or isinstance(index, slice): + result = self.items[index] + if isinstance(result, list): + return OrderedSet(result) + else: + return result + elif is_iterable(index): + return OrderedSet([self.items[i] for i in index]) + else: + raise TypeError("Don't know how to index an OrderedSet by {}".format(repr(index))) + + def copy(self): + return OrderedSet(self) + + def __getstate__(self): + if len(self) == 0: + # The state can't be an empty list. + # We need to return a truthy value, or else __setstate__ won't be run. + # + # This could have been done more gracefully by always putting the state + # in a tuple, but this way is backwards- and forwards- compatible with + # previous versions of OrderedSet. + return (None,) + else: + return list(self) + + def __setstate__(self, state): + if state == (None,): + self.__init__([]) + else: + self.__init__(state) + + def __contains__(self, key): + return key in self.map + + def add(self, key): + """ + Add `key` as an item to this OrderedSet, then return its index. + + If `key` is already in the OrderedSet, return the index it already + had. 
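A short usage sketch of the OrderedSet being vendored in salt/utils/oset.py, assuming it is imported from a Salt checkout:

    from salt.utils.oset import OrderedSet

    s = OrderedSet(['web1', 'db1', 'web1', 'web2'])
    print(list(s))         # ['web1', 'db1', 'web2'] -- duplicates dropped, order kept
    print(s.index('db1'))  # 1
    print(s.add('db2'))    # 3 -- add() returns the item's index
    s.discard('web1')      # later indices shift down by one
    print(list(s))         # ['db1', 'web2', 'db2']
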
+ """ + if key not in self.map: + self.map[key] = len(self.items) + self.items.append(key) + return self.map[key] + append = add + + def update(self, sequence): + """ + Update the set with the given iterable sequence, then return the index + of the last element inserted. + """ + item_index = None + try: + for item in sequence: + item_index = self.add(item) + except TypeError: + raise ValueError("Argument needs to be an iterable, got {}".format(type(sequence))) + return item_index + + def index(self, key): + """ + Get the index of a given entry, raising an IndexError if it's not + present. + + `key` can be an iterable of entries that is not a string, in which case + this returns a list of indices. + """ + if is_iterable(key): + return [self.index(subkey) for subkey in key] + return self.map[key] + + def pop(self): + """ + Remove and return the last element from the set. + + Raises KeyError if the set is empty. + """ + if not self.items: + raise KeyError('Set is empty') + + elem = self.items[-1] + del self.items[-1] + del self.map[elem] + return elem + + def discard(self, key): + """ + Remove an element. Do not raise an exception if absent. + + The MutableSet mixin uses this to implement the .remove() method, which + *does* raise an error when asked to remove a non-existent item. + """ + if key in self: + i = self.map[key] + del self.items[i] + del self.map[key] + for k, v in self.map.items(): + if v >= i: + self.map[k] = v - 1 + + def clear(self): + """ + Remove all items from this OrderedSet. + """ + del self.items[:] + self.map.clear() + + def __iter__(self): + return iter(self.items) + + def __reversed__(self): + return reversed(self.items) + + def __repr__(self): + if not self: + return "{}()".format(self.__class__.__name__) + return "{}({})".format(self.__class__.__name__, repr(list(self))) + + def __eq__(self, other): + if isinstance(other, OrderedSet): + return len(self) == len(other) and self.items == other.items + try: + other_as_set = set(other) + except TypeError: + # If `other` can't be converted into a set, it's not equal. + return False + else: + return set(self) == other_as_set diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index 8c4ca33cc7..c152109e3e 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -33,14 +33,15 @@ import salt.version as version import salt.utils import salt.utils.args import salt.utils.files -import salt.utils.xdg import salt.utils.jid -from salt.utils import kinds +import salt.utils.kinds as kinds +import salt.utils.platform +import salt.utils.xdg from salt.defaults import DEFAULT_TARGET_DELIM from salt.utils.validate.path import is_writeable from salt.utils.verify import verify_files import salt.exceptions -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin from salt.utils.yamldumper import SafeOrderedDumper @@ -807,7 +808,7 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)): # Log rotate options log_rotate_max_bytes = self.config.get('log_rotate_max_bytes', 0) log_rotate_backup_count = self.config.get('log_rotate_backup_count', 0) - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): # Not supported on platforms other than Windows. 
# Other platforms may use an external tool such as 'logrotate' if log_rotate_max_bytes != 0: @@ -826,7 +827,7 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)): self.config['log_rotate_backup_count'] = log_rotate_backup_count def setup_logfile_logger(self): - if salt.utils.is_windows() and self._setup_mp_logging_listener_: + if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_: # On Windows when using a logging listener, all log file logging # will go through the logging listener. return @@ -850,7 +851,7 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)): log.set_logger_level(name, level) def __setup_extended_logging(self, *args): # pylint: disable=unused-argument - if salt.utils.is_windows() and self._setup_mp_logging_listener_: + if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_: # On Windows when using a logging listener, all extended logging # will go through the logging listener. return @@ -867,7 +868,7 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)): ) def _setup_mp_logging_client(self, *args): # pylint: disable=unused-argument - if salt.utils.is_windows() and self._setup_mp_logging_listener_: + if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_: # On Windows, all logging including console and # log file logging will go through the multiprocessing # logging listener if it exists. @@ -914,7 +915,7 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)): if getattr(self.options, 'daemon', False) is True: return - if salt.utils.is_windows() and self._setup_mp_logging_listener_: + if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_: # On Windows when using a logging listener, all console logging # will go through the logging listener. return @@ -1008,7 +1009,7 @@ class DaemonMixIn(six.with_metaclass(MixInMeta, object)): if self.check_pidfile(): pid = self.get_pidfile() - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): if self.check_pidfile() and self.is_daemonized(pid) and not os.getppid() == pid: return True else: @@ -1057,6 +1058,13 @@ class TargetOptionsMixIn(six.with_metaclass(MixInMeta, object)): self, 'Target Options', 'Target selection options.' ) self.add_option_group(group) + group.add_option( + '-H', '--hosts', + default=False, + action='store_true', + dest='list_hosts', + help='List all known hosts to currently visible or other specified rosters' + ) group.add_option( '-E', '--pcre', default=False, @@ -2480,7 +2488,7 @@ class SaltKeyOptionParser(six.with_metaclass(OptionParserMeta, return keys_config def process_rotate_aes_key(self): - if hasattr(self.options, 'rotate_aes_key') and isinstance(self.options.rotate_aes_key, str): + if hasattr(self.options, 'rotate_aes_key') and isinstance(self.options.rotate_aes_key, six.string_types): if self.options.rotate_aes_key.lower() == 'true': self.options.rotate_aes_key = True elif self.options.rotate_aes_key.lower() == 'false': @@ -2782,6 +2790,12 @@ class SaltRunOptionParser(six.with_metaclass(OptionParserMeta, action='store_true', help=('Start the runner operation and immediately return control.') ) + self.add_option( + '--skip-grains', + default=False, + action='store_true', + help=('Do not load grains.') + ) group = self.output_options_group = optparse.OptionGroup( self, 'Output Options', 'Configure your preferred output format.' ) @@ -3037,6 +3051,14 @@ class SaltSSHOptionParser(six.with_metaclass(OptionParserMeta, action='store_true', help='Run command via sudo.' 
) + auth_group.add_option( + '--skip-roster', + dest='ssh_skip_roster', + default=False, + action='store_true', + help='If hostname is not found in the roster, do not store the information' + 'into the default roster file (flat).' + ) self.add_option_group(auth_group) scan_group = optparse.OptionGroup( diff --git a/salt/utils/path.py b/salt/utils/path.py index 53f94792b3..b9700fdc3a 100644 --- a/salt/utils/path.py +++ b/salt/utils/path.py @@ -4,15 +4,25 @@ Platform independent versions of some os/os.path functions. Gets around PY2's lack of support for reading NTFS links. ''' -# Import python lib +# Import python libs from __future__ import absolute_import +import collections import errno import logging import os +import posixpath +import re import struct +import sys + +# Import Salt libs +import salt.utils.args +import salt.utils.platform +import salt.utils.stringutils +from salt.utils.decorators import memoize as real_memoize +from salt.utils.decorators.jinja import jinja_filter # Import 3rd-party libs -import salt.utils from salt.ext import six try: @@ -29,7 +39,7 @@ def islink(path): ''' Equivalent to os.path.islink() ''' - if six.PY3 or not salt.utils.is_windows(): + if six.PY3 or not salt.utils.platform.is_windows(): return os.path.islink(path) if not HAS_WIN32FILE: @@ -64,7 +74,7 @@ def readlink(path): ''' Equivalent to os.readlink() ''' - if six.PY3 or not salt.utils.is_windows(): + if six.PY3 or not salt.utils.platform.is_windows(): return os.readlink(path) if not HAS_WIN32FILE: @@ -168,3 +178,133 @@ def _get_reparse_data(path): win32file.CloseHandle(fileHandle) return reparseData + + +@jinja_filter('which') +def which(exe=None): + ''' + Python clone of /usr/bin/which + ''' + def _is_executable_file_or_link(exe): + # check for os.X_OK doesn't suffice because directory may executable + return (os.access(exe, os.X_OK) and + (os.path.isfile(exe) or os.path.islink(exe))) + + if exe: + if _is_executable_file_or_link(exe): + # executable in cwd or fullpath + return exe + + ext_list = os.environ.get('PATHEXT', '.EXE').split(';') + + @real_memoize + def _exe_has_ext(): + ''' + Do a case insensitive test if exe has a file extension match in + PATHEXT + ''' + for ext in ext_list: + try: + pattern = r'.*\.' + ext.lstrip('.') + r'$' + re.match(pattern, exe, re.I).groups() + return True + except AttributeError: + continue + return False + + # Enhance POSIX path for the reliability at some environments, when $PATH is changing + # This also keeps order, where 'first came, first win' for cases to find optional alternatives + search_path = os.environ.get('PATH') and os.environ['PATH'].split(os.pathsep) or list() + for default_path in ['/bin', '/sbin', '/usr/bin', '/usr/sbin', '/usr/local/bin']: + if default_path not in search_path: + search_path.append(default_path) + os.environ['PATH'] = os.pathsep.join(search_path) + for path in search_path: + full_path = os.path.join(path, exe) + if _is_executable_file_or_link(full_path): + return full_path + elif salt.utils.platform.is_windows() and not _exe_has_ext(): + # On Windows, check for any extensions in PATHEXT. + # Allows both 'cmd' and 'cmd.exe' to be matched. 
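A minimal usage sketch of the which() helper above, assuming a Salt checkout is importable; the executable names are only examples:

    import salt.utils.path

    print(salt.utils.path.which('sh'))      # e.g. '/bin/sh', or None if not found
    print(salt.utils.path.which('nosuch'))  # None, with a trace-level log entry
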
+ for ext in ext_list: + # Windows filesystem is case insensitive so we + # safely rely on that behavior + if _is_executable_file_or_link(full_path + ext): + return full_path + ext + log.trace('\'{0}\' could not be found in the following search path: \'{1}\''.format(exe, search_path)) + else: + log.error('No executable was passed to be searched by salt.utils.path.which()') + + return None + + +def which_bin(exes): + ''' + Scan over some possible executables and return the first one that is found + ''' + if not isinstance(exes, collections.Iterable): + return None + for exe in exes: + path = which(exe) + if not path: + continue + return path + return None + + +@jinja_filter('path_join') +def join(*parts, **kwargs): + ''' + This functions tries to solve some issues when joining multiple absolute + paths on both *nix and windows platforms. + + See tests/unit/utils/path_join_test.py for some examples on what's being + talked about here. + + The "use_posixpath" kwarg can be be used to force joining using poxixpath, + which is useful for Salt fileserver paths on Windows masters. + ''' + if six.PY3: + new_parts = [] + for part in parts: + new_parts.append(salt.utils.stringutils.to_str(part)) + parts = new_parts + + kwargs = salt.utils.args.clean_kwargs(**kwargs) + use_posixpath = kwargs.pop('use_posixpath', False) + if kwargs: + salt.utils.args.invalid_kwargs(kwargs) + + pathlib = posixpath if use_posixpath else os.path + + # Normalize path converting any os.sep as needed + parts = [pathlib.normpath(p) for p in parts] + + try: + root = parts.pop(0) + except IndexError: + # No args passed to func + return '' + + if not parts: + ret = root + else: + stripped = [p.lstrip(os.sep) for p in parts] + try: + ret = pathlib.join(root, *stripped) + except UnicodeDecodeError: + # This is probably Python 2 and one of the parts contains unicode + # characters in a bytestring. First try to decode to the system + # encoding. 
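And a sketch of join(), whose use_posixpath flag matters mainly for fileserver paths built on Windows masters (the paths below are illustrative):

    import salt.utils.path

    # On a POSIX master this yields '/srv/salt/top.sls'
    print(salt.utils.path.join('/srv', 'salt', 'top.sls'))
    # Forcing posixpath keeps forward slashes regardless of the local OS
    print(salt.utils.path.join('salt', 'files', 'base', use_posixpath=True))
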
+ try: + enc = __salt_system_encoding__ + except NameError: + enc = sys.stdin.encoding or sys.getdefaultencoding() + try: + ret = pathlib.join(root.decode(enc), + *[x.decode(enc) for x in stripped]) + except UnicodeDecodeError: + # Last resort, try UTF-8 + ret = pathlib.join(root.decode('UTF-8'), + *[x.decode('UTF-8') for x in stripped]) + return pathlib.normpath(ret) diff --git a/salt/utils/pkg/__init__.py b/salt/utils/pkg/__init__.py index 6fe8147cdf..68e5864676 100644 --- a/salt/utils/pkg/__init__.py +++ b/salt/utils/pkg/__init__.py @@ -10,8 +10,9 @@ import os import re # Import Salt libs -import salt.utils +import salt.utils # Can be removed once is_true is moved import salt.utils.files +import salt.utils.versions log = logging.getLogger(__name__) @@ -86,7 +87,7 @@ def match_version(desired, available, cmp_func=None, ignore_epoch=False): if not oper: oper = '==' for candidate in available: - if salt.utils.compare_versions(ver1=candidate, + if salt.utils.versions.compare(ver1=candidate, oper=oper, ver2=version, cmp_func=cmp_func, diff --git a/salt/utils/pkg/rpm.py b/salt/utils/pkg/rpm.py index 0d5c21a82f..599b37ac84 100644 --- a/salt/utils/pkg/rpm.py +++ b/salt/utils/pkg/rpm.py @@ -6,6 +6,7 @@ Common functions for working with RPM packages # Import python libs from __future__ import absolute_import import collections +import datetime import logging import subprocess @@ -34,7 +35,7 @@ ARCHES = ARCHES_64 + ARCHES_32 + ARCHES_PPC + ARCHES_S390 + \ ARCHES_ALPHA + ARCHES_ARM + ARCHES_SH # EPOCHNUM can't be used until RHEL5 is EOL as it is not present -QUERYFORMAT = '%{NAME}_|-%{EPOCH}_|-%{VERSION}_|-%{RELEASE}_|-%{ARCH}_|-%{REPOID}' +QUERYFORMAT = '%{NAME}_|-%{EPOCH}_|-%{VERSION}_|-%{RELEASE}_|-%{ARCH}_|-%{REPOID}_|-%{INSTALLTIME}' def get_osarch(): @@ -59,15 +60,17 @@ def check_32(arch, osarch=None): return all(x in ARCHES_32 for x in (osarch, arch)) -def pkginfo(name, version, arch, repoid): +def pkginfo(name, version, arch, repoid, install_date=None, install_date_time_t=None): ''' Build and return a pkginfo namedtuple ''' pkginfo_tuple = collections.namedtuple( 'PkgInfo', - ('name', 'version', 'arch', 'repoid') + ('name', 'version', 'arch', 'repoid', 'install_date', + 'install_date_time_t') ) - return pkginfo_tuple(name, version, arch, repoid) + return pkginfo_tuple(name, version, arch, repoid, install_date, + install_date_time_t) def resolve_name(name, arch, osarch=None): @@ -89,7 +92,7 @@ def parse_pkginfo(line, osarch=None): pkginfo namedtuple. ''' try: - name, epoch, version, release, arch, repoid = line.split('_|-') + name, epoch, version, release, arch, repoid, install_time = line.split('_|-') # Handle unpack errors (should never happen with the queryformat we are # using, but can't hurt to be careful). except ValueError: @@ -101,7 +104,10 @@ def parse_pkginfo(line, osarch=None): if epoch not in ('(none)', '0'): version = ':'.join((epoch, version)) - return pkginfo(name, version, arch, repoid) + install_date = datetime.datetime.utcfromtimestamp(int(install_time)).isoformat() + "Z" + install_date_time_t = int(install_time) + + return pkginfo(name, version, arch, repoid, install_date, install_date_time_t) def combine_comments(comments): @@ -119,3 +125,38 @@ def combine_comments(comments): if not comments[idx].startswith('#'): comments[idx] = '#' + comments[idx] return '\n'.join(comments) + + +def version_to_evr(verstring): + ''' + Split the package version string into epoch, version and release. + Return this as tuple. + + The epoch is always not empty. 
The version and the release can be an empty + string if such a component could not be found in the version string. + + "2:1.0-1.2" => ('2', '1.0', '1.2) + "1.0" => ('0', '1.0', '') + "" => ('0', '', '') + ''' + if verstring in [None, '']: + return '0', '', '' + + idx_e = verstring.find(':') + if idx_e != -1: + try: + epoch = str(int(verstring[:idx_e])) + except ValueError: + # look, garbage in the epoch field, how fun, kill it + epoch = '0' # this is our fallback, deal + else: + epoch = '0' + idx_r = verstring.find('-') + if idx_r != -1: + version = verstring[idx_e + 1:idx_r] + release = verstring[idx_r + 1:] + else: + version = verstring[idx_e + 1:] + release = '' + + return epoch, version, release diff --git a/salt/utils/platform.py b/salt/utils/platform.py new file mode 100644 index 0000000000..2da117e62b --- /dev/null +++ b/salt/utils/platform.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +''' +Functions for identifying which platform a machine is +''' +# Import Python libs +from __future__ import absolute_import +import os +import subprocess +import sys + +# Import Salt libs +from salt.utils.decorators import memoize as real_memoize + + +@real_memoize +def is_windows(): + ''' + Simple function to return if a host is Windows or not + ''' + return sys.platform.startswith('win') + + +@real_memoize +def is_proxy(): + ''' + Return True if this minion is a proxy minion. + Leverages the fact that is_linux() and is_windows + both return False for proxies. + TODO: Need to extend this for proxies that might run on + other Unices + ''' + import __main__ as main + # This is a hack. If a proxy minion is started by other + # means, e.g. a custom script that creates the minion objects + # then this will fail. + ret = False + try: + # Changed this from 'salt-proxy in main...' to 'proxy in main...' + # to support the testsuite's temp script that is called 'cli_salt_proxy' + if 'proxy' in main.__file__: + ret = True + except AttributeError: + pass + return ret + + +@real_memoize +def is_linux(): + ''' + Simple function to return if a host is Linux or not. 
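The version_to_evr() split above can be exercised directly; a quick check of its docstring examples, assuming salt.utils.pkg.rpm is importable:

    from salt.utils.pkg.rpm import version_to_evr

    assert version_to_evr('2:1.0-1.2') == ('2', '1.0', '1.2')
    assert version_to_evr('1.0') == ('0', '1.0', '')
    assert version_to_evr('') == ('0', '', '')
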
+ Note for a proxy minion, we need to return something else + ''' + return sys.platform.startswith('linux') + + +@real_memoize +def is_darwin(): + ''' + Simple function to return if a host is Darwin (macOS) or not + ''' + return sys.platform.startswith('darwin') + + +@real_memoize +def is_sunos(): + ''' + Simple function to return if host is SunOS or not + ''' + return sys.platform.startswith('sunos') + + +@real_memoize +def is_smartos(): + ''' + Simple function to return if host is SmartOS (Illumos) or not + ''' + if not is_sunos(): + return False + else: + return os.uname()[3].startswith('joyent_') + + +@real_memoize +def is_smartos_globalzone(): + ''' + Function to return if host is SmartOS (Illumos) global zone or not + ''' + if not is_smartos(): + return False + else: + cmd = ['zonename'] + try: + zonename = subprocess.Popen( + cmd, shell=False, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + except OSError: + return False + if zonename.returncode: + return False + if zonename.stdout.read().strip() == 'global': + return True + + return False + + +@real_memoize +def is_smartos_zone(): + ''' + Function to return if host is SmartOS (Illumos) and not the gz + ''' + if not is_smartos(): + return False + else: + cmd = ['zonename'] + try: + zonename = subprocess.Popen( + cmd, shell=False, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + except OSError: + return False + if zonename.returncode: + return False + if zonename.stdout.read().strip() == 'global': + return False + + return True + + +@real_memoize +def is_freebsd(): + ''' + Simple function to return if host is FreeBSD or not + ''' + return sys.platform.startswith('freebsd') + + +@real_memoize +def is_netbsd(): + ''' + Simple function to return if host is NetBSD or not + ''' + return sys.platform.startswith('netbsd') + + +@real_memoize +def is_openbsd(): + ''' + Simple function to return if host is OpenBSD or not + ''' + return sys.platform.startswith('openbsd') + + +@real_memoize +def is_aix(): + ''' + Simple function to return if host is AIX or not + ''' + return sys.platform.startswith('aix') diff --git a/salt/utils/process.py b/salt/utils/process.py index b25d1594bc..cee0194c63 100644 --- a/salt/utils/process.py +++ b/salt/utils/process.py @@ -19,14 +19,16 @@ import multiprocessing.util # Import salt libs import salt.defaults.exitcodes -import salt.utils +import salt.utils # Can be removed once appendproctitle is moved import salt.utils.files +import salt.utils.path +import salt.utils.platform import salt.log.setup import salt.defaults.exitcodes from salt.log.mixins import NewStyleClassMixIn # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import queue, range # pylint: disable=import-error,redefined-builtin from tornado import gen @@ -55,7 +57,8 @@ def notify_systemd(): try: import systemd.daemon except ImportError: - if salt.utils.which('systemd-notify') and systemd_notify_call('--booted'): + if salt.utils.path.which('systemd-notify') \ + and systemd_notify_call('--booted'): return systemd_notify_call('--ready') return False @@ -81,7 +84,7 @@ def set_pidfile(pidfile, user): pass log.debug(('Created pidfile: {0}').format(pidfile)) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return True import pwd # after confirming not running Windows @@ -131,8 +134,10 @@ def get_pidfile(pidfile): ''' with salt.utils.files.fopen(pidfile) as pdf: pid = pdf.read() - - return int(pid) + if pid: + return int(pid) + else: + return def clean_proc(proc, wait_for_kill=10): 
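A small usage sketch of the salt.utils.platform helpers introduced above (run from a Salt checkout):

    import salt.utils.platform

    if salt.utils.platform.is_windows():
        print('Windows host')
    elif salt.utils.platform.is_linux():
        print('Linux host')
    else:
        # e.g. macOS, the BSDs, SunOS/SmartOS or AIX
        print('darwin?', salt.utils.platform.is_darwin())
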
@@ -278,7 +283,7 @@ class ProcessManager(object): if kwargs is None: kwargs = {} - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Need to ensure that 'log_queue' is correctly transferred to # processes that inherit from 'MultiprocessingProcess'. if type(MultiprocessingProcess) is type(tgt) and ( @@ -348,7 +353,7 @@ class ProcessManager(object): self._restart_processes = False def send_signal_to_processes(self, signal_): - if (salt.utils.is_windows() and + if (salt.utils.platform.is_windows() and signal_ in (signal.SIGTERM, signal.SIGINT)): # On Windows, the subprocesses automatically have their signal # handlers invoked. If you send one of these signals while the @@ -439,7 +444,7 @@ class ProcessManager(object): return signal.default_int_handler(signal.SIGTERM)(*args) else: return - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): if multiprocessing.current_process().name != 'MainProcess': # Since the main process will kill subprocesses by tree, # no need to do anything in the subprocesses. @@ -560,7 +565,7 @@ class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn): return instance def __init__(self, *args, **kwargs): - if (salt.utils.is_windows() and + if (salt.utils.platform.is_windows() and not hasattr(self, '_is_child') and self.__setstate__.__code__ is MultiprocessingProcess.__setstate__.__code__): @@ -591,7 +596,7 @@ class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn): # 'log_queue' from kwargs. super(MultiprocessingProcess, self).__init__(*args, **kwargs) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # On Windows, the multiprocessing.Process object is reinitialized # in the child process via the constructor. Due to this, methods # such as ident() and is_alive() won't work properly. So we use @@ -662,7 +667,7 @@ class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn): class SignalHandlingMultiprocessingProcess(MultiprocessingProcess): def __init__(self, *args, **kwargs): super(SignalHandlingMultiprocessingProcess, self).__init__(*args, **kwargs) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): if hasattr(self, '_is_child'): # On Windows, no need to call register_after_fork(). 
# register_after_fork() would only work on Windows if called diff --git a/salt/utils/psutil_compat.py b/salt/utils/psutil_compat.py index a32712860a..cbb9cd9da9 100644 --- a/salt/utils/psutil_compat.py +++ b/salt/utils/psutil_compat.py @@ -14,7 +14,7 @@ Built off of http://grodola.blogspot.com/2014/01/psutil-20-porting.html from __future__ import absolute_import # Import Salt libs -import salt.ext.six as six +from salt.ext import six # No exception handling, as we want ImportError if psutil doesn't exist import psutil # pylint: disable=3rd-party-module-not-gated diff --git a/salt/utils/pycrypto.py b/salt/utils/pycrypto.py index ea6d5370f7..f2220b368a 100644 --- a/salt/utils/pycrypto.py +++ b/salt/utils/pycrypto.py @@ -29,6 +29,7 @@ except ImportError: # Import salt libs import salt.utils +import salt.utils.stringutils from salt.exceptions import SaltInvocationError @@ -43,7 +44,7 @@ def secure_password(length=20, use_random=True): pw += re.sub( r'\W', '', - salt.utils.to_str(CRand.get_random_bytes(1)) + salt.utils.stringutils.to_str(CRand.get_random_bytes(1)) ) else: pw += random.SystemRandom().choice(string.ascii_letters + string.digits) diff --git a/salt/utils/pydsl.py b/salt/utils/pydsl.py index 221b24e826..1587e61d93 100644 --- a/salt/utils/pydsl.py +++ b/salt/utils/pydsl.py @@ -88,12 +88,12 @@ from __future__ import absolute_import from uuid import uuid4 as _uuid # Import salt libs +import salt.utils.versions from salt.utils.odict import OrderedDict -from salt.utils import warn_until from salt.state import HighState # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six REQUISITES = set('listen require watch prereq use listen_in require_in watch_in prereq_in use_in onchanges onfail'.split()) @@ -140,7 +140,7 @@ class Sls(object): def include(self, *sls_names, **kws): if 'env' in kws: - warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. 
This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' diff --git a/salt/utils/pyobjects.py b/salt/utils/pyobjects.py index b67252db3b..9306a3a3ed 100644 --- a/salt/utils/pyobjects.py +++ b/salt/utils/pyobjects.py @@ -11,7 +11,7 @@ import logging from salt.utils.odict import OrderedDict from salt.utils.schema import Prepareable -import salt.ext.six as six +from salt.ext import six REQUISITES = ('listen', 'onchanges', 'onfail', 'require', 'watch', 'use', 'listen_in', 'onchanges_in', 'onfail_in', 'require_in', 'watch_in', 'use_in') diff --git a/salt/utils/raetevent.py b/salt/utils/raetevent.py index 2e75443b87..2ed1428e6f 100644 --- a/salt/utils/raetevent.py +++ b/salt/utils/raetevent.py @@ -18,19 +18,28 @@ import salt.payload import salt.loader import salt.state import salt.utils.event -from salt.utils import kinds +import salt.utils.kinds as kinds from salt import transport from salt import syspaths -from raet import raeting, nacling -from raet.lane.stacking import LaneStack -from raet.lane.yarding import RemoteYard + +try: + from raet import raeting, nacling + from raet.lane.stacking import LaneStack + from raet.lane.yarding import RemoteYard + HAS_RAET = True +except ImportError: + HAS_RAET = False # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) +def __virtual__(): + return HAS_RAET + + class RAETEvent(object): ''' The base class used to manage salt events diff --git a/salt/utils/raetlane.py b/salt/utils/raetlane.py index 7d6ffa8a83..f76d51b9d3 100644 --- a/salt/utils/raetlane.py +++ b/salt/utils/raetlane.py @@ -56,7 +56,7 @@ import time # Import Salt Libs import logging -from salt.utils import kinds +import salt.utils.kinds as kinds log = logging.getLogger(__name__) diff --git a/salt/utils/reactor.py b/salt/utils/reactor.py index bb3f387fa7..58c7e98c23 100644 --- a/salt/utils/reactor.py +++ b/salt/utils/reactor.py @@ -18,7 +18,7 @@ import salt.defaults.exitcodes # Import 3rd-party libs import yaml -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -274,12 +274,19 @@ class ReactWrap(object): try: f_call = salt.utils.format_call(l_fun, low) kwargs = f_call.get('kwargs', {}) + if 'arg' not in kwargs: + kwargs['arg'] = [] + if 'kwarg' not in kwargs: + kwargs['kwarg'] = {} # TODO: Setting the user doesn't seem to work for actual remote publishes if low['state'] in ('runner', 'wheel'): # Update called function's low data with event user to # segregate events fired by reactor and avoid reaction loops kwargs['__user__'] = self.event_user + # Replace ``state`` kwarg which comes from high data compiler. + # It breaks some runner functions and seems unnecessary. 
+ kwargs['__state__'] = kwargs.pop('state') l_fun(*f_call.get('args', ()), **kwargs) except Exception: diff --git a/salt/utils/rsax931.py b/salt/utils/rsax931.py index b7ecfcd0b8..f827cc6db8 100644 --- a/salt/utils/rsax931.py +++ b/salt/utils/rsax931.py @@ -3,17 +3,18 @@ Create and verify ANSI X9.31 RSA signatures using OpenSSL libcrypto ''' -# python libs +# Import Python libs from __future__ import absolute_import import glob import sys import os -# salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform +import salt.utils.stringutils -# 3rd-party libs -import salt.ext.six as six +# Import 3rd-party libs +from salt.ext import six from ctypes import cdll, c_char_p, c_int, c_void_p, pointer, create_string_buffer from ctypes.util import find_library @@ -29,13 +30,13 @@ def _load_libcrypto(): ''' if sys.platform.startswith('win'): return cdll.LoadLibrary('libeay32') - elif getattr(sys, 'frozen', False) and salt.utils.is_smartos(): + elif getattr(sys, 'frozen', False) and salt.utils.platform.is_smartos(): return cdll.LoadLibrary(glob.glob(os.path.join( os.path.dirname(sys.executable), 'libcrypto.so*'))[0]) else: lib = find_library('crypto') - if not lib and salt.utils.is_sunos(): + if not lib and salt.utils.platform.is_sunos(): # Solaris-like distribution that use pkgsrc have # libraries in a non standard location. # (SmartOS, OmniOS, OpenIndiana, ...) @@ -98,7 +99,7 @@ class RSAX931Signer(object): :param str keydata: The RSA private key in PEM format ''' - keydata = salt.utils.to_bytes(keydata, 'ascii') + keydata = salt.utils.stringutils.to_bytes(keydata, 'ascii') self._bio = libcrypto.BIO_new_mem_buf(keydata, len(keydata)) self._rsa = c_void_p(libcrypto.RSA_new()) if not libcrypto.PEM_read_bio_RSAPrivateKey(self._bio, pointer(self._rsa), None, None): @@ -118,7 +119,7 @@ class RSAX931Signer(object): ''' # Allocate a buffer large enough for the signature. Freed by ctypes. buf = create_string_buffer(libcrypto.RSA_size(self._rsa)) - msg = salt.utils.to_bytes(msg) + msg = salt.utils.stringutils.to_bytes(msg) size = libcrypto.RSA_private_encrypt(len(msg), msg, buf, self._rsa, RSA_X931_PADDING) if size < 0: raise ValueError('Unable to encrypt message') @@ -135,7 +136,7 @@ class RSAX931Verifier(object): :param str pubdata: The RSA public key in PEM format ''' - pubdata = salt.utils.to_bytes(pubdata, 'ascii') + pubdata = salt.utils.stringutils.to_bytes(pubdata, 'ascii') pubdata = pubdata.replace(six.b('RSA '), six.b('')) self._bio = libcrypto.BIO_new_mem_buf(pubdata, len(pubdata)) self._rsa = c_void_p(libcrypto.RSA_new()) @@ -157,7 +158,7 @@ class RSAX931Verifier(object): ''' # Allocate a buffer large enough for the signature. Freed by ctypes. 
buf = create_string_buffer(libcrypto.RSA_size(self._rsa)) - signed = salt.utils.to_bytes(signed) + signed = salt.utils.stringutils.to_bytes(signed) size = libcrypto.RSA_public_decrypt(len(signed), signed, buf, self._rsa, RSA_X931_PADDING) if size < 0: raise ValueError('Unable to decrypt message') diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index bbf4fbd06b..61e087e607 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -333,17 +333,20 @@ import logging import errno import random import yaml +import copy # Import Salt libs import salt.config -import salt.utils +import salt.utils # Can be removed once appendproctitle and daemonize_if are moved +import salt.utils.args import salt.utils.error import salt.utils.event import salt.utils.files import salt.utils.jid -import salt.utils.process -import salt.utils.args import salt.utils.minion +import salt.utils.platform +import salt.utils.process +import salt.utils.stringutils import salt.loader import salt.minion import salt.payload @@ -352,11 +355,10 @@ import salt.exceptions import salt.log.setup as log_setup import salt.defaults.exitcodes from salt.utils.odict import OrderedDict -from salt.utils.process import os_is_running, default_signals, SignalHandlingMultiprocessingProcess from salt.utils.yamldumper import SafeOrderedDumper # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error try: @@ -477,7 +479,7 @@ class Schedule(object): try: with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_: fp_.write( - salt.utils.to_bytes( + salt.utils.stringutils.to_bytes( yaml.dump( {'schedule': self._get_schedule(include_pillar=False)}, Dumper=SafeOrderedDumper @@ -666,12 +668,12 @@ class Schedule(object): multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: - thread_cls = SignalHandlingMultiprocessingProcess + thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess else: thread_cls = threading.Thread if multiprocessing_enabled: - with default_signals(signal.SIGINT, signal.SIGTERM): + with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data)) # Reset current signals before starting the process in # order not to inherit the current signal handlers @@ -745,7 +747,8 @@ class Schedule(object): ''' Execute this method in a multiprocess or thread ''' - if salt.utils.is_windows() or self.opts.get('transport') == 'zeromq': + if salt.utils.platform.is_windows() \ + or self.opts.get('transport') == 'zeromq': # Since function references can't be pickled and pickling # is required when spawning new processes on Windows, regenerate # the functions and returners. 
@@ -792,7 +795,8 @@ class Schedule(object): if 'schedule' in job: log.debug('schedule.handle_func: Checking job against ' 'fun {0}: {1}'.format(ret['fun'], job)) - if ret['schedule'] == job['schedule'] and os_is_running(job['pid']): + if ret['schedule'] == job['schedule'] \ + and salt.utils.process.os_is_running(job['pid']): jobcount += 1 log.debug( 'schedule.handle_func: Incrementing jobcount, now ' @@ -805,7 +809,7 @@ class Schedule(object): ret['schedule'], data['maxrunning'])) return False - if multiprocessing_enabled and not salt.utils.is_windows(): + if multiprocessing_enabled and not salt.utils.platform.is_windows(): # Reconfigure multiprocessing logging after daemonizing log_setup.setup_multiprocessing_logging() @@ -844,10 +848,16 @@ class Schedule(object): if argspec.keywords: # this function accepts **kwargs, pack in the publish data for key, val in six.iteritems(ret): - kwargs['__pub_{0}'.format(key)] = val + kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val) ret['return'] = self.functions[func](*args, **kwargs) + # runners do not provide retcode + if 'retcode' in self.functions.pack['__context__']: + ret['retcode'] = self.functions.pack['__context__']['retcode'] + + ret['success'] = True + data_returner = data.get('returner', None) if data_returner or self.schedule_returner: if 'return_config' in data: @@ -856,7 +866,7 @@ class Schedule(object): ret['ret_kwargs'] = data['return_kwargs'] rets = [] for returner in [data_returner, self.schedule_returner]: - if isinstance(returner, str): + if isinstance(returner, six.string_types): rets.append(returner) elif isinstance(returner, list): rets.extend(returner) @@ -864,7 +874,6 @@ class Schedule(object): for returner in OrderedDict.fromkeys(rets): ret_str = '{0}.returner'.format(returner) if ret_str in self.returners: - ret['success'] = True self.returners[ret_str](ret) else: log.info( @@ -873,11 +882,6 @@ class Schedule(object): ) ) - # runners do not provide retcode - if 'retcode' in self.functions.pack['__context__']: - ret['retcode'] = self.functions.pack['__context__']['retcode'] - - ret['success'] = True except Exception: log.exception("Unhandled exception running {0}".format(ret['fun'])) # Although catch-all exception handlers are bad, the exception here @@ -1132,21 +1136,28 @@ class Schedule(object): log.error('Invalid date string {0}. ' 'Ignoring job {1}.'.format(i, job)) continue - when = int(time.mktime(when__.timetuple())) - if when >= now: - _when.append(when) + _when.append(int(time.mktime(when__.timetuple()))) if data['_splay']: _when.append(data['_splay']) + # Sort the list of "whens" from earlier to later schedules _when.sort() + + for i in _when: + if i < now and len(_when) > 1: + # Remove all missed schedules except the latest one. + # We need it to detect if it was triggered previously. + _when.remove(i) + if _when: - # Grab the first element - # which is the next run time + # Grab the first element, which is the next run time or + # last scheduled time in the past. when = _when[0] if '_run' not in data: - data['_run'] = True + # Prevent run of jobs from the past + data['_run'] = bool(when >= now) if not data['_next_fire_time']: data['_next_fire_time'] = when @@ -1327,7 +1338,7 @@ class Schedule(object): multiprocessing_enabled = self.opts.get('multiprocessing', True) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Temporarily stash our function references. # You can't pickle function references, and pickling is # required when spawning new processes on Windows. 
@@ -1337,13 +1348,13 @@ class Schedule(object): self.returners = {} try: if multiprocessing_enabled: - thread_cls = SignalHandlingMultiprocessingProcess + thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess else: thread_cls = threading.Thread proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, func, data)) if multiprocessing_enabled: - with default_signals(signal.SIGINT, signal.SIGTERM): + with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers proc.start() @@ -1356,7 +1367,7 @@ class Schedule(object): if '_seconds' in data: data['_next_fire_time'] = now + data['_seconds'] data['_splay'] = None - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Restore our function references. self.functions = functions self.returners = returners @@ -1377,7 +1388,7 @@ def clean_proc_dir(opts): job = salt.payload.Serial(opts).load(fp_) except Exception: # It's corrupted # Windows cannot delete an open file - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): fp_.close() try: os.unlink(fn_) @@ -1392,7 +1403,7 @@ def clean_proc_dir(opts): 'pid {0} still exists.'.format(job['pid'])) else: # Windows cannot delete an open file - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): fp_.close() # Maybe the file is already gone try: diff --git a/salt/utils/schema.py b/salt/utils/schema.py index 6f1d824b3a..7ce3371ee8 100644 --- a/salt/utils/schema.py +++ b/salt/utils/schema.py @@ -332,7 +332,7 @@ from salt.utils.odict import OrderedDict # Import 3rd-party libs #import yaml -import salt.ext.six as six +from salt.ext import six BASE_SCHEMA_URL = 'https://non-existing.saltstack.com/schemas' RENDER_COMMENT_YAML_MAX_LINE_LENGTH = 80 diff --git a/salt/utils/shlex.py b/salt/utils/shlex.py new file mode 100644 index 0000000000..5b45174d83 --- /dev/null +++ b/salt/utils/shlex.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +''' +Modified versions of functions from shlex module +''' +from __future__ import absolute_import + +# Import Python libs +import shlex + +# Import 3rd-party libs +from salt.ext import six + + +def split(s, **kwargs): + ''' + Only split if variable is a string + ''' + if isinstance(s, six.string_types): + return shlex.split(s, **kwargs) + else: + return s diff --git a/salt/utils/stormpath.py b/salt/utils/stormpath.py deleted file mode 100644 index 516e2d57a7..0000000000 --- a/salt/utils/stormpath.py +++ /dev/null @@ -1,79 +0,0 @@ -# -*- coding: utf-8 -*- -''' -Support for Stormpath - -.. versionadded:: 2015.8.0 -''' - -# Import python libs -from __future__ import absolute_import, print_function -import logging - -# Import salt libs -import salt.utils.http - -log = logging.getLogger(__name__) - - -def query(action=None, - command=None, - args=None, - method='GET', - header_dict=None, - data=None, - opts=None): - ''' - Make a web call to Stormpath - - .. 
versionadded:: 2015.8.0 - ''' - if opts is None: - opts = {} - - apiid = opts.get('stormpath', {}).get('apiid', None) - apikey = opts.get('stormpath', {}).get('apikey', None) - path = 'https://api.stormpath.com/v1/' - - if action: - path += action - - if command: - path += '/{0}'.format(command) - - log.debug('Stormpath URL: {0}'.format(path)) - - if not isinstance(args, dict): - args = {} - - if header_dict is None: - header_dict = {} - - if method != 'POST': - header_dict['Accept'] = 'application/json' - - decode = True - if method == 'DELETE': - decode = False - - return_content = None - result = salt.utils.http.query( - path, - method, - username=apiid, - password=apikey, - params=args, - data=data, - header_dict=header_dict, - decode=decode, - decode_type='json', - text=True, - status=True, - opts=opts, - ) - log.debug( - 'Stormpath Response Status Code: {0}'.format( - result['status'] - ) - ) - - return [result['status'], result.get('dict', {})] diff --git a/salt/utils/stringutils.py b/salt/utils/stringutils.py new file mode 100644 index 0000000000..d3a5fb79c3 --- /dev/null +++ b/salt/utils/stringutils.py @@ -0,0 +1,163 @@ +# -*- coding: utf-8 -*- + +# Import Python libs +from __future__ import absolute_import +import os +import string + +# Import Salt libs +from salt.utils.decorators.jinja import jinja_filter + +# Import 3rd-party libs +from salt.ext import six +from salt.ext.six.moves import range # pylint: disable=redefined-builtin + + +@jinja_filter('to_bytes') +def to_bytes(s, encoding=None): + ''' + Given bytes, bytearray, str, or unicode (python 2), return bytes (str for + python 2) + ''' + if six.PY3: + if isinstance(s, bytes): + return s + if isinstance(s, bytearray): + return bytes(s) + if isinstance(s, six.string_types): + return s.encode(encoding or __salt_system_encoding__) + raise TypeError('expected bytes, bytearray, or str') + else: + return to_str(s, encoding) + + +def to_str(s, encoding=None): + ''' + Given str, bytes, bytearray, or unicode (py2), return str + ''' + # This shouldn't be six.string_types because if we're on PY2 and we already + # have a string, we should just return it. + if isinstance(s, str): + return s + if six.PY3: + if isinstance(s, (bytes, bytearray)): + # https://docs.python.org/3/howto/unicode.html#the-unicode-type + # replace error with U+FFFD, REPLACEMENT CHARACTER + return s.decode(encoding or __salt_system_encoding__, "replace") + raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s))) + else: + if isinstance(s, bytearray): + return str(s) + if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable + return s.encode(encoding or __salt_system_encoding__) + raise TypeError('expected str, bytearray, or unicode') + + +def to_unicode(s, encoding=None): + ''' + Given str or unicode, return unicode (str for python 3) + ''' + if not isinstance(s, (bytes, bytearray, six.string_types)): + return s + if six.PY3: + if isinstance(s, (bytes, bytearray)): + return to_str(s, encoding) + else: + # This needs to be str and not six.string_types, since if the string is + # already a unicode type, it does not need to be decoded (and doing so + # will raise an exception). + if isinstance(s, str): + return s.decode(encoding or __salt_system_encoding__) + return s + + +@jinja_filter('str_to_num') # Remove this for Neon +@jinja_filter('to_num') +def to_num(text): + ''' + Convert a string to a number. 
+ Returns an integer if the string represents an integer, a floating + point number if the string is a real number, or the string unchanged + otherwise. + ''' + try: + return int(text) + except ValueError: + try: + return float(text) + except ValueError: + return text + + +def is_quoted(value): + ''' + Return a single or double quote, if a string is wrapped in extra quotes. + Otherwise return an empty string. + ''' + ret = '' + if isinstance(value, six.string_types) \ + and value[0] == value[-1] \ + and value.startswith(('\'', '"')): + ret = value[0] + return ret + + +def dequote(value): + ''' + Remove extra quotes around a string. + ''' + if is_quoted(value): + return value[1:-1] + return value + + +@jinja_filter('is_hex') +def is_hex(value): + ''' + Returns True if value is a hexidecimal string, otherwise returns False + ''' + try: + int(value, 16) + return True + except (TypeError, ValueError): + return False + + +def is_binary(data): + ''' + Detects if the passed string of data is binary or text + ''' + if '\0' in data: + return True + if not data: + return False + + text_characters = ''.join([chr(x) for x in range(32, 127)] + list('\n\r\t\b')) + # Get the non-text characters (map each character to itself then use the + # 'remove' option to get rid of the text characters.) + if six.PY3: + trans = ''.maketrans('', '', text_characters) + nontext = data.translate(trans) + else: + trans = string.maketrans('', '') # pylint: disable=no-member + nontext = data.translate(trans, text_characters) + + # If more than 30% non-text characters, then + # this is considered binary data + if float(len(nontext)) / len(data) > 0.30: + return True + return False + + +@jinja_filter('random_str') +def random(size=32): + key = os.urandom(size) + return key.encode('base64').replace('\n', '')[:size] + + +@jinja_filter('contains_whitespace') +def contains_whitespace(text): + ''' + Returns True if there are any whitespace characters in the string + ''' + return any(x.isspace() for x in text) diff --git a/salt/utils/systemd.py b/salt/utils/systemd.py index 471d9bb241..b41dce546b 100644 --- a/salt/utils/systemd.py +++ b/salt/utils/systemd.py @@ -11,6 +11,7 @@ import subprocess # Import Salt libs from salt.exceptions import SaltInvocationError import salt.utils +import salt.utils.stringutils log = logging.getLogger(__name__) @@ -63,7 +64,7 @@ def version(context=None): ['systemctl', '--version'], close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] - outstr = salt.utils.to_str(stdout) + outstr = salt.utils.stringutils.to_str(stdout) try: ret = int(outstr.splitlines()[0].split()[-1]) except (IndexError, ValueError): diff --git a/salt/utils/templates.py b/salt/utils/templates.py index 1069a29532..bbb834bb41 100644 --- a/salt/utils/templates.py +++ b/salt/utils/templates.py @@ -5,7 +5,7 @@ Template render systems from __future__ import absolute_import -# Import python libs +# Import Python libs import codecs import os import logging @@ -13,10 +13,10 @@ import tempfile import traceback import sys -# Import third party libs +# Import 3rd-party libs import jinja2 import jinja2.ext -import salt.ext.six as six +from salt.ext import six if sys.version_info[:2] >= (3, 5): import importlib.machinery # pylint: disable=no-name-in-module,import-error @@ -26,10 +26,11 @@ else: import imp USE_IMPORTLIB = False -# Import salt libs +# Import Salt libs import salt.utils import salt.utils.http import salt.utils.files +import salt.utils.platform import salt.utils.yamlencoding import salt.utils.locales 
import salt.utils.hashutils @@ -39,7 +40,7 @@ from salt.exceptions import ( import salt.utils.jinja import salt.utils.network from salt.utils.odict import OrderedDict -from salt.utils.decorators import JinjaFilter, JinjaTest, JinjaGlobal +from salt.utils.decorators.jinja import JinjaFilter, JinjaTest, JinjaGlobal from salt import __path__ as saltpath log = logging.getLogger(__name__) @@ -73,6 +74,9 @@ class AliasedLoader(object): def __getattr__(self, name): return getattr(self.wrapped, name) + def __contains__(self, name): + return name in self.wrapped + class AliasedModule(object): ''' @@ -165,7 +169,7 @@ def wrap_tmpl_func(render_str): output = render_str(tmplstr, context, tmplpath) if six.PY2: output = output.encode(SLS_ENCODING) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Write out with Windows newlines output = os.linesep.join(output.splitlines()) @@ -354,9 +358,10 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None): line, out = _get_jinja_error(trace, context=decoded_context) if not line: tmplstr = '' - raise SaltRenderError('Jinja syntax error: {0}{1}'.format(exc, out), - line, - tmplstr) + raise SaltRenderError( + 'Jinja syntax error: {0}{1}'.format(exc, out), + line, + tmplstr) except jinja2.exceptions.UndefinedError as exc: trace = traceback.extract_tb(sys.exc_info()[2]) out = _get_jinja_error(trace, context=decoded_context)[1] diff --git a/salt/utils/url.py b/salt/utils/url.py index 0998ee5a0c..32ad0b97fd 100644 --- a/salt/utils/url.py +++ b/salt/utils/url.py @@ -11,6 +11,8 @@ import sys # Import salt libs from salt.ext.six.moves.urllib.parse import urlparse, urlunparse # pylint: disable=import-error,no-name-in-module import salt.utils +import salt.utils.platform +import salt.utils.versions from salt.utils.locales import sdecode @@ -25,7 +27,7 @@ def parse(url): resource = url.split('salt://', 1)[-1] if '?env=' in resource: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the salt:// URL. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' @@ -37,7 +39,7 @@ def parse(url): else: path, saltenv = resource, None - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): path = salt.utils.sanitize_win_path_string(path) return path, saltenv @@ -47,7 +49,7 @@ def create(path, saltenv=None): ''' join `path` and `saltenv` into a 'salt://' URL. 
''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): path = salt.utils.sanitize_win_path_string(path) path = sdecode(path) @@ -60,7 +62,7 @@ def is_escaped(url): ''' test whether `url` is escaped with `|` ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return False scheme = urlparse(url).scheme @@ -77,7 +79,7 @@ def escape(url): ''' add escape character `|` to `url` ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return url scheme = urlparse(url).scheme @@ -100,7 +102,7 @@ def unescape(url): ''' remove escape character `|` from `url` ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return url scheme = urlparse(url).scheme diff --git a/salt/utils/validate/net.py b/salt/utils/validate/net.py index f96b7d0e04..9250086d81 100644 --- a/salt/utils/validate/net.py +++ b/salt/utils/validate/net.py @@ -4,16 +4,16 @@ Various network validation utilities ''' from __future__ import absolute_import -# Import python libs +# Import Python libs import re import socket -# Import salt libs -from salt.ext.six import string_types -import salt.utils +# Import Salt libs +import salt.utils.platform -# Import third party libs -if salt.utils.is_windows(): +# Import 3rd-party libs +from salt.ext.six import string_types +if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import diff --git a/salt/utils/vault.py b/salt/utils/vault.py index 30e66ab20e..e34a41bb85 100644 --- a/salt/utils/vault.py +++ b/salt/utils/vault.py @@ -16,6 +16,7 @@ import requests import salt.crypt import salt.exceptions +import salt.utils.versions log = logging.getLogger(__name__) logging.getLogger("requests").setLevel(logging.WARNING) @@ -136,7 +137,7 @@ def make_request_with_profile(method, resource, profile, **args): DEPRECATED! Make a request to Vault, with a profile including connection details. ''' - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Fluorine', 'Specifying Vault connection data within a \'profile\' has been ' 'deprecated. Please see the documentation for details on the new ' diff --git a/salt/utils/verify.py b/salt/utils/verify.py index 13e735262f..1559224e4d 100644 --- a/salt/utils/verify.py +++ b/salt/utils/verify.py @@ -27,8 +27,9 @@ from salt.log.setup import LOG_LEVELS from salt.exceptions import SaltClientError, SaltSystemExit, \ CommandExecutionError import salt.defaults.exitcodes -import salt.utils +import salt.utils # Can be removed once get_jid_list and get_user are moved import salt.utils.files +import salt.utils.platform log = logging.getLogger(__name__) @@ -146,7 +147,7 @@ def verify_files(files, user): ''' Verify that the named files exist and are owned by the named user ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return True import pwd # after confirming not running Windows try: @@ -198,7 +199,7 @@ def verify_env(dirs, user, permissive=False, pki_dir='', skip_extra=False): Verify that the named directories are in place and that the environment can shake the salt ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return win_verify_env(dirs, permissive, pki_dir, skip_extra) import pwd # after confirming not running Windows try: @@ -299,7 +300,7 @@ def check_user(user): ''' Check user and assign process uid/gid. 
''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): return True if user == salt.utils.get_user(): return True @@ -481,12 +482,21 @@ def clean_path(root, path, subdir=False): return '' +def clean_id(id_): + ''' + Returns if the passed id is clean. + ''' + if re.search(r'\.\.\{sep}'.format(sep=os.sep), id_): + return False + return True + + def valid_id(opts, id_): ''' Returns if the passed id is valid ''' try: - return bool(clean_path(opts['pki_dir'], id_)) + return bool(clean_path(opts['pki_dir'], id_)) and clean_id(id_) except (AttributeError, KeyError, TypeError) as e: return False diff --git a/salt/utils/versions.py b/salt/utils/versions.py index e4eb352415..e51ae40b75 100644 --- a/salt/utils/versions.py +++ b/salt/utils/versions.py @@ -11,15 +11,24 @@ because on python 3 you can no longer compare strings against integers. ''' -# Import pytohn libs +# Import Python libs from __future__ import absolute_import +import logging +import numbers +import sys +import warnings # pylint: disable=blacklisted-module from distutils.version import StrictVersion as _StrictVersion from distutils.version import LooseVersion as _LooseVersion # pylint: enable=blacklisted-module +# Import Salt libs +import salt.version + # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six + +log = logging.getLogger(__name__) class StrictVersion(_StrictVersion): @@ -64,3 +73,221 @@ class LooseVersion(_LooseVersion): return -1 if self._str_version > other._str_version: return 1 + + +def warn_until(version, + message, + category=DeprecationWarning, + stacklevel=None, + _version_info_=None, + _dont_call_warnings=False): + ''' + Helper function to raise a warning, by default, a ``DeprecationWarning``, + until the provided ``version``, after which, a ``RuntimeError`` will + be raised to remind the developers to remove the warning because the + target version has been reached. + + :param version: The version info or name after which the warning becomes a + ``RuntimeError``. For example ``(0, 17)`` or ``Hydrogen`` + or an instance of :class:`salt.version.SaltStackVersion`. + :param message: The warning message to be displayed. + :param category: The warning class to be thrown, by default + ``DeprecationWarning`` + :param stacklevel: There should be no need to set the value of + ``stacklevel``. Salt should be able to do the right thing. + :param _version_info_: In order to reuse this function for other SaltStack + projects, they need to be able to provide the + version info to compare to. + :param _dont_call_warnings: This parameter is used just to get the + functionality until the actual error is to be + issued. When we're only after the salt version + checks to raise a ``RuntimeError``. + ''' + if not isinstance(version, (tuple, + six.string_types, + salt.version.SaltStackVersion)): + raise RuntimeError( + 'The \'version\' argument should be passed as a tuple, string or ' + 'an instance of \'salt.version.SaltStackVersion\'.' 
+ ) + elif isinstance(version, tuple): + version = salt.version.SaltStackVersion(*version) + elif isinstance(version, six.string_types): + version = salt.version.SaltStackVersion.from_name(version) + + if stacklevel is None: + # Attribute the warning to the calling function, not to warn_until() + stacklevel = 2 + + if _version_info_ is None: + _version_info_ = salt.version.__version_info__ + + _version_ = salt.version.SaltStackVersion(*_version_info_) + + if _version_ >= version: + import inspect + caller = inspect.getframeinfo(sys._getframe(stacklevel - 1)) + raise RuntimeError( + 'The warning triggered on filename \'{filename}\', line number ' + '{lineno}, is supposed to be shown until version ' + '{until_version} is released. Current version is now ' + '{salt_version}. Please remove the warning.'.format( + filename=caller.filename, + lineno=caller.lineno, + until_version=version.formatted_version, + salt_version=_version_.formatted_version + ), + ) + + if _dont_call_warnings is False: + def _formatwarning(message, + category, + filename, + lineno, + line=None): # pylint: disable=W0613 + ''' + Replacement for warnings.formatwarning that disables the echoing of + the 'line' parameter. + ''' + return '{0}:{1}: {2}: {3}\n'.format( + filename, lineno, category.__name__, message + ) + saved = warnings.formatwarning + warnings.formatwarning = _formatwarning + warnings.warn( + message.format(version=version.formatted_version), + category, + stacklevel=stacklevel + ) + warnings.formatwarning = saved + + +def kwargs_warn_until(kwargs, + version, + category=DeprecationWarning, + stacklevel=None, + _version_info_=None, + _dont_call_warnings=False): + ''' + Helper function to raise a warning (by default, a ``DeprecationWarning``) + when unhandled keyword arguments are passed to function, until the + provided ``version_info``, after which, a ``RuntimeError`` will be raised + to remind the developers to remove the ``**kwargs`` because the target + version has been reached. + This function is used to help deprecate unused legacy ``**kwargs`` that + were added to function parameters lists to preserve backwards compatibility + when removing a parameter. See + :ref:`the deprecation development docs ` + for the modern strategy for deprecating a function parameter. + + :param kwargs: The caller's ``**kwargs`` argument value (a ``dict``). + :param version: The version info or name after which the warning becomes a + ``RuntimeError``. For example ``(0, 17)`` or ``Hydrogen`` + or an instance of :class:`salt.version.SaltStackVersion`. + :param category: The warning class to be thrown, by default + ``DeprecationWarning`` + :param stacklevel: There should be no need to set the value of + ``stacklevel``. Salt should be able to do the right thing. + :param _version_info_: In order to reuse this function for other SaltStack + projects, they need to be able to provide the + version info to compare to. + :param _dont_call_warnings: This parameter is used just to get the + functionality until the actual error is to be + issued. When we're only after the salt version + checks to raise a ``RuntimeError``. + ''' + if not isinstance(version, (tuple, + six.string_types, + salt.version.SaltStackVersion)): + raise RuntimeError( + 'The \'version\' argument should be passed as a tuple, string or ' + 'an instance of \'salt.version.SaltStackVersion\'.' 
+ ) + elif isinstance(version, tuple): + version = salt.version.SaltStackVersion(*version) + elif isinstance(version, six.string_types): + version = salt.version.SaltStackVersion.from_name(version) + + if stacklevel is None: + # Attribute the warning to the calling function, + # not to kwargs_warn_until() or warn_until() + stacklevel = 3 + + if _version_info_ is None: + _version_info_ = salt.version.__version_info__ + + _version_ = salt.version.SaltStackVersion(*_version_info_) + + if kwargs or _version_.info >= version.info: + arg_names = ', '.join('\'{0}\''.format(key) for key in kwargs) + warn_until( + version, + message='The following parameter(s) have been deprecated and ' + 'will be removed in \'{0}\': {1}.'.format(version.string, + arg_names), + category=category, + stacklevel=stacklevel, + _version_info_=_version_.info, + _dont_call_warnings=_dont_call_warnings + ) + + +def version_cmp(pkg1, pkg2, ignore_epoch=False): + ''' + Compares two version strings using salt.utils.versions.LooseVersion. This + is a fallback for providers which don't have a version comparison utility + built into them. Return -1 if version1 < version2, 0 if version1 == + version2, and 1 if version1 > version2. Return None if there was a problem + making the comparison. + ''' + normalize = lambda x: str(x).split(':', 1)[-1] if ignore_epoch else str(x) + pkg1 = normalize(pkg1) + pkg2 = normalize(pkg2) + + try: + # pylint: disable=no-member + if LooseVersion(pkg1) < LooseVersion(pkg2): + return -1 + elif LooseVersion(pkg1) == LooseVersion(pkg2): + return 0 + elif LooseVersion(pkg1) > LooseVersion(pkg2): + return 1 + except Exception as exc: + log.exception(exc) + return None + + +def compare(ver1='', oper='==', ver2='', cmp_func=None, ignore_epoch=False): + ''' + Compares two version numbers. Accepts a custom function to perform the + cmp-style version comparison, otherwise uses version_cmp(). + ''' + cmp_map = {'<': (-1,), '<=': (-1, 0), '==': (0,), + '>=': (0, 1), '>': (1,)} + if oper not in ('!=',) and oper not in cmp_map: + log.error('Invalid operator \'%s\' for version comparison', oper) + return False + + if cmp_func is None: + cmp_func = version_cmp + + cmp_result = cmp_func(ver1, ver2, ignore_epoch=ignore_epoch) + if cmp_result is None: + return False + + # Check if integer/long + if not isinstance(cmp_result, numbers.Integral): + log.error('The version comparison function did not return an ' + 'integer/long.') + return False + + if oper == '!=': + return cmp_result not in cmp_map['=='] + else: + # Gracefully handle cmp_result not in (-1, 0, 1). 
+ if cmp_result < -1: + cmp_result = -1 + elif cmp_result > 1: + cmp_result = 1 + + return cmp_result in cmp_map[oper] diff --git a/salt/utils/virtualbox.py b/salt/utils/virtualbox.py index 2798f1e6be..c055a6ff8a 100644 --- a/salt/utils/virtualbox.py +++ b/salt/utils/virtualbox.py @@ -19,7 +19,9 @@ from salt.utils.timeout import wait_for log = logging.getLogger(__name__) # Import 3rd-party libs +from salt.ext import six from salt.ext.six.moves import range + # Import virtualbox libs HAS_LIBS = False try: @@ -598,7 +600,7 @@ def vb_machinestate_to_tuple(machinestate): ''' if isinstance(machinestate, int): return MACHINE_STATES_ENUM.get(machinestate, UNKNOWN_MACHINE_STATE) - elif isinstance(machinestate, str): + elif isinstance(machinestate, six.string_types): return MACHINE_STATES.get(machinestate, UNKNOWN_MACHINE_STATE) else: return UNKNOWN_MACHINE_STATE @@ -625,9 +627,9 @@ def vb_machine_exists(name): vbox.findMachine(name) return True except Exception as e: - if isinstance(e.message, str): + if isinstance(e.message, six.string_types): message = e.message - elif hasattr(e, 'msg') and isinstance(getattr(e, 'msg'), str): + elif hasattr(e, 'msg') and isinstance(getattr(e, 'msg'), six.string_types): message = getattr(e, 'msg') else: message = '' diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 19292f2128..bfe9a14353 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -83,11 +83,13 @@ import time # Import Salt Libs import salt.exceptions import salt.modules.cmdmod -import salt.utils +import salt.utils.path +import salt.utils.platform +import salt.utils.stringutils # Import Third Party Libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611 try: from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub @@ -134,7 +136,7 @@ def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, creds :return: Dictionary ''' - esx_cmd = salt.utils.which('esxcli') + esx_cmd = salt.utils.path.which('esxcli') if not esx_cmd: log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.') return False @@ -238,12 +240,12 @@ def _get_service_instance(host, username, password, protocol, pwd=password, protocol=protocol, port=port, - sslContext=ssl._create_unverified_context(), + sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(), b64token=token, mechanism=mechanism) else: + log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else default_msg - log.trace(exc) raise salt.exceptions.VMwareConnectionError(err_msg) except Exception as exc: if 'certificate verify failed' in str(exc): @@ -262,8 +264,8 @@ def _get_service_instance(host, username, password, protocol, mechanism=mechanism ) except Exception as exc: + log.exception(exc) err_msg = exc.msg if hasattr(exc, 'msg') else str(exc) - log.trace(err_msg) raise salt.exceptions.VMwareConnectionError( 'Could not connect to host \'{0}\': ' '{1}'.format(host, err_msg)) @@ -353,7 +355,7 @@ def get_service_instance(host, username=None, password=None, protocol=None, service_instance = GetSi() if service_instance: stub = GetStub() - if salt.utils.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, str(port)])): + if salt.utils.platform.is_proxy() or (hasattr(stub, 'host') and stub.host != ':'.join([host, str(port)])): # Proxies will fork and mess up the cached service instance. 
# If this is a proxy or we are connecting to a different host # invalidate the service instance to avoid a potential memory leak @@ -388,9 +390,16 @@ def get_service_instance(host, username=None, password=None, protocol=None, mechanism, principal, domain) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: + log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: + log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return service_instance @@ -425,9 +434,16 @@ def disconnect(service_instance): log.trace('Disconnecting') try: Disconnect(service_instance) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: + log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: + log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) @@ -441,9 +457,16 @@ def is_connection_to_a_vcenter(service_instance): ''' try: api_type = service_instance.content.about.apiType + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: + log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: + log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) log.trace('api_type = {0}'.format(api_type)) if api_type == 'VirtualCenter': @@ -552,7 +575,7 @@ def get_gssapi_token(principal, host, domain): if out_token: if six.PY2: return base64.b64encode(out_token) - return base64.b64encode(salt.utils.to_bytes(out_token)) + return base64.b64encode(salt.utils.stringutils.to_bytes(out_token)) if ctx.established: break if not in_token: @@ -648,9 +671,16 @@ def get_root_folder(service_instance): try: log.trace('Retrieving root folder') return service_instance.RetrieveContent().rootFolder + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: + log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: + log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) @@ -700,9 +730,16 @@ def get_content(service_instance, obj_type, property_list=None, try: obj_ref = service_instance.content.viewManager.CreateContainerView( container_ref, [obj_type], True) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. 
Required privilege: ' + '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: + log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: + log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Create 'Traverse All' traversal spec to determine the path for @@ -738,18 +775,32 @@ def get_content(service_instance, obj_type, property_list=None, # Retrieve the contents try: content = service_instance.content.propertyCollector.RetrieveContents([filter_spec]) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: + log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: + log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) # Destroy the object view if local_traversal_spec: try: obj_ref.Destroy() + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: + log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: + log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return content @@ -1000,9 +1051,16 @@ def create_datacenter(service_instance, datacenter_name): log.trace('Creating datacenter \'{0}\''.format(datacenter_name)) try: dc_obj = root_folder.CreateDatacenter(datacenter_name) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: + log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: + log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) return dc_obj @@ -1062,9 +1120,16 @@ def create_cluster(dc_ref, cluster_name, cluster_spec): ''.format(cluster_name, dc_name)) try: dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: + log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: + log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) @@ -1084,9 +1149,16 @@ def update_cluster(cluster_ref, cluster_spec): try: task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec, modify=True) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: + log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: + log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, cluster_name, 'ClusterUpdateTask') @@ -1291,9 +1363,16 @@ def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='de task.__class__.__name__)) try: task_info = task.info + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. 
Required privilege: ' + '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: + log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: + log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: @@ -1307,9 +1386,16 @@ def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='de time_counter += 1 try: task_info = task.info + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: + log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: + log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( @@ -1324,11 +1410,19 @@ def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='de # task is in an error state try: raise task_info.error + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: + log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: + log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: + log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, diff --git a/salt/utils/vt.py b/salt/utils/vt.py index 4cde141dac..b3ffb6d239 100644 --- a/salt/utils/vt.py +++ b/salt/utils/vt.py @@ -53,6 +53,7 @@ except ImportError: # Import salt libs import salt.utils +import salt.utils.stringutils from salt.ext.six import string_types from salt.log.setup import LOG_LEVELS @@ -643,7 +644,7 @@ class Terminal(object): if self.child_fde in rlist: try: stderr = self._translate_newlines( - salt.utils.to_str( + salt.utils.stringutils.to_str( os.read(self.child_fde, maxsize) ) ) @@ -676,7 +677,7 @@ class Terminal(object): if self.child_fd in rlist: try: stdout = self._translate_newlines( - salt.utils.to_str( + salt.utils.stringutils.to_str( os.read(self.child_fd, maxsize) ) ) diff --git a/salt/utils/win_dacl.py b/salt/utils/win_dacl.py index 749cb1665d..4b41429a6e 100644 --- a/salt/utils/win_dacl.py +++ b/salt/utils/win_dacl.py @@ -134,7 +134,7 @@ import logging # Import Salt libs from salt.exceptions import CommandExecutionError, SaltInvocationError from salt.ext.six.moves import range -import salt.ext.six as six +from salt.ext import six # Import 3rd-party libs HAS_WIN32 = False diff --git a/salt/utils/win_update.py b/salt/utils/win_update.py index bc0e1e7648..fac45d9276 100644 --- a/salt/utils/win_update.py +++ b/salt/utils/win_update.py @@ -9,6 +9,7 @@ import subprocess # Import Salt libs import salt.utils +import salt.utils.args from salt.ext import six from salt.ext.six.moves import range from salt.exceptions import CommandExecutionError @@ -963,7 +964,7 @@ class WindowsUpdateAgent(object): ''' if isinstance(cmd, six.string_types): - cmd = salt.utils.shlex_split(cmd) + cmd = salt.utils.args.shlex_split(cmd) try: log.debug(cmd) diff --git a/salt/utils/yamldumper.py b/salt/utils/yamldumper.py index 6205cb6803..514aae9613 100644 --- a/salt/utils/yamldumper.py +++ 
b/salt/utils/yamldumper.py @@ -17,6 +17,7 @@ except ImportError: import yaml import collections + from salt.utils.odict import OrderedDict try: @@ -27,6 +28,17 @@ except ImportError: HAS_IOFLO = False +class IndentMixin(Dumper): + ''' + Mixin that improves YAML dumped list readability + by indenting them by two spaces, + instead of being flush with the key they are under. + ''' + + def increase_indent(self, flow=False, indentless=False): + return super(IndentMixin, self).increase_indent(flow, False) + + class OrderedDumper(Dumper): ''' A YAML dumper that represents python OrderedDict as simple YAML map. @@ -39,6 +51,14 @@ class SafeOrderedDumper(SafeDumper): ''' +class IndentedSafeOrderedDumper(IndentMixin, SafeOrderedDumper): + ''' + A YAML safe dumper that represents python OrderedDict as simple YAML map, + and also indents lists by two spaces. + ''' + pass + + def represent_ordereddict(dumper, data): return dumper.represent_dict(list(data.items())) @@ -60,6 +80,14 @@ if HAS_IOFLO: SafeOrderedDumper.add_representer(odict, represent_ordereddict) +def get_dumper(dumper_name): + return { + 'OrderedDumper': OrderedDumper, + 'SafeOrderedDumper': SafeOrderedDumper, + 'IndentedSafeOrderedDumper': IndentedSafeOrderedDumper, + }.get(dumper_name) + + def safe_dump(data, stream=None, **kwargs): ''' Use a custom dumper to ensure that defaultdict and OrderedDict are diff --git a/salt/utils/yamlencoding.py b/salt/utils/yamlencoding.py index 325b207722..15e35b83da 100644 --- a/salt/utils/yamlencoding.py +++ b/salt/utils/yamlencoding.py @@ -6,10 +6,10 @@ import io # Import 3rd-party libs import yaml -import salt.ext.six as six +from salt.ext import six # Import salt libs -from salt.utils.decorators import jinja_filter +from salt.utils.decorators.jinja import jinja_filter @jinja_filter() diff --git a/salt/utils/yamlloader.py b/salt/utils/yamlloader.py index a51d1f1f25..6fe3a3fa1b 100644 --- a/salt/utils/yamlloader.py +++ b/salt/utils/yamlloader.py @@ -4,6 +4,7 @@ from __future__ import absolute_import import warnings # Import third party libs +import re import yaml from yaml.nodes import MappingNode, SequenceNode from yaml.constructor import ConstructorError @@ -36,7 +37,7 @@ class SaltYamlSafeLoader(yaml.SafeLoader, object): to make things like sls file more intuitive. ''' def __init__(self, stream, dictclass=dict): - yaml.SafeLoader.__init__(self, stream) + super(SaltYamlSafeLoader, self).__init__(stream) if dictclass is not dict: # then assume ordered dict and use it for both !map and !omap self.add_constructor( @@ -45,9 +46,9 @@ class SaltYamlSafeLoader(yaml.SafeLoader, object): self.add_constructor( u'tag:yaml.org,2002:omap', type(self).construct_yaml_map) - self.add_constructor( - u'tag:yaml.org,2002:python/unicode', - type(self).construct_unicode) + self.add_constructor( + u'tag:yaml.org,2002:python/unicode', + type(self).construct_unicode) self.dictclass = dictclass def construct_yaml_map(self, node): @@ -101,6 +102,11 @@ class SaltYamlSafeLoader(yaml.SafeLoader, object): # an empty string. Change it to '0'. if node.value == '': node.value = '0' + elif node.tag == 'tag:yaml.org,2002:str': + # If any string comes in as a quoted unicode literal, eval it into + # the proper unicode string type. 
+ if re.match(r'^u([\'"]).+\1$', node.value, flags=re.IGNORECASE): + node.value = eval(node.value, {}, {}) # pylint: disable=W0123 return super(SaltYamlSafeLoader, self).construct_scalar(node) def flatten_mapping(self, node): diff --git a/salt/version.py b/salt/version.py index e9e46f97ff..9a32a87615 100644 --- a/salt/version.py +++ b/salt/version.py @@ -60,8 +60,9 @@ class SaltStackVersion(object): and also supports version comparison. ''' - __slots__ = ('name', 'major', 'minor', 'bugfix', 'mbugfix', 'pre_type', 'pre_num', 'noc', 'sha') + __slots__ = (u'name', u'major', u'minor', u'bugfix', u'mbugfix', u'pre_type', u'pre_num', u'noc', u'sha') + # future lint: disable=non-unicode-string git_describe_regex = re.compile( r'(?:[^\d]+)?(?P[\d]{1,4})' r'\.(?P[\d]{1,2})' @@ -70,7 +71,11 @@ class SaltStackVersion(object): r'(?:(?Prc|a|b|alpha|beta|nb)(?P[\d]{1}))?' r'(?:(?:.*)-(?P(?:[\d]+|n/a))-(?P[a-z0-9]{8}))?' ) - git_sha_regex = re.compile(r'(?P[a-z0-9]{7})') + git_sha_regex = r'(?P[a-z0-9]{7})' + if six.PY2: + git_sha_regex = git_sha_regex.decode(__salt_system_encoding__) + git_sha_regex = re.compile(git_sha_regex) + # future lint: enable=non-unicode-string # Salt versions after 0.17.0 will be numbered like: # <4-digit-year>.. @@ -88,17 +93,17 @@ class SaltStackVersion(object): # ----- Please refrain from fixing PEP-8 E203 and E265 -----> # The idea is to keep this readable. # ----------------------------------------------------------- - 'Hydrogen' : (2014, 1), - 'Helium' : (2014, 7), - 'Lithium' : (2015, 5), - 'Beryllium' : (2015, 8), - 'Boron' : (2016, 3), - 'Carbon' : (2016, 11), - 'Nitrogen' : (2017, 7), - 'Oxygen' : (MAX_SIZE - 101, 0), - 'Fluorine' : (MAX_SIZE - 100, 0), - 'Neon' : (MAX_SIZE - 99, 0), - 'Sodium' : (MAX_SIZE - 98, 0), + u'Hydrogen' : (2014, 1), + u'Helium' : (2014, 7), + u'Lithium' : (2015, 5), + u'Beryllium' : (2015, 8), + u'Boron' : (2016, 3), + u'Carbon' : (2016, 11), + u'Nitrogen' : (2017, 7), + u'Oxygen' : (MAX_SIZE - 101, 0), + u'Fluorine' : (MAX_SIZE - 100, 0), + u'Neon' : (MAX_SIZE - 99, 0), + u'Sodium' : (MAX_SIZE - 98, 0), # pylint: disable=E8265 #'Sodium' : (MAX_SIZE - 98, 0), #'Magnesium' : (MAX_SIZE - 97, 0), @@ -234,7 +239,7 @@ class SaltStackVersion(object): mbugfix = int(mbugfix) if pre_type is None: - pre_type = '' + pre_type = u'' if pre_num is None: pre_num = 0 elif isinstance(pre_num, string_types): @@ -242,7 +247,7 @@ class SaltStackVersion(object): if noc is None: noc = 0 - elif isinstance(noc, string_types) and noc == 'n/a': + elif isinstance(noc, string_types) and noc == u'n/a': noc = -1 elif isinstance(noc, string_types): noc = int(noc) @@ -265,7 +270,7 @@ class SaltStackVersion(object): match = cls.git_describe_regex.match(vstr) if not match: raise ValueError( - 'Unable to parse version string: \'{0}\''.format(version_string) + u'Unable to parse version string: \'{0}\''.format(version_string) ) return cls(*match.groups()) @@ -273,7 +278,7 @@ class SaltStackVersion(object): def from_name(cls, name): if name.lower() not in cls.LNAMES: raise ValueError( - 'Named version \'{0}\' is not known'.format(name) + u'Named version \'{0}\' is not known'.format(name) ) return cls(*cls.LNAMES[name.lower()]) @@ -349,20 +354,20 @@ class SaltStackVersion(object): @property def string(self): - version_string = '{0}.{1}.{2}'.format( + version_string = u'{0}.{1}.{2}'.format( self.major, self.minor, self.bugfix ) if self.mbugfix: - version_string += '.{0}'.format(self.mbugfix) + version_string += u'.{0}'.format(self.mbugfix) if self.pre_type: - version_string += 
'{0}{1}'.format(self.pre_type, self.pre_num) + version_string += u'{0}{1}'.format(self.pre_type, self.pre_num) if self.noc and self.sha: noc = self.noc if noc < 0: - noc = 'n/a' - version_string += '-{0}-{1}'.format(noc, self.sha) + noc = u'n/a' + version_string += u'-{0}-{1}'.format(noc, self.sha) return version_string @property @@ -370,14 +375,14 @@ class SaltStackVersion(object): if self.name and self.major > 10000: version_string = self.name if self.sse: - version_string += ' Enterprise' - version_string += ' (Unreleased)' + version_string += u' Enterprise' + version_string += u' (Unreleased)' return version_string version_string = self.string if self.sse: - version_string += ' Enterprise' + version_string += u' Enterprise' if (self.major, self.minor) in self.RMATCH: - version_string += ' ({0})'.format(self.RMATCH[(self.major, self.minor)]) + version_string += u' ({0})'.format(self.RMATCH[(self.major, self.minor)]) return version_string def __str__(self): @@ -391,7 +396,7 @@ class SaltStackVersion(object): other = SaltStackVersion(*other) else: raise ValueError( - 'Cannot instantiate Version from type \'{0}\''.format( + u'Cannot instantiate Version from type \'{0}\''.format( type(other) ) ) @@ -403,13 +408,13 @@ class SaltStackVersion(object): if self.pre_type and not other.pre_type: # We have pre-release information, the other side doesn't other_noc_info = list(other.noc_info) - other_noc_info[4] = 'zzzzz' + other_noc_info[4] = u'zzzzz' return method(self.noc_info, tuple(other_noc_info)) if not self.pre_type and other.pre_type: # The other side has pre-release informatio, we don't noc_info = list(self.noc_info) - noc_info[4] = 'zzzzz' + noc_info[4] = u'zzzzz' return method(tuple(noc_info), other.noc_info) def __lt__(self, other): @@ -433,25 +438,25 @@ class SaltStackVersion(object): def __repr__(self): parts = [] if self.name: - parts.append('name=\'{0}\''.format(self.name)) + parts.append(u'name=\'{0}\''.format(self.name)) parts.extend([ - 'major={0}'.format(self.major), - 'minor={0}'.format(self.minor), - 'bugfix={0}'.format(self.bugfix) + u'major={0}'.format(self.major), + u'minor={0}'.format(self.minor), + u'bugfix={0}'.format(self.bugfix) ]) if self.mbugfix: - parts.append('minor-bugfix={0}'.format(self.mbugfix)) + parts.append(u'minor-bugfix={0}'.format(self.mbugfix)) if self.pre_type: - parts.append('{0}={1}'.format(self.pre_type, self.pre_num)) + parts.append(u'{0}={1}'.format(self.pre_type, self.pre_num)) noc = self.noc if noc == -1: - noc = 'n/a' + noc = u'n/a' if noc and self.sha: parts.extend([ - 'noc={0}'.format(noc), - 'sha={0}'.format(self.sha) + u'noc={0}'.format(noc), + u'sha={0}'.format(self.sha) ]) - return '<{0} {1}>'.format(self.__class__.__name__, ' '.join(parts)) + return u'<{0} {1}>'.format(self.__class__.__name__, u' '.join(parts)) # ----- Hardcoded Salt Codename Version Information -----------------------------------------------------------------> @@ -469,15 +474,15 @@ def __discover_version(saltstack_version): import os import subprocess - if 'SETUP_DIRNAME' in globals(): + if u'SETUP_DIRNAME' in globals(): # This is from the exec() call in Salt's setup.py cwd = SETUP_DIRNAME # pylint: disable=E0602 - if not os.path.exists(os.path.join(cwd, '.git')): + if not os.path.exists(os.path.join(cwd, u'.git')): # This is not a Salt git checkout!!! Don't even try to parse... 
return saltstack_version else: cwd = os.path.abspath(os.path.dirname(__file__)) - if not os.path.exists(os.path.join(os.path.dirname(cwd), '.git')): + if not os.path.exists(os.path.join(os.path.dirname(cwd), u'.git')): # This is not a Salt git checkout!!! Don't even try to parse... return saltstack_version @@ -488,12 +493,12 @@ def __discover_version(saltstack_version): cwd=cwd ) - if not sys.platform.startswith('win'): + if not sys.platform.startswith(u'win'): # Let's not import `salt.utils` for the above check - kwargs['close_fds'] = True + kwargs[u'close_fds'] = True process = subprocess.Popen( - ['git', 'describe', '--tags', '--first-parent', '--match', 'v[0-9]*', '--always'], **kwargs) + [u'git', u'describe', u'--tags', u'--first-parent', u'--match', u'v[0-9]*', u'--always'], **kwargs) out, err = process.communicate() @@ -501,7 +506,7 @@ def __discover_version(saltstack_version): # The git version running this might not support --first-parent # Revert to old command process = subprocess.Popen( - ['git', 'describe', '--tags', '--match', 'v[0-9]*', '--always'], **kwargs) + [u'git', u'describe', u'--tags', u'--match', u'v[0-9]*', u'--always'], **kwargs) out, err = process.communicate() out = out.strip() err = err.strip() @@ -558,7 +563,7 @@ def salt_information(): ''' Report version of salt. ''' - yield 'Salt', __version__ + yield u'Salt', __version__ def dependency_information(include_salt_cloud=False): @@ -566,39 +571,39 @@ def dependency_information(include_salt_cloud=False): Report versions of library dependencies. ''' libs = [ - ('Python', None, sys.version.rsplit('\n')[0].strip()), - ('Jinja2', 'jinja2', '__version__'), - ('M2Crypto', 'M2Crypto', 'version'), - ('msgpack-python', 'msgpack', 'version'), - ('msgpack-pure', 'msgpack_pure', 'version'), - ('pycrypto', 'Crypto', '__version__'), - ('pycryptodome', 'Cryptodome', 'version_info'), - ('libnacl', 'libnacl', '__version__'), - ('PyYAML', 'yaml', '__version__'), - ('ioflo', 'ioflo', '__version__'), - ('PyZMQ', 'zmq', '__version__'), - ('RAET', 'raet', '__version__'), - ('ZMQ', 'zmq', 'zmq_version'), - ('Mako', 'mako', '__version__'), - ('Tornado', 'tornado', 'version'), - ('timelib', 'timelib', 'version'), - ('dateutil', 'dateutil', '__version__'), - ('pygit2', 'pygit2', '__version__'), - ('libgit2', 'pygit2', 'LIBGIT2_VERSION'), - ('smmap', 'smmap', '__version__'), - ('cffi', 'cffi', '__version__'), - ('pycparser', 'pycparser', '__version__'), - ('gitdb', 'gitdb', '__version__'), - ('gitpython', 'git', '__version__'), - ('python-gnupg', 'gnupg', '__version__'), - ('mysql-python', 'MySQLdb', '__version__'), - ('cherrypy', 'cherrypy', '__version__'), - ('docker-py', 'docker', '__version__'), + (u'Python', None, sys.version.rsplit(u'\n')[0].strip()), + (u'Jinja2', u'jinja2', u'__version__'), + (u'M2Crypto', u'M2Crypto', u'version'), + (u'msgpack-python', u'msgpack', u'version'), + (u'msgpack-pure', u'msgpack_pure', u'version'), + (u'pycrypto', u'Crypto', u'__version__'), + (u'pycryptodome', u'Cryptodome', u'version_info'), + (u'libnacl', u'libnacl', u'__version__'), + (u'PyYAML', u'yaml', u'__version__'), + (u'ioflo', u'ioflo', u'__version__'), + (u'PyZMQ', u'zmq', u'__version__'), + (u'RAET', u'raet', u'__version__'), + (u'ZMQ', u'zmq', u'zmq_version'), + (u'Mako', u'mako', u'__version__'), + (u'Tornado', u'tornado', u'version'), + (u'timelib', u'timelib', u'version'), + (u'dateutil', u'dateutil', u'__version__'), + (u'pygit2', u'pygit2', u'__version__'), + (u'libgit2', u'pygit2', u'LIBGIT2_VERSION'), + (u'smmap', u'smmap', 
u'__version__'), + (u'cffi', u'cffi', u'__version__'), + (u'pycparser', u'pycparser', u'__version__'), + (u'gitdb', u'gitdb', u'__version__'), + (u'gitpython', u'git', u'__version__'), + (u'python-gnupg', u'gnupg', u'__version__'), + (u'mysql-python', u'MySQLdb', u'__version__'), + (u'cherrypy', u'cherrypy', u'__version__'), + (u'docker-py', u'docker', u'__version__'), ] if include_salt_cloud: libs.append( - ('Apache Libcloud', 'libcloud', '__version__'), + (u'Apache Libcloud', u'libcloud', u'__version__'), ) for name, imp, attr in libs: @@ -611,7 +616,7 @@ def dependency_information(include_salt_cloud=False): if callable(version): version = version() if isinstance(version, (tuple, list)): - version = '.'.join(map(str, version)) + version = u'.'.join(map(str, version)) yield name, version except Exception: yield name, None @@ -630,41 +635,45 @@ def system_information(): win_ver = platform.win32_ver() if lin_ver[0]: - return ' '.join(lin_ver) + return u' '.join(lin_ver) elif mac_ver[0]: - if isinstance(mac_ver[1], (tuple, list)) and ''.join(mac_ver[1]): - return ' '.join([mac_ver[0], '.'.join(mac_ver[1]), mac_ver[2]]) + if isinstance(mac_ver[1], (tuple, list)) and u''.join(mac_ver[1]): + return u' '.join([mac_ver[0], u'.'.join(mac_ver[1]), mac_ver[2]]) else: - return ' '.join([mac_ver[0], mac_ver[2]]) + return u' '.join([mac_ver[0], mac_ver[2]]) elif win_ver[0]: - return ' '.join(win_ver) + return u' '.join(win_ver) else: - return '' + return u'' version = system_version() release = platform.release() if platform.win32_ver()[0]: import win32api # pylint: disable=3rd-party-module-not-gated - if ((sys.version_info.major == 2 and sys.version_info >= (2, 7, 12)) or - (sys.version_info.major == 3 and sys.version_info >= (3, 5, 2))): - if win32api.GetVersionEx(1)[8] > 1: - server = {'Vista': '2008Server', - '7': '2008ServerR2', - '8': '2012Server', - '8.1': '2012ServerR2', - '10': '2016Server'} - release = server.get(platform.release(), - 'UNKServer') - _, ver, sp, extra = platform.win32_ver() - version = ' '.join([release, ver, sp, extra]) + server = {u'Vista': u'2008Server', + u'7': u'2008ServerR2', + u'8': u'2012Server', + u'8.1': u'2012ServerR2', + u'10': u'2016Server'} + # Starting with Python 2.7.12 and 3.5.2 the `platform.uname()` function + # started reporting the Desktop version instead of the Server version on + # Server versions of Windows, so we need to look those up + # So, if you find a Server Platform that's a key in the server + # dictionary, then lookup the actual Server Release. + # If this is a Server Platform then `GetVersionEx` will return a number + # greater than 1. 
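The comment above explains why the desktop-style release name reported by newer Python builds has to be translated back to a Server name. A minimal standalone sketch of that lookup, with a hypothetical helper name and product_type standing in for the win32api.GetVersionEx(1)[8] value, which is greater than 1 on Windows Server SKUs:

SERVER_RELEASES = {
    'Vista': '2008Server',
    '7': '2008ServerR2',
    '8': '2012Server',
    '8.1': '2012ServerR2',
    '10': '2016Server',
}

def server_release(release, product_type):
    # Only translate when this really is a Server SKU and the desktop-style
    # release name is one we know how to map.
    if product_type > 1 and release in SERVER_RELEASES:
        return SERVER_RELEASES[release]
    return release

assert server_release('10', 3) == '2016Server'  # Server SKU
assert server_release('10', 1) == '10'          # Workstation SKU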
+ if win32api.GetVersionEx(1)[8] > 1 and release in server: + release = server[release] + _, ver, sp, extra = platform.win32_ver() + version = ' '.join([release, ver, sp, extra]) system = [ - ('system', platform.system()), - ('dist', ' '.join(linux_distribution(full_distribution_name=False))), - ('release', release), - ('machine', platform.machine()), - ('version', version), - ('locale', locale.getpreferredencoding()), + (u'system', platform.system()), + (u'dist', u' '.join(linux_distribution(full_distribution_name=False))), + (u'release', release), + (u'machine', platform.machine()), + (u'version', version), + (u'locale', locale.getpreferredencoding()), ] for name, attr in system: @@ -680,9 +689,9 @@ def versions_information(include_salt_cloud=False): lib_info = list(dependency_information(include_salt_cloud)) sys_info = list(system_information()) - return {'Salt Version': dict(salt_info), - 'Dependency Versions': dict(lib_info), - 'System Versions': dict(sys_info)} + return {u'Salt Version': dict(salt_info), + u'Dependency Versions': dict(lib_info), + u'System Versions': dict(sys_info)} def versions_report(include_salt_cloud=False): @@ -691,21 +700,21 @@ def versions_report(include_salt_cloud=False): ''' ver_info = versions_information(include_salt_cloud) - lib_pad = max(len(name) for name in ver_info['Dependency Versions']) - sys_pad = max(len(name) for name in ver_info['System Versions']) + lib_pad = max(len(name) for name in ver_info[u'Dependency Versions']) + sys_pad = max(len(name) for name in ver_info[u'System Versions']) padding = max(lib_pad, sys_pad) + 1 - fmt = '{0:>{pad}}: {1}' + fmt = u'{0:>{pad}}: {1}' info = [] - for ver_type in ('Salt Version', 'Dependency Versions', 'System Versions'): - info.append('{0}:'.format(ver_type)) + for ver_type in (u'Salt Version', u'Dependency Versions', u'System Versions'): + info.append(u'{0}:'.format(ver_type)) # List dependencies in alphabetical, case insensitive order for name in sorted(ver_info[ver_type], key=lambda x: x.lower()): ver = fmt.format(name, - ver_info[ver_type][name] or 'Not Installed', + ver_info[ver_type][name] or u'Not Installed', pad=padding) info.append(ver) - info.append(' ') + info.append(u' ') for line in info: yield line @@ -727,10 +736,10 @@ def msi_conformant_version(): month = __saltstack_version__.minor minor = __saltstack_version__.bugfix commi = __saltstack_version__.noc - return '{0}.{1}.{2}.{3}'.format(year2, month, minor, commi) + return u'{0}.{1}.{2}.{3}'.format(year2, month, minor, commi) -if __name__ == '__main__': - if len(sys.argv) == 2 and sys.argv[1] == 'msi': +if __name__ == u'__main__': + if len(sys.argv) == 2 and sys.argv[1] == u'msi': # Building the msi requires an msi-conformant version print(msi_conformant_version()) else: diff --git a/salt/wheel/file_roots.py b/salt/wheel/file_roots.py index 4662e7eb3d..109c8ea79c 100644 --- a/salt/wheel/file_roots.py +++ b/salt/wheel/file_roots.py @@ -12,7 +12,7 @@ import salt.utils import salt.utils.files # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def find(path, saltenv='base'): diff --git a/salt/wheel/key.py b/salt/wheel/key.py index 73f9f64c70..9f1a17bfa7 100644 --- a/salt/wheel/key.py +++ b/salt/wheel/key.py @@ -36,8 +36,9 @@ import logging # Import salt libs from salt.key import get_key import salt.crypt -import salt.utils +import salt.utils # Can be removed once pem_finger is moved import salt.utils.files +import salt.utils.platform from salt.utils.sanitizers import clean @@ -361,7 +362,7 @@ def gen(id_=None, 
keysize=2048): # The priv key is given the Read-Only attribute. The causes `os.remove` to # fail in Windows. - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): os.chmod(priv, 128) os.remove(priv) diff --git a/salt/wheel/pillar_roots.py b/salt/wheel/pillar_roots.py index 53614088f7..9eab69344a 100644 --- a/salt/wheel/pillar_roots.py +++ b/salt/wheel/pillar_roots.py @@ -13,7 +13,7 @@ import salt.utils import salt.utils.files # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def find(path, saltenv='base'): diff --git a/scripts/salt-master b/scripts/salt-master index eeaae97c50..b4323bd532 100755 --- a/scripts/salt-master +++ b/scripts/salt-master @@ -3,12 +3,12 @@ Start the salt-master ''' +import salt.utils.platform from salt.scripts import salt_master -from salt.utils import is_windows if __name__ == '__main__': - if is_windows(): + if salt.utils.platform.is_windows(): # Since this file does not have a '.py' extension, when running on # Windows, spawning any addional processes will fail due to Python # not being able to load this 'module' in the new process. diff --git a/scripts/salt-minion b/scripts/salt-minion index e787fd8f22..74a9fa1d67 100755 --- a/scripts/salt-minion +++ b/scripts/salt-minion @@ -3,13 +3,13 @@ This script is used to kick off a salt minion daemon ''' +import salt.utils.platform from salt.scripts import salt_minion -from salt.utils import is_windows from multiprocessing import freeze_support if __name__ == '__main__': - if is_windows(): + if salt.utils.platform.is_windows(): # Since this file does not have a '.py' extension, when running on # Windows, spawning any addional processes will fail due to Python # not being able to load this 'module' in the new process. diff --git a/scripts/salt-proxy b/scripts/salt-proxy index 9d2a3b2f43..e921e6590b 100755 --- a/scripts/salt-proxy +++ b/scripts/salt-proxy @@ -5,13 +5,13 @@ This script is used to kick off a salt proxy minion daemon ''' from __future__ import absolute_import +import salt.utils.platform from salt.scripts import salt_proxy -from salt.utils import is_windows from multiprocessing import freeze_support if __name__ == '__main__': - if is_windows(): + if salt.utils.platform.is_windows(): # Since this file does not have a '.py' extension, when running on # Windows, spawning any addional processes will fail due to Python # not being able to load this 'module' in the new process. diff --git a/scripts/salt-unity b/scripts/salt-unity index c81ed60b66..8180b68a24 100644 --- a/scripts/salt-unity +++ b/scripts/salt-unity @@ -5,7 +5,7 @@ import sys # Import salt libs import salt.scripts -from salt.utils import is_windows +import salt.utils.platform def get_avail(): @@ -43,7 +43,7 @@ def redirect(): if __name__ == '__main__': - if is_windows(): + if salt.utils.platform.is_windows(): # Since this file does not have a '.py' extension, when running on # Windows, spawning any addional processes will fail due to Python # not being able to load this 'module' in the new process. 
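A minimal sketch of the clean-up idiom used in salt/wheel/key.py above (the path argument is hypothetical): on Windows the generated private key carries the Read-Only attribute, so it has to be cleared before os.remove() will succeed, and doing so is harmless on other platforms.

import os
import stat

def remove_private_key(priv_path):
    # stat.S_IWRITE == 128: clearing the Read-Only attribute lets the file be
    # unlinked on Windows.
    os.chmod(priv_path, stat.S_IWRITE)
    os.remove(priv_path)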
diff --git a/tests/conftest.py b/tests/conftest.py index 51ba0c6c70..7591cd46df 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -46,11 +46,11 @@ from _pytest.terminal import TerminalReporter # Import 3rd-party libs import psutil -import salt.ext.six as six +from salt.ext import six # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path import salt.log.setup from salt.utils.odict import OrderedDict @@ -282,14 +282,14 @@ def pytest_runtest_setup(item): message = skip_if_binaries_missing_marker.kwargs.get('message', None) if check_all: for binary in binaries: - if salt.utils.which(binary) is None: + if salt.utils.path.which(binary) is None: pytest.skip( '{0}The "{1}" binary was not found'.format( message and '{0}. '.format(message) or '', binary ) ) - elif salt.utils.which_bin(binaries) is None: + elif salt.utils.path.which_bin(binaries) is None: pytest.skip( '{0}None of the following binaries was found: {1}'.format( message and '{0}. '.format(message) or '', diff --git a/tests/consist.py b/tests/consist.py index 6df7f15367..fea84b59f5 100644 --- a/tests/consist.py +++ b/tests/consist.py @@ -9,13 +9,13 @@ import pprint import optparse # Import Salt libs -from salt.utils import get_colors +import salt.utils # Import 3rd-party libs import yaml -import salt.ext.six as six +from salt.ext import six -colors = get_colors() +colors = salt.utils.get_colors() def parse(): diff --git a/tests/eventlisten.py b/tests/eventlisten.py index d9f9a26727..65bbeed94d 100644 --- a/tests/eventlisten.py +++ b/tests/eventlisten.py @@ -18,7 +18,7 @@ import os import salt.utils.event # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def parse(): diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 91bdb5ed90..f2651f9470 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -49,11 +49,13 @@ import salt.minion import salt.runner import salt.output import salt.version -import salt.utils +import salt.utils # Can be removed once get_colors and appendproctitle are moved import salt.utils.files +import salt.utils.path +import salt.utils.platform import salt.utils.process +import salt.utils.stringutils import salt.log.setup as salt_log_setup -from salt.ext import six from salt.utils.verify import verify_env from salt.utils.immutabletypes import freeze from salt.utils.nb_popen import NonBlockingPopen @@ -68,7 +70,7 @@ except ImportError: # Import 3rd-party libs import yaml import msgpack -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import cStringIO try: @@ -187,7 +189,7 @@ class TestDaemon(object): def __init__(self, parser): self.parser = parser self.colors = salt.utils.get_colors(self.parser.options.no_colors is False) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # There's no shell color support on windows... for key in self.colors: self.colors[key] = '' @@ -528,8 +530,8 @@ class TestDaemon(object): **self.colors ) ) - keygen = salt.utils.which('ssh-keygen') - sshd = salt.utils.which('sshd') + keygen = salt.utils.path.which('ssh-keygen') + sshd = salt.utils.path.which('sshd') if not (keygen and sshd): print('WARNING: Could not initialize SSH subsystem. 
Tests for salt-ssh may break!') @@ -562,7 +564,7 @@ class TestDaemon(object): ) _, keygen_err = keygen_process.communicate() if keygen_err: - print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_err))) + print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_err))) sshd_config_path = os.path.join(FILES, 'conf/_ssh/sshd_config') shutil.copy(sshd_config_path, RUNTIME_VARS.TMP_CONF_DIR) auth_key_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub') @@ -605,7 +607,7 @@ class TestDaemon(object): ) _, keygen_dsa_err = keygen_process_dsa.communicate() if keygen_dsa_err: - print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_dsa_err))) + print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_dsa_err))) keygen_process_ecdsa = subprocess.Popen( [keygen, '-t', @@ -625,7 +627,7 @@ class TestDaemon(object): ) _, keygen_escda_err = keygen_process_ecdsa.communicate() if keygen_escda_err: - print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_escda_err))) + print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_escda_err))) keygen_process_ed25519 = subprocess.Popen( [keygen, '-t', @@ -645,7 +647,7 @@ class TestDaemon(object): ) _, keygen_ed25519_err = keygen_process_ed25519.communicate() if keygen_ed25519_err: - print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_ed25519_err))) + print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_ed25519_err))) with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd_config'), 'a') as ssh_config: ssh_config.write('AuthorizedKeysFile {0}\n'.format(auth_key_file)) @@ -666,7 +668,7 @@ class TestDaemon(object): ) _, sshd_err = self.sshd_process.communicate() if sshd_err: - print('sshd had errors on startup: {0}'.format(salt.utils.to_str(sshd_err))) + print('sshd had errors on startup: {0}'.format(salt.utils.stringutils.to_str(sshd_err))) else: os.environ['SSH_DAEMON_RUNNING'] = 'True' roster_path = os.path.join(FILES, 'conf/_ssh/roster') @@ -826,7 +828,7 @@ class TestDaemon(object): for opts_dict in (master_opts, syndic_master_opts): if 'ext_pillar' not in opts_dict: opts_dict['ext_pillar'] = [] - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): opts_dict['ext_pillar'].append( {'cmd_yaml': 'type {0}'.format(os.path.join(FILES, 'ext.yaml'))}) else: diff --git a/tests/integration/cli/test_grains.py b/tests/integration/cli/test_grains.py index bebedfab48..3ef0d35217 100644 --- a/tests/integration/cli/test_grains.py +++ b/tests/integration/cli/test_grains.py @@ -64,6 +64,8 @@ class GrainsTargetingTest(ShellCase): with salt.utils.files.fopen(key_file, 'a'): pass + import logging + log = logging.getLogger(__name__) # ping disconnected minion and ensure it times out and returns with correct message try: ret = '' diff --git a/tests/integration/client/test_kwarg.py b/tests/integration/client/test_kwarg.py index bb5b369de0..ac4d69a0e7 100644 --- a/tests/integration/client/test_kwarg.py +++ b/tests/integration/client/test_kwarg.py @@ -94,3 +94,25 @@ class StdTest(ModuleCase): ret = self.client.cmd('minion', 'test.ping', full_return=True) for mid, data in ret.items(): self.assertIn('retcode', data) + + def test_cmd_arg_kwarg_parsing(self): + ret = self.client.cmd('minion', 'test.arg_clean', + arg=[ + 'foo', + 'bar=off', + 'baz={qux: 123}' + ], + kwarg={ + 'quux': 'Quux', + }) + + self.assertEqual(ret['minion'], { + 'args': ['foo'], + 'kwargs': { + 'bar': False, + 'baz': { + 'qux': 123, + }, 
+ 'quux': 'Quux', + }, + }) diff --git a/tests/integration/cloud/helpers/virtualbox.py b/tests/integration/cloud/helpers/virtualbox.py index 2f3cc6f86d..c7eb9897ce 100644 --- a/tests/integration/cloud/helpers/virtualbox.py +++ b/tests/integration/cloud/helpers/virtualbox.py @@ -13,7 +13,7 @@ from tests.support.unit import TestCase, skipIf from tests.support.paths import FILES # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.virtualbox # Create the cloud instance name to be used throughout the tests diff --git a/tests/integration/cloud/providers/test_oneandone.py b/tests/integration/cloud/providers/test_oneandone.py new file mode 100644 index 0000000000..8e26134f18 --- /dev/null +++ b/tests/integration/cloud/providers/test_oneandone.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Amel Ajdinovic ` +''' + +# Import Python Libs +from __future__ import absolute_import +import os + +# Import Salt Testing Libs +from tests.support.case import ShellCase +from tests.support.paths import FILES +from tests.support.unit import skipIf +from tests.support.helpers import expensiveTest, generate_random_name + +# Import Salt Libs +from salt.config import cloud_providers_config + +# Import Third-Party Libs +try: + from oneandone.client import OneAndOneService # pylint: disable=unused-import + HAS_ONEANDONE = True +except ImportError: + HAS_ONEANDONE = False + +# Create the cloud instance name to be used throughout the tests +INSTANCE_NAME = generate_random_name('CLOUD-TEST-') +PROVIDER_NAME = 'oneandone' +DRIVER_NAME = 'oneandone' + + +@skipIf(HAS_ONEANDONE is False, 'salt-cloud requires >= 1and1 1.2.0') +class OneAndOneTest(ShellCase): + ''' + Integration tests for the 1and1 cloud provider + ''' + + @expensiveTest + def setUp(self): + ''' + Sets up the test requirements + ''' + super(OneAndOneTest, self).setUp() + + # check if appropriate cloud provider and profile files are present + profile_str = 'oneandone-config' + providers = self.run_cloud('--list-providers') + if profile_str + ':' not in providers: + self.skipTest( + 'Configuration file for {0} was not found. Check {0}.conf ' + 'files in tests/integration/files/conf/cloud.*.d/ to run ' + 'these tests.'.format(PROVIDER_NAME) + ) + + # check if api_token present + config = cloud_providers_config( + os.path.join( + FILES, + 'conf', + 'cloud.providers.d', + PROVIDER_NAME + '.conf' + ) + ) + + api_token = config[profile_str][DRIVER_NAME]['api_token'] + if api_token == '': + self.skipTest( + 'api_token must be provided to ' + 'run these tests. 
Check ' + 'tests/integration/files/conf/cloud.providers.d/{0}.conf' + .format(PROVIDER_NAME) + ) + + def test_list_images(self): + ''' + Tests the return of running the --list-images command for 1and1 + ''' + image_list = self.run_cloud('--list-images {0}'.format(PROVIDER_NAME)) + self.assertIn( + 'coreOSimage', + [i.strip() for i in image_list] + ) + + def test_instance(self): + ''' + Test creating an instance on 1and1 + ''' + # check if instance with salt installed returned + try: + self.assertIn( + INSTANCE_NAME, + [i.strip() for i in self.run_cloud( + '-p oneandone-test {0}'.format(INSTANCE_NAME), timeout=500 + )] + ) + except AssertionError: + self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) + raise + + # delete the instance + try: + self.assertIn( + INSTANCE_NAME + ':', + [i.strip() for i in self.run_cloud( + '-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500 + )] + ) + except AssertionError: + raise + + def tearDown(self): + ''' + Clean up after tests + ''' + query = self.run_cloud('--query') + ret = ' {0}:'.format(INSTANCE_NAME) + + # if test instance is still present, delete it + if ret in query: + self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME), timeout=500) diff --git a/tests/integration/cloud/providers/test_virtualbox.py b/tests/integration/cloud/providers/test_virtualbox.py index 2d31b45bd0..73325014b6 100644 --- a/tests/integration/cloud/providers/test_virtualbox.py +++ b/tests/integration/cloud/providers/test_virtualbox.py @@ -22,7 +22,7 @@ from tests.integration.cloud.helpers.virtualbox import (VirtualboxTestCase, DEPLOY_PROFILE_NAME) # Import Salt Libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range from salt.config import cloud_providers_config, vm_profiles_config from salt.utils.virtualbox import (vb_xpcom_to_attribute_dict, diff --git a/tests/integration/files/conf/_ssh/sshd_config b/tests/integration/files/conf/_ssh/sshd_config index 3c6cb93f62..36247d7754 100644 --- a/tests/integration/files/conf/_ssh/sshd_config +++ b/tests/integration/files/conf/_ssh/sshd_config @@ -4,7 +4,7 @@ Port 2827 ListenAddress 127.0.0.1 Protocol 2 -UsePrivilegeSeparation yes +#UsePrivilegeSeparation yes # Turn strict modes off so that we can operate in /tmp StrictModes no diff --git a/tests/integration/files/conf/cloud.profiles.d/oneandone.conf b/tests/integration/files/conf/cloud.profiles.d/oneandone.conf new file mode 100644 index 0000000000..e2cf63129d --- /dev/null +++ b/tests/integration/files/conf/cloud.profiles.d/oneandone.conf @@ -0,0 +1,15 @@ +oneandone-test: + provider: oneandone-config + description: Testing salt-cloud create operation + vcore: 2 + cores_per_processor: 1 + ram: 2 + password: P4$$w0rD + appliance_id: 8E3BAA98E3DFD37857810E0288DD8FBA + hdds: + - + is_main: true + size: 20 + - + is_main: false + size: 20 diff --git a/tests/integration/files/conf/cloud.providers.d/oneandone.conf b/tests/integration/files/conf/cloud.providers.d/oneandone.conf new file mode 100644 index 0000000000..bc4b5f48df --- /dev/null +++ b/tests/integration/files/conf/cloud.providers.d/oneandone.conf @@ -0,0 +1,5 @@ +oneandone-config: + driver: oneandone + api_token: '' + ssh_private_key: ~/.ssh/id_rsa + ssh_public_key: ~/.ssh/id_rsa.pub diff --git a/tests/integration/files/file/base/_grains/matcher_grain.py b/tests/integration/files/file/base/_grains/matcher_grain.py index 95db7fd9ef..590ae41d16 100644 --- a/tests/integration/files/file/base/_grains/matcher_grain.py +++ 
b/tests/integration/files/file/base/_grains/matcher_grain.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +# -*- coding: utf-8 -*- + def myfunction(): grains = {} grains['match'] = 'maker' diff --git a/tests/integration/files/file/base/_grains/test_custom_grain1.py b/tests/integration/files/file/base/_grains/test_custom_grain1.py index 983ca95ee3..99d0d41976 100644 --- a/tests/integration/files/file/base/_grains/test_custom_grain1.py +++ b/tests/integration/files/file/base/_grains/test_custom_grain1.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +# -*- coding: utf-8 -*- + def myfunction(): grains = {} grains['a_custom'] = {'k1': 'v1'} diff --git a/tests/integration/files/file/base/_grains/test_custom_grain2.py b/tests/integration/files/file/base/_grains/test_custom_grain2.py index 239b8f3cb7..5783a5f130 100644 --- a/tests/integration/files/file/base/_grains/test_custom_grain2.py +++ b/tests/integration/files/file/base/_grains/test_custom_grain2.py @@ -1,4 +1,5 @@ -#!/usr/bin/env python +# -*- coding: utf-8 -*- + def myfunction(): grains = {} grains['a_custom'] = {'k2': 'v2'} diff --git a/tests/integration/files/file/base/_modules/runtests_decorators.py b/tests/integration/files/file/base/_modules/runtests_decorators.py index b43168c999..512d0b7014 100644 --- a/tests/integration/files/file/base/_modules/runtests_decorators.py +++ b/tests/integration/files/file/base/_modules/runtests_decorators.py @@ -9,6 +9,11 @@ import salt.utils.decorators def _fallbackfunc(): + ''' + CLI Example: + + .. code-block:: bash + ''' return False, 'fallback' @@ -33,11 +38,21 @@ def booldependsTrue(): @salt.utils.decorators.depends(False) def booldependsFalse(): + ''' + CLI Example: + + .. code-block:: bash + ''' return True @salt.utils.decorators.depends('time') def depends(): + ''' + CLI Example: + + .. code-block:: bash + ''' ret = {'ret': True, 'time': time.time()} return ret @@ -45,6 +60,11 @@ def depends(): @salt.utils.decorators.depends('time123') def missing_depends(): + ''' + CLI Example: + + .. code-block:: bash + ''' return True @@ -62,6 +82,11 @@ def depends_will_not_fallback(): @salt.utils.decorators.depends('time123', fallback_function=_fallbackfunc) def missing_depends_will_fallback(): + ''' + CLI Example: + + .. 
code-block:: bash + ''' ret = {'ret': True, 'time': time.time()} return ret diff --git a/tests/integration/files/file/base/_modules/runtests_helpers.py b/tests/integration/files/file/base/_modules/runtests_helpers.py index e9fbaa4077..2d64b1607b 100644 --- a/tests/integration/files/file/base/_modules/runtests_helpers.py +++ b/tests/integration/files/file/base/_modules/runtests_helpers.py @@ -16,16 +16,16 @@ import fnmatch import tempfile # Import salt libs -import salt.utils +import salt.utils.platform # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six SYS_TMP_DIR = os.path.realpath( # Avoid ${TMPDIR} and gettempdir() on MacOS as they yield a base path too long # for unix sockets: ``error: AF_UNIX path too long`` # Gentoo Portage prefers ebuild tests are rooted in ${TMPDIR} - os.environ.get('TMPDIR', tempfile.gettempdir()) if not salt.utils.is_darwin() else '/tmp' + os.environ.get('TMPDIR', tempfile.gettempdir()) if not salt.utils.platform.is_darwin() else '/tmp' ) # This tempdir path is defined on tests.integration.__init__ TMP = os.path.join(SYS_TMP_DIR, 'salt-tests-tmpdir') @@ -76,6 +76,7 @@ def get_invalid_docs(): 'state.apply', 'status.list2cmdline', 'swift.head', + 'test.rand_str', 'travisci.parse_qs', 'vsphere.clean_kwargs', 'vsphere.disconnect', diff --git a/tests/integration/files/file/base/jinja_salt_contains_function.sls b/tests/integration/files/file/base/jinja_salt_contains_function.sls new file mode 100644 index 0000000000..24978d1799 --- /dev/null +++ b/tests/integration/files/file/base/jinja_salt_contains_function.sls @@ -0,0 +1,10 @@ +{% set salt_foo_bar_exist = 'foo.bar' in salt %} +{% set salt_test_ping_exist = 'test.ping' in salt %} + +test-ping-exist: + test.succeed_without_changes: + - name: salt_test_ping_exist_{{ salt_test_ping_exist }} + +foo-bar-not-exist: + test.succeed_without_changes: + - name: salt_foo_bar_exist_{{ salt_foo_bar_exist }} diff --git a/tests/integration/files/file/base/script.py b/tests/integration/files/file/base/script.py index dc347ff1ff..ea1c887150 100644 --- a/tests/integration/files/file/base/script.py +++ b/tests/integration/files/file/base/script.py @@ -1,4 +1,7 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- + +from __future__ import absolute_import import sys print(' '.join(sys.argv[1:])) diff --git a/tests/integration/files/log_handlers/runtests_log_handler.py b/tests/integration/files/log_handlers/runtests_log_handler.py index 848215b3bd..82e1509f72 100644 --- a/tests/integration/files/log_handlers/runtests_log_handler.py +++ b/tests/integration/files/log_handlers/runtests_log_handler.py @@ -23,7 +23,7 @@ from multiprocessing import Queue import msgpack # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.log.setup log = logging.getLogger(__name__) diff --git a/tests/integration/grains/test_core.py b/tests/integration/grains/test_core.py index a219b50f3d..25ac137096 100644 --- a/tests/integration/grains/test_core.py +++ b/tests/integration/grains/test_core.py @@ -10,9 +10,9 @@ from __future__ import absolute_import from tests.support.case import ModuleCase from tests.support.unit import skipIf -# Import salt libs -import salt.utils -if salt.utils.is_windows(): +# Import Salt libs +import salt.utils.platform +if salt.utils.platform.is_windows(): try: import salt.modules.reg except ImportError: @@ -23,7 +23,7 @@ class TestGrainsCore(ModuleCase): ''' Test the core grains grains ''' - @skipIf(not salt.utils.is_windows(), 'Only run on Windows') + @skipIf(not 
salt.utils.platform.is_windows(), 'Only run on Windows') def test_win_cpu_model(self): ''' test grains['cpu_model'] diff --git a/tests/integration/minion/test_pillar.py b/tests/integration/minion/test_pillar.py index c0860f5337..a2fa587914 100644 --- a/tests/integration/minion/test_pillar.py +++ b/tests/integration/minion/test_pillar.py @@ -21,11 +21,11 @@ from tests.support.helpers import requires_system_grains # Import 3rd-party libs import yaml -import salt.ext.six as six +from salt.ext import six # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path import salt.pillar as pillar log = logging.getLogger(__name__) @@ -191,7 +191,7 @@ GPG_PILLAR_DECRYPTED = { } -@skipIf(not salt.utils.which('gpg'), 'GPG is not installed') +@skipIf(not salt.utils.path.which('gpg'), 'GPG is not installed') class DecryptGPGPillarTest(ModuleCase): ''' Tests for pillar decryption diff --git a/tests/integration/modules/test_archive.py b/tests/integration/modules/test_archive.py index d4d47df58a..8898c0ddb2 100644 --- a/tests/integration/modules/test_archive.py +++ b/tests/integration/modules/test_archive.py @@ -15,8 +15,8 @@ from tests.support.paths import TMP from tests.support.helpers import destructiveTest # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path # Import 3rd party libs try: @@ -67,16 +67,16 @@ class ArchiveTest(ModuleCase): Given a Gödel numbering $φ$ of the computable functions and a Blum complexity measure $Φ$ where a complexity class for a boundary function $f$ is defined as - + $\mathrm C(f) := \{φ_i ∈ \mathbb R^{(1)} | (∀^∞ x) Φ_i(x) ≤ f(x)\}$. - + Then there exists a total computable function $f$ so that for all $i$ - + $\mathrm{Dom}(φ_i) = \mathrm{Dom}(φ_{f(i)})$ - + and - + $\mathrm C(φ_i) ⊊ \mathrm{C}(φ_{f(i)})$. 
''')) @@ -119,7 +119,7 @@ class ArchiveTest(ModuleCase): self.assertTrue(dir_in_ret) self.assertTrue(file_in_ret) - @skipIf(not salt.utils.which('tar'), 'Cannot find tar executable') + @skipIf(not salt.utils.path.which('tar'), 'Cannot find tar executable') def test_tar_pack(self): ''' Validate using the tar function to create archives @@ -133,7 +133,7 @@ class ArchiveTest(ModuleCase): self._tear_down() - @skipIf(not salt.utils.which('tar'), 'Cannot find tar executable') + @skipIf(not salt.utils.path.which('tar'), 'Cannot find tar executable') def test_tar_unpack(self): ''' Validate using the tar function to extract archives @@ -148,7 +148,7 @@ class ArchiveTest(ModuleCase): self._tear_down() - @skipIf(not salt.utils.which('gzip'), 'Cannot find gzip executable') + @skipIf(not salt.utils.path.which('gzip'), 'Cannot find gzip executable') def test_gzip(self): ''' Validate using the gzip function @@ -162,8 +162,8 @@ class ArchiveTest(ModuleCase): self._tear_down() - @skipIf(not salt.utils.which('gzip'), 'Cannot find gzip executable') - @skipIf(not salt.utils.which('gunzip'), 'Cannot find gunzip executable') + @skipIf(not salt.utils.path.which('gzip'), 'Cannot find gzip executable') + @skipIf(not salt.utils.path.which('gunzip'), 'Cannot find gunzip executable') def test_gunzip(self): ''' Validate using the gunzip function @@ -178,7 +178,7 @@ class ArchiveTest(ModuleCase): self._tear_down() - @skipIf(not salt.utils.which('zip'), 'Cannot find zip executable') + @skipIf(not salt.utils.path.which('zip'), 'Cannot find zip executable') def test_cmd_zip(self): ''' Validate using the cmd_zip function @@ -192,8 +192,8 @@ class ArchiveTest(ModuleCase): self._tear_down() - @skipIf(not salt.utils.which('zip'), 'Cannot find zip executable') - @skipIf(not salt.utils.which('unzip'), 'Cannot find unzip executable') + @skipIf(not salt.utils.path.which('zip'), 'Cannot find zip executable') + @skipIf(not salt.utils.path.which('unzip'), 'Cannot find unzip executable') def test_cmd_unzip(self): ''' Validate using the cmd_unzip function @@ -237,7 +237,7 @@ class ArchiveTest(ModuleCase): self._tear_down() - @skipIf(not salt.utils.which('rar'), 'Cannot find rar executable') + @skipIf(not salt.utils.path.which('rar'), 'Cannot find rar executable') def test_rar(self): ''' Validate using the rar function @@ -251,8 +251,8 @@ class ArchiveTest(ModuleCase): self._tear_down() - @skipIf(not salt.utils.which('rar'), 'Cannot find rar executable') - @skipIf(not salt.utils.which('unrar'), 'Cannot find unrar executable') + @skipIf(not salt.utils.path.which('rar'), 'Cannot find rar executable') + @skipIf(not salt.utils.path.which('unrar'), 'Cannot find unrar executable') def test_unrar(self): ''' Validate using the unrar function diff --git a/tests/integration/modules/test_beacons.py b/tests/integration/modules/test_beacons.py index 475a37ff50..7c906caeb4 100644 --- a/tests/integration/modules/test_beacons.py +++ b/tests/integration/modules/test_beacons.py @@ -35,7 +35,7 @@ class BeaconsAddDeleteTest(ModuleCase): ''' Test adding and deleting a beacon ''' - _add = self.run_function('beacons.add', ['ps', [{'apache2': 'stopped'}]]) + _add = self.run_function('beacons.add', ['ps', [{'processes': {'apache2': 'stopped'}}]]) self.assertTrue(_add['result']) # save added beacon @@ -71,7 +71,7 @@ class BeaconsTest(ModuleCase): self.__class__.beacons_config_file_path = os.path.join(self.minion_conf_d_dir, 'beacons.conf') try: # Add beacon to disable - self.run_function('beacons.add', ['ps', [{'apache2': 'stopped'}]]) + 
self.run_function('beacons.add', ['ps', [{'processes': {'apache2': 'stopped'}}]]) self.run_function('beacons.save') except CommandExecutionError: self.skipTest('Unable to add beacon') @@ -143,6 +143,6 @@ class BeaconsTest(ModuleCase): # list beacons ret = self.run_function('beacons.list', return_yaml=False) if 'enabled' in ret: - self.assertEqual(ret, {'ps': [{'apache2': 'stopped'}], 'enabled': True}) + self.assertEqual(ret, {'ps': [{'processes': {'apache2': 'stopped'}}], 'enabled': True}) else: - self.assertEqual(ret, {'ps': {'apache': 'stopped'}}) + self.assertEqual(ret, {'ps': [{'processes': {'apache2': 'stopped'}}]}) diff --git a/tests/integration/modules/test_cmdmod.py b/tests/integration/modules/test_cmdmod.py index a1ecde08e7..8434361e08 100644 --- a/tests/integration/modules/test_cmdmod.py +++ b/tests/integration/modules/test_cmdmod.py @@ -15,13 +15,13 @@ from tests.support.helpers import ( ) # Import salt libs -import salt.utils +import salt.utils.path # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six -AVAILABLE_PYTHON_EXECUTABLE = salt.utils.which_bin([ +AVAILABLE_PYTHON_EXECUTABLE = salt.utils.path.which_bin([ 'python', 'python2', 'python2.6', diff --git a/tests/integration/modules/test_cp.py b/tests/integration/modules/test_cp.py index 9b3c2325e6..d8a5dd8a62 100644 --- a/tests/integration/modules/test_cp.py +++ b/tests/integration/modules/test_cp.py @@ -19,9 +19,13 @@ from tests.support.unit import skipIf import tests.support.paths as paths # Import salt libs -import salt.ext.six as six -import salt.utils import salt.utils.files +import salt.utils.path +import salt.utils.platform +import salt.utils.stringutils + +# Import 3rd-party libs +from salt.ext import six log = logging.getLogger(__name__) @@ -89,7 +93,7 @@ class CPModuleTest(ModuleCase): with salt.utils.files.fopen(src, 'r') as fp_: data = fp_.read() if six.PY3: - data = salt.utils.to_bytes(data) + data = salt.utils.stringutils.to_bytes(data) hash_str = hashlib.md5(data).hexdigest() self.run_function( @@ -105,7 +109,7 @@ class CPModuleTest(ModuleCase): self.assertIn('KNIGHT: They\'re nervous, sire.', data) self.assertNotIn('bacon', data) if six.PY3: - data = salt.utils.to_bytes(data) + data = salt.utils.stringutils.to_bytes(data) self.assertEqual(hash_str, hashlib.md5(data).hexdigest()) def test_get_file_makedirs(self): @@ -457,7 +461,7 @@ class CPModuleTest(ModuleCase): with salt.utils.files.fopen(ret, 'r') as cp_: self.assertEqual(cp_.read(), 'foo') - @skipIf(not salt.utils.which('nginx'), 'nginx not installed') + @skipIf(not salt.utils.path.which('nginx'), 'nginx not installed') @skip_if_not_root def test_cache_remote_file(self): ''' @@ -556,7 +560,7 @@ class CPModuleTest(ModuleCase): ret = self.run_function('cp.list_minion') found = False search = 'grail/scene33' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): search = r'grail\scene33' for path in ret: if search in path: @@ -603,7 +607,7 @@ class CPModuleTest(ModuleCase): with salt.utils.files.fopen(path, 'r') as fn_: data = fn_.read() if six.PY3: - data = salt.utils.to_bytes(data) + data = salt.utils.stringutils.to_bytes(data) self.assertEqual( sha256_hash['hsum'], hashlib.sha256(data).hexdigest()) diff --git a/tests/integration/modules/test_decorators.py b/tests/integration/modules/test_decorators.py index a6f919678d..dc896c343e 100644 --- a/tests/integration/modules/test_decorators.py +++ b/tests/integration/modules/test_decorators.py @@ -22,8 +22,9 @@ class DecoratorTest(ModuleCase): 
self.assertTrue(isinstance(ret['time'], float)) def test_missing_depends(self): - self.assertIn( - 'is not available', + self.assertEqual( + {'runtests_decorators.missing_depends_will_fallback': '\n CLI Example:\n\n ', + 'runtests_decorators.missing_depends': "'runtests_decorators.missing_depends' is not available."}, self.run_function('runtests_decorators.missing_depends' ) ) diff --git a/tests/integration/modules/test_disk.py b/tests/integration/modules/test_disk.py index d3672d6df8..66923573bc 100644 --- a/tests/integration/modules/test_disk.py +++ b/tests/integration/modules/test_disk.py @@ -10,16 +10,16 @@ from tests.support.case import ModuleCase from tests.support.unit import skipIf from tests.support.helpers import destructiveTest -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six @destructiveTest -@skipIf(salt.utils.is_windows(), 'No mtab on Windows') -@skipIf(salt.utils.is_darwin(), 'No mtab on Darwin') +@skipIf(salt.utils.platform.is_windows(), 'No mtab on Windows') +@skipIf(salt.utils.platform.is_darwin(), 'No mtab on Darwin') class DiskModuleVirtualizationTest(ModuleCase): ''' Test to make sure we return a clean result under Docker. Refs #8976 @@ -52,7 +52,7 @@ class DiskModuleTest(ModuleCase): self.assertTrue(isinstance(ret, dict)) if not isinstance(ret, dict): return - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): for key, val in six.iteritems(ret): self.assertTrue('filesystem' in val) self.assertTrue('512-blocks' in val) @@ -70,7 +70,7 @@ class DiskModuleTest(ModuleCase): self.assertTrue('available' in val) self.assertTrue('capacity' in val) - @skipIf(salt.utils.is_windows(), 'inode info not available on Windows') + @skipIf(salt.utils.platform.is_windows(), 'inode info not available on Windows') def test_inodeusage(self): ''' disk.inodeusage diff --git a/tests/integration/modules/test_dockermod.py b/tests/integration/modules/test_dockermod.py index 64f981aae0..f3bc50fa1e 100644 --- a/tests/integration/modules/test_dockermod.py +++ b/tests/integration/modules/test_dockermod.py @@ -18,7 +18,7 @@ from tests.support.helpers import destructiveTest from tests.support.mixins import SaltReturnAssertsMixin # Import Salt Libs -import salt.utils +import salt.utils.path # Import 3rd-party libs from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin @@ -43,7 +43,7 @@ def with_random_name(func): @destructiveTest -@skipIf(not salt.utils.which('dockerd'), 'Docker not installed') +@skipIf(not salt.utils.path.which('dockerd'), 'Docker not installed') class DockerCallTestCase(ModuleCase, SaltReturnAssertsMixin): ''' Test docker_container states diff --git a/tests/integration/modules/test_event.py b/tests/integration/modules/test_event.py index 080b7b8e02..a7d672051b 100644 --- a/tests/integration/modules/test_event.py +++ b/tests/integration/modules/test_event.py @@ -16,7 +16,7 @@ import threading from tests.support.case import ModuleCase # Import salt libs -from salt.utils import event +import salt.utils.event as event # Import 3rd-party libs from salt.ext.six.moves.queue import Queue, Empty # pylint: disable=import-error,no-name-in-module diff --git a/tests/integration/modules/test_file.py b/tests/integration/modules/test_file.py index 2cd26b2e68..be96ee4c01 100644 --- a/tests/integration/modules/test_file.py +++ b/tests/integration/modules/test_file.py @@ -15,8 +15,8 @@ from tests.support.unit import skipIf from 
tests.support.paths import FILES, TMP # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.platform class FileModuleTest(ModuleCase): @@ -51,7 +51,7 @@ class FileModuleTest(ModuleCase): shutil.rmtree(self.mydir, ignore_errors=True) super(FileModuleTest, self).tearDown() - @skipIf(salt.utils.is_windows(), 'No chgrp on Windows') + @skipIf(salt.utils.platform.is_windows(), 'No chgrp on Windows') def test_chown(self): user = getpass.getuser() if sys.platform == 'darwin': @@ -64,14 +64,14 @@ class FileModuleTest(ModuleCase): self.assertEqual(fstat.st_uid, os.getuid()) self.assertEqual(fstat.st_gid, grp.getgrnam(group).gr_gid) - @skipIf(salt.utils.is_windows(), 'No chgrp on Windows') + @skipIf(salt.utils.platform.is_windows(), 'No chgrp on Windows') def test_chown_no_user(self): user = 'notanyuseriknow' group = grp.getgrgid(pwd.getpwuid(os.getuid()).pw_gid).gr_name ret = self.run_function('file.chown', arg=[self.myfile, user, group]) self.assertIn('not exist', ret) - @skipIf(salt.utils.is_windows(), 'No chgrp on Windows') + @skipIf(salt.utils.platform.is_windows(), 'No chgrp on Windows') def test_chown_no_user_no_group(self): user = 'notanyuseriknow' group = 'notanygroupyoushoulduse' @@ -79,7 +79,7 @@ class FileModuleTest(ModuleCase): self.assertIn('Group does not exist', ret) self.assertIn('User does not exist', ret) - @skipIf(salt.utils.is_windows(), 'No chgrp on Windows') + @skipIf(salt.utils.platform.is_windows(), 'No chgrp on Windows') def test_chown_no_path(self): user = getpass.getuser() if sys.platform == 'darwin': @@ -90,7 +90,7 @@ class FileModuleTest(ModuleCase): arg=['/tmp/nosuchfile', user, group]) self.assertIn('File not found', ret) - @skipIf(salt.utils.is_windows(), 'No chgrp on Windows') + @skipIf(salt.utils.platform.is_windows(), 'No chgrp on Windows') def test_chown_noop(self): user = '' group = '' @@ -100,7 +100,7 @@ class FileModuleTest(ModuleCase): self.assertEqual(fstat.st_uid, os.getuid()) self.assertEqual(fstat.st_gid, os.getgid()) - @skipIf(salt.utils.is_windows(), 'No chgrp on Windows') + @skipIf(salt.utils.platform.is_windows(), 'No chgrp on Windows') def test_chgrp(self): if sys.platform == 'darwin': group = 'everyone' @@ -111,7 +111,7 @@ class FileModuleTest(ModuleCase): fstat = os.stat(self.myfile) self.assertEqual(fstat.st_gid, grp.getgrnam(group).gr_gid) - @skipIf(salt.utils.is_windows(), 'No chgrp on Windows') + @skipIf(salt.utils.platform.is_windows(), 'No chgrp on Windows') def test_chgrp_failure(self): group = 'thisgroupdoesntexist' ret = self.run_function('file.chgrp', arg=[self.myfile, group]) diff --git a/tests/integration/modules/test_gem.py b/tests/integration/modules/test_gem.py index da902c9f66..b0ea463b32 100644 --- a/tests/integration/modules/test_gem.py +++ b/tests/integration/modules/test_gem.py @@ -12,15 +12,16 @@ from tests.support.unit import skipIf from tests.support.helpers import destructiveTest # Import salt libs -import salt.utils +import salt.utils.path # Import 3rd-party libs from tornado.httpclient import HTTPClient GEM = 'tidy' GEM_VER = '1.1.2' -OLD_GEM = 'thor' -OLD_VERSION = '0.17.0' +OLD_GEM = 'brass' +OLD_VERSION = '1.0.0' +NEW_VERSION = '1.2.1' GEM_LIST = [GEM, OLD_GEM] @@ -35,7 +36,7 @@ def check_status(): @destructiveTest -@skipIf(not salt.utils.which('gem'), 'Gem is not available') +@skipIf(not salt.utils.path.which('gem'), 'Gem is not available') class GemModuleTest(ModuleCase): ''' Validate gem module @@ -129,18 +130,18 @@ class GemModuleTest(ModuleCase): self.run_function('gem.install', [OLD_GEM], 
version=OLD_VERSION) gem_list = self.run_function('gem.list', [OLD_GEM]) - self.assertEqual({'thor': ['0.17.0']}, gem_list) + self.assertEqual({OLD_GEM: [OLD_VERSION]}, gem_list) self.run_function('gem.update', [OLD_GEM]) gem_list = self.run_function('gem.list', [OLD_GEM]) - self.assertEqual({'thor': ['0.19.4', '0.17.0']}, gem_list) + self.assertEqual({OLD_GEM: [NEW_VERSION, OLD_VERSION]}, gem_list) self.run_function('gem.uninstall', [OLD_GEM]) self.assertFalse(self.run_function('gem.list', [OLD_GEM])) - def test_udpate_system(self): + def test_update_system(self): ''' - gem.udpate_system + gem.update_system ''' ret = self.run_function('gem.update_system') self.assertTrue(ret) diff --git a/tests/integration/modules/test_git.py b/tests/integration/modules/test_git.py index cb976c1715..6fd1524218 100644 --- a/tests/integration/modules/test_git.py +++ b/tests/integration/modules/test_git.py @@ -26,12 +26,12 @@ from tests.support.paths import TMP from tests.support.helpers import skip_if_binaries_missing # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.platform from salt.utils.versions import LooseVersion # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -41,7 +41,7 @@ def _git_version(): git_version = subprocess.Popen( ['git', '--version'], shell=False, - close_fds=False if salt.utils.is_windows else True, + close_fds=False if salt.utils.platform.is_windows() else True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0] except OSError: @@ -160,7 +160,7 @@ class GitModuleTest(ModuleCase): ) ret = self.run_function('git.add', [self.repo, newdir]) res = '\n'.join(sorted(['add \'{0}\''.format(x) for x in files_relpath])) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): res = res.replace('\\', '/') self.assertEqual(ret, res) @@ -598,7 +598,7 @@ class GitModuleTest(ModuleCase): # the full, unshortened name of the folder. Therefore you can't compare # the path returned by `tempfile.mkdtemp` and the results of `git.init` # exactly. 
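The Windows branches in the git module tests above all normalize expected paths the same way, because git prints forward-slash paths even on Windows while the expected values are built with os.path.join(). A condensed, hypothetical sketch of that normalization (not part of the test suite itself):

import os
import sys

def expected_git_path(*parts):
    # git reports forward-slash paths even on Windows, so normalize the
    # os.path.join() result before comparing it to git output.
    path = os.path.join(*parts)
    if sys.platform.startswith('win'):
        path = path.replace('\\', '/')
    return path

# e.g. expected_git_path('repo', 'newdir') yields 'repo/newdir' on Windows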
- if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): new_repo = new_repo.replace('\\', '/') # Get the name of the temp directory @@ -835,7 +835,7 @@ class GitModuleTest(ModuleCase): sorted(['rm \'' + os.path.join(entire_dir, x) + '\'' for x in self.files]) ) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): expected = expected.replace('\\', '/') self.assertEqual( self.run_function('git.rm', [self.repo, entire_dir], opts='-r'), @@ -946,7 +946,7 @@ class GitModuleTest(ModuleCase): worktree_path2 = tempfile.mkdtemp(dir=TMP) # Even though this is Windows, git commands return a unix style path - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): worktree_path = worktree_path.replace('\\', '/') worktree_path2 = worktree_path2.replace('\\', '/') diff --git a/tests/integration/modules/test_locale.py b/tests/integration/modules/test_locale.py index b202d48aa7..5059c6252a 100644 --- a/tests/integration/modules/test_locale.py +++ b/tests/integration/modules/test_locale.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Import python libs +# Import Python libs from __future__ import absolute_import # Import Salt Testing libs @@ -11,8 +11,8 @@ from tests.support.helpers import ( destructiveTest, ) -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform def _find_new_locale(current_locale): @@ -21,8 +21,8 @@ def _find_new_locale(current_locale): return locale -@skipIf(salt.utils.is_windows(), 'minion is windows') -@skipIf(salt.utils.is_darwin(), 'locale method is not supported on mac') +@skipIf(salt.utils.platform.is_windows(), 'minion is windows') +@skipIf(salt.utils.platform.is_darwin(), 'locale method is not supported on mac') @requires_salt_modules('locale') class LocaleModuleTest(ModuleCase): def test_get_locale(self): diff --git a/tests/integration/modules/test_lxc.py b/tests/integration/modules/test_lxc.py index 75b6b81170..c334f2dadd 100644 --- a/tests/integration/modules/test_lxc.py +++ b/tests/integration/modules/test_lxc.py @@ -16,7 +16,7 @@ from tests.support.helpers import ( from tests.support.unit import skipIf # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six @skipIf(True, diff --git a/tests/integration/modules/test_mac_brew.py b/tests/integration/modules/test_mac_brew.py index 33001a84a8..942bfa46ad 100644 --- a/tests/integration/modules/test_mac_brew.py +++ b/tests/integration/modules/test_mac_brew.py @@ -12,11 +12,12 @@ from tests.support.unit import skipIf from tests.support.helpers import destructiveTest, skip_if_not_root # Import Salt Libs -import salt.utils +import salt.utils.path +import salt.utils.platform from salt.exceptions import CommandExecutionError -# Import third party libs -import salt.ext.six as six +# Import 3rd-party libs +from salt.ext import six # Brew doesn't support local package installation - So, let's # Grab some small packages available online for brew @@ -26,8 +27,8 @@ DEL_PKG = 'acme' @destructiveTest @skip_if_not_root -@skipIf(not salt.utils.is_darwin(), 'Test only applies to macOS') -@skipIf(not salt.utils.which('brew'), 'This test requires the brew binary') +@skipIf(not salt.utils.platform.is_darwin(), 'Test only applies to macOS') +@skipIf(not salt.utils.path.which('brew'), 'This test requires the brew binary') class BrewModuleTest(ModuleCase): ''' Integration tests for the brew module diff --git a/tests/integration/modules/test_mac_pkgutil.py b/tests/integration/modules/test_mac_pkgutil.py index 6fc1101f8c..91c4ec371c 100644 --- 
a/tests/integration/modules/test_mac_pkgutil.py +++ b/tests/integration/modules/test_mac_pkgutil.py @@ -3,7 +3,7 @@ integration tests for mac_pkgutil ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import os @@ -12,8 +12,9 @@ from tests.support.case import ModuleCase from tests.support.paths import TMP from tests.support.helpers import destructiveTest, skip_if_not_root -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.path +import salt.utils.platform TEST_PKG_URL = 'https://distfiles.macports.org/MacPorts/MacPorts-2.3.4-10.11-ElCapitan.pkg' TEST_PKG_NAME = 'org.macports.MacPorts' @@ -30,10 +31,10 @@ class MacPkgutilModuleTest(ModuleCase): ''' Get current settings ''' - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): self.skipTest('Test only available on macOS') - if not salt.utils.which('pkgutil'): + if not salt.utils.path.which('pkgutil'): self.skipTest('Test requires pkgutil binary') def tearDown(self): diff --git a/tests/integration/modules/test_mac_ports.py b/tests/integration/modules/test_mac_ports.py index ee9de7c984..362fd17642 100644 --- a/tests/integration/modules/test_mac_ports.py +++ b/tests/integration/modules/test_mac_ports.py @@ -3,15 +3,16 @@ integration tests for mac_ports ''' -# Import python libs +# Import Python libs from __future__ import absolute_import, print_function # Import Salt Testing libs from tests.support.case import ModuleCase from tests.support.helpers import destructiveTest, skip_if_not_root -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.path +import salt.utils.platform @skip_if_not_root @@ -25,10 +26,10 @@ class MacPortsModuleTest(ModuleCase): ''' Get current settings ''' - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): self.skipTest('Test only available on macOS') - if not salt.utils.which('port'): + if not salt.utils.path.which('port'): self.skipTest('Test requires port binary') self.AGREE_INSTALLED = 'agree' in self.run_function('pkg.list_pkgs') diff --git a/tests/integration/modules/test_mac_power.py b/tests/integration/modules/test_mac_power.py index aea67bd41f..cda74d83ad 100644 --- a/tests/integration/modules/test_mac_power.py +++ b/tests/integration/modules/test_mac_power.py @@ -3,7 +3,7 @@ integration tests for mac_power ''' -# Import python libs +# Import Python libs from __future__ import absolute_import, print_function # Import Salt Testing libs @@ -11,14 +11,15 @@ from tests.support.case import ModuleCase from tests.support.unit import skipIf from tests.support.helpers import destructiveTest, skip_if_not_root, flaky -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.path +import salt.utils.platform @skip_if_not_root @flaky -@skipIf(not salt.utils.is_darwin(), 'Test only available on macOS') -@skipIf(not salt.utils.which('systemsetup'), '\'systemsetup\' binary not found in $PATH') +@skipIf(not salt.utils.platform.is_darwin(), 'Test only available on macOS') +@skipIf(not salt.utils.path.which('systemsetup'), '\'systemsetup\' binary not found in $PATH') class MacPowerModuleTest(ModuleCase): ''' Validate the mac_power module @@ -142,8 +143,8 @@ class MacPowerModuleTest(ModuleCase): @skip_if_not_root @flaky -@skipIf(not salt.utils.is_darwin(), 'Test only available on macOS') -@skipIf(not salt.utils.which('systemsetup'), '\'systemsetup\' binary not found in $PATH') +@skipIf(not salt.utils.platform.is_darwin(), 'Test only available on macOS') +@skipIf(not 
salt.utils.path.which('systemsetup'), '\'systemsetup\' binary not found in $PATH') class MacPowerModuleTestSleepOnPowerButton(ModuleCase): ''' Test power.get_sleep_on_power_button @@ -193,8 +194,8 @@ class MacPowerModuleTestSleepOnPowerButton(ModuleCase): @skip_if_not_root @flaky -@skipIf(not salt.utils.is_darwin(), 'Test only available on macOS') -@skipIf(not salt.utils.which('systemsetup'), '\'systemsetup\' binary not found in $PATH') +@skipIf(not salt.utils.platform.is_darwin(), 'Test only available on macOS') +@skipIf(not salt.utils.path.which('systemsetup'), '\'systemsetup\' binary not found in $PATH') class MacPowerModuleTestRestartPowerFailure(ModuleCase): ''' Test power.get_restart_power_failure @@ -243,8 +244,8 @@ class MacPowerModuleTestRestartPowerFailure(ModuleCase): @skip_if_not_root @flaky -@skipIf(not salt.utils.is_darwin(), 'Test only available on macOS') -@skipIf(not salt.utils.which('systemsetup'), '\'systemsetup\' binary not found in $PATH') +@skipIf(not salt.utils.platform.is_darwin(), 'Test only available on macOS') +@skipIf(not salt.utils.path.which('systemsetup'), '\'systemsetup\' binary not found in $PATH') class MacPowerModuleTestWakeOnNet(ModuleCase): ''' Test power.get_wake_on_network @@ -290,8 +291,8 @@ class MacPowerModuleTestWakeOnNet(ModuleCase): @skip_if_not_root @flaky -@skipIf(not salt.utils.is_darwin(), 'Test only available on macOS') -@skipIf(not salt.utils.which('systemsetup'), '\'systemsetup\' binary not found in $PATH') +@skipIf(not salt.utils.platform.is_darwin(), 'Test only available on macOS') +@skipIf(not salt.utils.path.which('systemsetup'), '\'systemsetup\' binary not found in $PATH') class MacPowerModuleTestWakeOnModem(ModuleCase): ''' Test power.get_wake_on_modem diff --git a/tests/integration/modules/test_mac_service.py b/tests/integration/modules/test_mac_service.py index da6396bf47..8b21700297 100644 --- a/tests/integration/modules/test_mac_service.py +++ b/tests/integration/modules/test_mac_service.py @@ -3,7 +3,7 @@ integration tests for mac_service ''' -# Import python libs +# Import Python libs from __future__ import absolute_import, print_function # Import Salt Testing libs @@ -11,13 +11,14 @@ from tests.support.case import ModuleCase from tests.support.unit import skipIf from tests.support.helpers import destructiveTest, skip_if_not_root -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.path +import salt.utils.platform -@skipIf(not salt.utils.is_darwin(), 'Test only available on macOS') -@skipIf(not salt.utils.which('launchctl'), 'Test requires launchctl binary') -@skipIf(not salt.utils.which('plutil'), 'Test requires plutil binary') +@skipIf(not salt.utils.platform.is_darwin(), 'Test only available on macOS') +@skipIf(not salt.utils.path.which('launchctl'), 'Test requires launchctl binary') +@skipIf(not salt.utils.path.which('plutil'), 'Test requires plutil binary') @skip_if_not_root class MacServiceModuleTest(ModuleCase): ''' diff --git a/tests/integration/modules/test_mac_shadow.py b/tests/integration/modules/test_mac_shadow.py index b6bb66c956..13ed8728c8 100644 --- a/tests/integration/modules/test_mac_shadow.py +++ b/tests/integration/modules/test_mac_shadow.py @@ -3,7 +3,7 @@ integration tests for mac_system ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import datetime import random @@ -14,8 +14,9 @@ from tests.support.unit import skipIf from tests.support.case import ModuleCase from tests.support.helpers import destructiveTest, skip_if_not_root -# Import salt 
libs -import salt.utils +# Import Salt libs +import salt.utils.path +import salt.utils.platform from salt.ext.six.moves import range @@ -34,9 +35,9 @@ NO_USER = __random_string() @skip_if_not_root -@skipIf(not salt.utils.is_darwin(), 'Test only available on macOS') -@skipIf(not salt.utils.which('dscl'), '\'dscl\' binary not found in $PATH') -@skipIf(not salt.utils.which('pwpolicy'), '\'pwpolicy\' binary not found in $PATH') +@skipIf(not salt.utils.platform.is_darwin(), 'Test only available on macOS') +@skipIf(not salt.utils.path.which('dscl'), '\'dscl\' binary not found in $PATH') +@skipIf(not salt.utils.path.which('pwpolicy'), '\'pwpolicy\' binary not found in $PATH') class MacShadowModuleTest(ModuleCase): ''' Validate the mac_system module diff --git a/tests/integration/modules/test_mac_softwareupdate.py b/tests/integration/modules/test_mac_softwareupdate.py index f74ece4b22..4a00067e12 100644 --- a/tests/integration/modules/test_mac_softwareupdate.py +++ b/tests/integration/modules/test_mac_softwareupdate.py @@ -3,7 +3,7 @@ integration tests for mac_softwareupdate ''' -# Import python libs +# Import Python libs from __future__ import absolute_import # Import Salt Testing libs @@ -11,13 +11,14 @@ from tests.support.unit import skipIf from tests.support.case import ModuleCase from tests.support.helpers import destructiveTest, skip_if_not_root -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.path +import salt.utils.platform @skip_if_not_root -@skipIf(not salt.utils.is_darwin(), 'Test only available on macOS') -@skipIf(not salt.utils.which('softwareupdate'), '\'softwareupdate\' binary not found in $PATH') +@skipIf(not salt.utils.platform.is_darwin(), 'Test only available on macOS') +@skipIf(not salt.utils.path.which('softwareupdate'), '\'softwareupdate\' binary not found in $PATH') class MacSoftwareUpdateModuleTest(ModuleCase): ''' Validate the mac_softwareupdate module diff --git a/tests/integration/modules/test_mac_system.py b/tests/integration/modules/test_mac_system.py index 308187c08a..ab0b0f1398 100644 --- a/tests/integration/modules/test_mac_system.py +++ b/tests/integration/modules/test_mac_system.py @@ -14,7 +14,8 @@ from tests.support.unit import skipIf from tests.support.helpers import destructiveTest, skip_if_not_root # Import salt libs -import salt.utils +import salt.utils.path +import salt.utils.platform from salt.ext.six.moves import range @@ -33,8 +34,8 @@ SET_SUBNET_NAME = __random_string() @skip_if_not_root -@skipIf(not salt.utils.is_darwin(), 'Test only available on macOS') -@skipIf(not salt.utils.which('systemsetup'), '\'systemsetup\' binary not found in $PATH') +@skipIf(not salt.utils.platform.is_darwin(), 'Test only available on macOS') +@skipIf(not salt.utils.path.which('systemsetup'), '\'systemsetup\' binary not found in $PATH') class MacSystemModuleTest(ModuleCase): ''' Validate the mac_system module diff --git a/tests/integration/modules/test_mac_timezone.py b/tests/integration/modules/test_mac_timezone.py index 18e5e5f9de..bdfe0d0549 100644 --- a/tests/integration/modules/test_mac_timezone.py +++ b/tests/integration/modules/test_mac_timezone.py @@ -10,7 +10,7 @@ Time sync do the following: - Set time to 'Do not sync' ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import datetime @@ -19,13 +19,14 @@ from tests.support.case import ModuleCase from tests.support.unit import skipIf from tests.support.helpers import destructiveTest, skip_if_not_root -# Import salt libs -import salt.utils +# Import Salt 
libs +import salt.utils.path +import salt.utils.platform @skip_if_not_root -@skipIf(not salt.utils.is_darwin(), 'Test only available on macOS') -@skipIf(not salt.utils.which('systemsetup'), '\'systemsetup\' binary not found in $PATH') +@skipIf(not salt.utils.platform.is_darwin(), 'Test only available on macOS') +@skipIf(not salt.utils.path.which('systemsetup'), '\'systemsetup\' binary not found in $PATH') class MacTimezoneModuleTest(ModuleCase): ''' Validate the mac_timezone module diff --git a/tests/integration/modules/test_mac_xattr.py b/tests/integration/modules/test_mac_xattr.py index ff5165b661..52c9016970 100644 --- a/tests/integration/modules/test_mac_xattr.py +++ b/tests/integration/modules/test_mac_xattr.py @@ -3,7 +3,7 @@ integration tests for mac_xattr ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import os @@ -11,8 +11,9 @@ import os from tests.support.case import ModuleCase from tests.support.paths import TMP -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.path +import salt.utils.platform TEST_FILE = os.path.join(TMP, 'xattr_test_file.txt') NO_FILE = os.path.join(TMP, 'xattr_no_file.txt') @@ -27,10 +28,10 @@ class MacXattrModuleTest(ModuleCase): ''' Create test file for testing extended attributes ''' - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): self.skipTest('Test only available on macOS') - if not salt.utils.which('xattr'): + if not salt.utils.path.which('xattr'): self.skipTest('Test requires xattr binary') self.run_function('file.touch', [TEST_FILE]) diff --git a/tests/integration/modules/test_mysql.py b/tests/integration/modules/test_mysql.py index 20b79da908..c6c82b7165 100644 --- a/tests/integration/modules/test_mysql.py +++ b/tests/integration/modules/test_mysql.py @@ -11,11 +11,11 @@ from tests.support.helpers import destructiveTest from tests.support.mixins import SaltReturnAssertsMixin # Import salt libs -import salt.utils +import salt.utils.path from salt.modules import mysql as mysqlmod # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin log = logging.getLogger(__name__) @@ -26,7 +26,7 @@ try: except Exception: NO_MYSQL = True -if not salt.utils.which('mysqladmin'): +if not salt.utils.path.which('mysqladmin'): NO_MYSQL = True diff --git a/tests/integration/modules/test_nilrt_ip.py b/tests/integration/modules/test_nilrt_ip.py index c199a0a8f8..1412cffb2d 100644 --- a/tests/integration/modules/test_nilrt_ip.py +++ b/tests/integration/modules/test_nilrt_ip.py @@ -3,7 +3,7 @@ integration tests for nilirt_ip ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import time @@ -12,12 +12,12 @@ from tests.support.case import ModuleCase from tests.support.unit import skipIf from tests.support.helpers import destructiveTest, skip_if_not_root -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform @skip_if_not_root -@skipIf(not salt.utils.is_linux(), 'These tests can only be run on linux') +@skipIf(not salt.utils.platform.is_linux(), 'These tests can only be run on linux') class Nilrt_ipModuleTest(ModuleCase): ''' Validate the nilrt_ip module diff --git a/tests/integration/modules/test_pip.py b/tests/integration/modules/test_pip.py index 362b08a1bc..dfebb96925 100644 --- a/tests/integration/modules/test_pip.py +++ b/tests/integration/modules/test_pip.py @@ -21,12 +21,12 @@ from tests.support.paths 
import TMP from tests.support.helpers import skip_if_not_root # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES -@skipIf(salt.utils.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed') +@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed') class PipModuleTest(ModuleCase): def setUp(self): diff --git a/tests/integration/modules/test_pkg.py b/tests/integration/modules/test_pkg.py index 10d481da6d..6c90dc9eb6 100644 --- a/tests/integration/modules/test_pkg.py +++ b/tests/integration/modules/test_pkg.py @@ -14,9 +14,9 @@ from tests.support.helpers import ( ) from tests.support.unit import skipIf -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.pkg +import salt.utils.platform class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): @@ -24,7 +24,7 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): Validate the pkg module ''' def setUp(self): - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): self.run_function('pkg.refresh_db') def test_list(self): @@ -266,7 +266,7 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin): @requires_network() @destructiveTest - @skipIf(salt.utils.is_windows(), 'pkg.upgrade not available on Windows') + @skipIf(salt.utils.platform.is_windows(), 'pkg.upgrade not available on Windows') def test_pkg_upgrade_has_pending_upgrades(self): ''' Test running a system upgrade when there are packages that need upgrading diff --git a/tests/integration/modules/test_shadow.py b/tests/integration/modules/test_shadow.py index bb8cde6261..5d8393f3a2 100644 --- a/tests/integration/modules/test_shadow.py +++ b/tests/integration/modules/test_shadow.py @@ -3,7 +3,7 @@ integration tests for shadow linux ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import random import string @@ -14,14 +14,14 @@ from tests.support.case import ModuleCase from tests.support.unit import skipIf from tests.support.helpers import destructiveTest, skip_if_not_root -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.files +import salt.utils.platform from salt.ext.six.moves import range @skip_if_not_root -@skipIf(not salt.utils.is_linux(), 'These tests can only be run on linux') +@skipIf(not salt.utils.platform.is_linux(), 'These tests can only be run on linux') class ShadowModuleTest(ModuleCase): ''' Validate the linux shadow system module diff --git a/tests/integration/modules/test_state.py b/tests/integration/modules/test_state.py index 9f44badf7f..9b1d33ce3b 100644 --- a/tests/integration/modules/test_state.py +++ b/tests/integration/modules/test_state.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Import python libs +# Import Python libs from __future__ import absolute_import import os import shutil @@ -14,13 +14,14 @@ from tests.support.unit import skipIf from tests.support.paths import TMP from tests.support.mixins import SaltReturnAssertsMixin -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.files +import salt.utils.path +import salt.utils.platform from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six class StateModuleTest(ModuleCase, SaltReturnAssertsMixin): @@ -108,7 +109,7 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin): self.assertTrue(isinstance(ret['__sls__'], type(None))) for state, ret in sls2.items(): - 
self.assertTrue(isinstance(ret['__sls__'], str)) + self.assertTrue(isinstance(ret['__sls__'], six.string_types)) def _remove_request_cache_file(self): ''' @@ -155,14 +156,14 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin): ''' self._remove_request_cache_file() - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): self.run_function('state.request', mods='modules.state.requested_win') else: self.run_function('state.request', mods='modules.state.requested') ret = self.run_function('state.run_request') - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): key = 'cmd_|-count_root_dir_contents_|-Get-ChildItem C:\\\\ | Measure-Object | %{$_.Count}_|-run' else: key = 'cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run' @@ -208,7 +209,7 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin): fi ''') - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): contents += os.linesep contents += textwrap.dedent('''\ @@ -218,7 +219,7 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin): fi ''') - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): new_contents = contents.splitlines() contents = os.linesep.join(new_contents) contents += os.linesep @@ -272,7 +273,7 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin): fi ''') - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): new_contents = expected.splitlines() expected = os.linesep.join(new_contents) expected += os.linesep @@ -365,7 +366,7 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin): if os.path.isfile(fname): os.remove(fname) - @skipIf(salt.utils.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed') + @skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed') def test_issue_2068_template_str(self): venv_dir = os.path.join( TMP, 'issue-2068-template-str' diff --git a/tests/integration/modules/test_supervisord.py b/tests/integration/modules/test_supervisord.py index 0329baef5b..4d390558fa 100644 --- a/tests/integration/modules/test_supervisord.py +++ b/tests/integration/modules/test_supervisord.py @@ -12,16 +12,16 @@ from tests.support.unit import skipIf from tests.support.paths import TMP # Import salt libs -import salt.utils +import salt.utils.path from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six @skipIf(six.PY3, 'supervisor does not work under python 3') -@skipIf(salt.utils.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed') -@skipIf(salt.utils.which('supervisorctl') is None, 'supervisord not installed') +@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed') +@skipIf(salt.utils.path.which('supervisorctl') is None, 'supervisord not installed') class SupervisordModuleTest(ModuleCase): ''' Validates the supervisorctl functions. 
diff --git a/tests/integration/modules/test_system.py b/tests/integration/modules/test_system.py index 173de5e230..9a3a42d983 100644 --- a/tests/integration/modules/test_system.py +++ b/tests/integration/modules/test_system.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Import python libs +# Import Python libs from __future__ import absolute_import import datetime import logging @@ -13,16 +13,17 @@ from tests.support.case import ModuleCase from tests.support.unit import skipIf from tests.support.helpers import destructiveTest, skip_if_not_root -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.files +import salt.utils.path +import salt.utils.platform import salt.states.file from salt.ext.six.moves import range log = logging.getLogger(__name__) -@skipIf(not salt.utils.is_linux(), 'These tests can only be run on linux') +@skipIf(not salt.utils.platform.is_linux(), 'These tests can only be run on linux') class SystemModuleTest(ModuleCase): ''' Validate the date/time functions in the system module @@ -300,7 +301,7 @@ class SystemModuleTest(ModuleCase): ''' res = self.run_function('system.get_computer_desc') - hostname_cmd = salt.utils.which('hostnamectl') + hostname_cmd = salt.utils.path.which('hostnamectl') if hostname_cmd: desc = self.run_function('cmd.run', ["hostnamectl status --pretty"]) self.assertEqual(res, desc) diff --git a/tests/integration/modules/test_useradd.py b/tests/integration/modules/test_useradd.py index 8e8b2138e2..0791f2057e 100644 --- a/tests/integration/modules/test_useradd.py +++ b/tests/integration/modules/test_useradd.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Import python libs +# Import Python libs from __future__ import absolute_import import string import random @@ -14,15 +14,15 @@ from tests.support.helpers import ( requires_system_grains ) -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform # Import 3rd-party libs from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin @destructiveTest -@skipIf(not salt.utils.is_linux(), 'These tests can only be run on linux') +@skipIf(not salt.utils.platform.is_linux(), 'These tests can only be run on linux') @skip_if_not_root class UseraddModuleTestLinux(ModuleCase): @@ -108,7 +108,7 @@ class UseraddModuleTestLinux(ModuleCase): @destructiveTest -@skipIf(not salt.utils.is_windows(), 'These tests can only be run on Windows') +@skipIf(not salt.utils.platform.is_windows(), 'These tests can only be run on Windows') @skip_if_not_root class UseraddModuleTestWindows(ModuleCase): diff --git a/tests/integration/modules/test_virtualenv.py b/tests/integration/modules/test_virtualenv.py index 79541fdc09..dc9da0ce32 100644 --- a/tests/integration/modules/test_virtualenv.py +++ b/tests/integration/modules/test_virtualenv.py @@ -11,11 +11,11 @@ from tests.support.unit import skipIf from tests.support.paths import TMP # Import salt libs -import salt.utils +import salt.utils.path from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES -@skipIf(salt.utils.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed') +@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed') class VirtualenvModuleTest(ModuleCase): ''' Validate the virtualenv module diff --git a/tests/integration/netapi/rest_cherrypy/test_app.py b/tests/integration/netapi/rest_cherrypy/test_app.py index af01725135..78a9071301 100644 --- a/tests/integration/netapi/rest_cherrypy/test_app.py +++ b/tests/integration/netapi/rest_cherrypy/test_app.py @@ 
-6,6 +6,7 @@ import json # Import salt libs import salt.utils +import salt.utils.stringutils # Import test support libs import tests.support.cherrypy_testclasses as cptc @@ -195,7 +196,7 @@ class TestArgKwarg(cptc.BaseRestCherryPyTest): 'Accept': 'application/json', } ) - resp = json.loads(salt.utils.to_str(response.body[0])) + resp = json.loads(salt.utils.stringutils.to_str(response.body[0])) self.assertEqual(resp['return'][0]['args'], [1234]) self.assertEqual(resp['return'][0]['kwargs'], {'ext_source': 'redis'}) @@ -253,6 +254,6 @@ class TestJobs(cptc.BaseRestCherryPyTest): 'X-Auth-Token': self._token(), }) - resp = json.loads(salt.utils.to_str(response.body[0])) + resp = json.loads(salt.utils.stringutils.to_str(response.body[0])) self.assertIn('test.ping', str(resp['return'])) self.assertEqual(response.status, '200 OK') diff --git a/tests/integration/netapi/rest_cherrypy/test_app_pam.py b/tests/integration/netapi/rest_cherrypy/test_app_pam.py index 5c16868d22..9fda228c08 100644 --- a/tests/integration/netapi/rest_cherrypy/test_app_pam.py +++ b/tests/integration/netapi/rest_cherrypy/test_app_pam.py @@ -3,7 +3,7 @@ Integration Tests for restcherry salt-api with pam eauth ''' -# Import python libs +# Import Python libs from __future__ import absolute_import # Import test support libs @@ -13,7 +13,7 @@ from tests.support.helpers import destructiveTest, skip_if_not_root import tests.support.cherrypy_testclasses as cptc # Import Salt Libs -import salt.utils +import salt.utils.platform # Import 3rd-party libs from salt.ext.six.moves.urllib.parse import urlencode # pylint: disable=no-name-in-module,import-error @@ -42,8 +42,13 @@ class TestAuthPAM(cptc.BaseRestCherryPyTest, ModuleCase): super(TestAuthPAM, self).setUp() try: add_user = self.run_function('user.add', [USERA], createhome=False) - add_pwd = self.run_function('shadow.set_password', - [USERA, USERA_PWD if salt.utils.is_darwin() else HASHED_USERA_PWD]) + add_pwd = self.run_function( + 'shadow.set_password', + [ + USERA, + USERA_PWD if salt.utils.platform.is_darwin() else HASHED_USERA_PWD + ] + ) self.assertTrue(add_user) self.assertTrue(add_pwd) user_list = self.run_function('user.list_users') diff --git a/tests/integration/netapi/rest_tornado/test_app.py b/tests/integration/netapi/rest_tornado/test_app.py index 1712f80d45..6597329a7c 100644 --- a/tests/integration/netapi/rest_tornado/test_app.py +++ b/tests/integration/netapi/rest_tornado/test_app.py @@ -9,6 +9,7 @@ import threading # Import Salt Libs import salt.utils +import salt.utils.stringutils from salt.netapi.rest_tornado import saltnado from salt.utils.versions import StrictVersion @@ -18,7 +19,7 @@ from tests.support.helpers import flaky from tests.support.unit import skipIf # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six try: import zmq from zmq.eventloop.ioloop import ZMQIOLoop @@ -574,7 +575,7 @@ class TestWebhookSaltAPIHandler(_SaltnadoIntegrationTestCase): self.assertIn('headers', event['data']) self.assertEqual( event['data']['post'], - {'foo': salt.utils.to_bytes('bar')} + {'foo': salt.utils.stringutils.to_bytes('bar')} ) finally: self._future_resolved.clear() diff --git a/tests/integration/pillar/test_git_pillar.py b/tests/integration/pillar/test_git_pillar.py index 305e7c591f..9cb9cb5331 100644 --- a/tests/integration/pillar/test_git_pillar.py +++ b/tests/integration/pillar/test_git_pillar.py @@ -85,7 +85,8 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON from tests.support.unit import skipIf # Import Salt libs -import 
salt.utils +import salt.utils.path +import salt.utils.platform from salt.utils.gitfs import GITPYTHON_MINVER, PYGIT2_MINVER from salt.utils.versions import LooseVersion from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES as VIRTUALENV_NAMES @@ -106,9 +107,9 @@ try: except ImportError: HAS_PYGIT2 = False -HAS_SSHD = bool(salt.utils.which('sshd')) -HAS_NGINX = bool(salt.utils.which('nginx')) -HAS_VIRTUALENV = bool(salt.utils.which_bin(VIRTUALENV_NAMES)) +HAS_SSHD = bool(salt.utils.path.which('sshd')) +HAS_NGINX = bool(salt.utils.path.which('nginx')) +HAS_VIRTUALENV = bool(salt.utils.path.which_bin(VIRTUALENV_NAMES)) def _rand_key_name(length): @@ -118,7 +119,7 @@ def _rand_key_name(length): def _windows_or_mac(): - return salt.utils.is_windows() or salt.utils.is_darwin() + return salt.utils.platform.is_windows() or salt.utils.platform.is_darwin() class GitPythonMixin(object): diff --git a/tests/integration/renderers/test_pydsl.py b/tests/integration/renderers/test_pydsl.py index f128862bd7..0d904b4ce0 100644 --- a/tests/integration/renderers/test_pydsl.py +++ b/tests/integration/renderers/test_pydsl.py @@ -9,8 +9,8 @@ import textwrap from tests.support.case import ModuleCase # Import Salt libs -import salt.utils import salt.utils.files +import salt.utils.platform class PyDSLRendererIncludeTestCase(ModuleCase): @@ -37,7 +37,7 @@ class PyDSLRendererIncludeTestCase(ModuleCase): # Windows adds `linefeed` in addition to `newline`. There's also an # unexplainable space before the `linefeed`... - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): expected = 'X1 \r\n' \ 'X2 \r\n' \ 'X3 \r\n' \ diff --git a/tests/integration/runners/test_fileserver.py b/tests/integration/runners/test_fileserver.py index 813201baf8..5f18dcbf5e 100644 --- a/tests/integration/runners/test_fileserver.py +++ b/tests/integration/runners/test_fileserver.py @@ -10,8 +10,8 @@ import contextlib from tests.support.case import ShellCase from tests.support.unit import skipIf -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform class FileserverTest(ShellCase): @@ -163,7 +163,7 @@ class FileserverTest(ShellCase): # Git doesn't handle symlinks in Windows. See the thread below: # http://stackoverflow.com/questions/5917249/git-symlinks-in-windows - @skipIf(salt.utils.is_windows(), + @skipIf(salt.utils.platform.is_windows(), 'Git for Windows does not preserve symbolic links when cloning') def test_symlink_list(self): ''' diff --git a/tests/integration/runners/test_runner_returns.py b/tests/integration/runners/test_runner_returns.py index 21b0d31c58..86820a9668 100644 --- a/tests/integration/runners/test_runner_returns.py +++ b/tests/integration/runners/test_runner_returns.py @@ -15,7 +15,7 @@ from tests.support.runtests import RUNTIME_VARS # Import salt libs import salt.payload -import salt.utils +import salt.utils.args import salt.utils.files import salt.utils.jid @@ -61,9 +61,9 @@ class RunnerReturnsTest(ShellCase): ''' # Remove pub_kwargs data['fun_args'][1] = \ - salt.utils.clean_kwargs(**data['fun_args'][1]) + salt.utils.args.clean_kwargs(**data['fun_args'][1]) data['return']['kwargs'] = \ - salt.utils.clean_kwargs(**data['return']['kwargs']) + salt.utils.args.clean_kwargs(**data['return']['kwargs']) # Pop off the timestamp (do not provide a 2nd argument, if the stamp is # missing we want to know!) 
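The netapi and runner hunks above apply the same relocation to helper functions: byte/string conversion moves from salt.utils to salt.utils.stringutils, and kwargs scrubbing moves to salt.utils.args. A short sketch of the relocated call sites, with sample data invented purely for illustration:

# Illustrative sketch only -- demonstrates the relocated helpers used by the
# netapi and runner tests above; the sample payload and kwargs are made up.
import salt.utils.args
import salt.utils.stringutils

# bytes <-> str conversions formerly reached via salt.utils.to_bytes/to_str
body = salt.utils.stringutils.to_bytes(u'{"return": [{"args": [1234]}]}')
text = salt.utils.stringutils.to_str(body)

# strip the __pub_* keys the same way the runner-returns test now does
kwargs = {'outputter': 'nested', '__pub_fun': 'runner.doc', '__pub_jid': '20170901'}
cleaned = salt.utils.args.clean_kwargs(**kwargs)
assert '__pub_fun' not in cleaned and 'outputter' in cleaned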
diff --git a/tests/integration/runners/test_state.py b/tests/integration/runners/test_state.py index 8beeacbf90..e14dd06251 100644 --- a/tests/integration/runners/test_state.py +++ b/tests/integration/runners/test_state.py @@ -22,6 +22,7 @@ from tests.support.paths import TMP # Import Salt Libs import salt.utils +import salt.utils.platform import salt.utils.event import salt.utils.files @@ -102,7 +103,7 @@ class StateRunnerTest(ShellCase): server_thread.join() -@skipIf(salt.utils.is_windows(), '*NIX-only test') +@skipIf(salt.utils.platform.is_windows(), '*NIX-only test') class OrchEventTest(ShellCase): ''' Tests for orchestration events diff --git a/tests/integration/shell/test_auth.py b/tests/integration/shell/test_auth.py index 3c4c7000a2..31b7aa11f4 100644 --- a/tests/integration/shell/test_auth.py +++ b/tests/integration/shell/test_auth.py @@ -4,8 +4,9 @@ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ''' -# Import python libs +# Import Python libs from __future__ import absolute_import +import logging import pwd import grp import random @@ -14,13 +15,15 @@ import random from tests.support.case import ShellCase from tests.support.helpers import destructiveTest, skip_if_not_root -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform from salt.utils.pycrypto import gen_hash # Import 3rd-party libs from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin +log = logging.getLogger(__name__) + def gen_password(): ''' @@ -54,7 +57,6 @@ class AuthTest(ShellCase): group = 'saltops' def setUp(self): - # This is a little wasteful but shouldn't be a problem for user in (self.userA, self.userB): try: pwd.getpwnam(user) @@ -68,6 +70,21 @@ class AuthTest(ShellCase): self.run_call('group.add {0}'.format(self.group)) self.run_call('user.chgroups {0} {1} True'.format(self.userB, self.group)) + def tearDown(self): + for user in (self.userA, self.userB): + try: + pwd.getpwnam(user) + except KeyError: + pass + else: + self.run_call('user.delete {0}'.format(user)) + try: + grp.getgrnam(self.group) + except KeyError: + pass + else: + self.run_call('group.delete {0}'.format(self.group)) + def test_pam_auth_valid_user(self): ''' test that pam auth mechanism works with a valid user @@ -77,7 +94,7 @@ class AuthTest(ShellCase): # set user password set_pw_cmd = "shadow.set_password {0} '{1}'".format( self.userA, - password if salt.utils.is_darwin() else hashed_pwd + password if salt.utils.platform.is_darwin() else hashed_pwd ) self.run_call(set_pw_cmd) @@ -85,6 +102,7 @@ class AuthTest(ShellCase): cmd = ('-a pam "*" test.ping ' '--username {0} --password {1}'.format(self.userA, password)) resp = self.run_salt(cmd) + log.debug('resp = %s', resp) self.assertTrue( 'minion:' in resp ) @@ -109,7 +127,7 @@ class AuthTest(ShellCase): # set user password set_pw_cmd = "shadow.set_password {0} '{1}'".format( self.userB, - password if salt.utils.is_darwin() else hashed_pwd + password if salt.utils.platform.is_darwin() else hashed_pwd ) self.run_call(set_pw_cmd) @@ -121,10 +139,3 @@ class AuthTest(ShellCase): self.assertTrue( 'minion:' in resp ) - - def test_zzzz_tearDown(self): - for user in (self.userA, self.userB): - if pwd.getpwnam(user): - self.run_call('user.delete {0}'.format(user)) - if grp.getgrnam(self.group): - self.run_call('group.delete {0}'.format(self.group)) diff --git a/tests/integration/shell/test_call.py b/tests/integration/shell/test_call.py index 4c1d1c24b1..a86cf1a822 100644 --- a/tests/integration/shell/test_call.py +++ b/tests/integration/shell/test_call.py @@ 
-30,7 +30,7 @@ from tests.integration.utils import testprogram # Import salt libs import salt.utils.files -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/tests/integration/shell/test_cp.py b/tests/integration/shell/test_cp.py index 55b0b5ccab..1e7a5978e1 100644 --- a/tests/integration/shell/test_cp.py +++ b/tests/integration/shell/test_cp.py @@ -27,7 +27,7 @@ from tests.support.mixins import ShellCaseCommonTestsMixin import salt.utils.files # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six class CopyTest(ShellCase, ShellCaseCommonTestsMixin): diff --git a/tests/integration/shell/test_key.py b/tests/integration/shell/test_key.py index c12cf24bc5..005f82d452 100644 --- a/tests/integration/shell/test_key.py +++ b/tests/integration/shell/test_key.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Import python libs +# Import Python libs from __future__ import absolute_import import os import shutil @@ -14,9 +14,9 @@ from tests.support.mixins import ShellCaseCommonTestsMixin # Import 3rd-party libs import yaml -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.files +import salt.utils.platform USERA = 'saltdev' USERA_PWD = 'saltdev' @@ -37,7 +37,7 @@ class KeyTest(ShellCase, ShellCaseCommonTestsMixin): try: add_user = self.run_call('user.add {0} createhome=False'.format(USERA)) add_pwd = self.run_call('shadow.set_password {0} \'{1}\''.format(USERA, - USERA_PWD if salt.utils.is_darwin() else HASHED_USERA_PWD)) + USERA_PWD if salt.utils.platform.is_darwin() else HASHED_USERA_PWD)) self.assertTrue(add_user) self.assertTrue(add_pwd) user_list = self.run_call('user.list_users') diff --git a/tests/integration/shell/test_minion.py b/tests/integration/shell/test_minion.py index d31bfc1a20..6f440ae683 100644 --- a/tests/integration/shell/test_minion.py +++ b/tests/integration/shell/test_minion.py @@ -29,7 +29,7 @@ from tests.support.mixins import ShellCaseCommonTestsMixin from tests.integration.utils import testprogram # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Import salt libs import salt.utils.files diff --git a/tests/integration/shell/test_runner.py b/tests/integration/shell/test_runner.py index fa3458ebd1..3cdaa7cffb 100644 --- a/tests/integration/shell/test_runner.py +++ b/tests/integration/shell/test_runner.py @@ -4,7 +4,7 @@ Tests for the salt-run command ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import os import shutil @@ -19,9 +19,9 @@ from tests.support.helpers import skip_if_not_root # Import 3rd-party libs import yaml -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.files +import salt.utils.platform USERA = 'saltdev' USERA_PWD = 'saltdev' @@ -42,7 +42,7 @@ class RunTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMixin) try: add_user = self.run_call('user.add {0} createhome=False'.format(USERA)) add_pwd = self.run_call('shadow.set_password {0} \'{1}\''.format(USERA, - USERA_PWD if salt.utils.is_darwin() else HASHED_USERA_PWD)) + USERA_PWD if salt.utils.platform.is_darwin() else HASHED_USERA_PWD)) self.assertTrue(add_user) self.assertTrue(add_pwd) user_list = self.run_call('user.list_users') @@ -118,7 +118,7 @@ class RunTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMixin) with_retcode=True ) try: - self.assertIn("'doc.runner:'", ret[0]) + self.assertIn('doc.runner:', ret[0]) self.assertFalse(os.path.isdir(os.path.join(config_dir, 'file:'))) except 
AssertionError: if os.path.exists('/dev/log') and ret[2] != 2: diff --git a/tests/integration/states/test_archive.py b/tests/integration/states/test_archive.py index acf33860c4..d8063ee261 100644 --- a/tests/integration/states/test_archive.py +++ b/tests/integration/states/test_archive.py @@ -2,7 +2,7 @@ ''' Tests for the archive state ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import errno import logging @@ -14,17 +14,16 @@ from tests.support.helpers import skip_if_not_root, Webserver from tests.support.mixins import SaltReturnAssertsMixin from tests.support.paths import FILES -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.files +import salt.utils.platform # Setup logging log = logging.getLogger(__name__) -if salt.utils.is_windows(): - ARCHIVE_DIR = os.path.join('c:/', 'tmp') -else: - ARCHIVE_DIR = '/tmp/archive' +ARCHIVE_DIR = os.path.join('c:/', 'tmp') \ + if salt.utils.platform.is_windows() \ + else '/tmp/archive' ARCHIVE_NAME = 'custom.tar.gz' ARCHIVE_TAR_SOURCE = 'http://localhost:{0}/{1}'.format(9999, ARCHIVE_NAME) @@ -106,7 +105,7 @@ class ArchiveTest(ModuleCase, SaltReturnAssertsMixin): test archive.extracted with user and group set to "root" ''' r_group = 'root' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): r_group = 'wheel' ret = self.run_state('archive.extracted', name=ARCHIVE_DIR, source=self.archive_tar_source, archive_format='tar', diff --git a/tests/integration/states/test_bower.py b/tests/integration/states/test_bower.py index 47ad72bb32..afbc2c4b98 100644 --- a/tests/integration/states/test_bower.py +++ b/tests/integration/states/test_bower.py @@ -13,10 +13,10 @@ from tests.support.helpers import destructiveTest from tests.support.mixins import SaltReturnAssertsMixin # Import salt libs -import salt.utils +import salt.utils.path -@skipIf(salt.utils.which('bower') is None, 'bower not installed') +@skipIf(salt.utils.path.which('bower') is None, 'bower not installed') class BowerStateTest(ModuleCase, SaltReturnAssertsMixin): @destructiveTest diff --git a/tests/integration/states/test_cmd.py b/tests/integration/states/test_cmd.py index e6c8f770df..ca40ae06e5 100644 --- a/tests/integration/states/test_cmd.py +++ b/tests/integration/states/test_cmd.py @@ -2,7 +2,7 @@ ''' Tests for the file state ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import errno import os @@ -14,11 +14,11 @@ from tests.support.case import ModuleCase from tests.support.paths import TMP_STATE_TREE from tests.support.mixins import SaltReturnAssertsMixin -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.files +import salt.utils.platform -IS_WINDOWS = salt.utils.is_windows() +IS_WINDOWS = salt.utils.platform.is_windows() class CMDTest(ModuleCase, SaltReturnAssertsMixin): diff --git a/tests/integration/states/test_docker_container.py b/tests/integration/states/test_docker_container.py index 0a796239fe..9f31e9de6a 100644 --- a/tests/integration/states/test_docker_container.py +++ b/tests/integration/states/test_docker_container.py @@ -22,8 +22,8 @@ from tests.support.helpers import destructiveTest from tests.support.mixins import SaltReturnAssertsMixin # Import Salt Libs -import salt.utils import salt.utils.files +import salt.utils.path # Import 3rd-party libs from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin @@ -50,8 +50,8 @@ def with_random_name(func): @destructiveTest -@skipIf(not salt.utils.which('busybox'), 
'Busybox not installed') -@skipIf(not salt.utils.which('dockerd'), 'Docker not installed') +@skipIf(not salt.utils.path.which('busybox'), 'Busybox not installed') +@skipIf(not salt.utils.path.which('dockerd'), 'Docker not installed') class DockerContainerTestCase(ModuleCase, SaltReturnAssertsMixin): ''' Test docker_container states diff --git a/tests/integration/states/test_file.py b/tests/integration/states/test_file.py index 643fa44605..d145efdbb1 100644 --- a/tests/integration/states/test_file.py +++ b/tests/integration/states/test_file.py @@ -4,7 +4,7 @@ Tests for the file state ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import errno import glob @@ -31,9 +31,11 @@ from tests.support.helpers import ( ) from tests.support.mixins import SaltReturnAssertsMixin -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils # Can be removed once normalize_mode is moved import salt.utils.files +import salt.utils.path +import salt.utils.platform HAS_PWD = True try: @@ -48,10 +50,10 @@ except ImportError: HAS_GRP = False # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin -IS_WINDOWS = salt.utils.is_windows() +IS_WINDOWS = salt.utils.platform.is_windows() STATE_DIR = os.path.join(FILES, 'file', 'base') if IS_WINDOWS: @@ -547,9 +549,9 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin): Test file.managed passing a basic check_cmd kwarg. See Issue #38111. ''' r_group = 'root' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): r_group = 'wheel' - if not salt.utils.which('visudo'): + if not salt.utils.path.which('visudo'): self.fail('sudo is missing') try: ret = self.run_state( @@ -2058,66 +2060,62 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin): This is more generic than just a file test. 
Feel free to move ''' - # Get a path to the temporary file - # 한국어 시험 (korean) - # '\xed\x95\x9c\xea\xb5\xad\xec\x96\xb4 \xec\x8b\x9c\xed\x97\x98' (utf-8) - # u'\ud55c\uad6d\uc5b4 \uc2dc\ud5d8' (unicode) - korean_1 = '한국어 시험' - korean_utf8_1 = ('\xed\x95\x9c\xea\xb5\xad\xec\x96\xb4' - ' \xec\x8b\x9c\xed\x97\x98') - korean_unicode_1 = u'\ud55c\uad6d\uc5b4 \uc2dc\ud5d8' - korean_2 = '첫 번째 행' - korean_utf8_2 = '\xec\xb2\xab \xeb\xb2\x88\xec\xa7\xb8 \xed\x96\x89' - korean_unicode_2 = u'\uccab \ubc88\uc9f8 \ud589' - korean_3 = '마지막 행' - korean_utf8_3 = '\xeb\xa7\x88\xec\xa7\x80\xeb\xa7\x89 \xed\x96\x89' - korean_unicode_3 = u'\ub9c8\uc9c0\ub9c9 \ud589' - test_file = os.path.join(TMP, - 'salt_utf8_tests/'+korean_utf8_1+'.txt' + korean_1 = u'한국어 시험' + korean_2 = u'첫 번째 행' + korean_3 = u'마지막 행' + test_file = os.path.join( + TMP, + u'salt_utf8_tests', + u'{0}.txt'.format(korean_1) ) + test_file_encoded = salt.utils.stringutils.to_str(test_file) template_path = os.path.join(TMP_STATE_TREE, 'issue-8947.sls') # create the sls template template_lines = [ - '# -*- coding: utf-8 -*-', - 'some-utf8-file-create:', - ' file.managed:', - " - name: '{0}'".format(test_file), - " - contents: {0}".format(korean_utf8_1), - ' - makedirs: True', - ' - replace: True', - ' - show_diff: True', - 'some-utf8-file-create2:', - ' file.managed:', - " - name: '{0}'".format(test_file), - ' - contents: |', - ' {0}'.format(korean_utf8_2), - ' {0}'.format(korean_utf8_1), - ' {0}'.format(korean_utf8_3), - ' - replace: True', - ' - show_diff: True', - 'some-utf8-file-exists:', - ' file.exists:', - " - name: '{0}'".format(test_file), - ' - require:', - ' - file: some-utf8-file-create2', - 'some-utf8-file-content-test:', - ' cmd.run:', - ' - name: \'cat "{0}"\''.format(test_file), - ' - require:', - ' - file: some-utf8-file-exists', - 'some-utf8-file-content-remove:', - ' cmd.run:', - ' - name: \'rm -f "{0}"\''.format(test_file), - ' - require:', - ' - cmd: some-utf8-file-content-test', - 'some-utf8-file-removed:', - ' file.missing:', - " - name: '{0}'".format(test_file), - ' - require:', - ' - cmd: some-utf8-file-content-remove', + u'# -*- coding: utf-8 -*-', + u'some-utf8-file-create:', + u' file.managed:', + u" - name: '{0}'".format(test_file), + u" - contents: {0}".format(korean_1), + u' - makedirs: True', + u' - replace: True', + u' - show_diff: True', + u'some-utf8-file-create2:', + u' file.managed:', + u" - name: '{0}'".format(test_file), + u' - contents: |', + u' {0}'.format(korean_2), + u' {0}'.format(korean_1), + u' {0}'.format(korean_3), + u' - replace: True', + u' - show_diff: True', + u'some-utf8-file-exists:', + u' file.exists:', + u" - name: '{0}'".format(test_file), + u' - require:', + u' - file: some-utf8-file-create2', + u'some-utf8-file-content-test:', + u' cmd.run:', + u' - name: \'cat "{0}"\''.format(test_file), + u' - require:', + u' - file: some-utf8-file-exists', + u'some-utf8-file-content-remove:', + u' cmd.run:', + u' - name: \'rm -f "{0}"\''.format(test_file), + u' - require:', + u' - cmd: some-utf8-file-content-test', + u'some-utf8-file-removed:', + u' file.missing:', + u" - name: '{0}'".format(test_file), + u' - require:', + u' - cmd: some-utf8-file-content-remove', ] - with salt.utils.files.fopen(template_path, 'w') as fp_: - fp_.write(os.linesep.join(template_lines)) + with salt.utils.files.fopen(template_path, 'wb') as fp_: + fp_.write( + salt.utils.stringutils.to_bytes( + os.linesep.join(template_lines) + ) + ) try: ret = self.run_function('state.sls', mods='issue-8947') if not isinstance(ret, 
dict): @@ -2130,54 +2128,52 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin): utf_diff = '--- \n+++ \n@@ -1,1 +1,3 @@\n' else: utf_diff = '--- \n+++ \n@@ -1 +1,3 @@\n' - utf_diff += '+\xec\xb2\xab \xeb\xb2\x88\xec\xa7\xb8 \xed\x96\x89\n \xed\x95\x9c\xea\xb5\xad\xec\x96\xb4 \xec\x8b\x9c\xed\x97\x98\n+\xeb\xa7\x88\xec\xa7\x80\xeb\xa7\x89 \xed\x96\x89\n' + #utf_diff += '+\xec\xb2\xab \xeb\xb2\x88\xec\xa7\xb8 \xed\x96\x89\n \xed\x95\x9c\xea\xb5\xad\xec\x96\xb4 \xec\x8b\x9c\xed\x97\x98\n+\xeb\xa7\x88\xec\xa7\x80\xeb\xa7\x89 \xed\x96\x89\n' + utf_diff += salt.utils.stringutils.to_str( + u'+첫 번째 행\n' + u' 한국어 시험\n' + u'+마지막 행\n' + ) # using unicode.encode('utf-8') we should get the same as # an utf-8 string expected = { - ('file_|-some-utf8-file-create_|-{0}' - '_|-managed').format(test_file): { - 'name': '{0}'.format(test_file), + 'file_|-some-utf8-file-create_|-{0}_|-managed'.format(test_file_encoded): { + 'name': '{0}'.format(test_file_encoded), '__run_num__': 0, - 'comment': 'File {0} updated'.format(test_file), + 'comment': 'File {0} updated'.format(test_file_encoded), 'diff': 'New file' }, - ('file_|-some-utf8-file-create2_|-{0}' - '_|-managed').format(test_file): { - 'name': '{0}'.format(test_file), + 'file_|-some-utf8-file-create2_|-{0}_|-managed'.format(test_file_encoded): { + 'name': '{0}'.format(test_file_encoded), '__run_num__': 1, - 'comment': 'File {0} updated'.format(test_file), + 'comment': 'File {0} updated'.format(test_file_encoded), 'diff': utf_diff }, - ('file_|-some-utf8-file-exists_|-{0}' - '_|-exists').format(test_file): { - 'name': '{0}'.format(test_file), + 'file_|-some-utf8-file-exists_|-{0}_|-exists'.format(test_file_encoded): { + 'name': '{0}'.format(test_file_encoded), '__run_num__': 2, - 'comment': 'Path {0} exists'.format(test_file) + 'comment': 'Path {0} exists'.format(test_file_encoded) }, - ('cmd_|-some-utf8-file-content-test_|-cat "{0}"' - '_|-run').format(test_file): { - 'name': 'cat "{0}"'.format(test_file), + 'cmd_|-some-utf8-file-content-test_|-cat "{0}"_|-run'.format(test_file_encoded): { + 'name': 'cat "{0}"'.format(test_file_encoded), '__run_num__': 3, - 'comment': 'Command "cat "{0}"" run'.format(test_file), + 'comment': 'Command "cat "{0}"" run'.format(test_file_encoded), 'stdout': '{0}\n{1}\n{2}'.format( - korean_unicode_2.encode('utf-8'), - korean_unicode_1.encode('utf-8'), - korean_unicode_3.encode('utf-8') + salt.utils.stringutils.to_str(korean_2), + salt.utils.stringutils.to_str(korean_1), + salt.utils.stringutils.to_str(korean_3), ) }, - ('cmd_|-some-utf8-file-content-remove_|-rm -f "{0}"' - '_|-run').format(test_file): { - 'name': 'rm -f "{0}"'.format(test_file), + 'cmd_|-some-utf8-file-content-remove_|-rm -f "{0}"_|-run'.format(test_file_encoded): { + 'name': 'rm -f "{0}"'.format(test_file_encoded), '__run_num__': 4, - 'comment': 'Command "rm -f "{0}"" run'.format(test_file), + 'comment': 'Command "rm -f "{0}"" run'.format(test_file_encoded), 'stdout': '' }, - ('file_|-some-utf8-file-removed_|-{0}' - '_|-missing').format(test_file): { - 'name': '{0}'.format(test_file), + 'file_|-some-utf8-file-removed_|-{0}_|-missing'.format(test_file_encoded): { + 'name': '{0}'.format(test_file_encoded), '__run_num__': 5, - 'comment': - 'Path {0} is missing'.format(test_file), + 'comment': 'Path {0} is missing'.format(test_file_encoded), } } result = {} @@ -2197,11 +2193,12 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin): self.maxDiff = None self.assertEqual(expected, result) - cat_id = ('cmd_|-some-utf8-file-content-test_|-cat "{0}"' - 
'_|-run').format(test_file) + cat_id = 'cmd_|-some-utf8-file-content-test_|-cat "{0}"_|-run'.format(test_file_encoded) self.assertEqual( result[cat_id]['stdout'], - korean_2 + '\n' + korean_1 + '\n' + korean_3 + salt.utils.stringutils.to_str( + korean_2 + '\n' + korean_1 + '\n' + korean_3 + ) ) finally: if os.path.isdir(test_file): @@ -2248,7 +2245,7 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin): self.assertEqual(pwd.getpwuid(onestats.st_uid).pw_name, user) self.assertEqual(pwd.getpwuid(twostats.st_uid).pw_name, 'root') self.assertEqual(grp.getgrgid(onestats.st_gid).gr_name, group) - if salt.utils.which('id'): + if salt.utils.path.which('id'): root_group = self.run_function('user.primary_group', ['root']) self.assertEqual(grp.getgrgid(twostats.st_gid).gr_name, root_group) finally: diff --git a/tests/integration/states/test_git.py b/tests/integration/states/test_git.py index 121961f74e..e4601574ae 100644 --- a/tests/integration/states/test_git.py +++ b/tests/integration/states/test_git.py @@ -20,8 +20,8 @@ from tests.support.paths import TMP from tests.support.mixins import SaltReturnAssertsMixin # Import salt libs -import salt.utils import salt.utils.files +import salt.utils.path from salt.utils.versions import LooseVersion as _LooseVersion @@ -33,7 +33,7 @@ def __check_git_version(caller, min_version, skip_msg): actual_setup = getattr(caller, 'setUp', None) def setUp(self, *args, **kwargs): - if not salt.utils.which('git'): + if not salt.utils.path.which('git'): self.skipTest('git is not installed') git_version = self.run_function('git.version') if _LooseVersion(git_version) < _LooseVersion(min_version): @@ -45,7 +45,7 @@ def __check_git_version(caller, min_version, skip_msg): @functools.wraps(caller) def wrapper(self, *args, **kwargs): - if not salt.utils.which('git'): + if not salt.utils.path.which('git'): self.skipTest('git is not installed') git_version = self.run_function('git.version') if _LooseVersion(git_version) < _LooseVersion(min_version): diff --git a/tests/integration/states/test_mysql.py b/tests/integration/states/test_mysql.py index fc62d09095..0b3cdef412 100644 --- a/tests/integration/states/test_mysql.py +++ b/tests/integration/states/test_mysql.py @@ -14,8 +14,8 @@ from tests.support.helpers import destructiveTest from tests.support.mixins import SaltReturnAssertsMixin # Import salt libs -import salt.utils -import salt.ext.six as six +import salt.utils.path +from salt.ext import six from salt.modules import mysql as mysqlmod log = logging.getLogger(__name__) @@ -26,7 +26,7 @@ try: except ImportError: NO_MYSQL = True -if not salt.utils.which('mysqladmin'): +if not salt.utils.path.which('mysqladmin'): NO_MYSQL = True diff --git a/tests/integration/states/test_npm.py b/tests/integration/states/test_npm.py index 6aad2a6094..f7c80042a5 100644 --- a/tests/integration/states/test_npm.py +++ b/tests/integration/states/test_npm.py @@ -14,10 +14,14 @@ from tests.support.helpers import destructiveTest, requires_network from tests.support.mixins import SaltReturnAssertsMixin # Import salt libs -import salt.utils +import salt.modules.cmdmod as cmd +import salt.utils.path +from salt.utils.versions import LooseVersion + +MAX_NPM_VERSION = '5.0.0' -@skipIf(salt.utils.which('npm') is None, 'npm not installed') +@skipIf(salt.utils.path.which('npm') is None, 'npm not installed') class NpmStateTest(ModuleCase, SaltReturnAssertsMixin): @requires_network() @@ -53,6 +57,8 @@ class NpmStateTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('npm.installed', name=None, 
pkgs=['pm2', 'grunt']) self.assertSaltTrueReturn(ret) + @skipIf(salt.utils.path.which('npm') and LooseVersion(cmd.run('npm -v')) >= LooseVersion(MAX_NPM_VERSION), + 'Skip with npm >= 5.0.0 until #41770 is fixed') @destructiveTest def test_npm_cache_clean(self): ''' diff --git a/tests/integration/states/test_pip.py b/tests/integration/states/test_pip.py index 7d5ccbc53d..5cd95d4788 100644 --- a/tests/integration/states/test_pip.py +++ b/tests/integration/states/test_pip.py @@ -26,14 +26,15 @@ from tests.support.helpers import ( skip_if_not_root ) # Import salt libs -from tests.support.case import ModuleCase -import salt.utils import salt.utils.files +import salt.utils.path +import salt.utils.versions from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES from salt.exceptions import CommandExecutionError +from tests.support.case import ModuleCase # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six class VirtualEnv(object): @@ -50,7 +51,7 @@ class VirtualEnv(object): shutil.rmtree(self.venv_dir) -@skipIf(salt.utils.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed') +@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed') class PipStateTest(ModuleCase, SaltReturnAssertsMixin): @skip_if_not_root @@ -238,7 +239,7 @@ class PipStateTest(ModuleCase, SaltReturnAssertsMixin): ) except (AssertionError, CommandExecutionError): pip_version = self.run_function('pip.version', [venv_dir]) - if salt.utils.compare_versions(ver1=pip_version, oper='>=', ver2='7.0.0'): + if salt.utils.versions.compare(ver1=pip_version, oper='>=', ver2='7.0.0'): self.skipTest('the --mirrors arg has been deprecated and removed in pip==7.0.0') finally: if os.path.isdir(venv_dir): diff --git a/tests/integration/states/test_pkg.py b/tests/integration/states/test_pkg.py index f5e1be2bbc..27103072c7 100644 --- a/tests/integration/states/test_pkg.py +++ b/tests/integration/states/test_pkg.py @@ -3,7 +3,7 @@ ''' tests for pkg state ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import logging import os @@ -20,8 +20,9 @@ from tests.support.helpers import ( flaky ) -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.path +import salt.utils.platform # Import 3rd-party libs from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin @@ -100,7 +101,7 @@ def pkgmgr_avail(run_function, grains): Return True if any locks are found for path ''' # Try lsof if it's available - if salt.utils.which('lsof'): + if salt.utils.path.which('lsof'): lock = run_function('cmd.run', ['lsof {0}'.format(path)]) return True if len(lock) else False @@ -160,7 +161,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): self.run_function('pkg.refresh_db') __testcontext__['refresh'] = True - @skipIf(salt.utils.is_windows(), 'minion is windows') + @skipIf(salt.utils.platform.is_windows(), 'minion is windows') @requires_system_grains def test_pkg_001_installed(self, grains=None): ''' @@ -191,7 +192,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('pkg.removed', name=target) self.assertSaltTrueReturn(ret) - @skipIf(salt.utils.is_windows(), 'minion is windows') + @skipIf(salt.utils.platform.is_windows(), 'minion is windows') @requires_system_grains def test_pkg_002_installed_with_version(self, grains=None): ''' @@ -239,7 +240,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('pkg.removed', name=target) self.assertSaltTrueReturn(ret) - 
@skipIf(salt.utils.is_windows(), 'minion is windows') + @skipIf(salt.utils.platform.is_windows(), 'minion is windows') @requires_system_grains def test_pkg_003_installed_multipkg(self, grains=None): ''' @@ -271,7 +272,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('pkg.removed', name=None, pkgs=pkg_targets) self.assertSaltTrueReturn(ret) - @skipIf(salt.utils.is_windows(), 'minion is windows') + @skipIf(salt.utils.platform.is_windows(), 'minion is windows') @requires_system_grains def test_pkg_004_installed_multipkg_with_version(self, grains=None): ''' @@ -320,7 +321,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('pkg.removed', name=None, pkgs=pkg_targets) self.assertSaltTrueReturn(ret) - @skipIf(salt.utils.is_windows(), 'minion is windows') + @skipIf(salt.utils.platform.is_windows(), 'minion is windows') @requires_system_grains def test_pkg_005_installed_32bit(self, grains=None): ''' @@ -357,7 +358,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('pkg.removed', name=target) self.assertSaltTrueReturn(ret) - @skipIf(salt.utils.is_windows(), 'minion is windows') + @skipIf(salt.utils.platform.is_windows(), 'minion is windows') @requires_system_grains def test_pkg_006_installed_32bit_with_version(self, grains=None): ''' @@ -405,7 +406,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('pkg.removed', name=target) self.assertSaltTrueReturn(ret) - @skipIf(salt.utils.is_windows(), 'minion is windows') + @skipIf(salt.utils.platform.is_windows(), 'minion is windows') @requires_system_grains def test_pkg_007_with_dot_in_pkgname(self, grains=None): ''' @@ -436,7 +437,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('pkg.removed', name=target) self.assertSaltTrueReturn(ret) - @skipIf(salt.utils.is_windows(), 'minion is windows') + @skipIf(salt.utils.platform.is_windows(), 'minion is windows') @requires_system_grains def test_pkg_008_epoch_in_version(self, grains=None): ''' @@ -470,7 +471,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('pkg.removed', name=target) self.assertSaltTrueReturn(ret) - @skipIf(salt.utils.is_windows(), 'minion is windows') + @skipIf(salt.utils.platform.is_windows(), 'minion is windows') def test_pkg_009_latest_with_epoch(self): ''' This tests for the following issue: @@ -487,7 +488,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): refresh=False) self.assertSaltTrueReturn(ret) - @skipIf(salt.utils.is_windows(), 'minion is windows') + @skipIf(salt.utils.platform.is_windows(), 'minion is windows') @requires_salt_modules('pkg.info_installed') def test_pkg_010_latest_with_epoch_and_info_installed(self): ''' @@ -507,7 +508,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_function('pkg.info_installed', [package]) self.assertTrue(pkgquery in str(ret)) - @skipIf(salt.utils.is_windows(), 'minion is windows') + @skipIf(salt.utils.platform.is_windows(), 'minion is windows') @requires_system_grains def test_pkg_011_latest(self, grains=None): ''' @@ -539,7 +540,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('pkg.removed', name=target) self.assertSaltTrueReturn(ret) - @skipIf(salt.utils.is_windows(), 'minion is windows') + @skipIf(salt.utils.platform.is_windows(), 'minion is windows') @requires_system_grains def test_pkg_012_latest_only_upgrade(self, grains=None): ''' @@ -725,6 +726,43 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): ret = 
self.run_state('pkg.removed', name=target) self.assertSaltTrueReturn(ret) + def test_pkg_014_installed_missing_release(self, grains=None): # pylint: disable=unused-argument + ''' + Tests that a version number missing the release portion still resolves + as correctly installed. For example, version 2.0.2 instead of 2.0.2-1.el7 + ''' + os_family = grains.get('os_family', '') + + if os_family.lower() != 'redhat': + self.skipTest('Test only runs on RedHat OS family') + + pkg_targets = _PKG_TARGETS.get(os_family, []) + + # Make sure that we have targets that match the os_family. If this + # fails then the _PKG_TARGETS dict above needs to have an entry added, + # with two packages that are not installed before these tests are run + self.assertTrue(pkg_targets) + + target = pkg_targets[0] + version = self.run_function('pkg.version', [target]) + + # If this assert fails, we need to find new targets, this test needs to + # be able to test successful installation of packages, so this package + # needs to not be installed before we run the states below + self.assertFalse(version) + + ret = self.run_state( + 'pkg.installed', + name=target, + version=salt.utils.str_version_to_evr(version)[1], + refresh=False, + ) + self.assertSaltTrueReturn(ret) + + # Clean up + ret = self.run_state('pkg.removed', name=target) + self.assertSaltTrueReturn(ret) + @requires_salt_modules('pkg.group_install') @requires_system_grains def test_group_installed_handle_missing_package_group(self, grains=None): # pylint: disable=unused-argument diff --git a/tests/integration/states/test_pkgrepo.py b/tests/integration/states/test_pkgrepo.py index eeef55140b..9c4ece89d6 100644 --- a/tests/integration/states/test_pkgrepo.py +++ b/tests/integration/states/test_pkgrepo.py @@ -15,11 +15,11 @@ from tests.support.helpers import ( requires_system_grains ) -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six class PkgrepoTest(ModuleCase, SaltReturnAssertsMixin): @@ -27,7 +27,7 @@ class PkgrepoTest(ModuleCase, SaltReturnAssertsMixin): pkgrepo state tests ''' @destructiveTest - @skipIf(salt.utils.is_windows(), 'minion is windows') + @skipIf(salt.utils.platform.is_windows(), 'minion is windows') @requires_system_grains def test_pkgrepo_01_managed(self, grains): ''' @@ -57,7 +57,7 @@ class PkgrepoTest(ModuleCase, SaltReturnAssertsMixin): self.assertSaltTrueReturn(dict([(state_id, state_result)])) @destructiveTest - @skipIf(salt.utils.is_windows(), 'minion is windows') + @skipIf(salt.utils.platform.is_windows(), 'minion is windows') def test_pkgrepo_02_absent(self): ''' This is a destructive test as it removes the repository added in the diff --git a/tests/integration/states/test_renderers.py b/tests/integration/states/test_renderers.py index c7e916e8e3..256aabd230 100644 --- a/tests/integration/states/test_renderers.py +++ b/tests/integration/states/test_renderers.py @@ -21,3 +21,12 @@ class TestJinjaRenderer(ModuleCase): ret = self.run_function('state.sls', ['jinja_dot_notation']) for state_ret in ret.values(): self.assertTrue(state_ret['result']) + + def test_salt_contains_function(self): + ''' + Test if we are able to check if a function exists inside the "salt" + wrapper (AliasLoader) which is available on Jinja templates. 
+ ''' + ret = self.run_function('state.sls', ['jinja_salt_contains_function']) + for state_ret in ret.values(): + self.assertTrue(state_ret['result']) diff --git a/tests/integration/states/test_service.py b/tests/integration/states/test_service.py index ec50bad4d8..955c009f03 100644 --- a/tests/integration/states/test_service.py +++ b/tests/integration/states/test_service.py @@ -12,14 +12,14 @@ from tests.support.helpers import destructiveTest from tests.support.mixins import SaltReturnAssertsMixin # Import salt libs -import salt.utils +import salt.utils.path INIT_DELAY = 5 SERVICE_NAME = 'crond' @destructiveTest -@skipIf(salt.utils.which('crond') is None, 'crond not installed') +@skipIf(salt.utils.path.which('crond') is None, 'crond not installed') class ServiceTest(ModuleCase, SaltReturnAssertsMixin): ''' Validate the service state diff --git a/tests/integration/states/test_supervisord.py b/tests/integration/states/test_supervisord.py index 5593617321..c4192a627a 100644 --- a/tests/integration/states/test_supervisord.py +++ b/tests/integration/states/test_supervisord.py @@ -17,16 +17,16 @@ from tests.support.paths import TMP from tests.support.mixins import SaltReturnAssertsMixin # Import salt libs -import salt.utils +import salt.utils.path from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six @skipIf(six.PY3, 'supervisor does not work under python 3') -@skipIf(salt.utils.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed') -@skipIf(salt.utils.which('supervisorctl') is None, 'supervisord not installed') +@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed') +@skipIf(salt.utils.path.which('supervisorctl') is None, 'supervisord not installed') class SupervisordTest(ModuleCase, SaltReturnAssertsMixin): ''' Validate the supervisord states. diff --git a/tests/integration/states/test_user.py b/tests/integration/states/test_user.py index 1317f12f97..9ec182f6c5 100644 --- a/tests/integration/states/test_user.py +++ b/tests/integration/states/test_user.py @@ -7,7 +7,7 @@ user present user present with custom homedir ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import os import sys @@ -20,10 +20,10 @@ from tests.support.unit import skipIf from tests.support.helpers import destructiveTest, requires_system_grains, skip_if_not_root from tests.support.mixins import SaltReturnAssertsMixin -# Import salt libs -import salt.utils +# Import Salt libs +import salt.utils.platform -if salt.utils.is_darwin(): +if salt.utils.platform.is_darwin(): USER = 'macuser' GROUP = 'macuser' GID = randint(400, 500) @@ -45,7 +45,7 @@ class UserTest(ModuleCase, SaltReturnAssertsMixin): user_home = '/var/lib/salt_test' def setUp(self): - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): #on mac we need to add user, because there is #no creationtime for nobody user. add_user = self.run_function('user.add', [USER], gid=GID) @@ -84,7 +84,7 @@ class UserTest(ModuleCase, SaltReturnAssertsMixin): And then destroys that user. Assume that it will break any system you run it on. 
''' - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): HOMEDIR = '/Users/home_of_' + self.user_name else: HOMEDIR = '/home/home_of_' + self.user_name @@ -104,7 +104,7 @@ class UserTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('user.present', name=self.user_name, home=self.user_home) self.assertSaltTrueReturn(ret) - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): self.assertTrue(os.path.isdir(self.user_home)) @requires_system_grains @@ -128,7 +128,7 @@ class UserTest(ModuleCase, SaltReturnAssertsMixin): self.assertReturnNonEmptySaltType(ret) group_name = grp.getgrgid(ret['gid']).gr_name - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): self.assertTrue(os.path.isdir(self.user_home)) if grains['os_family'] in ('Suse',): self.assertEqual(group_name, 'users') @@ -153,7 +153,7 @@ class UserTest(ModuleCase, SaltReturnAssertsMixin): self.assertReturnNonEmptySaltType(ret) group_name = grp.getgrgid(ret['gid']).gr_name - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): self.assertTrue(os.path.isdir(self.user_home)) self.assertEqual(group_name, self.user_name) ret = self.run_state('user.absent', name=self.user_name) @@ -229,13 +229,13 @@ class UserTest(ModuleCase, SaltReturnAssertsMixin): self.assertReturnNonEmptySaltType(ret) self.assertEqual('', ret['fullname']) # MacOS does not supply the following GECOS fields - if not salt.utils.is_darwin(): + if not salt.utils.platform.is_darwin(): self.assertEqual('', ret['roomnumber']) self.assertEqual('', ret['workphone']) self.assertEqual('', ret['homephone']) def tearDown(self): - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): check_user = self.run_function('user.list_users') if USER in check_user: del_user = self.run_function('user.delete', [USER], remove=True) diff --git a/tests/integration/states/test_virtualenv.py b/tests/integration/states/test_virtualenv.py index 815abcc1c1..3d2d7aa4da 100644 --- a/tests/integration/states/test_virtualenv.py +++ b/tests/integration/states/test_virtualenv.py @@ -7,7 +7,7 @@ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import os import shutil @@ -19,13 +19,14 @@ from tests.support.helpers import destructiveTest, skip_if_not_root from tests.support.mixins import SaltReturnAssertsMixin from tests.support.runtests import RUNTIME_VARS -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.files +import salt.utils.path +import salt.utils.platform from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES -@skipIf(salt.utils.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed') +@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed') class VirtualenvTest(ModuleCase, SaltReturnAssertsMixin): @destructiveTest @skip_if_not_root @@ -35,7 +36,7 @@ class VirtualenvTest(ModuleCase, SaltReturnAssertsMixin): uinfo = self.run_function('user.info', [user]) - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): # MacOS does not support createhome with user.present self.assertSaltTrueReturn(self.run_state('file.directory', name=uinfo['home'], user=user, group=uinfo['groups'][0], dir_mode=755)) diff --git a/tests/integration/states/test_zookeeper.py b/tests/integration/states/test_zookeeper.py index 262cb6fcb9..aa611c806b 100644 --- a/tests/integration/states/test_zookeeper.py +++ b/tests/integration/states/test_zookeeper.py @@ -13,7 +13,7 @@ from 
tests.support.helpers import destructiveTest from tests.support.mixins import SaltReturnAssertsMixin # Import Salt Libs -import salt.utils +import salt.utils.path try: import kazoo # pylint: disable=import-error,unused-import @@ -23,7 +23,7 @@ except ImportError: @destructiveTest -@skipIf(not salt.utils.which('dockerd'), 'Docker not installed') +@skipIf(not salt.utils.path.which('dockerd'), 'Docker not installed') @skipIf(not HAS_KAZOO, 'kazoo python library not installed') class ZookeeperTestCase(ModuleCase, SaltReturnAssertsMixin): ''' diff --git a/tests/integration/utils/testprogram.py b/tests/integration/utils/testprogram.py index e792e5160c..80d2b9128d 100644 --- a/tests/integration/utils/testprogram.py +++ b/tests/integration/utils/testprogram.py @@ -25,7 +25,7 @@ import salt.utils.files import salt.utils.process import salt.utils.psutil_compat as psutils import salt.defaults.exitcodes as exitcodes -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin from tests.support.unit import TestCase diff --git a/tests/integration/wheel/test_client.py b/tests/integration/wheel/test_client.py index 2fab5c72f4..7b6fe3f58e 100644 --- a/tests/integration/wheel/test_client.py +++ b/tests/integration/wheel/test_client.py @@ -10,7 +10,7 @@ from tests.support.mixins import AdaptedConfigurationTestCaseMixin # Import Salt libs import salt.auth import salt.wheel -import salt.utils +import salt.utils.platform class WheelModuleTest(TestCase, AdaptedConfigurationTestCaseMixin): @@ -81,7 +81,7 @@ class WheelModuleTest(TestCase, AdaptedConfigurationTestCaseMixin): # Remove this skipIf when Issue #39616 is resolved # https://github.com/saltstack/salt/issues/39616 - @skipIf(salt.utils.is_windows(), + @skipIf(salt.utils.platform.is_windows(), 'Causes pickling error on Windows: Issue #39616') def test_cmd_async(self): low = { diff --git a/tests/jenkins.py b/tests/jenkins.py index aea08efead..2eda9878f9 100644 --- a/tests/jenkins.py +++ b/tests/jenkins.py @@ -21,8 +21,8 @@ import subprocess import random # Import Salt libs -import salt.utils import salt.utils.files +import salt.utils.stringutils try: from salt.utils.nb_popen import NonBlockingPopen except ImportError: @@ -463,7 +463,7 @@ def run(opts): delete_vm(opts) sys.exit(retcode) - outstr = salt.utils.to_str(stdout).strip() + outstr = salt.utils.stringutils.to_str(stdout).strip() if not outstr: print('Failed to get the bootstrapped minion version(no output). Exit code: {0}'.format(retcode)) sys.stdout.flush() @@ -533,9 +533,9 @@ def run(opts): stdout, stderr = proc.communicate() if stdout: - print(salt.utils.to_str(stdout)) + print(salt.utils.stringutils.to_str(stdout)) if stderr: - print(salt.utils.to_str(stderr)) + print(salt.utils.stringutils.to_str(stderr)) sys.stdout.flush() retcode = proc.returncode @@ -573,7 +573,7 @@ def run(opts): # DO NOT print the state return here! 
print('Cloud configuration files provisioned via pillar.') if stderr: - print(salt.utils.to_str(stderr)) + print(salt.utils.stringutils.to_str(stderr)) sys.stdout.flush() retcode = proc.returncode @@ -609,9 +609,9 @@ def run(opts): stdout, stderr = proc.communicate() if stdout: - print(salt.utils.to_str(stdout)) + print(salt.utils.stringutils.to_str(stdout)) if stderr: - print(salt.utils.to_str(stderr)) + print(salt.utils.stringutils.to_str(stderr)) sys.stdout.flush() retcode = proc.returncode @@ -667,7 +667,7 @@ def run(opts): sys.exit(retcode) print('matches!') except ValueError: - print('Failed to load any JSON from \'{0}\''.format(salt.utils.to_str(stdout).strip())) + print('Failed to load any JSON from \'{0}\''.format(salt.utils.stringutils.to_str(stdout).strip())) if opts.test_git_commit is not None: test_git_commit_downtime = random.randint(1, opts.splay) @@ -714,7 +714,7 @@ def run(opts): sys.exit(retcode) print('matches!') except ValueError: - print('Failed to load any JSON from \'{0}\''.format(salt.utils.to_str(stdout).strip())) + print('Failed to load any JSON from \'{0}\''.format(salt.utils.stringutils.to_str(stdout).strip())) # Run tests here test_begin_downtime = random.randint(3, opts.splay) @@ -737,11 +737,11 @@ def run(opts): ) stdout, stderr = proc.communicate() - outstr = salt.utils.to_str(stdout) + outstr = salt.utils.stringutils.to_str(stdout) if outstr: print(outstr) if stderr: - print(salt.utils.to_str(stderr)) + print(salt.utils.stringutils.to_str(stderr)) sys.stdout.flush() try: @@ -781,9 +781,9 @@ def run(opts): stdout, stderr = proc.communicate() if stdout: - print(salt.utils.to_str(stdout)) + print(salt.utils.stringutils.to_str(stdout)) if stderr: - print(salt.utils.to_str(stderr)) + print(salt.utils.stringutils.to_str(stderr)) sys.stdout.flush() # Grab packages and log file (or just log file if build failed) diff --git a/tests/minionswarm.py b/tests/minionswarm.py index e5ea7f1d71..ab4ece1fb5 100644 --- a/tests/minionswarm.py +++ b/tests/minionswarm.py @@ -26,7 +26,7 @@ import salt # Import third party libs import yaml -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin diff --git a/tests/perf/fire_fake.py b/tests/perf/fire_fake.py index 6b3cadd9ec..224bacca8d 100644 --- a/tests/perf/fire_fake.py +++ b/tests/perf/fire_fake.py @@ -3,7 +3,6 @@ from __future__ import absolute_import, print_function # Import system libs import sys -import time import datetime # Import salt libs diff --git a/tests/runtests.py b/tests/runtests.py index 21c457a9b3..b698adc077 100755 --- a/tests/runtests.py +++ b/tests/runtests.py @@ -57,9 +57,9 @@ except ImportError as exc: raise exc from tests.integration import TestDaemon # pylint: disable=W0403 -import salt.utils +import salt.utils.platform -if not salt.utils.is_windows(): +if not salt.utils.platform.is_windows(): import resource # Import Salt Testing libs @@ -435,8 +435,10 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): # Turn on expensive tests execution os.environ['EXPENSIVE_TESTS'] = 'True' - import salt.utils - if salt.utils.is_windows(): + # This fails even with salt.utils.platform imported in the global + # scope, unless we import it again here. 
+ import salt.utils.platform + if salt.utils.platform.is_windows(): import salt.utils.win_functions current_user = salt.utils.win_functions.get_current_user() if current_user == 'SYSTEM': @@ -490,7 +492,7 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): return self.run_suite(full_path, display_name, suffix='test_*.py') def start_daemons_only(self): - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): self.set_filehandle_limits('integration') try: print_header( @@ -566,7 +568,7 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): for integration tests or unit tests ''' # Get current limits - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): import win32file prev_hard = win32file._getmaxstdio() prev_soft = 512 @@ -602,7 +604,7 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): '{0}, hard: {1}'.format(soft, hard) ) try: - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): hard = 2048 if hard > 2048 else hard win32file._setmaxstdio(hard) else: @@ -639,7 +641,7 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): # `unit.` to --name. We don't need the tests daemon # running return [True] - if not salt.utils.is_windows(): + if not salt.utils.platform.is_windows(): self.set_filehandle_limits('integration') try: diff --git a/tests/support/case.py b/tests/support/case.py index 56b0116b0a..1a757f5350 100644 --- a/tests/support/case.py +++ b/tests/support/case.py @@ -35,7 +35,7 @@ from tests.support.mixins import AdaptedConfigurationTestCaseMixin, SaltClientTe from tests.support.paths import ScriptPathMixin, INTEGRATION_TEST_DIR, CODE_DIR, PYEXEC, SCRIPT_DIR # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import cStringIO # pylint: disable=import-error STATE_FUNCTION_RUNNING_RE = re.compile( @@ -371,7 +371,7 @@ class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin): finally: try: if os.path.exists(tmp_file.name): - if isinstance(tmp_file.name, str): + if isinstance(tmp_file.name, six.string_types): # tmp_file.name is an int when using SpooledTemporaryFiles # int types cannot be used with os.remove() in Python 3 os.remove(tmp_file.name) @@ -403,7 +403,7 @@ class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin): finally: try: if os.path.exists(tmp_file.name): - if isinstance(tmp_file.name, str): + if isinstance(tmp_file.name, six.string_types): # tmp_file.name is an int when using SpooledTemporaryFiles # int types cannot be used with os.remove() in Python 3 os.remove(tmp_file.name) diff --git a/tests/support/cptestcase.py b/tests/support/cptestcase.py index ea2845f46d..cb61020840 100644 --- a/tests/support/cptestcase.py +++ b/tests/support/cptestcase.py @@ -37,7 +37,7 @@ from tests.support.case import TestCase # Import 3rd-party libs # pylint: disable=import-error import cherrypy # pylint: disable=3rd-party-module-not-gated -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import StringIO # pylint: enable=import-error diff --git a/tests/support/gitfs.py b/tests/support/gitfs.py index 211a9a828a..0f794ec934 100644 --- a/tests/support/gitfs.py +++ b/tests/support/gitfs.py @@ -18,8 +18,8 @@ import time import yaml # Import Salt libs -import salt.utils import salt.utils.files +import salt.utils.path from salt.fileserver import gitfs from salt.pillar import git_pillar from salt.ext.six.moves import range # pylint: disable=redefined-builtin @@ -501,7 +501,7 @@ class GitPillarSSHTestBase(GitPillarTestBase, SSHDMixin): 
super(GitPillarSSHTestBase, self).setUp() self.sshd_proc = self.find_proc(name='sshd', search=self.sshd_config) - self.sshd_bin = salt.utils.which('sshd') + self.sshd_bin = salt.utils.path.which('sshd') if self.sshd_proc is None: self.spawn_server() diff --git a/tests/support/helpers.py b/tests/support/helpers.py index 4318a76a25..13c7158aaa 100644 --- a/tests/support/helpers.py +++ b/tests/support/helpers.py @@ -32,7 +32,7 @@ import types # Import 3rd-party libs import psutil # pylint: disable=3rd-party-module-not-gated -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range, builtins # pylint: disable=import-error,redefined-builtin try: from pytestsalt.utils import get_unused_localhost_port # pylint: disable=unused-import @@ -1025,6 +1025,7 @@ def requires_salt_modules(*names): def skip_if_binaries_missing(*binaries, **kwargs): import salt.utils + import salt.utils.path if len(binaries) == 1: if isinstance(binaries[0], (list, tuple, set, frozenset)): binaries = binaries[0] @@ -1039,14 +1040,14 @@ def skip_if_binaries_missing(*binaries, **kwargs): ) if check_all: for binary in binaries: - if salt.utils.which(binary) is None: + if salt.utils.path.which(binary) is None: return skip( '{0}The {1!r} binary was not found'.format( message and '{0}. '.format(message) or '', binary ) ) - elif salt.utils.which_bin(binaries) is None: + elif salt.utils.path.which_bin(binaries) is None: return skip( '{0}None of the following binaries was found: {1}'.format( message and '{0}. '.format(message) or '', diff --git a/tests/support/mixins.py b/tests/support/mixins.py index bb5ec86687..64a4bd4fbc 100644 --- a/tests/support/mixins.py +++ b/tests/support/mixins.py @@ -30,9 +30,12 @@ from tests.support.runtests import RUNTIME_VARS from tests.support.paths import CODE_DIR # Import salt libs -#import salt.config -import salt.utils +import salt.config +import salt.utils # Can be removed once namespaced_function is moved +import salt.utils.event import salt.utils.files +import salt.utils.path +import salt.utils.stringutils import salt.version import salt.exceptions from salt.utils.verify import verify_env @@ -41,7 +44,7 @@ from salt._compat import ElementTree as etree # Import 3rd-party libs import yaml -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=import-error,redefined-builtin log = logging.getLogger(__name__) @@ -245,9 +248,8 @@ class ShellCaseCommonTestsMixin(CheckShellBinaryNameAndVersionMixin): def test_salt_with_git_version(self): if getattr(self, '_call_binary_', None) is None: self.skipTest('\'_call_binary_\' not defined.') - from salt.utils import which from salt.version import __version_info__, SaltStackVersion - git = which('git') + git = salt.utils.path.which('git') if not git: self.skipTest('The git binary is not available') @@ -273,7 +275,7 @@ class ShellCaseCommonTestsMixin(CheckShellBinaryNameAndVersionMixin): self.skipTest( 'Failed to get the output of \'git describe\'. 
' 'Error: \'{0}\''.format( - salt.utils.to_str(err) + salt.utils.stringutils.to_str(err) ) ) diff --git a/tests/support/mock.py b/tests/support/mock.py index 40e3d1f999..4046f2e05e 100644 --- a/tests/support/mock.py +++ b/tests/support/mock.py @@ -15,7 +15,7 @@ from __future__ import absolute_import import sys # Import salt libs -import salt.ext.six as six +from salt.ext import six try: if sys.version_info >= (3,): @@ -68,7 +68,7 @@ except ImportError as exc: class MagicMock(object): - __name__ = '{0}.fakemock'.format(__name__) + __name__ = '{0}.fakemock'.format(__name__) # future lint: disable=non-unicode-string def __init__(self, *args, **kwargs): pass diff --git a/tests/support/parser/__init__.py b/tests/support/parser/__init__.py index 7584163926..429ad4f0cf 100644 --- a/tests/support/parser/__init__.py +++ b/tests/support/parser/__init__.py @@ -32,7 +32,7 @@ from tests.support.unit import TestLoader, TextTestRunner from tests.support.xmlunit import HAS_XMLRUNNER, XMLTestRunner # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six try: from tests.support.ext import console WIDTH, HEIGHT = console.getTerminalSize() diff --git a/tests/support/paths.py b/tests/support/paths.py index 5f7bcb663d..46bc1a3cbb 100644 --- a/tests/support/paths.py +++ b/tests/support/paths.py @@ -81,9 +81,9 @@ SCRIPT_TEMPLATES = { ], 'common': [ 'from salt.scripts import salt_{0}\n', - 'from salt.utils import is_windows\n\n', + 'import salt.utils.platform\n\n', 'if __name__ == \'__main__\':\n', - ' if is_windows():\n', + ' if salt.utils.platform.is_windows():\n', ' import os.path\n', ' import py_compile\n', ' cfile = os.path.splitext(__file__)[0] + ".pyc"\n', diff --git a/tests/support/runtests.py b/tests/support/runtests.py index 6bee7d4c54..2e8bc9b0a2 100644 --- a/tests/support/runtests.py +++ b/tests/support/runtests.py @@ -59,7 +59,7 @@ import multiprocessing import tests.support.paths as paths # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six try: import coverage # pylint: disable=import-error HAS_COVERAGE = True diff --git a/tests/support/unit.py b/tests/support/unit.py index 57c61d5132..841e714e32 100644 --- a/tests/support/unit.py +++ b/tests/support/unit.py @@ -26,7 +26,7 @@ from __future__ import absolute_import import os import sys import logging -import salt.ext.six as six +from salt.ext import six try: import psutil HAS_PSUTIL = True diff --git a/tests/support/xmlunit.py b/tests/support/xmlunit.py index 1196aa0f8c..5c7d350a5f 100644 --- a/tests/support/xmlunit.py +++ b/tests/support/xmlunit.py @@ -19,7 +19,7 @@ import sys import logging # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/tests/unit/beacons/test_adb_beacon.py b/tests/unit/beacons/test_adb_beacon.py index 1ae3c2e7c4..e0d9f738c1 100644 --- a/tests/unit/beacons/test_adb_beacon.py +++ b/tests/unit/beacons/test_adb_beacon.py @@ -27,7 +27,7 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): } def test_no_adb_command(self): - with patch('salt.utils.which') as mock: + with patch('salt.utils.path.which') as mock: mock.return_value = None ret = adb.__virtual__() @@ -36,7 +36,7 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): self.assertFalse(ret) def test_with_adb_command(self): - with patch('salt.utils.which') as mock: + with patch('salt.utils.path.which') as mock: mock.return_value = '/usr/bin/adb' ret = adb.__virtual__() @@ -44,50 +44,49 @@ class ADBBeaconTestCase(TestCase, 
LoaderModuleMockMixin): mock.assert_called_once_with('adb') self.assertEqual(ret, 'adb') - def test_non_dict_config(self): - config = [] - - log_mock = Mock() - with patch.object(adb, 'log', log_mock): - - ret = adb.beacon(config) - - self.assertEqual(ret, []) - log_mock.info.assert_called_once_with('Configuration for adb beacon must be a dict.') - - def test_empty_config(self): + def test_non_list_config(self): config = {} - log_mock = Mock() - with patch.object(adb, 'log', log_mock): - ret = adb.beacon(config) + ret = adb.validate(config) - self.assertEqual(ret, []) - log_mock.info.assert_called_once_with('Configuration for adb beacon must include a states array.') + self.assertEqual(ret, (False, 'Configuration for adb beacon must' + ' be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = adb.validate(config) + + self.assertEqual(ret, (False, 'Configuration for adb beacon must' + ' include a states array.')) def test_invalid_states(self): - config = {'states': ['Random', 'Failings']} + config = [{'states': ['Random', 'Failings']}] - log_mock = Mock() - with patch.object(adb, 'log', log_mock): + ret = adb.validate(config) - ret = adb.beacon(config) - - self.assertEqual(ret, []) - log_mock.info.assert_called_once_with('Need a one of the following adb states:' - ' offline, bootloader, device, host, recovery, ' - 'no permissions, sideload, unauthorized, unknown, missing') + self.assertEqual(ret, (False, 'Need a one of the following' + ' adb states: offline, bootloader,' + ' device, host, recovery, no' + ' permissions, sideload,' + ' unauthorized, unknown, missing')) def test_device_state(self): - config = {'states': ['device']} + config = [{'states': ['device']}] mock = Mock(return_value='List of devices attached\nHTC\tdevice',) with patch.dict(adb.__salt__, {'cmd.run': mock}): + + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) - self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}]) + self.assertEqual(ret, [{'device': 'HTC', + 'state': 'device', + 'tag': 'device'}]) def test_device_state_change(self): - config = {'states': ['offline']} + config = [{'states': ['offline']}] out = [ 'List of devices attached\nHTC\tdevice', @@ -97,14 +96,19 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) self.assertEqual(ret, []) ret = adb.beacon(config) - self.assertEqual(ret, [{'device': 'HTC', 'state': 'offline', 'tag': 'offline'}]) + self.assertEqual(ret, [{'device': 'HTC', + 'state': 'offline', + 'tag': 'offline'}]) def test_multiple_devices(self): - config = {'states': ['offline', 'device']} + config = [{'states': ['offline', 'device']}] out = [ 'List of devices attached\nHTC\tdevice', @@ -114,8 +118,13 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) - self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}]) + self.assertEqual(ret, [{'device': 'HTC', + 'state': 'device', + 'tag': 'device'}]) ret = adb.beacon(config) self.assertEqual(ret, [ @@ -124,16 +133,19 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): ]) def 
test_no_devices_with_different_states(self): - config = {'states': ['offline'], 'no_devices_event': True} + config = [{'states': ['offline'], 'no_devices_event': True}] mock = Mock(return_value='List of devices attached\nHTC\tdevice') with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) self.assertEqual(ret, []) def test_no_devices_no_repeat(self): - config = {'states': ['offline', 'device'], 'no_devices_event': True} + config = [{'states': ['offline', 'device'], 'no_devices_event': True}] out = [ 'List of devices attached\nHTC\tdevice', @@ -144,8 +156,13 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) - self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}]) + self.assertEqual(ret, [{'device': 'HTC', + 'state': 'device', + 'tag': 'device'}]) ret = adb.beacon(config) self.assertEqual(ret, [{'tag': 'no_devices'}]) @@ -154,7 +171,7 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(ret, []) def test_no_devices(self): - config = {'states': ['offline', 'device'], 'no_devices_event': True} + config = [{'states': ['offline', 'device'], 'no_devices_event': True}] out = [ 'List of devices attached', @@ -164,6 +181,9 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) self.assertEqual(ret, [{'tag': 'no_devices'}]) @@ -171,7 +191,7 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(ret, []) def test_device_missing(self): - config = {'states': ['device', 'missing']} + config = [{'states': ['device', 'missing']}] out = [ 'List of devices attached\nHTC\tdevice', @@ -183,37 +203,56 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): - ret = adb.beacon(config) - self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}]) + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) ret = adb.beacon(config) - self.assertEqual(ret, [{'device': 'HTC', 'state': 'missing', 'tag': 'missing'}]) + self.assertEqual(ret, [{'device': 'HTC', + 'state': 'device', + 'tag': 'device'}]) ret = adb.beacon(config) - self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}]) + self.assertEqual(ret, [{'device': 'HTC', + 'state': 'missing', + 'tag': 'missing'}]) + + ret = adb.beacon(config) + self.assertEqual(ret, [{'device': 'HTC', + 'state': 'device', + 'tag': 'device'}]) ret = adb.beacon(config) self.assertEqual(ret, []) def test_with_startup(self): - config = {'states': ['device']} + config = [{'states': ['device']}] mock = Mock(return_value='* daemon started successfully *\nList of devices attached\nHTC\tdevice',) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) - self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}]) + self.assertEqual(ret, [{'device': 'HTC', + 'state': 'device', + 'tag': 'device'}]) def test_with_user(self): - 
config = {'states': ['device'], 'user': 'fred'} + config = [{'states': ['device'], 'user': 'fred'}] mock = Mock(return_value='* daemon started successfully *\nList of devices attached\nHTC\tdevice') with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) mock.assert_called_once_with('adb devices', runas='fred') - self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}]) + self.assertEqual(ret, [{'device': 'HTC', + 'state': 'device', + 'tag': 'device'}]) def test_device_low_battery(self): - config = {'states': ['device'], 'battery_low': 30} + config = [{'states': ['device'], 'battery_low': 30}] out = [ 'List of devices attached\nHTC\tdevice', @@ -221,12 +260,15 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): ] mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}, {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}]) def test_device_no_repeat(self): - config = {'states': ['device'], 'battery_low': 30} + config = [{'states': ['device'], 'battery_low': 30}] out = [ 'List of devices attached\nHTC\tdevice', @@ -236,6 +278,9 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): ] mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}, {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}]) @@ -244,7 +289,7 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(ret, []) def test_device_no_repeat_capacity_increase(self): - config = {'states': ['device'], 'battery_low': 75} + config = [{'states': ['device'], 'battery_low': 75}] out = [ 'List of devices attached\nHTC\tdevice', @@ -254,6 +299,9 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): ] mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}, {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}]) @@ -262,7 +310,7 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(ret, []) def test_device_no_repeat_with_not_found_state(self): - config = {'states': ['offline'], 'battery_low': 30} + config = [{'states': ['offline'], 'battery_low': 30}] out = [ 'List of devices attached\nHTC\tdevice', @@ -272,6 +320,9 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): ] mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) self.assertEqual(ret, [{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}]) @@ -279,7 +330,7 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(ret, []) def test_device_battery_charged(self): - config = {'states': ['device'], 'battery_low': 30} + config = [{'states': ['device'], 'battery_low': 30}] out = [ 'List of devices attached\nHTC\tdevice', @@ -287,11 +338,16 @@ class 
ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): ] mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) - self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}]) + self.assertEqual(ret, [{'device': 'HTC', + 'state': 'device', + 'tag': 'device'}]) def test_device_low_battery_equal(self): - config = {'states': ['device'], 'battery_low': 25} + config = [{'states': ['device'], 'battery_low': 25}] out = [ 'List of devices attached\nHTC\tdevice', @@ -299,12 +355,15 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): ] mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}, {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}]) def test_device_battery_not_found(self): - config = {'states': ['device'], 'battery_low': 25} + config = [{'states': ['device'], 'battery_low': 25}] out = [ 'List of devices attached\nHTC\tdevice', @@ -312,11 +371,14 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): ] mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}]) def test_device_repeat_multi(self): - config = {'states': ['offline'], 'battery_low': 35} + config = [{'states': ['offline'], 'battery_low': 35}] out = [ 'List of devices attached\nHTC\tdevice', @@ -330,6 +392,9 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): ] mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) self.assertEqual(ret, [{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}]) @@ -343,7 +408,7 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(ret, []) def test_weird_batteries(self): - config = {'states': ['device'], 'battery_low': 25} + config = [{'states': ['device'], 'battery_low': 25}] out = [ 'List of devices attached\nHTC\tdevice', @@ -351,11 +416,14 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): ] mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}]) def test_multiple_batteries(self): - config = {'states': ['device'], 'battery_low': 30} + config = [{'states': ['device'], 'battery_low': 30}] out = [ 'List of devices attached\nHTC\tdevice', @@ -363,12 +431,15 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): ] mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}, {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}]) def test_multiple_low_batteries(self): - config = {'states': ['device'], 'battery_low': 30} + config = [{'states': ['device'], 'battery_low': 
30}] out = [ 'List of devices attached\nHTC\tdevice', @@ -376,6 +447,9 @@ class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin): ] mock = Mock(side_effect=out) with patch.dict(adb.__salt__, {'cmd.run': mock}): + ret = adb.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = adb.beacon(config) self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}, {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}]) diff --git a/tests/unit/beacons/test_avahi_announce_beacon.py b/tests/unit/beacons/test_avahi_announce_beacon.py new file mode 100644 index 0000000000..baf5d0cb3e --- /dev/null +++ b/tests/unit/beacons/test_avahi_announce_beacon.py @@ -0,0 +1,44 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.avahi_announce as avahi_announce + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class AvahiAnnounceBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.avahi_announce + ''' + + def setup_loader_modules(self): + return { + avahi_announce: { + 'last_state': {}, + 'last_state_extra': {'no_devices': False} + } + } + + def test_non_list_config(self): + config = {} + + ret = avahi_announce.validate(config) + + self.assertEqual(ret, (False, 'Configuration for avahi_announce' + ' beacon must be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = avahi_announce.validate(config) + + self.assertEqual(ret, (False, 'Configuration for avahi_announce' + ' beacon must contain servicetype, port' + ' and txt items.')) diff --git a/tests/unit/beacons/test_bonjour_announce_beacon.py b/tests/unit/beacons/test_bonjour_announce_beacon.py new file mode 100644 index 0000000000..f10aa2a42e --- /dev/null +++ b/tests/unit/beacons/test_bonjour_announce_beacon.py @@ -0,0 +1,44 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.bonjour_announce as bonjour_announce + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class BonjourAnnounceBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.bonjour_announce + ''' + + def setup_loader_modules(self): + return { + bonjour_announce: { + 'last_state': {}, + 'last_state_extra': {'no_devices': False} + } + } + + def test_non_list_config(self): + config = {} + + ret = bonjour_announce.validate(config) + + self.assertEqual(ret, (False, 'Configuration for bonjour_announce' + ' beacon must be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = bonjour_announce.validate(config) + + self.assertEqual(ret, (False, 'Configuration for bonjour_announce' + ' beacon must contain servicetype, port' + ' and txt items.')) diff --git a/tests/unit/beacons/test_diskusage_beacon.py b/tests/unit/beacons/test_diskusage_beacon.py new file mode 100644 index 0000000000..b771220dd0 --- /dev/null +++ b/tests/unit/beacons/test_diskusage_beacon.py @@ -0,0 +1,88 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import +from collections import namedtuple + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, 
patch, MagicMock +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.diskusage as diskusage + +STUB_DISK_PARTITION = namedtuple( + 'partition', + 'device mountpoint fstype, opts')( + '/dev/disk0s2', '/', 'hfs', + 'rw,local,rootfs,dovolfs,journaled,multilabel') +STUB_DISK_USAGE = namedtuple('usage', + 'total used free percent')(1000, 500, 500, 50) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class DiskUsageBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.diskusage + ''' + + def setup_loader_modules(self): + return {} + + def test_non_list_config(self): + config = {} + + ret = diskusage.validate(config) + + self.assertEqual(ret, (False, 'Configuration for diskusage beacon must' + ' be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = diskusage.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + def test_diskusage_match(self): + with patch('psutil.disk_partitions', + MagicMock(return_value=[STUB_DISK_PARTITION])), \ + patch('psutil.disk_usage', + MagicMock(return_value=STUB_DISK_USAGE)): + config = [{'/': '50%'}] + + ret = diskusage.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = diskusage.beacon(config) + self.assertEqual(ret, [{'diskusage': 50, 'mount': '/'}]) + + def test_diskusage_nomatch(self): + with patch('psutil.disk_partitions', + MagicMock(return_value=[STUB_DISK_PARTITION])), \ + patch('psutil.disk_usage', + MagicMock(return_value=STUB_DISK_USAGE)): + config = [{'/': '70%'}] + + ret = diskusage.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = diskusage.beacon(config) + self.assertNotEqual(ret, [{'diskusage': 50, 'mount': '/'}]) + + def test_diskusage_match_regex(self): + with patch('psutil.disk_partitions', + MagicMock(return_value=[STUB_DISK_PARTITION])), \ + patch('psutil.disk_usage', + MagicMock(return_value=STUB_DISK_USAGE)): + config = [{r'^\/': '50%'}] + + ret = diskusage.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = diskusage.beacon(config) + self.assertEqual(ret, [{'diskusage': 50, 'mount': '/'}]) diff --git a/tests/unit/beacons/test_glxinfo.py b/tests/unit/beacons/test_glxinfo.py deleted file mode 100644 index fae753894d..0000000000 --- a/tests/unit/beacons/test_glxinfo.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding: utf-8 - -# Python libs -from __future__ import absolute_import - -# Salt libs -import salt.beacons.glxinfo as glxinfo - -# Salt testing libs -from tests.support.unit import skipIf, TestCase -from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, Mock - -# Import test suite libs -from tests.support.mixins import LoaderModuleMockMixin - - -@skipIf(NO_MOCK, NO_MOCK_REASON) -class GLXInfoBeaconTestCase(TestCase, LoaderModuleMockMixin): - ''' - Test case for salt.beacons.glxinfo - ''' - - def setup_loader_modules(self): - return {glxinfo: {'last_state': {}}} - - def test_no_adb_command(self): - with patch('salt.utils.which') as mock: - mock.return_value = None - - ret = glxinfo.__virtual__() - - mock.assert_called_once_with('glxinfo') - self.assertFalse(ret) - - def test_with_adb_command(self): - with patch('salt.utils.which') as mock: - mock.return_value = '/usr/bin/glxinfo' - - ret = glxinfo.__virtual__() - - mock.assert_called_once_with('glxinfo') - self.assertEqual(ret, 'glxinfo') - - def test_non_dict_config(self): - config = [] - - log_mock = Mock() - with patch.object(glxinfo, 'log', log_mock): - ret = 
glxinfo.beacon(config) - - self.assertEqual(ret, []) - - def test_no_user(self): - config = {'screen_event': True} - - log_mock = Mock() - with patch.object(glxinfo, 'log', log_mock): - ret = glxinfo.beacon(config) - self.assertEqual(ret, []) - - def test_screen_state(self): - config = {'screen_event': True, 'user': 'frank'} - - mock = Mock(return_value=0) - with patch.dict(glxinfo.__salt__, {'cmd.retcode': mock}): - ret = glxinfo.beacon(config) - self.assertEqual(ret, [{'tag': 'screen_event', 'screen_available': True}]) - mock.assert_called_once_with('DISPLAY=:0 glxinfo', runas='frank', python_shell=True) - - def test_screen_state_missing(self): - config = {'screen_event': True, 'user': 'frank'} - - mock = Mock(return_value=255) - with patch.dict(glxinfo.__salt__, {'cmd.retcode': mock}): - ret = glxinfo.beacon(config) - self.assertEqual(ret, [{'tag': 'screen_event', 'screen_available': False}]) - - def test_screen_state_no_repeat(self): - config = {'screen_event': True, 'user': 'frank'} - - mock = Mock(return_value=255) - with patch.dict(glxinfo.__salt__, {'cmd.retcode': mock}): - ret = glxinfo.beacon(config) - self.assertEqual(ret, [{'tag': 'screen_event', 'screen_available': False}]) - - ret = glxinfo.beacon(config) - self.assertEqual(ret, []) - - def test_screen_state_change(self): - config = {'screen_event': True, 'user': 'frank'} - - mock = Mock(side_effect=[255, 0]) - with patch.dict(glxinfo.__salt__, {'cmd.retcode': mock}): - ret = glxinfo.beacon(config) - self.assertEqual(ret, [{'tag': 'screen_event', 'screen_available': False}]) - - ret = glxinfo.beacon(config) - self.assertEqual(ret, [{'tag': 'screen_event', 'screen_available': True}]) diff --git a/tests/unit/beacons/test_glxinfo_beacon.py b/tests/unit/beacons/test_glxinfo_beacon.py new file mode 100644 index 0000000000..3457b06743 --- /dev/null +++ b/tests/unit/beacons/test_glxinfo_beacon.py @@ -0,0 +1,118 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import + +# Salt libs +import salt.beacons.glxinfo as glxinfo + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, Mock + +# Import test suite libs +from tests.support.mixins import LoaderModuleMockMixin + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class GLXInfoBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.glxinfo + ''' + + def setup_loader_modules(self): + return {glxinfo: {'last_state': {}}} + + def test_no_glxinfo_command(self): + with patch('salt.utils.path.which') as mock: + mock.return_value = None + + ret = glxinfo.__virtual__() + + mock.assert_called_once_with('glxinfo') + self.assertFalse(ret) + + def test_with_glxinfo_command(self): + with patch('salt.utils.path.which') as mock: + mock.return_value = '/usr/bin/glxinfo' + + ret = glxinfo.__virtual__() + + mock.assert_called_once_with('glxinfo') + self.assertEqual(ret, 'glxinfo') + + def test_non_list_config(self): + config = {} + + ret = glxinfo.validate(config) + + self.assertEqual(ret, (False, 'Configuration for glxinfo ' + 'beacon must be a list.')) + + def test_no_user(self): + config = [{'screen_event': True}] + + _expected = (False, 'Configuration for glxinfo beacon must ' + 'include a user as glxinfo is not ' + 'available to root.') + ret = glxinfo.validate(config) + self.assertEqual(ret, _expected) + + def test_screen_state(self): + config = [{'screen_event': True, + 'user': 'frank'}] + + mock = Mock(return_value=0) + with patch.dict(glxinfo.__salt__, {'cmd.retcode': mock}): + 
+ ret = glxinfo.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = glxinfo.beacon(config) + self.assertEqual(ret, [{'tag': 'screen_event', + 'screen_available': True}]) + mock.assert_called_once_with('DISPLAY=:0 glxinfo', + runas='frank', python_shell=True) + + def test_screen_state_missing(self): + config = [{'screen_event': True, 'user': 'frank'}] + + mock = Mock(return_value=255) + with patch.dict(glxinfo.__salt__, {'cmd.retcode': mock}): + ret = glxinfo.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = glxinfo.beacon(config) + self.assertEqual(ret, [{'tag': 'screen_event', + 'screen_available': False}]) + + def test_screen_state_no_repeat(self): + config = [{'screen_event': True, 'user': 'frank'}] + + mock = Mock(return_value=255) + with patch.dict(glxinfo.__salt__, {'cmd.retcode': mock}): + ret = glxinfo.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = glxinfo.beacon(config) + self.assertEqual(ret, [{'tag': 'screen_event', + 'screen_available': False}]) + + ret = glxinfo.beacon(config) + self.assertEqual(ret, []) + + def test_screen_state_change(self): + config = [{'screen_event': True, 'user': 'frank'}] + + mock = Mock(side_effect=[255, 0]) + with patch.dict(glxinfo.__salt__, {'cmd.retcode': mock}): + ret = glxinfo.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = glxinfo.beacon(config) + self.assertEqual(ret, [{'tag': 'screen_event', + 'screen_available': False}]) + + ret = glxinfo.beacon(config) + self.assertEqual(ret, [{'tag': 'screen_event', + 'screen_available': True}]) diff --git a/tests/unit/beacons/test_haproxy_beacon.py b/tests/unit/beacons/test_haproxy_beacon.py new file mode 100644 index 0000000000..34a5a46eec --- /dev/null +++ b/tests/unit/beacons/test_haproxy_beacon.py @@ -0,0 +1,78 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.haproxy as haproxy + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class HAProxyBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.haproxy + ''' + + def setup_loader_modules(self): + return { + haproxy: { + '__context__': {}, + '__salt__': {}, + } + } + + def test_non_list_config(self): + config = {} + + ret = haproxy.validate(config) + + self.assertEqual(ret, (False, 'Configuration for haproxy beacon must' + ' be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = haproxy.validate(config) + + self.assertEqual(ret, (False, 'Configuration for haproxy beacon ' + 'requires backends.')) + + def test_no_servers(self): + config = [{'backends': {'www-backend': {'threshold': 45}}}] + ret = haproxy.validate(config) + + self.assertEqual(ret, (False, 'Backends for haproxy ' + 'beacon require servers.')) + + def test_threshold_reached(self): + config = [{'backends': {'www-backend': {'threshold': 45, + 'servers': ['web1'] + }}}] + ret = haproxy.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + mock = MagicMock(return_value=46) + with patch.dict(haproxy.__salt__, {'haproxy.get_sessions': mock}): + ret = haproxy.beacon(config) + self.assertEqual(ret, [{'threshold': 45, + 'scur': 46, + 'server': 'web1'}]) + + def test_threshold_not_reached(self): + config = [{'backends': 
{'www-backend': {'threshold': 100, + 'servers': ['web1'] + }}}] + ret = haproxy.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + mock = MagicMock(return_value=50) + with patch.dict(haproxy.__salt__, {'haproxy.get_sessions': mock}): + ret = haproxy.beacon(config) + self.assertEqual(ret, []) diff --git a/tests/unit/beacons/test_inotify_beacon.py b/tests/unit/beacons/test_inotify_beacon.py index c4e36c9c7b..c071d4fde3 100644 --- a/tests/unit/beacons/test_inotify_beacon.py +++ b/tests/unit/beacons/test_inotify_beacon.py @@ -20,6 +20,9 @@ try: except ImportError: HAS_PYINOTIFY = False +import logging +log = logging.getLogger(__name__) + @skipIf(not HAS_PYINOTIFY, 'pyinotify is not available') class INotifyBeaconTestCase(TestCase, LoaderModuleMockMixin): @@ -37,13 +40,16 @@ class INotifyBeaconTestCase(TestCase, LoaderModuleMockMixin): shutil.rmtree(self.tmpdir, ignore_errors=True) def test_empty_config(self): - config = {} + config = [{}] ret = inotify.beacon(config) self.assertEqual(ret, []) def test_file_open(self): path = os.path.realpath(__file__) - config = {path: {'mask': ['open']}} + config = [{'files': {path: {'mask': ['open']}}}] + ret = inotify.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = inotify.beacon(config) self.assertEqual(ret, []) @@ -55,7 +61,10 @@ class INotifyBeaconTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(ret[0]['change'], 'IN_OPEN') def test_dir_no_auto_add(self): - config = {self.tmpdir: {'mask': ['create']}} + config = [{'files': {self.tmpdir: {'mask': ['create']}}}] + ret = inotify.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = inotify.beacon(config) self.assertEqual(ret, []) fp = os.path.join(self.tmpdir, 'tmpfile') @@ -71,7 +80,10 @@ class INotifyBeaconTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(ret, []) def test_dir_auto_add(self): - config = {self.tmpdir: {'mask': ['create', 'open'], 'auto_add': True}} + config = [{'files': {self.tmpdir: {'mask': ['create', 'open'], 'auto_add': True}}}] + ret = inotify.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = inotify.beacon(config) self.assertEqual(ret, []) fp = os.path.join(self.tmpdir, 'tmpfile') @@ -98,7 +110,10 @@ class INotifyBeaconTestCase(TestCase, LoaderModuleMockMixin): fp = os.path.join(dp2, 'tmpfile') with salt.utils.files.fopen(fp, 'w') as f: pass - config = {self.tmpdir: {'mask': ['open'], 'recurse': True}} + config = [{'files': {self.tmpdir: {'mask': ['open'], 'recurse': True}}}] + ret = inotify.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = inotify.beacon(config) self.assertEqual(ret, []) with salt.utils.files.fopen(fp) as f: @@ -115,9 +130,12 @@ class INotifyBeaconTestCase(TestCase, LoaderModuleMockMixin): def test_dir_recurse_auto_add(self): dp1 = os.path.join(self.tmpdir, 'subdir1') os.mkdir(dp1) - config = {self.tmpdir: {'mask': ['create', 'delete'], - 'recurse': True, - 'auto_add': True}} + config = [{'files': {self.tmpdir: {'mask': ['create', 'delete'], + 'recurse': True, + 'auto_add': True}}}] + ret = inotify.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + ret = inotify.beacon(config) self.assertEqual(ret, []) dp2 = os.path.join(dp1, 'subdir2') diff --git a/tests/unit/beacons/test_journald_beacon.py b/tests/unit/beacons/test_journald_beacon.py new file mode 100644 index 0000000000..7b9b109bf0 --- /dev/null +++ 
b/tests/unit/beacons/test_journald_beacon.py @@ -0,0 +1,120 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import +import datetime +from uuid import UUID + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, Mock +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.journald as journald +import salt.utils + +import logging +log = logging.getLogger(__name__) + +_STUB_JOURNALD_ENTRY = {'_BOOT_ID': UUID('ad3915a5-9008-4fec-a635-140606525497'), + '__MONOTONIC_TIMESTAMP': (datetime.timedelta(4, 28586, 703069), + UUID('ad3915a5-9008-4fec-a635-140606525497')), + '_AUDIT_LOGINUID': 1000, 'SYSLOG_FACILITY': 10, + '_SYSTEMD_SLICE': u'system.slice', + '_GID': 0, + '__REALTIME_TIMESTAMP': datetime.datetime(2017, 6, 27, + 20, 8, 16, + 468468), + '_AUDIT_SESSION': 351, 'PRIORITY': 6, + '_TRANSPORT': u'syslog', + '_HOSTNAME': u'hostname', + '_CAP_EFFECTIVE': u'3fffffffff', + '_SYSTEMD_UNIT': u'ssh.service', + '_MACHINE_ID': UUID('14fab5bb-228d-414b-bdf4-cbc62cb7ba54'), + '_PID': 15091, + 'SYSLOG_IDENTIFIER': u'sshd', + '_SOURCE_REALTIME_TIMESTAMP': datetime.datetime(2017, + 6, + 27, + 20, + 8, + 16, + 468454), + '_SYSTEMD_CGROUP': u'/system.slice/ssh.service', + '__CURSOR': 's=7711ee01b03446309383870171dd5839;i=a74e;b=ad3915a590084feca635140606525497;m=571f43f8 dd;t=552fc7ed1cdf4;x=4ca0a3d4f1905736', + '_COMM': u'sshd', + '_CMDLINE': u'sshd: gareth [priv]', + '_SYSTEMD_INVOCATION_ID': u'38a5d5aad292426d93bfaab72a69c2ab', + '_EXE': u'/usr/sbin/sshd', + '_UID': 0, + 'SYSLOG_PID': 15091, + 'MESSAGE': u'pam_unix(sshd:session): session opened for user username by (uid=0)'} + + +class SystemdJournaldMock(Mock): + ''' Request Mock''' + + returned_once = False + + def get_next(self, *args, **kwargs): + if not self.returned_once: + self.returned_once = True + return _STUB_JOURNALD_ENTRY + else: + return None + + def seek_tail(self, *args, **kwargs): + return {} + + def get_previous(self, *args, **kwargs): + return {} + + +SYSTEMD_MOCK = SystemdJournaldMock() + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class JournaldBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.journald + ''' + + def setup_loader_modules(self): + return { + journald: { + '__context__': { + 'systemd.journald': SYSTEMD_MOCK, + }, + '__salt__': {}, + } + } + + def test_non_list_config(self): + config = {} + + ret = journald.validate(config) + + self.assertEqual(ret, (False, 'Configuration for journald beacon must' + ' be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = journald.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + def test_journald_match(self): + config = [{'services': {'sshd': {'SYSLOG_IDENTIFIER': 'sshd', + 'PRIORITY': 6}}}] + + ret = journald.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + _expected_return = salt.utils.simple_types_filter(_STUB_JOURNALD_ENTRY) + _expected_return['tag'] = 'sshd' + + ret = journald.beacon(config) + self.assertEqual(ret, [_expected_return]) diff --git a/tests/unit/beacons/test_load_beacon.py b/tests/unit/beacons/test_load_beacon.py new file mode 100644 index 0000000000..37288b53ad --- /dev/null +++ b/tests/unit/beacons/test_load_beacon.py @@ -0,0 +1,61 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, 
NO_MOCK_REASON, patch, MagicMock +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.load as load + +import logging +log = logging.getLogger(__name__) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class LoadBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.load + ''' + + def setup_loader_modules(self): + return { + load: { + '__context__': {}, + '__salt__': {}, + } + } + + def test_non_list_config(self): + config = {} + + ret = load.validate(config) + + self.assertEqual(ret, (False, 'Configuration for load beacon must' + ' be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = load.validate(config) + + self.assertEqual(ret, (False, 'Averages configuration is required' + ' for load beacon.')) + + def test_load_match(self): + with patch('os.getloadavg', + MagicMock(return_value=(1.82, 1.84, 1.56))): + config = [{'averages': {'1m': [0.0, 2.0], + '5m': [0.0, 1.5], + '15m': [0.0, 1.0]}}] + + ret = load.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + _expected_return = [{'15m': 1.56, '1m': 1.82, '5m': 1.84}] + ret = load.beacon(config) + self.assertEqual(ret, _expected_return) diff --git a/tests/unit/beacons/test_log_beacon.py b/tests/unit/beacons/test_log_beacon.py new file mode 100644 index 0000000000..8551c48673 --- /dev/null +++ b/tests/unit/beacons/test_log_beacon.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, mock_open +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.log as log + +import logging +_log = logging.getLogger(__name__) + + +_STUB_LOG_ENTRY = 'Jun 29 12:58:51 hostname sshd[6536]: ' \ + 'pam_unix(sshd:session): session opened ' \ + 'for user username by (uid=0)\n' + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class LogBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.log + ''' + + def setup_loader_modules(self): + return { + log: { + '__context__': {'log.loc': 2}, + '__salt__': {}, + } + } + + def test_non_list_config(self): + config = {} + + ret = log.validate(config) + + self.assertEqual(ret, (False, 'Configuration for log beacon must' + ' be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = log.validate(config) + + self.assertEqual(ret, (False, 'Configuration for log beacon ' + 'must contain file option.')) + + def test_log_match(self): + with patch('salt.utils.files.fopen', + mock_open(read_data=_STUB_LOG_ENTRY)): + config = [{'file': '/var/log/auth.log', + 'tags': {'sshd': {'regex': '.*sshd.*'}} + }] + + ret = log.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + _expected_return = [{'error': '', + 'match': 'yes', + 'raw': _STUB_LOG_ENTRY.rstrip('\n'), + 'tag': 'sshd' + }] + ret = log.beacon(config) + self.assertEqual(ret, _expected_return) diff --git a/tests/unit/beacons/test_memusage_beacon.py b/tests/unit/beacons/test_memusage_beacon.py new file mode 100644 index 0000000000..e3219073d7 --- /dev/null +++ b/tests/unit/beacons/test_memusage_beacon.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import +from collections import namedtuple + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock +from tests.support.mixins import 
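The load beacon test above patches os.getloadavg() and configures [low, high] bounds per interval ('1m', '5m', '15m'); with the mocked values (1.82, 1.84, 1.56) the beacon reports the full load snapshot. A rough sketch of that kind of bounds check follows; the helper name is made up and the real beacon's trigger rules may be more involved than this.

import os

def load_snapshot_if_in_band(averages):
    # Unix-only: os.getloadavg() is the call the test above patches.
    one_m, five_m, fifteen_m = os.getloadavg()
    current = {'1m': one_m, '5m': five_m, '15m': fifteen_m}
    for interval, (low, high) in averages.items():
        if low <= current[interval] <= high:
            # Report the whole snapshot once any configured band matches.
            return [current]
    return []

# With the bounds from the test ({'1m': [0.0, 2.0], '5m': [0.0, 1.5],
# '15m': [0.0, 1.0]}) and the mocked load values, the '1m' band matches and the
# result is [{'1m': 1.82, '5m': 1.84, '15m': 1.56}], the same shape the test expects.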
LoaderModuleMockMixin + +# Salt libs +import salt.beacons.memusage as memusage + +STUB_MEMORY_USAGE = namedtuple('vmem', 'total available percent used free active inactive buffers cached shared')( + 15722012672, 9329594368, 40.7, 5137018880, + 4678086656, 6991405056, 2078953472, + 1156378624, 4750528512, 898908160) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class MemUsageBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.memusage + ''' + + def setup_loader_modules(self): + return {} + + def test_non_list_config(self): + config = {} + + ret = memusage.validate(config) + + self.assertEqual(ret, (False, 'Configuration for memusage beacon must' + ' be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = memusage.validate(config) + + self.assertEqual(ret, (False, 'Configuration for memusage beacon ' + 'requires percent.')) + + def test_memusage_match(self): + with patch('psutil.virtual_memory', + MagicMock(return_value=STUB_MEMORY_USAGE)): + + config = [{'percent': '40%'}, {'interval': 30}] + + ret = memusage.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = memusage.beacon(config) + self.assertEqual(ret, [{'memusage': 40.7}]) + + def test_memusage_nomatch(self): + with patch('psutil.virtual_memory', + MagicMock(return_value=STUB_MEMORY_USAGE)): + + config = [{'percent': '70%'}] + + ret = memusage.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = memusage.beacon(config) + self.assertNotEqual(ret, [{'memusage': 50}]) diff --git a/tests/unit/beacons/test_network_info_beacon.py b/tests/unit/beacons/test_network_info_beacon.py new file mode 100644 index 0000000000..0b76c50ecd --- /dev/null +++ b/tests/unit/beacons/test_network_info_beacon.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import +from collections import namedtuple + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.network_info as network_info + +import logging +log = logging.getLogger(__name__) + +STUB_NET_IO_COUNTERS = {'eth0': namedtuple('snetio', + 'bytes_sent bytes_recv \ + packets_sent packets_recv \ + errin errout \ + dropin \ + dropout')(93662618, 914626664, + 465694, 903802, 0, + 0, 0, 0)} + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class NetworkInfoBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.network_info + ''' + + def setup_loader_modules(self): + return { + network_info: { + '__context__': {}, + '__salt__': {}, + } + } + + def test_non_list_config(self): + config = {} + + ret = network_info.validate(config) + + self.assertEqual(ret, (False, 'Configuration for network_info beacon' + ' must be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = network_info.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + def test_network_info_equal(self): + with patch('salt.utils.psutil_compat.net_io_counters', + MagicMock(return_value=STUB_NET_IO_COUNTERS)): + config = [{'interfaces': {'eth0': {'type': 'equal', + 'bytes_sent': 914626664, + 'bytes_recv': 93662618, + 'packets_sent': 465694, + 'packets_recv': 903802, + 'errin': 0, + 'errout': 0, + 'dropin': 0, + 'dropout': 0}}}] + + ret = network_info.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + _expected_return = 
[{'interface': 'eth0', + 'network_info': {'bytes_recv': 914626664, + 'bytes_sent': 93662618, + 'dropin': 0, + 'dropout': 0, + 'errin': 0, + 'errout': 0, + 'packets_recv': 903802, + 'packets_sent': 465694}}] + + ret = network_info.beacon(config) + self.assertEqual(ret, _expected_return) + + def test_network_info_greater_than(self): + with patch('salt.utils.psutil_compat.net_io_counters', + MagicMock(return_value=STUB_NET_IO_COUNTERS)): + config = [{'interfaces': {'eth0': {'type': 'greater', + 'bytes_sent': 100000, + 'bytes_recv': 100000, + 'packets_sent': 100000, + 'packets_recv': 100000, + 'errin': 0, + 'errout': 0, + 'dropin': 0, + 'dropout': 0}}}] + + ret = network_info.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + _expected_return = [{'interface': 'eth0', + 'network_info': {'bytes_recv': 914626664, + 'bytes_sent': 93662618, + 'dropin': 0, + 'dropout': 0, + 'errin': 0, + 'errout': 0, + 'packets_recv': 903802, + 'packets_sent': 465694}}] + + ret = network_info.beacon(config) + self.assertEqual(ret, _expected_return) diff --git a/tests/unit/beacons/test_network_settings_beacon.py b/tests/unit/beacons/test_network_settings_beacon.py new file mode 100644 index 0000000000..c09e2f33fb --- /dev/null +++ b/tests/unit/beacons/test_network_settings_beacon.py @@ -0,0 +1,45 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.network_settings as network_settings + +import logging +log = logging.getLogger(__name__) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class NetworkSettingsBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.network_settings + ''' + + def setup_loader_modules(self): + return { + network_settings: { + '__context__': {}, + '__salt__': {}, + } + } + + def test_non_list_config(self): + config = {} + + ret = network_settings.validate(config) + + self.assertEqual(ret, (False, 'Configuration for network_settings' + ' beacon must be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = network_settings.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) diff --git a/tests/unit/beacons/test_ps_beacon.py b/tests/unit/beacons/test_ps_beacon.py new file mode 100644 index 0000000000..2ae5a5735b --- /dev/null +++ b/tests/unit/beacons/test_ps_beacon.py @@ -0,0 +1,76 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.ps as ps + +PATCH_OPTS = dict(autospec=True, spec_set=True) + + +class FakeProcess(object): + + def __init__(self, _name, pid): + self._name = _name + self.pid = pid + + def name(self): + return self._name + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class PSBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.[s] + ''' + + def setup_loader_modules(self): + return {} + + def test_non_list_config(self): + config = {} + + ret = ps.validate(config) + + self.assertEqual(ret, (False, 'Configuration for ps beacon must' + ' be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = ps.validate(config) + + self.assertEqual(ret, (False, 'Configuration for 
ps ' + 'beacon requires processes.')) + + def test_ps_running(self): + with patch('salt.utils.psutil_compat.process_iter', **PATCH_OPTS) as mock_process_iter: + mock_process_iter.return_value = [FakeProcess(_name='salt-master', pid=3), + FakeProcess(_name='salt-minion', pid=4)] + config = [{'processes': {'salt-master': 'running'}}] + + ret = ps.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = ps.beacon(config) + self.assertEqual(ret, [{'salt-master': 'Running'}]) + + def test_ps_not_running(self): + with patch('salt.utils.psutil_compat.process_iter', **PATCH_OPTS) as mock_process_iter: + mock_process_iter.return_value = [FakeProcess(_name='salt-master', pid=3), + FakeProcess(_name='salt-minion', pid=4)] + config = [{'processes': {'mysql': 'stopped'}}] + + ret = ps.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = ps.beacon(config) + self.assertEqual(ret, [{'mysql': 'Stopped'}]) diff --git a/tests/unit/beacons/test_salt_proxy_beacon.py b/tests/unit/beacons/test_salt_proxy_beacon.py new file mode 100644 index 0000000000..d02c75cdb8 --- /dev/null +++ b/tests/unit/beacons/test_salt_proxy_beacon.py @@ -0,0 +1,80 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import +from collections import namedtuple + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.salt_proxy as salt_proxy + +PATCH_OPTS = dict(autospec=True, spec_set=True) + +FakeProcess = namedtuple('Process', 'cmdline pid') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class SaltProxyBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.[s] + ''' + + def setup_loader_modules(self): + return { + salt_proxy: { + '__context__': {}, + '__salt__': {}, + } + } + + def test_non_list_config(self): + config = {} + + ret = salt_proxy.validate(config) + + self.assertEqual(ret, (False, 'Configuration for salt_proxy beacon' + ' must be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = salt_proxy.validate(config) + + self.assertEqual(ret, (False, 'Configuration for salt_proxy ' + 'beacon requires proxies.')) + + def test_salt_proxy_running(self): + mock = MagicMock(return_value={'result': True}) + with patch.dict(salt_proxy.__salt__, {'salt_proxy.is_running': mock}): + config = [{'proxies': {'p8000': ''}}] + + ret = salt_proxy.validate(config) + + ret = salt_proxy.beacon(config) + self.assertEqual(ret, [{'p8000': 'Proxy p8000 is already running'}]) + + def test_salt_proxy_not_running(self): + is_running_mock = MagicMock(return_value={'result': False}) + configure_mock = MagicMock(return_value={'result': True, + 'changes': {'new': 'Salt Proxy: Started proxy process for p8000', + 'old': []}}) + cmd_run_mock = MagicMock(return_value={'pid': 1000, + 'retcode': 0, + 'stderr': '', + 'stdout': ''}) + with patch.dict(salt_proxy.__salt__, + {'salt_proxy.is_running': is_running_mock}), \ + patch.dict(salt_proxy.__salt__, + {'salt_proxy.configure_proxy': configure_mock}), \ + patch.dict(salt_proxy.__salt__, + {'cmd.run_all': cmd_run_mock}): + config = [{'proxies': {'p8000': ''}}] + + ret = salt_proxy.validate(config) + + ret = salt_proxy.beacon(config) + self.assertEqual(ret, [{'p8000': 'Proxy p8000 was started'}]) diff --git a/tests/unit/beacons/test_sensehat_beacon.py b/tests/unit/beacons/test_sensehat_beacon.py new file mode 100644 
index 0000000000..bd36ee500c --- /dev/null +++ b/tests/unit/beacons/test_sensehat_beacon.py @@ -0,0 +1,104 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.sensehat as sensehat + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class SensehatBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.[s] + ''' + + def setup_loader_modules(self): + + self.HUMIDITY_MOCK = MagicMock(return_value=80) + self.TEMPERATURE_MOCK = MagicMock(return_value=30) + self.PRESSURE_MOCK = MagicMock(return_value=1500) + + self.addCleanup(delattr, self, 'HUMIDITY_MOCK') + self.addCleanup(delattr, self, 'TEMPERATURE_MOCK') + self.addCleanup(delattr, self, 'PRESSURE_MOCK') + + return { + sensehat: { + '__context__': {}, + '__salt__': {'sensehat.get_humidity': self.HUMIDITY_MOCK, + 'sensehat.get_temperature': self.TEMPERATURE_MOCK, + 'sensehat.get_pressure': self.PRESSURE_MOCK + }, + } + } + + def test_non_list_config(self): + config = {} + + ret = sensehat.validate(config) + + self.assertEqual(ret, (False, 'Configuration for sensehat beacon' + ' must be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = sensehat.validate(config) + + self.assertEqual(ret, (False, 'Configuration for sensehat ' + 'beacon requires sensors.')) + + def test_sensehat_humidity_match(self): + + config = [{'sensors': {'humidity': '70%'}}] + + ret = sensehat.validate(config) + + ret = sensehat.beacon(config) + self.assertEqual(ret, [{'tag': 'sensehat/humidity', + 'humidity': 80}]) + + def test_sensehat_temperature_match(self): + + config = [{'sensors': {'temperature': 20}}] + + ret = sensehat.validate(config) + + ret = sensehat.beacon(config) + self.assertEqual(ret, [{'tag': 'sensehat/temperature', + 'temperature': 30}]) + + def test_sensehat_temperature_match_range(self): + + config = [{'sensors': {'temperature': [20, 29]}}] + + ret = sensehat.validate(config) + + ret = sensehat.beacon(config) + self.assertEqual(ret, [{'tag': 'sensehat/temperature', + 'temperature': 30}]) + + def test_sensehat_pressure_match(self): + + config = [{'sensors': {'pressure': '1400'}}] + + ret = sensehat.validate(config) + + ret = sensehat.beacon(config) + self.assertEqual(ret, [{'tag': 'sensehat/pressure', + 'pressure': 1500}]) + + def test_sensehat_no_match(self): + + config = [{'sensors': {'pressure': '1600'}}] + + ret = sensehat.validate(config) + + ret = sensehat.beacon(config) + self.assertEqual(ret, []) diff --git a/tests/unit/beacons/test_service_beacon.py b/tests/unit/beacons/test_service_beacon.py new file mode 100644 index 0000000000..3f9ac318dc --- /dev/null +++ b/tests/unit/beacons/test_service_beacon.py @@ -0,0 +1,76 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import +from collections import namedtuple + +# Salt testing libs +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock +from tests.support.mixins import LoaderModuleMockMixin + +# Salt libs +import salt.beacons.service as service_beacon + +PATCH_OPTS = dict(autospec=True, spec_set=True) + +FakeProcess = namedtuple('Process', 'cmdline pid') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class ServiceBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.service + ''' + + def 
setup_loader_modules(self): + return { + service_beacon: { + '__context__': {}, + '__salt__': {}, + } + } + + def test_non_list_config(self): + config = {} + + ret = service_beacon.validate(config) + + self.assertEqual(ret, (False, 'Configuration for service beacon must' + ' be a list.')) + + def test_empty_config(self): + config = [{}] + + ret = service_beacon.validate(config) + + self.assertEqual(ret, (False, 'Configuration for service ' + 'beacon requires services.')) + + def test_service_running(self): + with patch.dict(service_beacon.__salt__, + {'service.status': MagicMock(return_value=True)}): + config = [{'services': {'salt-master': {}}}] + + ret = service_beacon.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = service_beacon.beacon(config) + self.assertEqual(ret, [{'service_name': 'salt-master', + 'tag': 'salt-master', + 'salt-master': {'running': True}}]) + + def test_service_not_running(self): + with patch.dict(service_beacon.__salt__, + {'service.status': MagicMock(return_value=False)}): + config = [{'services': {'salt-master': {}}}] + + ret = service_beacon.validate(config) + + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + ret = service_beacon.beacon(config) + self.assertEqual(ret, [{'service_name': 'salt-master', + 'tag': 'salt-master', + 'salt-master': {'running': False}}]) diff --git a/tests/unit/beacons/test_status.py b/tests/unit/beacons/test_status_beacon.py similarity index 100% rename from tests/unit/beacons/test_status.py rename to tests/unit/beacons/test_status_beacon.py diff --git a/tests/unit/beacons/test_telegram_bot_msg_beacon.py b/tests/unit/beacons/test_telegram_bot_msg_beacon.py index 06efe6333e..837f0ac7bc 100644 --- a/tests/unit/beacons/test_telegram_bot_msg_beacon.py +++ b/tests/unit/beacons/test_telegram_bot_msg_beacon.py @@ -19,6 +19,9 @@ try: except ImportError: HAS_TELEGRAM = False +import logging +log = logging.getLogger(__name__) + @skipIf(not HAS_TELEGRAM, 'telegram is not available') @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -30,49 +33,49 @@ class TelegramBotMsgBeaconTestCase(TestCase, LoaderModuleMockMixin): return {telegram_bot_msg: {}} def test_validate_empty_config(self, *args, **kwargs): - ret = telegram_bot_msg.__validate__(None) + ret = telegram_bot_msg.validate(None) self.assertEqual(ret, (False, ('Configuration for telegram_bot_msg ' - 'beacon must be a dictionary.'))) + 'beacon must be a list.'))) def test_validate_missing_accept_from_config(self, *args, **kwargs): - ret = telegram_bot_msg.__validate__({ + ret = telegram_bot_msg.validate([{ 'token': 'bcd' - }) + }]) self.assertEqual(ret, (False, ('Not all required configuration for ' 'telegram_bot_msg are set.'))) def test_validate_missing_token_config(self, *args, **kwargs): - ret = telegram_bot_msg.__validate__({ + ret = telegram_bot_msg.validate([{ 'accept_from': [] - }) + }]) self.assertEqual(ret, (False, ('Not all required configuration for ' 'telegram_bot_msg are set.'))) def test_validate_config_not_list_in_accept_from(self, *args, **kwargs): - ret = telegram_bot_msg.__validate__({ + ret = telegram_bot_msg.validate([{ 'token': 'bcd', 'accept_from': {'nodict': "1"} - }) + }]) self.assertEqual(ret, (False, ('Configuration for telegram_bot_msg, ' 'accept_from must be a list of ' 'usernames.'))) def test_validate_valid_config(self, *args, **kwargs): - ret = telegram_bot_msg.__validate__({ + ret = telegram_bot_msg.validate([{ 'token': 'bcd', 'accept_from': [ 'username' ] - }) + }]) self.assertEqual(ret, (True, 'Valid beacon 
configuration.')) def test_call_no_updates(self): with patch("salt.beacons.telegram_bot_msg.telegram") as telegram_api: token = 'abc' - config = { + config = [{ 'token': token, 'accept_from': ['tester'] - } + }] inst = MagicMock(name='telegram.Bot()') telegram_api.Bot = MagicMock(name='telegram', return_value=inst) inst.get_updates.return_value = [] @@ -86,18 +89,19 @@ class TelegramBotMsgBeaconTestCase(TestCase, LoaderModuleMockMixin): with patch("salt.beacons.telegram_bot_msg.telegram") as telegram_api: token = 'abc' username = 'tester' - config = { + config = [{ 'token': token, 'accept_from': [username] - } + }] inst = MagicMock(name='telegram.Bot()') telegram_api.Bot = MagicMock(name='telegram', return_value=inst) + log.debug('telegram {}'.format(telegram)) username = 'different_user' - user = telegram.User(id=1, first_name='', username=username) - chat = telegram.Chat(1, 'private', username=username) + user = telegram.user.User(id=1, first_name='', username=username) + chat = telegram.chat.Chat(1, 'private', username=username) date = datetime.datetime(2016, 12, 18, 0, 0) - message = telegram.Message(1, user, date=date, chat=chat) + message = telegram.message.Message(1, user, date=date, chat=chat) update = telegram.update.Update(update_id=1, message=message) inst.get_updates.return_value = [update] @@ -111,10 +115,10 @@ class TelegramBotMsgBeaconTestCase(TestCase, LoaderModuleMockMixin): with patch("salt.beacons.telegram_bot_msg.telegram") as telegram_api: token = 'abc' username = 'tester' - config = { + config = [{ 'token': token, 'accept_from': [username] - } + }] inst = MagicMock(name='telegram.Bot()') telegram_api.Bot = MagicMock(name='telegram', return_value=inst) diff --git a/tests/unit/beacons/test_twilio_txt_msg_beacon.py b/tests/unit/beacons/test_twilio_txt_msg_beacon.py new file mode 100644 index 0000000000..5223e06d28 --- /dev/null +++ b/tests/unit/beacons/test_twilio_txt_msg_beacon.py @@ -0,0 +1,165 @@ +# coding: utf-8 + +# Python libs +from __future__ import absolute_import + +# Salt libs +from salt.beacons import twilio_txt_msg + +# Salt testing libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import TestCase, skipIf +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch + +# Import 3rd Party libs +try: + import twilio + # Grab version, ensure elements are ints + twilio_version = tuple([int(x) for x in twilio.__version_info__]) + if twilio_version > (5, ): + TWILIO_5 = False + else: + TWILIO_5 = True + HAS_TWILIO = True +except ImportError: + HAS_TWILIO = False + +import logging +log = logging.getLogger(__name__) + + +class MockTwilioRestException(Exception): + ''' + Mock TwilioRestException class + ''' + def __init__(self): + self.code = 'error code' + self.msg = 'Exception error' + self.status = 'Not send' + super(MockTwilioRestException, self).__init__(self.msg) + + +class MockMessages(object): + ''' + Mock SMS class + ''' + flag = None + + def __init__(self): + self.sid = '011' + self.price = '200' + self.price_unit = '1' + self.status = 'Sent' + self.num_segments = '2' + self.num_media = '0' + self.body = None + self.date_sent = '01-01-2015' + self.date_created = '01-01-2015' + self.to = None + self.from_ = None + + def create(self, body, to, from_): + ''' + Mock create method + ''' + msg = MockMessages() + if self.flag == 1: + raise MockTwilioRestException() + msg.body = body + msg.to = to + msg.from_ = from_ + return msg + + def list(self, to): + ''' + Mock list method + ''' + msg = MockMessages() + return 
[msg] + + def delete(self): + ''' + Mock delete method + ''' + return None + + +class MockSMS(object): + ''' + Mock SMS class + ''' + def __init__(self): + self.messages = MockMessages() + + +class MockTwilioRestClient(object): + ''' + Mock TwilioRestClient class + ''' + def __init__(self): + if TWILIO_5: + self.sms = MockSMS() + else: + self.messages = MockMessages() + + +@skipIf(not HAS_TWILIO, 'twilio.rest is not available') +@skipIf(NO_MOCK, NO_MOCK_REASON) +class TwilioMsgTxtBeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test case for salt.beacons.twilio_txt_msg + ''' + def setup_loader_modules(self): + return {twilio_txt_msg: {}} + + def test_validate_dictionary_config(self): + ''' + Test empty configuration + ''' + config = {} + ret = twilio_txt_msg.validate(config) + self.assertEqual(ret, (False, ('Configuration for twilio_txt_msg ' + 'beacon must be a list.'))) + + def test_validate_empty_config(self): + ''' + Test empty configuration + ''' + config = [{}] + ret = twilio_txt_msg.validate(config) + self.assertEqual(ret, (False, ('Configuration for twilio_txt_msg ' + 'beacon must contain account_sid, ' + 'auth_token and twilio_number items.'))) + + def test_validate_missing_config_item(self): + ''' + Test empty configuration + ''' + config = [{'account_sid': 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', + 'twilio_number': '+15555555555'}] + + ret = twilio_txt_msg.validate(config) + self.assertEqual(ret, (False, ('Configuration for twilio_txt_msg ' + 'beacon must contain account_sid, ' + 'auth_token and twilio_number items.'))) + + def test_receive_message(self): + ''' + Test receive a message + ''' + config = [{'account_sid': 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', + 'auth_token': 'my_token', + 'twilio_number': '+15555555555'}] + + ret = twilio_txt_msg.validate(config) + self.assertEqual(ret, (True, 'Valid beacon configuration')) + + _expected_return = [{'texts': [{'body': 'None', + 'images': [], + 'from': 'None', + 'id': '011', + 'sent': '01-01-2015'}]}] + mock = MagicMock(return_value=MockTwilioRestClient()) + with patch.object(twilio_txt_msg, 'TwilioRestClient', mock): + ret = twilio_txt_msg.beacon(config) + self.assertEqual(ret, _expected_return) diff --git a/tests/unit/cache/test_localfs.py b/tests/unit/cache/test_localfs.py index 0c754cd668..f3bfbf04f4 100644 --- a/tests/unit/cache/test_localfs.py +++ b/tests/unit/cache/test_localfs.py @@ -26,7 +26,7 @@ import salt.cache.localfs as localfs from salt.exceptions import SaltCacheError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six @skipIf(NO_MOCK, NO_MOCK_REASON) diff --git a/tests/unit/config/test_api.py b/tests/unit/config/test_api.py index b569a25e06..a7c55b18f5 100644 --- a/tests/unit/config/test_api.py +++ b/tests/unit/config/test_api.py @@ -18,14 +18,14 @@ from tests.support.mock import ( # Import Salt libs import salt.config -import salt.utils +import salt.utils.platform MOCK_MASTER_DEFAULT_OPTS = { 'log_file': '/var/log/salt/master', 'pidfile': '/var/run/salt-master.pid', 'root_dir': '/' } -if salt.utils.is_windows(): +if salt.utils.platform.is_windows(): MOCK_MASTER_DEFAULT_OPTS = { 'log_file': 'c:\\salt\\var\\log\\salt\\master', 'pidfile': 'c:\\salt\\var\\run\\salt-master.pid', @@ -55,7 +55,7 @@ class APIConfigTestCase(TestCase): with patch('salt.config.client_config', MagicMock(return_value=MOCK_MASTER_DEFAULT_OPTS)): expected = '/var/log/salt/api' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): expected = 'c:\\salt\\var\\log\\salt\\api' ret = 
salt.config.api_config('/some/fake/path') @@ -70,7 +70,7 @@ class APIConfigTestCase(TestCase): with patch('salt.config.client_config', MagicMock(return_value=MOCK_MASTER_DEFAULT_OPTS)): expected = '/var/run/salt-api.pid' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): expected = 'c:\\salt\\var\\run\\salt-api.pid' ret = salt.config.api_config('/some/fake/path') @@ -85,7 +85,7 @@ class APIConfigTestCase(TestCase): ''' foo_dir = '/foo/bar/baz' hello_dir = '/hello/world' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): foo_dir = 'c:\\foo\\bar\\baz' hello_dir = 'c:\\hello\\world' @@ -119,7 +119,7 @@ class APIConfigTestCase(TestCase): mock_master_config = MOCK_MASTER_DEFAULT_OPTS.copy() mock_master_config['root_dir'] = '/mock/root/' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): mock_log = 'c:\\mock\\root\\var\\log\\salt\\api' mock_pid = 'c:\\mock\\root\\var\\run\\salt-api.pid' mock_master_config['root_dir'] = 'c:\\mock\\root' diff --git a/tests/unit/config/test_config.py b/tests/unit/config/test_config.py index c88d8d2fd1..878d3db4c9 100644 --- a/tests/unit/config/test_config.py +++ b/tests/unit/config/test_config.py @@ -109,7 +109,7 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin): fpath = tempfile.mktemp() temp_config = 'root_dir: /\n'\ 'key_logfile: key\n' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): temp_config = 'root_dir: c:\\\n'\ 'key_logfile: key\n' try: @@ -119,7 +119,7 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin): config = sconfig.master_config(fpath) expect_path_join = os.path.join('/', 'key') expect_sep_join = '//key' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): expect_path_join = os.path.join('c:\\', 'key') expect_sep_join = 'c:\\\\key' @@ -168,7 +168,7 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin): shutil.rmtree(tempdir) @skipIf( - salt.utils.is_windows(), + salt.utils.platform.is_windows(), 'You can\'t set an environment dynamically in Windows') def test_load_master_config_from_environ_var(self): original_environ = os.environ.copy() @@ -215,7 +215,7 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin): shutil.rmtree(tempdir) @skipIf( - salt.utils.is_windows(), + salt.utils.platform.is_windows(), 'You can\'t set an environment dynamically in Windows') def test_load_minion_config_from_environ_var(self): original_environ = os.environ.copy() @@ -605,7 +605,7 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin): search_paths = sconfig.cloud_config('/etc/salt/cloud').get('deploy_scripts_search_path') etc_deploy_path = '/salt/cloud.deploy.d' deploy_path = '/salt/cloud/deploy' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): etc_deploy_path = '/salt\\cloud.deploy.d' deploy_path = '\\salt\\cloud\\deploy' @@ -1027,7 +1027,7 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin): # other cloud configuration tests @skipIf( - salt.utils.is_windows(), + salt.utils.platform.is_windows(), 'You can\'t set an environment dynamically in Windows') def test_load_cloud_config_from_environ_var(self): original_environ = os.environ.copy() @@ -1086,7 +1086,7 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin): default_config = sconfig.cloud_config(config_file_path) default_config['deploy_scripts_search_path'] = deploy_dir_path with salt.utils.files.fopen(config_file_path, 'w') as cfd: - cfd.write(yaml.dump(default_config)) + 
cfd.write(yaml.dump(default_config, default_flow_style=False)) default_config = sconfig.cloud_config(config_file_path) diff --git a/tests/unit/fileserver/test_fileclient.py b/tests/unit/fileserver/test_fileclient.py index 42023fb23a..38db095b98 100644 --- a/tests/unit/fileserver/test_fileclient.py +++ b/tests/unit/fileserver/test_fileclient.py @@ -19,7 +19,7 @@ from tests.support.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON # Import salt libs import salt.utils.files from salt import fileclient -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) diff --git a/tests/unit/fileserver/test_roots.py b/tests/unit/fileserver/test_roots.py index 504cc5a1b1..4956fdac7a 100644 --- a/tests/unit/fileserver/test_roots.py +++ b/tests/unit/fileserver/test_roots.py @@ -15,11 +15,11 @@ from tests.support.paths import FILES, TMP, TMP_STATE_TREE from tests.support.unit import TestCase, skipIf from tests.support.mock import patch, NO_MOCK, NO_MOCK_REASON -# Import salt libs -from salt.fileserver import roots -from salt import fileclient -import salt.utils +# Import Salt libs +import salt.fileserver.roots +import salt.fileclient import salt.utils.files +import salt.utils.platform try: import win32file @@ -37,14 +37,14 @@ class RootsTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMix empty_dir = os.path.join(TMP_STATE_TREE, 'empty_dir') if not os.path.isdir(empty_dir): os.makedirs(empty_dir) - return {roots: {'__opts__': self.opts}} + return {salt.fileserver.roots: {'__opts__': self.opts}} @classmethod def setUpClass(cls): ''' Create special file_roots for symlink test on Windows ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): root_dir = tempfile.mkdtemp(dir=TMP) source_sym = os.path.join(root_dir, 'source_sym') with salt.utils.files.fopen(source_sym, 'w') as fp_: @@ -64,7 +64,7 @@ class RootsTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMix ''' Remove special file_roots for symlink test ''' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): try: salt.utils.files.rm_rf(cls.test_symlink_list_file_roots['base'][0]) except OSError: @@ -74,25 +74,25 @@ class RootsTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMix del self.opts def test_file_list(self): - ret = roots.file_list({'saltenv': 'base'}) + ret = salt.fileserver.roots.file_list({'saltenv': 'base'}) self.assertIn('testfile', ret) def test_find_file(self): - ret = roots.find_file('testfile') + ret = salt.fileserver.roots.find_file('testfile') self.assertEqual('testfile', ret['rel']) full_path_to_file = os.path.join(FILES, 'file', 'base', 'testfile') self.assertEqual(full_path_to_file, ret['path']) def test_serve_file(self): - with patch.dict(roots.__opts__, {'file_buffer_size': 262144}): + with patch.dict(salt.fileserver.roots.__opts__, {'file_buffer_size': 262144}): load = {'saltenv': 'base', 'path': os.path.join(FILES, 'file', 'base', 'testfile'), 'loc': 0 } fnd = {'path': os.path.join(FILES, 'file', 'base', 'testfile'), 'rel': 'testfile'} - ret = roots.serve_file(load, fnd) + ret = salt.fileserver.roots.serve_file(load, fnd) data = 'Scene 24\n\n \n OLD MAN: Ah, hee he he ha!\n ' \ 'ARTHUR: And this enchanter of whom you speak, he ' \ @@ -107,7 +107,7 @@ class RootsTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMix 'OLD MAN: Seek you the Bridge of Death.\n ARTHUR: ' \ 'The Bridge of Death, which leads to the Grail?\n ' \ 'OLD MAN: Hee hee ha ha!\n\n' - if salt.utils.is_windows(): + if 
salt.utils.platform.is_windows(): data = 'Scene 24\r\n\r\n \r\n OLD MAN: Ah, hee he he ' \ 'ha!\r\n ARTHUR: And this enchanter of whom you ' \ 'speak, he has seen the grail?\r\n OLD MAN: Ha ha ' \ @@ -141,12 +141,12 @@ class RootsTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMix 'path': os.path.join(FILES, 'file', 'base', 'testfile'), 'rel': 'testfile' } - ret = roots.file_hash(load, fnd) + ret = salt.fileserver.roots.file_hash(load, fnd) # Hashes are different in Windows. May be how git translates line # endings hsum = 'baba5791276eb99a7cc498fb1acfbc3b4bd96d24cfe984b4ed6b5be2418731df' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): hsum = '754aa260e1f3e70f43aaf92149c7d1bad37f708c53304c37660e628d7553f687' self.assertDictEqual( @@ -158,17 +158,17 @@ class RootsTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMix ) def test_file_list_emptydirs(self): - ret = roots.file_list_emptydirs({'saltenv': 'base'}) + ret = salt.fileserver.roots.file_list_emptydirs({'saltenv': 'base'}) self.assertIn('empty_dir', ret) def test_dir_list(self): - ret = roots.dir_list({'saltenv': 'base'}) + ret = salt.fileserver.roots.dir_list({'saltenv': 'base'}) self.assertIn('empty_dir', ret) def test_symlink_list(self): if self.test_symlink_list_file_roots: self.opts['file_roots'] = self.test_symlink_list_file_roots - ret = roots.symlink_list({'saltenv': 'base'}) + ret = salt.fileserver.roots.symlink_list({'saltenv': 'base'}) self.assertDictEqual(ret, {'dest_sym': 'source_sym'}) @@ -184,7 +184,7 @@ class RootsLimitTraversalTest(TestCase, AdaptedConfigurationTestCaseMixin): file_client_opts = self.get_temp_config('master') file_client_opts['fileserver_limit_traversal'] = True - ret = fileclient.Client(file_client_opts).list_states('base') + ret = salt.fileclient.Client(file_client_opts).list_states('base') self.assertIn('test_deep.test', ret) self.assertIn('test_deep.a.test', ret) self.assertNotIn('test_deep.b.2.test', ret) diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py index 1c87294c0d..5fb2d85531 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py @@ -20,10 +20,11 @@ from tests.support.mock import ( # Import Salt Libs import salt.utils +import salt.utils.platform import salt.grains.core as core # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -34,7 +35,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): return {core: {}} - @skipIf(not salt.utils.is_linux(), 'System is not Linux') + @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') def test_gnu_slash_linux_in_os_name(self): ''' Test to return a list of all enabled services @@ -68,7 +69,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): return orig_import(name, *args) # Skip the first if statement - with patch.object(salt.utils, 'is_proxy', + with patch.object(salt.utils.platform, 'is_proxy', MagicMock(return_value=False)): # Skip the selinux/systemd stuff (not pertinent) with patch.object(core, '_linux_bin_exists', @@ -124,7 +125,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(os_grains.get('os_family'), 'Debian') - @skipIf(not salt.utils.is_linux(), 'System is not Linux') + @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') def test_suse_os_from_cpe_data(self): ''' Test if 'os' grain is parsed from CPE_NAME of /etc/os-release @@ -165,7 +166,7 @@ class 
CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): return orig_import(name, *args) # Skip the first if statement - with patch.object(salt.utils, 'is_proxy', + with patch.object(salt.utils.platform, 'is_proxy', MagicMock(return_value=False)): # Skip the selinux/systemd stuff (not pertinent) with patch.object(core, '_linux_bin_exists', @@ -213,7 +214,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): return orig_import(name, *args) # Skip the first if statement - with patch.object(salt.utils, 'is_proxy', + with patch.object(salt.utils.platform, 'is_proxy', MagicMock(return_value=False)): # Skip the selinux/systemd stuff (not pertinent) with patch.object(core, '_linux_bin_exists', @@ -249,7 +250,7 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): self.assertListEqual(list(os_grains.get('osrelease_info')), os_release_map['osrelease_info']) self.assertEqual(os_grains.get('osmajorrelease'), os_release_map['osmajorrelease']) - @skipIf(not salt.utils.is_linux(), 'System is not Linux') + @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') def test_suse_os_grains_sles11sp3(self): ''' Test if OS grains are parsed correctly in SLES 11 SP3 @@ -268,7 +269,7 @@ PATCHLEVEL = 3 } self._run_suse_os_grains_tests(_os_release_map) - @skipIf(not salt.utils.is_linux(), 'System is not Linux') + @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') def test_suse_os_grains_sles11sp4(self): ''' Test if OS grains are parsed correctly in SLES 11 SP4 @@ -292,7 +293,7 @@ PATCHLEVEL = 3 } self._run_suse_os_grains_tests(_os_release_map) - @skipIf(not salt.utils.is_linux(), 'System is not Linux') + @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') def test_suse_os_grains_sles12(self): ''' Test if OS grains are parsed correctly in SLES 12 @@ -316,7 +317,7 @@ PATCHLEVEL = 3 } self._run_suse_os_grains_tests(_os_release_map) - @skipIf(not salt.utils.is_linux(), 'System is not Linux') + @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') def test_suse_os_grains_sles12sp1(self): ''' Test if OS grains are parsed correctly in SLES 12 SP1 @@ -340,7 +341,7 @@ PATCHLEVEL = 3 } self._run_suse_os_grains_tests(_os_release_map) - @skipIf(not salt.utils.is_linux(), 'System is not Linux') + @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') def test_suse_os_grains_opensuse_leap_42_1(self): ''' Test if OS grains are parsed correctly in openSUSE Leap 42.1 @@ -364,7 +365,7 @@ PATCHLEVEL = 3 } self._run_suse_os_grains_tests(_os_release_map) - @skipIf(not salt.utils.is_linux(), 'System is not Linux') + @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') def test_suse_os_grains_tumbleweed(self): ''' Test if OS grains are parsed correctly in openSUSE Tumbleweed @@ -388,7 +389,7 @@ PATCHLEVEL = 3 } self._run_suse_os_grains_tests(_os_release_map) - @skipIf(not salt.utils.is_linux(), 'System is not Linux') + @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') def test_ubuntu_os_grains(self): ''' Test if OS grains are parsed correctly in Ubuntu Xenial Xerus @@ -429,7 +430,7 @@ PATCHLEVEL = 3 return orig_import(name, *args) # Skip the first if statement - with patch.object(salt.utils, 'is_proxy', + with patch.object(salt.utils.platform, 'is_proxy', MagicMock(return_value=False)): # Skip the selinux/systemd stuff (not pertinent) with patch.object(core, '_linux_bin_exists', diff --git a/tests/unit/loader/test_globals.py b/tests/unit/loader/test_globals.py index ba2aef6b62..9935188e46 100644 --- 
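The hunks in this area consistently move salt.utils.is_windows()/is_linux()/is_proxy() over to their salt.utils.platform counterparts. For out-of-tree helpers that have to run against both layouts, a guarded import along these lines is one option (a sketch, assuming a Salt installation; the pre-patch code shows the same callables living directly on salt.utils in older releases):

try:
    # Post-refactor location used throughout this patch.
    from salt.utils.platform import is_windows, is_linux
except ImportError:
    # Pre-refactor location the old tests relied on.
    from salt.utils import is_windows, is_linux

print('windows' if is_windows() else ('linux' if is_linux() else 'other'))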
a/tests/unit/loader/test_globals.py +++ b/tests/unit/loader/test_globals.py @@ -18,7 +18,7 @@ import inspect import yaml # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six class LoaderGlobalsTest(ModuleCase): diff --git a/tests/unit/loader/test_interfaces.py b/tests/unit/loader/test_interfaces.py index c2125b9303..c589f27140 100644 --- a/tests/unit/loader/test_interfaces.py +++ b/tests/unit/loader/test_interfaces.py @@ -13,7 +13,7 @@ from __future__ import absolute_import from tests.support.unit import TestCase # Import Salt libs -import salt.ext.six as six +from salt.ext import six from salt.config import minion_config import salt.loader diff --git a/tests/unit/loader/test_loader.py b/tests/unit/loader/test_loader.py index 29c44ee4e3..2403dbba21 100644 --- a/tests/unit/loader/test_loader.py +++ b/tests/unit/loader/test_loader.py @@ -25,10 +25,10 @@ from tests.support.paths import TMP # Import Salt libs import salt.config -import salt.utils import salt.utils.files +import salt.utils.stringutils # pylint: disable=import-error,no-name-in-module,redefined-builtin -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: enable=no-name-in-module,redefined-builtin @@ -349,7 +349,7 @@ class LazyLoaderReloadingTest(TestCase): self.count += 1 with salt.utils.files.fopen(self.module_path, 'wb') as fh: fh.write( - salt.utils.to_bytes( + salt.utils.stringutils.to_bytes( module_template.format(count=self.count) ) ) @@ -486,7 +486,7 @@ class LazyLoaderVirtualAliasTest(TestCase): def update_module(self): with salt.utils.files.fopen(self.module_path, 'wb') as fh: - fh.write(salt.utils.to_bytes(virtual_alias_module_template)) + fh.write(salt.utils.stringutils.to_bytes(virtual_alias_module_template)) fh.flush() os.fsync(fh.fileno()) # flush to disk @@ -581,7 +581,7 @@ class LazyLoaderSubmodReloadingTest(TestCase): self.count += 1 with salt.utils.files.fopen(self.module_path, 'wb') as fh: fh.write( - salt.utils.to_bytes( + salt.utils.stringutils.to_bytes( submodule_template.format(self.module_name, count=self.count) ) ) @@ -605,7 +605,7 @@ class LazyLoaderSubmodReloadingTest(TestCase): del sys.modules[modname] with salt.utils.files.fopen(self.lib_path, 'wb') as fh: fh.write( - salt.utils.to_bytes( + salt.utils.stringutils.to_bytes( submodule_lib_template.format(count=self.lib_count) ) ) @@ -740,7 +740,7 @@ class LazyLoaderModulePackageTest(TestCase): if not os.path.exists(dirname): os.makedirs(dirname) with salt.utils.files.fopen(pyfile, 'wb') as fh: - fh.write(salt.utils.to_bytes(contents)) + fh.write(salt.utils.stringutils.to_bytes(contents)) fh.flush() os.fsync(fh.fileno()) # flush to disk @@ -884,7 +884,7 @@ class LazyLoaderDeepSubmodReloadingTest(TestCase): self.lib_count[lib_name] += 1 with salt.utils.files.fopen(path, 'wb') as fh: fh.write( - salt.utils.to_bytes( + salt.utils.stringutils.to_bytes( submodule_lib_template.format(count=self.lib_count[lib_name]) ) ) diff --git a/tests/unit/modules/test_archive.py b/tests/unit/modules/test_archive.py index 778083c436..edf5f7f7d3 100644 --- a/tests/unit/modules/test_archive.py +++ b/tests/unit/modules/test_archive.py @@ -18,8 +18,8 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch # Import salt libs import salt.modules.archive as archive +import salt.utils.path from salt.exceptions import CommandNotFoundError -from salt.utils import which_bin # Import 3rd-party libs from salt.ext.six.moves import zip @@ -45,7 +45,7 @@ class ArchiveTestCase(TestCase, 
LoaderModuleMockMixin): def test_tar(self): with patch('glob.glob', lambda pathname: [pathname]): - with patch('salt.utils.which', lambda exe: exe): + with patch('salt.utils.path.which', lambda exe: exe): mock = MagicMock(return_value='salt') with patch.dict(archive.__salt__, {'cmd.run': mock}): ret = archive.tar( @@ -76,7 +76,7 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): ) def test_tar_raises_exception_if_not_found(self): - with patch('salt.utils.which', lambda exe: None): + with patch('salt.utils.path.which', lambda exe: None): mock = MagicMock(return_value='salt') with patch.dict(archive.__salt__, {'cmd.run': mock}): self.assertRaises( @@ -91,7 +91,7 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): def test_gzip(self): mock = MagicMock(return_value='salt') with patch.dict(archive.__salt__, {'cmd.run': mock}): - with patch('salt.utils.which', lambda exe: exe): + with patch('salt.utils.path.which', lambda exe: exe): ret = archive.gzip('/tmp/something-to-compress') self.assertEqual(['salt'], ret) mock.assert_called_once_with( @@ -102,7 +102,7 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): def test_gzip_raises_exception_if_not_found(self): mock = MagicMock(return_value='salt') with patch.dict(archive.__salt__, {'cmd.run': mock}): - with patch('salt.utils.which', lambda exe: None): + with patch('salt.utils.path.which', lambda exe: None): self.assertRaises( CommandNotFoundError, archive.gzip, '/tmp/something-to-compress' @@ -112,7 +112,7 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): def test_gunzip(self): mock = MagicMock(return_value='salt') with patch.dict(archive.__salt__, {'cmd.run': mock}): - with patch('salt.utils.which', lambda exe: exe): + with patch('salt.utils.path.which', lambda exe: exe): ret = archive.gunzip('/tmp/something-to-decompress.tar.gz') self.assertEqual(['salt'], ret) mock.assert_called_once_with( @@ -123,7 +123,7 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): def test_gunzip_raises_exception_if_not_found(self): mock = MagicMock(return_value='salt') with patch.dict(archive.__salt__, {'cmd.run': mock}): - with patch('salt.utils.which', lambda exe: None): + with patch('salt.utils.path.which', lambda exe: None): self.assertRaises( CommandNotFoundError, archive.gunzip, @@ -133,7 +133,7 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): def test_cmd_zip(self): with patch('glob.glob', lambda pathname: [pathname]): - with patch('salt.utils.which', lambda exe: exe): + with patch('salt.utils.path.which', lambda exe: exe): mock = MagicMock(return_value='salt') with patch.dict(archive.__salt__, {'cmd.run': mock}): ret = archive.cmd_zip( @@ -179,7 +179,7 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): def test_zip_raises_exception_if_not_found(self): mock = MagicMock(return_value='salt') with patch.dict(archive.__salt__, {'cmd.run': mock}): - with patch('salt.utils.which', lambda exe: None): + with patch('salt.utils.path.which', lambda exe: None): self.assertRaises( CommandNotFoundError, archive.cmd_zip, @@ -201,7 +201,7 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): 'pid': 12345, 'retcode': 0}) - with patch('salt.utils.which', lambda exe: exe): + with patch('salt.utils.path.which', lambda exe: exe): mock = _get_mock() with patch.dict(archive.__salt__, {'cmd.run_all': mock}): @@ -317,7 +317,7 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): def test_unzip_raises_exception_if_not_found(self): mock = MagicMock(return_value='salt') with patch.dict(archive.__salt__, {'cmd.run': 
mock}): - with patch('salt.utils.which', lambda exe: None): + with patch('salt.utils.path.which', lambda exe: None): self.assertRaises( CommandNotFoundError, archive.cmd_unzip, @@ -330,7 +330,7 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): def test_rar(self): with patch('glob.glob', lambda pathname: [pathname]): - with patch('salt.utils.which', lambda exe: exe): + with patch('salt.utils.path.which', lambda exe: exe): mock = MagicMock(return_value='salt') with patch.dict(archive.__salt__, {'cmd.run': mock}): ret = archive.rar( @@ -360,7 +360,7 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): def test_rar_raises_exception_if_not_found(self): mock = MagicMock(return_value='salt') with patch.dict(archive.__salt__, {'cmd.run': mock}): - with patch('salt.utils.which', lambda exe: None): + with patch('salt.utils.path.which', lambda exe: None): self.assertRaises( CommandNotFoundError, archive.rar, @@ -369,10 +369,10 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): ) self.assertFalse(mock.called) - @skipIf(which_bin(('unrar', 'rar')) is None, 'unrar not installed') + @skipIf(salt.utils.path.which_bin(('unrar', 'rar')) is None, 'unrar not installed') def test_unrar(self): - with patch('salt.utils.which_bin', lambda exe: exe[0] if isinstance(exe, (list, tuple)) else exe): - with patch('salt.utils.which', lambda exe: exe[0] if isinstance(exe, (list, tuple)) else exe): + with patch('salt.utils.path.which_bin', lambda exe: exe[0] if isinstance(exe, (list, tuple)) else exe): + with patch('salt.utils.path.which', lambda exe: exe[0] if isinstance(exe, (list, tuple)) else exe): mock = MagicMock(return_value='salt') with patch.dict(archive.__salt__, {'cmd.run': mock}): ret = archive.unrar( @@ -402,7 +402,7 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): ) def test_unrar_raises_exception_if_not_found(self): - with patch('salt.utils.which_bin', lambda exe: None): + with patch('salt.utils.path.which_bin', lambda exe: None): mock = MagicMock(return_value='salt') with patch.dict(archive.__salt__, {'cmd.run': mock}): self.assertRaises( @@ -414,7 +414,7 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): self.assertFalse(mock.called) def test_trim_files(self): - with patch('salt.utils.which_bin', lambda exe: exe): + with patch('salt.utils.path.which_bin', lambda exe: exe): source = 'file.tar.gz' tmp_dir = 'temp' files = [ diff --git a/tests/unit/modules/test_at.py b/tests/unit/modules/test_at.py index b368aba88e..f27d3be7a1 100644 --- a/tests/unit/modules/test_at.py +++ b/tests/unit/modules/test_at.py @@ -17,7 +17,7 @@ from tests.support.mock import ( ) # Import Salt Libs -import salt.utils +import salt.utils.path import salt.modules.at as at @@ -99,21 +99,21 @@ class AtTestCase(TestCase, LoaderModuleMockMixin): Tests for remove jobs from the queue. 
""" with patch('salt.modules.at.atq', MagicMock(return_value=self.atq_output)): - with patch.object(salt.utils, 'which', return_value=None): + with patch.object(salt.utils.path, 'which', return_value=None): self.assertEqual(at.atrm(), "'at.atrm' is not available.") - with patch.object(salt.utils, 'which', return_value=True): + with patch.object(salt.utils.path, 'which', return_value=True): self.assertDictEqual(at.atrm(), {'jobs': {'removed': [], 'tag': None}}) with patch.object(at, '_cmd', return_value=True): - with patch.object(salt.utils, 'which', return_value=True): + with patch.object(salt.utils.path, 'which', return_value=True): self.assertDictEqual(at.atrm('all'), {'jobs': {'removed': ['101'], 'tag': None}}) with patch.object(at, '_cmd', return_value=True): - with patch.object(salt.utils, 'which', return_value=True): + with patch.object(salt.utils.path, 'which', return_value=True): self.assertDictEqual(at.atrm(101), {'jobs': {'removed': ['101'], 'tag': None}}) @@ -149,11 +149,11 @@ class AtTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.modules.at.atq', MagicMock(return_value=self.atq_output)): self.assertDictEqual(at.at(), {'jobs': []}) - with patch.object(salt.utils, 'which', return_value=None): + with patch.object(salt.utils.path, 'which', return_value=None): self.assertEqual(at.at('12:05am', '/sbin/reboot', tag='reboot'), "'at.at' is not available.") - with patch.object(salt.utils, 'which', return_value=True): + with patch.object(salt.utils.path, 'which', return_value=True): with patch.dict(at.__grains__, {'os_family': 'RedHat'}): mock = MagicMock(return_value=None) with patch.dict(at.__salt__, {'cmd.run': mock}): diff --git a/tests/unit/modules/test_augeas_cfg.py b/tests/unit/modules/test_augeas_cfg.py index 6236ac10cd..a2d40ac986 100644 --- a/tests/unit/modules/test_augeas_cfg.py +++ b/tests/unit/modules/test_augeas_cfg.py @@ -15,7 +15,7 @@ from tests.support.mock import ( # Import Salt Libs import salt.modules.augeas_cfg as augeas_cfg from salt.exceptions import SaltInvocationError -import salt.ext.six as six +from salt.ext import six # Make sure augeas python interface is installed if augeas_cfg.HAS_AUGEAS: from augeas import Augeas as _Augeas diff --git a/tests/unit/modules/test_beacons.py b/tests/unit/modules/test_beacons.py new file mode 100644 index 0000000000..708221d638 --- /dev/null +++ b/tests/unit/modules/test_beacons.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Jayesh Kariya ` +''' + +# Import Python Libs +from __future__ import absolute_import +import os + +# Import Salt Testing Libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.paths import TMP +from tests.support.unit import TestCase, skipIf +from tests.support.mock import ( + MagicMock, + patch, + NO_MOCK, + NO_MOCK_REASON +) + +# Import Salt Libs +import salt.modules.beacons as beacons +from salt.utils.event import SaltEvent + +SOCK_DIR = os.path.join(TMP, 'test-socks') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class BeaconsTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test cases for salt.modules.beacons + ''' + def setup_loader_modules(self): + return {beacons: {}} + + def test_delete(self): + ''' + Test deleting a beacon. + ''' + comm1 = 'Deleted beacon: ps.' 
+ event_returns = [ + {'complete': True, + 'tag': '/salt/minion/minion_beacons_delete_complete', + 'beacons': {}}, + ] + + with patch.dict(beacons.__opts__, {'beacons': {'ps': [{'processes': {'salt-master': 'stopped', 'apache2': 'stopped'}}]}, 'sock_dir': SOCK_DIR}): + mock = MagicMock(return_value=True) + with patch.dict(beacons.__salt__, {'event.fire': mock}): + with patch.object(SaltEvent, 'get_event', side_effect=event_returns): + self.assertDictEqual(beacons.delete('ps'), + {'comment': comm1, 'result': True}) + + def test_add(self): + ''' + Test adding a beacon + ''' + comm1 = 'Added beacon: ps.' + event_returns = [{'complete': True, + 'tag': '/salt/minion/minion_beacons_list_complete', + 'beacons': {}}, + {'complete': True, + 'tag': '/salt/minion/minion_beacon_add_complete', + 'beacons': {'ps': [{'processes': {'salt-master': 'stopped', 'apache2': 'stopped'}}]}}] + + with patch.dict(beacons.__opts__, {'beacons': {}, 'sock_dir': SOCK_DIR}): + mock = MagicMock(return_value=True) + with patch.dict(beacons.__salt__, {'event.fire': mock}): + with patch.object(SaltEvent, 'get_event', side_effect=event_returns): + self.assertDictEqual(beacons.add('ps', [{'processes': {'salt-master': 'stopped', 'apache2': 'stopped'}}]), + {'comment': comm1, 'result': True}) + + def test_save(self): + ''' + Test saving beacons. + ''' + comm1 = 'Beacons saved to //tmp/beacons.conf.' + with patch.dict(beacons.__opts__, {'config_dir': '', 'beacons': {}, + 'default_include': '/tmp/', + 'sock_dir': SOCK_DIR}): + + mock = MagicMock(return_value=True) + with patch.dict(beacons.__salt__, {'event.fire': mock}): + _ret_value = {'complete': True, 'beacons': {}} + with patch.object(SaltEvent, 'get_event', return_value=_ret_value): + self.assertDictEqual(beacons.save(), + {'comment': comm1, 'result': True}) + + def test_disable(self): + ''' + Test disabling beacons + ''' + comm1 = 'Disabled beacons on minion.' + event_returns = [{'complete': True, + 'tag': '/salt/minion/minion_beacons_disabled_complete', + 'beacons': {'enabled': False, + 'ps': [{'processes': {'salt-master': 'stopped', + 'apache2': 'stopped'}}]}}] + + with patch.dict(beacons.__opts__, {'beacons': {}, 'sock_dir': SOCK_DIR}): + mock = MagicMock(return_value=True) + with patch.dict(beacons.__salt__, {'event.fire': mock}): + with patch.object(SaltEvent, 'get_event', side_effect=event_returns): + self.assertDictEqual(beacons.disable(), + {'comment': comm1, 'result': True}) + + def test_enable(self): + ''' + Test enabling beacons + ''' + comm1 = 'Enabled beacons on minion.' 
+ event_returns = [{'complete': True, + 'tag': '/salt/minion/minion_beacon_enabled_complete', + 'beacons': {'enabled': True, + 'ps': [{'processes': {'salt-master': 'stopped', + 'apache2': 'stopped'}}]}}] + + with patch.dict(beacons.__opts__, {'beacons': {}, 'sock_dir': SOCK_DIR}): + mock = MagicMock(return_value=True) + with patch.dict(beacons.__salt__, {'event.fire': mock}): + with patch.object(SaltEvent, 'get_event', side_effect=event_returns): + self.assertDictEqual(beacons.enable(), + {'comment': comm1, 'result': True}) diff --git a/tests/unit/modules/test_boto_elasticsearch_domain.py b/tests/unit/modules/test_boto_elasticsearch_domain.py index 174bf6ebd5..41565ecbc5 100644 --- a/tests/unit/modules/test_boto_elasticsearch_domain.py +++ b/tests/unit/modules/test_boto_elasticsearch_domain.py @@ -18,7 +18,7 @@ from tests.support.mock import ( ) # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.loader from salt.utils.versions import LooseVersion import salt.modules.boto_elasticsearch_domain as boto_elasticsearch_domain diff --git a/tests/unit/modules/test_boto_elb.py b/tests/unit/modules/test_boto_elb.py index 4582155c6f..8d6d517cc7 100644 --- a/tests/unit/modules/test_boto_elb.py +++ b/tests/unit/modules/test_boto_elb.py @@ -16,28 +16,28 @@ except ImportError: HAS_BOTO = False try: - from moto import mock_ec2, mock_elb + from moto import mock_ec2_deprecated, mock_elb_deprecated HAS_MOTO = True except ImportError: HAS_MOTO = False - def mock_ec2(self): + def mock_ec2_deprecated(self): ''' - if the mock_ec2 function is not available due to import failure + if the mock_ec2_deprecated function is not available due to import failure this replaces the decorated function with stub_function. - Allows boto_vpc unit tests to use the @mock_ec2 decorator - without a "NameError: name 'mock_ec2' is not defined" error. + Allows boto_elb unit tests to use the @mock_ec2_deprecated decorator + without a "NameError: name 'mock_ec2_deprecated' is not defined" error. ''' def stub_function(self): pass return stub_function - def mock_elb(self): + def mock_elb_deprecated(self): ''' - if the mock_ec2 function is not available due to import failure + if the mock_elb_deprecated function is not available due to import failure this replaces the decorated function with stub_function. - Allows boto_vpc unit tests to use the @mock_ec2 decorator - without a "NameError: name 'mock_ec2' is not defined" error. + Allows boto_elb unit tests to use the @mock_elb_deprecated decorator + without a "NameError: name 'mock_elb_deprecated' is not defined" error. 
''' def stub_function(self): pass @@ -46,7 +46,7 @@ except ImportError: # Import Salt Libs import salt.config -import salt.ext.six as six +from salt.ext import six import salt.loader import salt.modules.boto_elb as boto_elb import salt.utils.versions @@ -114,8 +114,8 @@ class BotoElbTestCase(TestCase, LoaderModuleMockMixin): # __virtual__ must be caller in order for _get_conn to be injected boto_elb.__virtual__() - @mock_ec2 - @mock_elb + @mock_ec2_deprecated + @mock_elb_deprecated def test_register_instances_valid_id_result_true(self): ''' tests that given a valid instance id and valid ELB that @@ -133,8 +133,8 @@ class BotoElbTestCase(TestCase, LoaderModuleMockMixin): **conn_parameters) self.assertEqual(True, register_result) - @mock_ec2 - @mock_elb + @mock_ec2_deprecated + @mock_elb_deprecated def test_register_instances_valid_id_string(self): ''' tests that given a string containing a instance id and valid ELB that @@ -156,8 +156,8 @@ class BotoElbTestCase(TestCase, LoaderModuleMockMixin): log.debug(load_balancer_refreshed.instances) self.assertEqual([reservations.instances[0].id], registered_instance_ids) - @mock_ec2 - @mock_elb + @mock_ec2_deprecated + @mock_elb_deprecated def test_deregister_instances_valid_id_result_true(self): ''' tests that given an valid id the boto_elb deregister_instances method @@ -177,8 +177,8 @@ class BotoElbTestCase(TestCase, LoaderModuleMockMixin): **conn_parameters) self.assertEqual(True, deregister_result) - @mock_ec2 - @mock_elb + @mock_ec2_deprecated + @mock_elb_deprecated def test_deregister_instances_valid_id_string(self): ''' tests that given an valid id the boto_elb deregister_instances method @@ -203,8 +203,8 @@ class BotoElbTestCase(TestCase, LoaderModuleMockMixin): load_balancer_refreshed.instances] self.assertEqual(actual_instances, expected_instances) - @mock_ec2 - @mock_elb + @mock_ec2_deprecated + @mock_elb_deprecated def test_deregister_instances_valid_id_list(self): ''' tests that given an valid ids in the form of a list that the boto_elb diff --git a/tests/unit/modules/test_boto_lambda.py b/tests/unit/modules/test_boto_lambda.py index fe81c09871..4c26a10a96 100644 --- a/tests/unit/modules/test_boto_lambda.py +++ b/tests/unit/modules/test_boto_lambda.py @@ -31,9 +31,10 @@ import salt.modules.boto_lambda as boto_lambda from salt.exceptions import SaltInvocationError from salt.utils.versions import LooseVersion import salt.utils +import salt.utils.stringutils # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # pylint: disable=import-error,no-name-in-module try: @@ -155,7 +156,7 @@ class TempZipFile(object): suffix='.zip', prefix='salt_test_', delete=False) as tmp: to_write = '###\n' if six.PY3: - to_write = salt.utils.to_bytes(to_write) + to_write = salt.utils.stringutils.to_bytes(to_write) tmp.write(to_write) self.zipfile = tmp.name return self.zipfile diff --git a/tests/unit/modules/test_boto_s3_bucket.py b/tests/unit/modules/test_boto_s3_bucket.py index 1bafb3ea7a..f3acbad072 100644 --- a/tests/unit/modules/test_boto_s3_bucket.py +++ b/tests/unit/modules/test_boto_s3_bucket.py @@ -23,7 +23,7 @@ import salt.modules.boto_s3_bucket as boto_s3_bucket from salt.utils.versions import LooseVersion # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # pylint: disable=import-error,no-name-in-module,unused-import diff --git 
a/tests/unit/modules/test_boto_secgroup.py b/tests/unit/modules/test_boto_secgroup.py index 7a1d9c0197..ef90958dec 100644 --- a/tests/unit/modules/test_boto_secgroup.py +++ b/tests/unit/modules/test_boto_secgroup.py @@ -29,17 +29,17 @@ except ImportError: HAS_BOTO = False try: - from moto import mock_ec2 + from moto import mock_ec2_deprecated HAS_MOTO = True except ImportError: HAS_MOTO = False - def mock_ec2(self): + def mock_ec2_deprecated(self): ''' - if the mock_ec2 function is not available due to import failure + if the mock_ec2_deprecated function is not available due to import failure this replaces the decorated function with stub_function. - Allows boto_secgroup unit tests to use the @mock_ec2 decorator - without a "NameError: name 'mock_ec2' is not defined" error. + Allows boto_secgroup unit tests to use the @mock_ec2_deprecated decorator + without a "NameError: name 'mock_ec2_deprecated' is not defined" error. ''' def stub_function(self): pass @@ -117,7 +117,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): {'to_port': 80, 'from_port': 80, 'ip_protocol': u'tcp', 'cidr_ip': u'0.0.0.0/0'}] self.assertEqual(boto_secgroup._split_rules(rules), split_rules) - @mock_ec2 + @mock_ec2_deprecated def test_create_ec2_classic(self): ''' Test of creation of an EC2-Classic security group. The test ensures @@ -137,7 +137,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): secgroup_created_group[0].vpc_id] self.assertEqual(expected_create_result, secgroup_create_result) - @mock_ec2 + @mock_ec2_deprecated def test_create_ec2_vpc(self): ''' test of creation of an EC2-VPC security group. The test ensures that a @@ -155,7 +155,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): secgroup_create_result = [secgroup_created_group[0].name, secgroup_created_group[0].description, secgroup_created_group[0].vpc_id] self.assertEqual(expected_create_result, secgroup_create_result) - @mock_ec2 + @mock_ec2_deprecated def test_get_group_id_ec2_classic(self): ''' tests that given a name of a group in EC2-Classic that the correct @@ -177,7 +177,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): @skipIf(True, 'test skipped because moto does not yet support group' ' filters https://github.com/spulec/moto/issues/154') - @mock_ec2 + @mock_ec2_deprecated def test_get_group_id_ec2_vpc(self): ''' tests that given a name of a group in EC2-VPC that the correct @@ -197,7 +197,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): **conn_parameters) self.assertEqual(group_vpc.id, retrieved_group_id) - @mock_ec2 + @mock_ec2_deprecated def test_get_config_single_rule_group_name(self): ''' tests return of 'config' when given group name. get_config returns an OrderedDict. 
@@ -213,7 +213,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): group = conn.create_security_group(name=group_name, description=group_name) group.authorize(ip_protocol=ip_protocol, from_port=from_port, to_port=to_port, cidr_ip=cidr_ip) # setup the expected get_config result - expected_get_config_result = OrderedDict([('name', group.name), ('group_id', group.id), ('owner_id', u'111122223333'), + expected_get_config_result = OrderedDict([('name', group.name), ('group_id', group.id), ('owner_id', u'123456789012'), ('description', group.description), ('tags', {}), ('rules', [{'to_port': to_port, 'from_port': from_port, 'ip_protocol': ip_protocol, 'cidr_ip': cidr_ip}]), @@ -221,7 +221,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): secgroup_get_config_result = boto_secgroup.get_config(group_id=group.id, **conn_parameters) self.assertEqual(expected_get_config_result, secgroup_get_config_result) - @mock_ec2 + @mock_ec2_deprecated def test_exists_true_name_classic(self): ''' tests 'true' existence of a group in EC2-Classic when given name @@ -234,11 +234,11 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): salt_exists_result = boto_secgroup.exists(name=group_name, **conn_parameters) self.assertTrue(salt_exists_result) - @mock_ec2 + @mock_ec2_deprecated def test_exists_false_name_classic(self): pass - @mock_ec2 + @mock_ec2_deprecated def test_exists_true_name_vpc(self): ''' tests 'true' existence of a group in EC2-VPC when given name and vpc_id @@ -250,7 +250,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): salt_exists_result = boto_secgroup.exists(name=group_name, vpc_id=vpc_id, **conn_parameters) self.assertTrue(salt_exists_result) - @mock_ec2 + @mock_ec2_deprecated def test_exists_false_name_vpc(self): ''' tests 'false' existence of a group in vpc when given name and vpc_id @@ -259,7 +259,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): salt_exists_result = boto_secgroup.exists(group_name, vpc_id=vpc_id, **conn_parameters) self.assertFalse(salt_exists_result) - @mock_ec2 + @mock_ec2_deprecated def test_exists_true_group_id(self): ''' tests 'true' existence of a group when given group_id @@ -271,7 +271,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): salt_exists_result = boto_secgroup.exists(group_id=group.id, **conn_parameters) self.assertTrue(salt_exists_result) - @mock_ec2 + @mock_ec2_deprecated def test_exists_false_group_id(self): ''' tests 'false' existence of a group when given group_id @@ -280,7 +280,7 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): salt_exists_result = boto_secgroup.exists(group_id=group_id, **conn_parameters) self.assertFalse(salt_exists_result) - @mock_ec2 + @mock_ec2_deprecated def test_delete_group_ec2_classic(self): ''' test deletion of a group in EC2-Classic. Test does the following: @@ -306,11 +306,11 @@ class BotoSecgroupTestCase(TestCase, LoaderModuleMockMixin): actual_groups = [group.id for group in conn.get_all_security_groups()] self.assertEqual(expected_groups, actual_groups) - @mock_ec2 + @mock_ec2_deprecated def test_delete_group_name_ec2_vpc(self): pass - @mock_ec2 + @mock_ec2_deprecated def test__get_conn_true(self): ''' tests ensures that _get_conn returns an boto.ec2.connection.EC2Connection object. 
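Note on the moto changes in the boto test modules above (test_boto_elb.py, test_boto_secgroup.py) and in test_boto_vpc.py below: they are all updated the same way for the moto upgrade. required_moto_version is raised from '0.3.7' to '1.0.0', the boto 2 decorators are renamed to their *_deprecated counterparts (mock_ec2 -> mock_ec2_deprecated, mock_elb -> mock_elb_deprecated), and the expected owner_id moves from u'111122223333' to u'123456789012', presumably matching moto's default test account id. Each module keeps its import-fallback stub so it still loads when moto is missing. A minimal, hypothetical sketch of the shared pattern follows (the test and class names, and the assumption that boto 2 and moto >= 1.0.0 are installed, are for illustration only):

    # Minimal sketch (illustrative, not part of the diff) of the fallback pattern
    # shared by these boto test modules: import the renamed moto decorator when
    # moto >= 1.0.0 is installed, otherwise substitute a no-op so the module can
    # still be imported and the tests skipped cleanly.
    import boto.vpc
    from unittest import TestCase, skipIf

    try:
        from moto import mock_ec2_deprecated
        HAS_MOTO = True
    except ImportError:
        HAS_MOTO = False

        def mock_ec2_deprecated(self):
            # Stand-in decorator: keeps @mock_ec2_deprecated resolvable even when
            # moto is absent, avoiding a NameError at import time.
            def stub_function(self):
                pass
            return stub_function


    @skipIf(HAS_MOTO is False, 'The moto module must be installed.')
    class ExampleMotoTestCase(TestCase):
        @mock_ec2_deprecated
        def test_create_vpc(self):
            # mock_ec2_deprecated backs the boto 2 API these modules still use;
            # the un-suffixed decorators target boto 3 in moto >= 1.0.0.
            conn = boto.vpc.connect_to_region('us-east-1')
            vpc = conn.create_vpc('10.0.0.0/24')
            self.assertTrue(vpc.id.startswith('vpc-'))
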
diff --git a/tests/unit/modules/test_boto_vpc.py b/tests/unit/modules/test_boto_vpc.py index 2f53edf5e5..cda7fa29f3 100644 --- a/tests/unit/modules/test_boto_vpc.py +++ b/tests/unit/modules/test_boto_vpc.py @@ -26,7 +26,7 @@ from salt.exceptions import SaltInvocationError, CommandExecutionError from salt.modules.boto_vpc import _maybe_set_name_tag, _maybe_set_tags # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # pylint: disable=import-error from salt.ext.six.moves import range # pylint: disable=redefined-builtin # pylint: disable=no-name-in-module,unused-import @@ -40,17 +40,17 @@ except ImportError: try: import moto - from moto import mock_ec2 + from moto import mock_ec2_deprecated HAS_MOTO = True except ImportError: HAS_MOTO = False - def mock_ec2(self): + def mock_ec2_deprecated(self): ''' - if the mock_ec2 function is not available due to import failure + if the mock_ec2_deprecated function is not available due to import failure this replaces the decorated function with stub_function. - Allows boto_vpc unit tests to use the @mock_ec2 decorator - without a "NameError: name 'mock_ec2' is not defined" error. + Allows boto_vpc unit tests to use the @mock_ec2_deprecated decorator + without a "NameError: name 'mock_ec2_deprecated' is not defined" error. ''' def stub_function(self): @@ -63,7 +63,7 @@ except ImportError: # which was added in boto 2.8.0 # https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12 required_boto_version = '2.8.0' -required_moto_version = '0.3.7' +required_moto_version = '1.0.0' region = 'us-east-1' access_key = 'GKTADJGHEIQSXMKKRBJ08H' @@ -272,7 +272,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): TestCase for salt.modules.boto_vpc module ''' - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_id_and_a_vpc_exists_the_vpc_exists_method_returns_true(self): ''' Tests checking vpc existence via id when the vpc already exists @@ -283,7 +283,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_id_and_a_vpc_does_not_exist_the_vpc_exists_method_returns_false( self): ''' @@ -295,7 +295,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_name_and_a_vpc_exists_the_vpc_exists_method_returns_true(self): ''' Tests checking vpc existence via name when vpc exists @@ -306,7 +306,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_name_and_a_vpc_does_not_exist_the_vpc_exists_method_returns_false( self): ''' @@ -318,7 +318,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_tags_and_a_vpc_exists_the_vpc_exists_method_returns_true(self): ''' Tests checking vpc existence via tag when vpc exists @@ -329,7 +329,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_tags_and_a_vpc_does_not_exist_the_vpc_exists_method_returns_false( self): ''' @@ -341,7 +341,7 @@ class 
BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_cidr_and_a_vpc_exists_the_vpc_exists_method_returns_true(self): ''' Tests checking vpc existence via cidr when vpc exists @@ -352,7 +352,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_vpc_exists_by_cidr_and_a_vpc_does_not_exist_the_vpc_exists_method_returns_false( self): ''' @@ -364,7 +364,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(vpc_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_checking_if_a_vpc_exists_but_providing_no_filters_the_vpc_exists_method_raises_a_salt_invocation_error(self): ''' @@ -375,7 +375,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): 'cidr or tags.'): boto_vpc.exists(**conn_parameters) - @mock_ec2 + @mock_ec2_deprecated def test_get_vpc_id_method_when_filtering_by_name(self): ''' Tests getting vpc id when filtering by name @@ -386,7 +386,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(vpc.id, get_id_result['id']) - @mock_ec2 + @mock_ec2_deprecated def test_get_vpc_id_method_when_filtering_by_invalid_name(self): ''' Tests getting vpc id when filtering by invalid name @@ -397,7 +397,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(get_id_result['id'], None) - @mock_ec2 + @mock_ec2_deprecated def test_get_vpc_id_method_when_filtering_by_cidr(self): ''' Tests getting vpc id when filtering by cidr @@ -408,7 +408,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(vpc.id, get_id_result['id']) - @mock_ec2 + @mock_ec2_deprecated def test_get_vpc_id_method_when_filtering_by_invalid_cidr(self): ''' Tests getting vpc id when filtering by invalid cidr @@ -419,7 +419,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(get_id_result['id'], None) - @mock_ec2 + @mock_ec2_deprecated def test_get_vpc_id_method_when_filtering_by_tags(self): ''' Tests getting vpc id when filtering by tags @@ -430,7 +430,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(vpc.id, get_id_result['id']) - @mock_ec2 + @mock_ec2_deprecated def test_get_vpc_id_method_when_filtering_by_invalid_tags(self): ''' Tests getting vpc id when filtering by invalid tags @@ -441,7 +441,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(get_id_result['id'], None) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_get_vpc_id_method_when_not_providing_filters_raises_a_salt_invocation_error(self): ''' @@ -450,7 +450,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): with self.assertRaisesRegex(SaltInvocationError, 'At least one of the following must be provided: vpc_id, vpc_name, cidr or tags.'): boto_vpc.get_id(**conn_parameters) - @mock_ec2 + @mock_ec2_deprecated def test_get_vpc_id_method_when_more_than_one_vpc_is_matched_raises_a_salt_command_execution_error(self): ''' Tests getting vpc id but providing no filters @@ -461,7 +461,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): with 
self.assertRaisesRegex(CommandExecutionError, 'Found more than one VPC matching the criteria.'): boto_vpc.get_id(cidr=u'10.0.0.0/24', **conn_parameters) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_a_vpc_succeeds_the_create_vpc_method_returns_true(self): ''' tests True VPC created. @@ -470,7 +470,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_a_vpc_and_specifying_a_vpc_name_succeeds_the_create_vpc_method_returns_true(self): ''' tests True VPC created. @@ -479,7 +479,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_a_vpc_and_specifying_tags_succeeds_the_create_vpc_method_returns_true(self): ''' tests True VPC created. @@ -488,7 +488,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_creating_a_vpc_fails_the_create_vpc_method_returns_false(self): ''' @@ -499,7 +499,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(vpc_creation_result['created']) self.assertTrue('error' in vpc_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_deleting_an_existing_vpc_the_delete_vpc_method_returns_true(self): ''' Tests deleting an existing vpc @@ -510,7 +510,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_deletion_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_deleting_a_non_existent_vpc_the_delete_vpc_method_returns_false(self): ''' Tests deleting a non-existent vpc @@ -519,7 +519,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(delete_vpc_result['deleted']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_describing_vpc_by_id_it_returns_the_dict_of_properties_returns_true(self): ''' Tests describing parameters via vpc id if vpc exist @@ -546,7 +546,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(describe_vpc, {'vpc': vpc_properties}) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_describing_vpc_by_id_it_returns_the_dict_of_properties_returns_false(self): ''' Tests describing parameters via vpc id if vpc does not exist @@ -557,7 +557,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(describe_vpc['vpc']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_describing_vpc_by_id_on_connection_error_it_returns_error(self): ''' @@ -570,7 +570,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): describe_result = boto_vpc.describe(vpc_id=vpc.id, **conn_parameters) self.assertTrue('error' in describe_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_describing_vpc_but_providing_no_vpc_id_the_describe_method_raises_a_salt_invocation_error(self): ''' Tests describing vpc without vpc id @@ -588,7 +588,7 @@ class BotoVpcTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): .format(required_boto_version, _get_boto_version())) @skipIf(_has_required_moto() is False, 'The moto version must be >= to version {0}'.format(required_moto_version)) class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + 
@mock_ec2_deprecated def test_get_subnet_association_single_subnet(self): ''' tests that given multiple subnet ids in the same VPC that the VPC ID is @@ -601,7 +601,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): **conn_parameters) self.assertEqual(vpc.id, subnet_association['vpc_id']) - @mock_ec2 + @mock_ec2_deprecated def test_get_subnet_association_multiple_subnets_same_vpc(self): ''' tests that given multiple subnet ids in the same VPC that the VPC ID is @@ -614,7 +614,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): **conn_parameters) self.assertEqual(vpc.id, subnet_association['vpc_id']) - @mock_ec2 + @mock_ec2_deprecated def test_get_subnet_association_multiple_subnets_different_vpc(self): ''' tests that given multiple subnet ids in different VPCs that False is @@ -628,7 +628,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): **conn_parameters) self.assertEqual(set(subnet_association['vpc_ids']), set([vpc_a.id, vpc_b.id])) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_a_subnet_succeeds_the_create_subnet_method_returns_true(self): ''' Tests creating a subnet successfully @@ -640,7 +640,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(subnet_creation_result['created']) self.assertTrue('id' in subnet_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_a_subnet_and_specifying_a_name_succeeds_the_create_subnet_method_returns_true(self): ''' Tests creating a subnet successfully when specifying a name @@ -651,7 +651,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(subnet_creation_result['created']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_a_subnet_and_specifying_tags_succeeds_the_create_subnet_method_returns_true(self): ''' Tests creating a subnet successfully when specifying a tag @@ -663,7 +663,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(subnet_creation_result['created']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_creating_a_subnet_fails_the_create_subnet_method_returns_error(self): ''' @@ -675,7 +675,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): subnet_creation_result = boto_vpc.create_subnet(vpc.id, '10.0.0.0/24', **conn_parameters) self.assertTrue('error' in subnet_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_deleting_an_existing_subnet_the_delete_subnet_method_returns_true(self): ''' Tests deleting an existing subnet @@ -687,7 +687,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(subnet_deletion_result['deleted']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_deleting_a_non_existent_subnet_the_delete_vpc_method_returns_false(self): ''' Tests deleting a subnet that doesn't exist @@ -695,7 +695,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): delete_subnet_result = boto_vpc.delete_subnet(subnet_id='1234', **conn_parameters) self.assertTrue('error' in delete_subnet_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_subnet_exists_by_id_the_subnet_exists_method_returns_true(self): ''' Tests checking if a subnet exists when it does exist @@ -707,7 +707,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): 
self.assertTrue(subnet_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_a_subnet_does_not_exist_the_subnet_exists_method_returns_false(self): ''' Tests checking if a subnet exists which doesn't exist @@ -716,7 +716,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(subnet_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_subnet_exists_by_name_the_subnet_exists_method_returns_true(self): ''' Tests checking subnet existence by name @@ -728,7 +728,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(subnet_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_subnet_exists_by_name_the_subnet_does_not_exist_the_subnet_method_returns_false(self): ''' Tests checking subnet existence by name when it doesn't exist @@ -740,7 +740,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(subnet_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_subnet_exists_by_tags_the_subnet_exists_method_returns_true(self): ''' Tests checking subnet existence by tag @@ -752,7 +752,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(subnet_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_checking_if_a_subnet_exists_by_tags_the_subnet_does_not_exist_the_subnet_method_returns_false(self): ''' Tests checking subnet existence by tag when subnet doesn't exist @@ -764,7 +764,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(subnet_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_checking_if_a_subnet_exists_but_providing_no_filters_the_subnet_exists_method_raises_a_salt_invocation_error(self): ''' @@ -776,7 +776,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): boto_vpc.subnet_exists(**conn_parameters) @skipIf(True, 'Skip these tests while investigating failures') - @mock_ec2 + @mock_ec2_deprecated def test_that_describe_subnet_by_id_for_existing_subnet_returns_correct_data(self): ''' Tests describing a subnet by id. @@ -791,7 +791,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(set(describe_subnet_results['subnet'].keys()), set(['id', 'cidr_block', 'availability_zone', 'tags'])) - @mock_ec2 + @mock_ec2_deprecated def test_that_describe_subnet_by_id_for_non_existent_subnet_returns_none(self): ''' Tests describing a non-existent subnet by id. @@ -805,7 +805,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(describe_subnet_results['subnet'], None) @skipIf(True, 'Skip these tests while investigating failures') - @mock_ec2 + @mock_ec2_deprecated def test_that_describe_subnet_by_name_for_existing_subnet_returns_correct_data(self): ''' Tests describing a subnet by name. @@ -820,7 +820,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(set(describe_subnet_results['subnet'].keys()), set(['id', 'cidr_block', 'availability_zone', 'tags'])) - @mock_ec2 + @mock_ec2_deprecated def test_that_describe_subnet_by_name_for_non_existent_subnet_returns_none(self): ''' Tests describing a non-existent subnet by id. 
@@ -834,7 +834,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(describe_subnet_results['subnet'], None) @skipIf(True, 'Skip these tests while investigating failures') - @mock_ec2 + @mock_ec2_deprecated def test_that_describe_subnets_by_id_for_existing_subnet_returns_correct_data(self): ''' Tests describing multiple subnets by id. @@ -852,7 +852,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): set(['id', 'cidr_block', 'availability_zone', 'tags'])) @skipIf(True, 'Skip these tests while investigating failures') - @mock_ec2 + @mock_ec2_deprecated def test_that_describe_subnets_by_name_for_existing_subnets_returns_correct_data(self): ''' Tests describing multiple subnets by id. @@ -869,7 +869,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertEqual(set(describe_subnet_results['subnets'][0].keys()), set(['id', 'cidr_block', 'availability_zone', 'tags'])) - @mock_ec2 + @mock_ec2_deprecated def test_create_subnet_passes_availability_zone(self): ''' Tests that the availability_zone kwarg is passed on to _create_resource @@ -890,7 +890,7 @@ class BotoVpcSubnetsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): ' or equal to version {}. Installed: {}' .format(required_boto_version, _get_boto_version())) class BotoVpcInternetGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_an_internet_gateway_the_create_internet_gateway_method_returns_true(self): ''' Tests creating an internet gateway successfully (with no vpc id or name) @@ -901,7 +901,7 @@ class BotoVpcInternetGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): keyid=access_key) self.assertTrue(igw_creation_result.get('created')) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_an_internet_gateway_with_non_existent_vpc_the_create_internet_gateway_method_returns_an_error(self): ''' Tests that creating an internet gateway for a non-existent VPC fails. @@ -913,7 +913,7 @@ class BotoVpcInternetGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): vpc_name='non-existent-vpc') self.assertTrue('error' in igw_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_an_internet_gateway_with_vpc_name_specified_the_create_internet_gateway_method_returns_true(self): ''' Tests creating an internet gateway with vpc name specified. @@ -928,7 +928,7 @@ class BotoVpcInternetGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(igw_creation_result.get('created')) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_an_internet_gateway_with_vpc_id_specified_the_create_internet_gateway_method_returns_true(self): ''' Tests creating an internet gateway with vpc name specified. @@ -951,7 +951,7 @@ class BotoVpcInternetGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): ' or equal to version {}. 
Installed: {}' .format(required_boto_version, _get_boto_version())) class BotoVpcNatGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_an_nat_gateway_the_create_nat_gateway_method_returns_true(self): ''' Tests creating an nat gateway successfully (with subnet_id specified) @@ -965,7 +965,7 @@ class BotoVpcNatGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): keyid=access_key) self.assertTrue(ngw_creation_result.get('created')) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_an_nat_gateway_with_non_existent_subnet_the_create_nat_gateway_method_returns_an_error(self): ''' Tests that creating an nat gateway for a non-existent subnet fails. @@ -977,7 +977,7 @@ class BotoVpcNatGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): subnet_name='non-existent-subnet') self.assertTrue('error' in ngw_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_an_nat_gateway_with_subnet_name_specified_the_create_nat_gateway_method_returns_true(self): ''' Tests creating an nat gateway with subnet name specified. @@ -1000,7 +1000,7 @@ class BotoVpcNatGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): ' or equal to version {}. Installed: {}' .format(required_boto_version, _get_boto_version())) class BotoVpcCustomerGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_a_customer_gateway_the_create_customer_gateway_method_returns_true(self): ''' @@ -1010,7 +1010,7 @@ class BotoVpcCustomerGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): gw_creation_result = boto_vpc.create_customer_gateway('ipsec.1', '10.1.1.1', None) self.assertTrue(gw_creation_result.get('created')) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_checking_if_a_subnet_exists_by_id_the_subnet_exists_method_returns_true(self): ''' @@ -1021,7 +1021,7 @@ class BotoVpcCustomerGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): gw_exists_result = boto_vpc.customer_gateway_exists(customer_gateway_id=gw_creation_result['id']) self.assertTrue(gw_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_a_subnet_does_not_exist_the_subnet_exists_method_returns_false(self): ''' @@ -1039,7 +1039,7 @@ class BotoVpcCustomerGatewayTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): .format(required_boto_version, _get_boto_version())) @skipIf(_has_required_moto() is False, 'The moto version must be >= to version {0}'.format(required_moto_version)) class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_dhcp_options_succeeds_the_create_dhcp_options_method_returns_true(self): ''' Tests creating dhcp options successfully @@ -1048,7 +1048,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_options_creation_result['created']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. 
Skipping for now.') def test_that_when_creating_dhcp_options_and_specifying_a_name_succeeds_the_create_dhcp_options_method_returns_true( self): @@ -1060,7 +1060,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_options_creation_result['created']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_dhcp_options_and_specifying_tags_succeeds_the_create_dhcp_options_method_returns_true( self): ''' @@ -1071,7 +1071,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_options_creation_result['created']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_creating_dhcp_options_fails_the_create_dhcp_options_method_returns_error(self): ''' @@ -1082,7 +1082,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): r = dhcp_options_creation_result = boto_vpc.create_dhcp_options(**dhcp_options_parameters) self.assertTrue('error' in r) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_associating_an_existing_dhcp_options_set_to_an_existing_vpc_the_associate_dhcp_options_method_returns_true( self): ''' @@ -1096,7 +1096,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_options_association_result['associated']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_associating_a_non_existent_dhcp_options_set_to_an_existing_vpc_the_associate_dhcp_options_method_returns_error( self): ''' @@ -1108,7 +1108,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue('error' in dhcp_options_association_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_associating_an_existing_dhcp_options_set_to_a_non_existent_vpc_the_associate_dhcp_options_method_returns_false( self): ''' @@ -1121,7 +1121,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue('error' in dhcp_options_association_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_dhcp_options_set_to_an_existing_vpc_succeeds_the_associate_new_dhcp_options_method_returns_true( self): ''' @@ -1133,7 +1133,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_creation_result['created']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_creating_and_associating_dhcp_options_set_to_an_existing_vpc_fails_creating_the_dhcp_options_the_associate_new_dhcp_options_method_raises_exception( self): @@ -1147,7 +1147,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): r = boto_vpc.associate_new_dhcp_options_to_vpc(vpc.id, **dhcp_options_parameters) self.assertTrue('error' in r) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_creating_and_associating_dhcp_options_set_to_an_existing_vpc_fails_associating_the_dhcp_options_the_associate_new_dhcp_options_method_raises_exception(self): ''' @@ -1160,7 +1160,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): r = boto_vpc.associate_new_dhcp_options_to_vpc(vpc.id, **dhcp_options_parameters) self.assertTrue('error' in r) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_dhcp_options_set_to_a_non_existent_vpc_the_dhcp_options_the_associate_new_dhcp_options_method_returns_false( 
self): ''' @@ -1170,7 +1170,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): r = boto_vpc.create_dhcp_options(vpc_name='fake', **dhcp_options_parameters) self.assertTrue('error' in r) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_dhcp_options_exists_the_dhcp_options_exists_method_returns_true(self): ''' Tests existence of dhcp options successfully @@ -1181,7 +1181,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_options_exists_result['exists']) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_dhcp_options_do_not_exist_the_dhcp_options_exists_method_returns_false(self): ''' Tests existence of dhcp options failure @@ -1189,7 +1189,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): r = boto_vpc.dhcp_options_exists('fake', **conn_parameters) self.assertFalse(r['exists']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_checking_if_dhcp_options_exists_but_providing_no_filters_the_dhcp_options_exists_method_raises_a_salt_invocation_error(self): ''' @@ -1206,7 +1206,7 @@ class BotoVpcDHCPOptionsTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): ' or equal to version {}. Installed: {}' .format(required_boto_version, _get_boto_version())) class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_network_acl_for_an_existing_vpc_the_create_network_acl_method_returns_true(self): ''' Tests creation of network acl with existing vpc @@ -1217,7 +1217,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_network_acl_for_an_existing_vpc_and_specifying_a_name_the_create_network_acl_method_returns_true( self): ''' @@ -1229,7 +1229,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_network_acl_for_an_existing_vpc_and_specifying_tags_the_create_network_acl_method_returns_true( self): ''' @@ -1241,7 +1241,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_network_acl_for_a_non_existent_vpc_the_create_network_acl_method_returns_an_error(self): ''' Tests creation of network acl with a non-existent vpc @@ -1250,7 +1250,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue('error' in network_acl_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. 
Skipping for now.') def test_that_when_creating_network_acl_fails_the_create_network_acl_method_returns_false(self): ''' @@ -1264,7 +1264,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(network_acl_creation_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_deleting_an_existing_network_acl_the_delete_network_acl_method_returns_true(self): ''' Tests deletion of existing network acl successfully @@ -1276,7 +1276,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_deletion_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_deleting_a_non_existent_network_acl_the_delete_network_acl_method_returns_an_error(self): ''' Tests deleting a non-existent network acl @@ -1285,7 +1285,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue('error' in network_acl_deletion_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_a_network_acl_exists_the_network_acl_exists_method_returns_true(self): ''' Tests existence of network acl @@ -1297,7 +1297,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_deletion_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_a_network_acl_does_not_exist_the_network_acl_exists_method_returns_false(self): ''' Tests checking network acl does not exist @@ -1306,7 +1306,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(network_acl_deletion_result['exists']) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_checking_if_network_acl_exists_but_providing_no_filters_the_network_acl_exists_method_raises_a_salt_invocation_error(self): ''' @@ -1318,7 +1318,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): ): boto_vpc.dhcp_options_exists(**conn_parameters) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_a_network_acl_entry_successfully_the_create_network_acl_entry_method_returns_true(self): ''' @@ -1333,7 +1333,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_entry_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_a_network_acl_entry_for_a_non_existent_network_acl_the_create_network_acl_entry_method_returns_false( self): @@ -1345,7 +1345,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(network_acl_entry_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_replacing_a_network_acl_entry_successfully_the_replace_network_acl_entry_method_returns_true( self): @@ -1362,7 +1362,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_entry_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. 
Skipping for now.') def test_that_when_replacing_a_network_acl_entry_for_a_non_existent_network_acl_the_replace_network_acl_entry_method_returns_false( self): @@ -1373,7 +1373,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): **conn_parameters) self.assertFalse(network_acl_entry_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_deleting_an_existing_network_acl_entry_the_delete_network_acl_entry_method_returns_true(self): ''' @@ -1388,7 +1388,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_entry_deletion_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_deleting_a_non_existent_network_acl_entry_the_delete_network_acl_entry_method_returns_false( self): @@ -1400,7 +1400,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(network_acl_entry_deletion_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_associating_an_existing_network_acl_to_an_existing_subnet_the_associate_network_acl_method_returns_true( self): @@ -1416,7 +1416,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_association_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_associating_a_non_existent_network_acl_to_an_existing_subnet_the_associate_network_acl_method_returns_an_error( self): ''' @@ -1430,7 +1430,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue('error' in network_acl_association_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_associating_an_existing_network_acl_to_a_non_existent_subnet_the_associate_network_acl_method_returns_false( self): @@ -1445,7 +1445,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(network_acl_association_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_and_associating_a_network_acl_to_a_subnet_succeeds_the_associate_new_network_acl_to_subnet_method_returns_true( self): @@ -1460,7 +1460,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_creation_and_association_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_and_associating_a_network_acl_to_a_subnet_and_specifying_a_name_succeeds_the_associate_new_network_acl_to_subnet_method_returns_true( self): @@ -1476,7 +1476,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_creation_and_association_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. 
Skipping for now.') def test_that_when_creating_and_associating_a_network_acl_to_a_subnet_and_specifying_tags_succeeds_the_associate_new_network_acl_to_subnet_method_returns_true( self): @@ -1493,7 +1493,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(network_acl_creation_and_association_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_and_associating_a_network_acl_to_a_non_existent_subnet_the_associate_new_network_acl_to_subnet_method_returns_false( self): @@ -1507,7 +1507,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(network_acl_creation_and_association_result) - @mock_ec2 + @mock_ec2_deprecated def test_that_when_creating_a_network_acl_to_a_non_existent_vpc_the_associate_new_network_acl_to_subnet_method_returns_an_error( self): ''' @@ -1520,7 +1520,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue('error' in network_acl_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_disassociating_network_acl_succeeds_the_disassociate_network_acl_method_should_return_true(self): ''' @@ -1533,7 +1533,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_disassociate_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_disassociating_network_acl_for_a_non_existent_vpc_the_disassociate_network_acl_method_should_return_false( self): @@ -1547,7 +1547,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(dhcp_disassociate_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_disassociating_network_acl_for_a_non_existent_subnet_the_disassociate_network_acl_method_should_return_false( self): @@ -1568,7 +1568,7 @@ class BotoVpcNetworkACLTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): ' or equal to version {}. Installed: {}' .format(required_boto_version, _get_boto_version())) class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_a_route_table_succeeds_the_create_route_table_method_returns_true(self): ''' @@ -1580,7 +1580,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(route_table_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_a_route_table_on_a_non_existent_vpc_the_create_route_table_method_returns_false(self): ''' @@ -1590,7 +1590,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(route_table_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_deleting_a_route_table_succeeds_the_delete_route_table_method_returns_true(self): ''' @@ -1603,7 +1603,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(route_table_deletion_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. 
Skipping for now.') def test_that_when_deleting_a_non_existent_route_table_the_delete_route_table_method_returns_false(self): ''' @@ -1613,7 +1613,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(route_table_deletion_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_route_table_exists_the_route_table_exists_method_returns_true(self): ''' @@ -1626,7 +1626,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(route_table_existence_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_route_table_does_not_exist_the_route_table_exists_method_returns_false(self): ''' @@ -1636,7 +1636,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(route_table_existence_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_that_when_checking_if_a_route_table_exists_but_providing_no_filters_the_route_table_exists_method_raises_a_salt_invocation_error(self): ''' @@ -1648,7 +1648,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): ): boto_vpc.dhcp_options_exists(**conn_parameters) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_associating_a_route_table_succeeds_the_associate_route_table_method_should_return_the_association_id( self): @@ -1663,7 +1663,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(association_id) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_associating_a_route_table_with_a_non_existent_route_table_the_associate_route_table_method_should_return_false( self): @@ -1677,7 +1677,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(association_id) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_associating_a_route_table_with_a_non_existent_subnet_the_associate_route_table_method_should_return_false( self): @@ -1691,7 +1691,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(association_id) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_disassociating_a_route_table_succeeds_the_disassociate_route_table_method_should_return_true( self): @@ -1708,7 +1708,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(dhcp_disassociate_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_creating_a_route_succeeds_the_create_route_method_should_return_true(self): ''' @@ -1721,7 +1721,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(route_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. 
Skipping for now.') def test_that_when_creating_a_route_with_a_non_existent_route_table_the_create_route_method_should_return_false( self): @@ -1732,7 +1732,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(route_creation_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_deleting_a_route_succeeds_the_delete_route_method_should_return_true(self): ''' @@ -1745,7 +1745,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(route_deletion_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_deleting_a_route_with_a_non_existent_route_table_the_delete_route_method_should_return_false( self): @@ -1756,7 +1756,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(route_deletion_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_replacing_a_route_succeeds_the_replace_route_method_should_return_true(self): ''' @@ -1769,7 +1769,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(route_replacing_result) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Moto has not implemented this feature. Skipping for now.') def test_that_when_replacing_a_route_with_a_non_existent_route_table_the_replace_route_method_should_return_false( self): @@ -1790,7 +1790,7 @@ class BotoVpcRouteTablesTestCase(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): @skipIf(_has_required_moto() is False, 'The moto version must be >= to version {0}'.format(required_moto_version)) class BotoVpcPeeringConnectionsTest(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): - @mock_ec2 + @mock_ec2_deprecated def test_request_vpc_peering_connection(self): ''' Run with 2 vpc ids and returns a message @@ -1803,7 +1803,7 @@ class BotoVpcPeeringConnectionsTest(BotoVpcTestCaseBase, BotoVpcTestCaseMixin): peer_vpc_id=other_vpc.id, **conn_parameters)) - @mock_ec2 + @mock_ec2_deprecated def test_raises_error_if_both_vpc_name_and_vpc_id_are_specified(self): ''' Must specify only one diff --git a/tests/unit/modules/test_btrfs.py b/tests/unit/modules/test_btrfs.py index 3b4de68a52..4b727fd0fc 100644 --- a/tests/unit/modules/test_btrfs.py +++ b/tests/unit/modules/test_btrfs.py @@ -17,7 +17,7 @@ from tests.support.mock import ( ) # Import Salt Libs -import salt +import salt.utils.files import salt.utils.fsutils import salt.modules.btrfs as btrfs from salt.exceptions import CommandExecutionError @@ -88,7 +88,7 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin): 'stdout': 'Salt'}) with patch.dict(btrfs.__salt__, {'cmd.run_all': mock_run}): mock_file = mock_open(read_data='/dev/sda1 / ext4 rw,data=ordered 0 0') - with patch.object(salt.utils, 'fopen', mock_file): + with patch.object(salt.utils.files, 'fopen', mock_file): self.assertListEqual(btrfs.defragment('/dev/sda1'), ret) def test_defragment_error(self): @@ -101,7 +101,7 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin): 'stdout': 'Salt'}) with patch.dict(btrfs.__salt__, {'cmd.run_all': mock_run}): mock_file = mock_open(read_data='/dev/sda1 / ext4 rw,data=ordered 0 0') - with patch.object(salt.utils, 'fopen', mock_file): + with patch.object(salt.utils.files, 'fopen', mock_file): self.assertRaises(CommandExecutionError, btrfs.defragment, '/dev/sda1') @@ -165,7 +165,7 @@ class 
BtrfsTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(btrfs.__salt__, {'cmd.run_all': mock_cmd, 'btrfs.info': mock_info}): mock_file = mock_open(read_data='/dev/sda1 / ext4 rw,data=ordered 0 0') - with patch.object(salt.utils, 'fopen', mock_file): + with patch.object(salt.utils.files, 'fopen', mock_file): self.assertDictEqual(btrfs.mkfs('/dev/sda1'), {'log': 'Salt'}) def test_mkfs_error(self): @@ -313,7 +313,7 @@ class BtrfsTestCase(TestCase, LoaderModuleMockMixin): mock_blk = MagicMock(return_value={'/dev/sda1': {'type': 'ext4'}}) with patch.object(salt.utils.fsutils, '_blkid_output', mock_blk): mock_file = mock_open(read_data='/dev/sda1 / ext4 rw,data=ordered 0 0') - with patch.object(salt.utils, 'fopen', mock_file): + with patch.object(salt.utils.files, 'fopen', mock_file): self.assertRaises(CommandExecutionError, btrfs.convert, '/dev/sda1') diff --git a/tests/unit/modules/test_cassandra.py b/tests/unit/modules/test_cassandra.py index 90a15593fb..6b34a0768f 100644 --- a/tests/unit/modules/test_cassandra.py +++ b/tests/unit/modules/test_cassandra.py @@ -16,7 +16,7 @@ from tests.support.mock import ( ) # Import Salt Libs -import salt.ext.six as six +from salt.ext import six import salt.modules.cassandra as cassandra diff --git a/tests/unit/modules/test_chef.py b/tests/unit/modules/test_chef.py index 0899ce3a47..320093b7ce 100644 --- a/tests/unit/modules/test_chef.py +++ b/tests/unit/modules/test_chef.py @@ -25,7 +25,7 @@ class ChefTestCase(TestCase, LoaderModuleMockMixin): Test cases for salt.modules.chef ''' def setup_loader_modules(self): - patcher = patch('salt.utils.which', MagicMock(return_value=True)) + patcher = patch('salt.utils.path.which', MagicMock(return_value=True)) patcher.start() self.addCleanup(patcher.stop) return {chef: {'_exec_cmd': MagicMock(return_value={})}} diff --git a/tests/unit/modules/test_cmdmod.py b/tests/unit/modules/test_cmdmod.py index 303d052f8f..6309354cb2 100644 --- a/tests/unit/modules/test_cmdmod.py +++ b/tests/unit/modules/test_cmdmod.py @@ -11,6 +11,7 @@ import tempfile # Import Salt Libs import salt.utils +import salt.utils.platform import salt.modules.cmdmod as cmdmod from salt.exceptions import CommandExecutionError from salt.log import LOG_LEVELS @@ -121,7 +122,7 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): and os.path.isfile returns False ''' with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)): - with patch('salt.utils.is_windows', MagicMock(return_value=False)): + with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)): with patch('os.path.isfile', MagicMock(return_value=False)): self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar') @@ -131,7 +132,7 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): os.path.isfile returns True, but os.access returns False ''' with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)): - with patch('salt.utils.is_windows', MagicMock(return_value=False)): + with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)): with patch('os.path.isfile', MagicMock(return_value=True)): with patch('os.access', MagicMock(return_value=False)): self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar') @@ -141,7 +142,7 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): Tests error raised when runas is passed on windows ''' with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)): - with patch('salt.utils.is_windows', MagicMock(return_value=True)): + with 
patch('salt.utils.platform.is_windows', MagicMock(return_value=True)): with patch.dict(cmdmod.__grains__, {'os': 'fake_os'}): self.assertRaises(CommandExecutionError, cmdmod._run, @@ -161,7 +162,7 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): Tests error raised when umask is set to zero ''' with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)): - with patch('salt.utils.is_windows', MagicMock(return_value=False)): + with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)): with patch('os.path.isfile', MagicMock(return_value=True)): with patch('os.access', MagicMock(return_value=True)): self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar', umask=0) @@ -171,7 +172,7 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): Tests error raised when an invalid umask is given ''' with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)): - with patch('salt.utils.is_windows', MagicMock(return_value=False)): + with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)): with patch('os.path.isfile', MagicMock(return_value=True)): with patch('os.access', MagicMock(return_value=True)): self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar', umask='baz') @@ -181,7 +182,7 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): Tests error raised when cwd is not an absolute path ''' with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)): - with patch('salt.utils.is_windows', MagicMock(return_value=False)): + with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)): with patch('os.path.isfile', MagicMock(return_value=True)): with patch('os.access', MagicMock(return_value=True)): self.assertRaises(CommandExecutionError, cmdmod._run, 'foo', 'bar') @@ -191,7 +192,7 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): Tests error raised when cwd is not a dir ''' with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)): - with patch('salt.utils.is_windows', MagicMock(return_value=False)): + with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)): with patch('os.path.isfile', MagicMock(return_value=True)): with patch('os.access', MagicMock(return_value=True)): with patch('os.path.isabs', MagicMock(return_value=True)): @@ -202,7 +203,7 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): Tests error raised when not useing vt and OSError is provided ''' with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)): - with patch('salt.utils.is_windows', MagicMock(return_value=False)): + with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)): with patch('os.path.isfile', MagicMock(return_value=True)): with patch('os.access', MagicMock(return_value=True)): with patch('salt.utils.timed_subprocess.TimedProc', MagicMock(side_effect=OSError)): @@ -213,19 +214,19 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): Tests error raised when not useing vt and IOError is provided ''' with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)): - with patch('salt.utils.is_windows', MagicMock(return_value=False)): + with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)): with patch('os.path.isfile', MagicMock(return_value=True)): with patch('os.access', MagicMock(return_value=True)): with patch('salt.utils.timed_subprocess.TimedProc', MagicMock(side_effect=IOError)): self.assertRaises(CommandExecutionError, cmdmod._run, 'foo') 
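[editor's note] The test_cmdmod.py hunks above and below only swap the dotted path handed to patch(); the mocking technique itself is unchanged. A minimal sketch of that technique, assuming a Salt install recent enough to ship salt.utils.platform (guarded_run below is an invented stand-in, not a Salt function):

# Sketch: patch a helper at the module path where the caller now looks it up.
import salt.utils.platform

from unittest import TestCase
from unittest.mock import MagicMock, patch


def guarded_run(cmd):
    # invented stand-in for the kind of platform guard cmdmod._run performs
    if salt.utils.platform.is_windows():
        raise RuntimeError('not supported on Windows in this sketch')
    return cmd


class GuardedRunTestCase(TestCase):
    def test_bails_on_windows(self):
        # the patch target names salt.utils.platform, mirroring the hunks above
        with patch('salt.utils.platform.is_windows', MagicMock(return_value=True)):
            self.assertRaises(RuntimeError, guarded_run, 'foo')

    def test_runs_elsewhere(self):
        with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)):
            self.assertEqual(guarded_run('foo'), 'foo')

Once the caller imports the helper from its new home, patching the old name no longer intercepts the call, which is why every target string in these hunks changes along with the import.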
- @skipIf(salt.utils.is_windows(), 'Do not run on Windows') + @skipIf(salt.utils.platform.is_windows(), 'Do not run on Windows') def test_run(self): ''' Tests end result when a command is not found ''' with patch('salt.modules.cmdmod._is_valid_shell', MagicMock(return_value=True)): - with patch('salt.utils.is_windows', MagicMock(return_value=False)): + with patch('salt.utils.platform.is_windows', MagicMock(return_value=False)): with patch('os.path.isfile', MagicMock(return_value=True)): with patch('os.access', MagicMock(return_value=True)): ret = cmdmod._run('foo', use_vt=True).get('stderr') @@ -235,10 +236,10 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): ''' Tests return if running on windows ''' - with patch('salt.utils.is_windows', MagicMock(return_value=True)): + with patch('salt.utils.platform.is_windows', MagicMock(return_value=True)): self.assertTrue(cmdmod._is_valid_shell('foo')) - @skipIf(salt.utils.is_windows(), 'Do not run on Windows') + @skipIf(salt.utils.platform.is_windows(), 'Do not run on Windows') def test_is_valid_shell_none(self): ''' Tests return of when os.path.exists(/etc/shells) isn't available @@ -254,7 +255,7 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.utils.files.fopen', mock_open(read_data=MOCK_SHELL_FILE)): self.assertTrue(cmdmod._is_valid_shell('/bin/bash')) - @skipIf(salt.utils.is_windows(), 'Do not run on Windows') + @skipIf(salt.utils.platform.is_windows(), 'Do not run on Windows') def test_is_valid_shell_unavailable(self): ''' Tests return when provided shell is not available diff --git a/tests/unit/modules/test_cp.py b/tests/unit/modules/test_cp.py index 4ede87bebd..abff94183b 100644 --- a/tests/unit/modules/test_cp.py +++ b/tests/unit/modules/test_cp.py @@ -20,9 +20,9 @@ from tests.support.mock import ( # Import Salt Libs import salt.utils.files +import salt.utils.templates as templates import salt.transport import salt.modules.cp as cp -from salt.utils import templates from salt.exceptions import CommandExecutionError diff --git a/tests/unit/modules/test_deb_apache.py b/tests/unit/modules/test_deb_apache.py index e646b39a6c..f57341acda 100644 --- a/tests/unit/modules/test_deb_apache.py +++ b/tests/unit/modules/test_deb_apache.py @@ -265,7 +265,7 @@ class DebApacheTestCase(TestCase, LoaderModuleMockMixin): ''' Test if it runs a2enconf for the given conf. ''' - with patch('salt.utils.which', MagicMock(return_value='a2enconf')): + with patch('salt.utils.path.which', MagicMock(return_value='a2enconf')): mock = MagicMock(return_value=1) with patch.dict(deb_apache.__salt__, {'cmd.retcode': mock}): self.assertEqual(deb_apache.a2enconf('security'), @@ -277,7 +277,7 @@ class DebApacheTestCase(TestCase, LoaderModuleMockMixin): ''' Test if it runs a2enconf for the given conf. ''' - with patch('salt.utils.which', MagicMock(return_value='a2enconf')): + with patch('salt.utils.path.which', MagicMock(return_value='a2enconf')): mock = MagicMock(return_value=0) with patch.dict(deb_apache.__salt__, {'cmd.retcode': mock}): self.assertEqual(deb_apache.a2enconf('security'), @@ -289,7 +289,7 @@ class DebApacheTestCase(TestCase, LoaderModuleMockMixin): ''' Test if it runs a2enconf for the given conf. 
''' - with patch('salt.utils.which', MagicMock(return_value='a2enconf')): + with patch('salt.utils.path.which', MagicMock(return_value='a2enconf')): mock = MagicMock(return_value=2) with patch.dict(deb_apache.__salt__, {'cmd.retcode': mock}): self.assertEqual(deb_apache.a2enconf('security'), @@ -301,7 +301,7 @@ class DebApacheTestCase(TestCase, LoaderModuleMockMixin): ''' Test if it runs a2enconf for the given conf. ''' - with patch('salt.utils.which', MagicMock(return_value='a2enconf')): + with patch('salt.utils.path.which', MagicMock(return_value='a2enconf')): mock = MagicMock(side_effect=Exception('error')) with patch.dict(deb_apache.__salt__, {'cmd.retcode': mock}): self.assertEqual(str(deb_apache.a2enconf('security')), @@ -313,7 +313,7 @@ class DebApacheTestCase(TestCase, LoaderModuleMockMixin): ''' Test if it runs a2disconf for the given conf. ''' - with patch('salt.utils.which', MagicMock(return_value='a2disconf')): + with patch('salt.utils.path.which', MagicMock(return_value='a2disconf')): mock = MagicMock(return_value=256) with patch.dict(deb_apache.__salt__, {'cmd.retcode': mock}): self.assertEqual(deb_apache.a2disconf('security'), @@ -325,7 +325,7 @@ class DebApacheTestCase(TestCase, LoaderModuleMockMixin): ''' Test if it runs a2disconf for the given conf. ''' - with patch('salt.utils.which', MagicMock(return_value='a2disconf')): + with patch('salt.utils.path.which', MagicMock(return_value='a2disconf')): mock = MagicMock(return_value=0) with patch.dict(deb_apache.__salt__, {'cmd.retcode': mock}): self.assertEqual(deb_apache.a2disconf('security'), @@ -337,7 +337,7 @@ class DebApacheTestCase(TestCase, LoaderModuleMockMixin): ''' Test if it runs a2disconf for the given conf. ''' - with patch('salt.utils.which', MagicMock(return_value='a2disconf')): + with patch('salt.utils.path.which', MagicMock(return_value='a2disconf')): mock = MagicMock(return_value=2) with patch.dict(deb_apache.__salt__, {'cmd.retcode': mock}): self.assertEqual(deb_apache.a2disconf('security'), @@ -349,7 +349,7 @@ class DebApacheTestCase(TestCase, LoaderModuleMockMixin): ''' Test if it runs a2disconf for the given conf. 
''' - with patch('salt.utils.which', MagicMock(return_value='a2disconf')): + with patch('salt.utils.path.which', MagicMock(return_value='a2disconf')): mock = MagicMock(side_effect=Exception('error')) with patch.dict(deb_apache.__salt__, {'cmd.retcode': mock}): self.assertEqual(str(deb_apache.a2disconf('security')), diff --git a/tests/unit/modules/test_deb_postgres.py b/tests/unit/modules/test_deb_postgres.py index f4235972f2..de16a83e00 100644 --- a/tests/unit/modules/test_deb_postgres.py +++ b/tests/unit/modules/test_deb_postgres.py @@ -9,7 +9,7 @@ from tests.support.unit import skipIf, TestCase from tests.support.mock import NO_MOCK, NO_MOCK_REASON, Mock, patch # Import salt libs -import salt.ext.six as six +from salt.ext import six import salt.modules.deb_postgres as deb_postgres LSCLUSTER = '''\ @@ -26,7 +26,7 @@ class PostgresClusterTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): self.cmd_run_all_mock = Mock(return_value={'stdout': LSCLUSTER}) self.addCleanup(delattr, self, 'cmd_run_all_mock') - patcher = patch('salt.utils.which', Mock(return_value='/usr/bin/pg_createcluster')) + patcher = patch('salt.utils.path.which', Mock(return_value='/usr/bin/pg_createcluster')) patcher.start() self.addCleanup(patcher.stop) return { @@ -71,7 +71,7 @@ class PostgresLsClusterTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): self.cmd_run_all_mock = Mock(return_value={'stdout': LSCLUSTER}) self.addCleanup(delattr, self, 'cmd_run_all_mock') - patcher = patch('salt.utils.which', Mock(return_value='/usr/bin/pg_lsclusters')) + patcher = patch('salt.utils.path.which', Mock(return_value='/usr/bin/pg_lsclusters')) patcher.start() self.addCleanup(patcher.stop) return { @@ -127,7 +127,7 @@ class PostgresDeleteClusterTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): self.cmd_run_all_mock = Mock(return_value={'stdout': LSCLUSTER}) self.addCleanup(delattr, self, 'cmd_run_all_mock') - patcher = patch('salt.utils.which', Mock(return_value='/usr/bin/pg_dropcluster')) + patcher = patch('salt.utils.path.which', Mock(return_value='/usr/bin/pg_dropcluster')) patcher.start() self.addCleanup(patcher.stop) return { diff --git a/tests/unit/modules/test_disk.py b/tests/unit/modules/test_disk.py index 1c5459a530..ef08b47fb7 100644 --- a/tests/unit/modules/test_disk.py +++ b/tests/unit/modules/test_disk.py @@ -13,7 +13,7 @@ from tests.support.mock import MagicMock, patch # Import Salt libs import salt.modules.disk as disk -import salt.utils +import salt.utils.path STUB_DISK_USAGE = { '/': {'filesystem': None, '1K-blocks': 10000, 'used': 10000, 'available': 10000, 'capacity': 10000}, @@ -141,8 +141,8 @@ class DiskTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(len(args[0].split()), 6) self.assertEqual(kwargs, {'python_shell': False}) - @skipIf(not salt.utils.which('sync'), 'sync not found') - @skipIf(not salt.utils.which('mkfs'), 'mkfs not found') + @skipIf(not salt.utils.path.which('sync'), 'sync not found') + @skipIf(not salt.utils.path.which('mkfs'), 'mkfs not found') def test_format(self): ''' unit tests for disk.format @@ -163,7 +163,7 @@ class DiskTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(disk.__salt__, {'cmd.run': mock}): self.assertEqual(disk.fstype(device), fs_type) - @skipIf(not salt.utils.which('resize2fs'), 'resize2fs not found') + @skipIf(not salt.utils.path.which('resize2fs'), 'resize2fs not found') def test_resize2fs(self): ''' unit tests for disk.resize2fs diff --git a/tests/unit/modules/test_djangomod.py 
b/tests/unit/modules/test_djangomod.py
index e052afed9a..6142b66e16 100644
--- a/tests/unit/modules/test_djangomod.py
+++ b/tests/unit/modules/test_djangomod.py
@@ -25,7 +25,7 @@ class DjangomodTestCase(TestCase, LoaderModuleMockMixin):
     Test cases for salt.modules.djangomod
     '''
     def setup_loader_modules(self):
-        patcher = patch('salt.utils.which', lambda exe: exe)
+        patcher = patch('salt.utils.path.which', lambda exe: exe)
         patcher.start()
         self.addCleanup(patcher.stop)
         return {djangomod: {'_get_django_admin': MagicMock(return_value=True)}}
@@ -92,7 +92,7 @@ class DjangomodCliCommandTestCase(TestCase, LoaderModuleMockMixin):
     Test cases for salt.modules.djangomod
     '''
     def setup_loader_modules(self):
-        patcher = patch('salt.utils.which', lambda exe: exe)
+        patcher = patch('salt.utils.path.which', lambda exe: exe)
         patcher.start()
         self.addCleanup(patcher.stop)
         return {djangomod: {}}
diff --git a/tests/unit/modules/test_environ.py b/tests/unit/modules/test_environ.py
index 9442942041..e1724e3527 100644
--- a/tests/unit/modules/test_environ.py
+++ b/tests/unit/modules/test_environ.py
@@ -52,7 +52,7 @@ class EnvironTestCase(TestCase, LoaderModuleMockMixin):
        with patch.dict(os.environ, {}), \
                patch.dict(environ.__salt__, {'reg.set_value': MagicMock(), 'reg.delete_value': MagicMock()}), \
-               patch('salt.utils.is_windows', MagicMock(return_value=True)):
+               patch('salt.utils.platform.is_windows', MagicMock(return_value=True)):
            environ.setval('key', 'Test', permanent=True)
            environ.__salt__['reg.set_value'].assert_called_with('HKCU', 'Environment', 'key', 'Test')
diff --git a/tests/unit/modules/test_esxdatacenter.py b/tests/unit/modules/test_esxdatacenter.py
new file mode 100644
index 0000000000..a6e0c12a49
--- /dev/null
+++ b/tests/unit/modules/test_esxdatacenter.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+'''
+    :codeauthor: :email:`Alexandru Bleotu `
+
+    Tests for functions in salt.modules.esxdatacenter
+'''
+
+# Import Python Libs
+from __future__ import absolute_import
+
+# Import Salt Libs
+import salt.modules.esxdatacenter as esxdatacenter
+
+# Import Salt Testing Libs
+from tests.support.mixins import LoaderModuleMockMixin
+from tests.support.unit import TestCase, skipIf
+from tests.support.mock import (
+    MagicMock,
+    patch,
+    NO_MOCK,
+    NO_MOCK_REASON
+)
+
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+class GetDetailsTestCase(TestCase, LoaderModuleMockMixin):
+    '''Tests for salt.modules.esxdatacenter.get_details'''
+    def setup_loader_modules(self):
+        return {esxdatacenter: {'__virtual__':
+                                MagicMock(return_value='esxdatacenter'),
+                                '__proxy__': {}}}
+
+    def test_get_details(self):
+        mock_get_details = MagicMock()
+        with patch.dict(esxdatacenter.__proxy__,
+                        {'esxdatacenter.get_details': mock_get_details}):
+            esxdatacenter.get_details()
+        mock_get_details.assert_called_once_with()
diff --git a/tests/unit/modules/test_file.py b/tests/unit/modules/test_file.py
index c8b0531720..eb440350c1 100644
--- a/tests/unit/modules/test_file.py
+++ b/tests/unit/modules/test_file.py
@@ -641,7 +641,7 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin):
    def test_patch(self):
        with patch('os.path.isdir', return_value=False) as mock_isdir, \
-               patch('salt.utils.which', return_value='/bin/patch') as mock_which:
+               patch('salt.utils.path.which', return_value='/bin/patch') as mock_which:
            cmd_mock = MagicMock(return_value='test_retval')
            with patch.dict(filemod.__salt__, {'cmd.run_all': cmd_mock}):
                ret = filemod.patch('/path/to/file', '/path/to/patch')
@@ -652,7 +652,7 @@ class FileModuleTestCase(TestCase,
LoaderModuleMockMixin): def test_patch_dry_run(self): with patch('os.path.isdir', return_value=False) as mock_isdir, \ - patch('salt.utils.which', return_value='/bin/patch') as mock_which: + patch('salt.utils.path.which', return_value='/bin/patch') as mock_which: cmd_mock = MagicMock(return_value='test_retval') with patch.dict(filemod.__salt__, {'cmd.run_all': cmd_mock}): ret = filemod.patch('/path/to/file', '/path/to/patch', dry_run=True) @@ -663,7 +663,7 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin): def test_patch_dir(self): with patch('os.path.isdir', return_value=True) as mock_isdir, \ - patch('salt.utils.which', return_value='/bin/patch') as mock_which: + patch('salt.utils.path.which', return_value='/bin/patch') as mock_which: cmd_mock = MagicMock(return_value='test_retval') with patch.dict(filemod.__salt__, {'cmd.run_all': cmd_mock}): ret = filemod.patch('/path/to/dir', '/path/to/patch') diff --git a/tests/unit/modules/test_genesis.py b/tests/unit/modules/test_genesis.py index af135a0409..3c7bd7abfe 100644 --- a/tests/unit/modules/test_genesis.py +++ b/tests/unit/modules/test_genesis.py @@ -65,7 +65,7 @@ class GenesisTestCase(TestCase, LoaderModuleMockMixin): ''' Test for Return which platforms are available ''' - with patch('salt.utils.which', MagicMock(return_value=False)): + with patch('salt.utils.path.which', MagicMock(return_value=False)): self.assertFalse(genesis.avail_platforms()['deb']) def test_pack(self): diff --git a/tests/unit/modules/test_grains.py b/tests/unit/modules/test_grains.py index 7eb4be077e..e9ae6c5116 100644 --- a/tests/unit/modules/test_grains.py +++ b/tests/unit/modules/test_grains.py @@ -19,7 +19,7 @@ from tests.support.mock import ( # Import Salt libs from salt.exceptions import SaltException import salt.modules.grains as grainsmod -from salt.utils import dictupdate +import salt.utils.dictupdate as dictupdate # Import 3rd-party libs from salt.utils.odict import OrderedDict diff --git a/tests/unit/modules/test_inspect_fsdb.py b/tests/unit/modules/test_inspect_fsdb.py index 65bfc64d13..a9d2999476 100644 --- a/tests/unit/modules/test_inspect_fsdb.py +++ b/tests/unit/modules/test_inspect_fsdb.py @@ -30,7 +30,7 @@ from salt.modules.inspectlib.fsdb import CsvDB from salt.modules.inspectlib.entities import CsvDBEntity from salt.utils.odict import OrderedDict -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import StringIO diff --git a/tests/unit/modules/test_junos.py b/tests/unit/modules/test_junos.py index 208e6fc30d..e8b1c6d99a 100644 --- a/tests/unit/modules/test_junos.py +++ b/tests/unit/modules/test_junos.py @@ -52,6 +52,7 @@ class Test_Junos_Module(TestCase, LoaderModuleMockMixin, XMLEqualityMixin): host='1.1.1.1', user='test', password='test123', + fact_style='old', gather_facts=False) self.dev.open() self.dev.timeout = 30 diff --git a/tests/unit/modules/test_launchctl.py b/tests/unit/modules/test_launchctl.py index 32d21fb6ef..108ba90c34 100644 --- a/tests/unit/modules/test_launchctl.py +++ b/tests/unit/modules/test_launchctl.py @@ -17,8 +17,9 @@ from tests.support.mock import ( ) # Import Salt Libs -import salt.ext.six as six +from salt.ext import six import salt.utils +import salt.utils.stringutils import salt.modules.launchctl as launchctl @@ -88,7 +89,7 @@ class LaunchctlTestCase(TestCase, LoaderModuleMockMixin): return_value={'plist': {'Label': 'A'}}): if six.PY3: - launchctl_data = salt.utils.to_bytes(launchctl_data) + launchctl_data = salt.utils.stringutils.to_bytes(launchctl_data) with 
patch.object(launchctl, '_get_launchctl_data', return_value=launchctl_data): self.assertTrue(launchctl.status('job_label')) diff --git a/tests/unit/modules/test_linux_acl.py b/tests/unit/modules/test_linux_acl.py index 9fc07569be..429f45190d 100644 --- a/tests/unit/modules/test_linux_acl.py +++ b/tests/unit/modules/test_linux_acl.py @@ -6,7 +6,7 @@ from __future__ import absolute_import # Import Salt Testing libs from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import skipIf, TestCase -from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch # Import salt libs import salt.modules.linux_acl as linux_acl @@ -84,63 +84,70 @@ class LinuxAclTestCase(TestCase, LoaderModuleMockMixin): def test_modfacl__u_w_single_arg(self): linux_acl.modfacl(*(self.u_acl + [self.file])) - self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.user_acl_cmd, self.quoted_file]), python_shell=False) + self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.user_acl_cmd, self.quoted_file]), python_shell=False, raise_err=False) def test_modfacl__u_w_multiple_args(self): linux_acl.modfacl(*(self.u_acl + self.files)) - self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.user_acl_cmd] + self.quoted_files), python_shell=False) + self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.user_acl_cmd] + self.quoted_files), python_shell=False, raise_err=False) def test_modfacl__user_w_single_arg(self): linux_acl.modfacl(*(self.user_acl + [self.file])) - self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.user_acl_cmd, self.quoted_file]), python_shell=False) + self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.user_acl_cmd, self.quoted_file]), python_shell=False, raise_err=False) def test_modfacl__user_w_multiple_args(self): linux_acl.modfacl(*(self.user_acl + self.files)) - self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.user_acl_cmd] + self.quoted_files), python_shell=False) + self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.user_acl_cmd] + self.quoted_files), python_shell=False, raise_err=False) def test_modfacl__g_w_single_arg(self): linux_acl.modfacl(*(self.g_acl + [self.file])) - self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.group_acl_cmd, self.quoted_file]), python_shell=False) + self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.group_acl_cmd, self.quoted_file]), python_shell=False, raise_err=False) def test_modfacl__g_w_multiple_args(self): linux_acl.modfacl(*(self.g_acl + self.files)) - self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.group_acl_cmd] + self.quoted_files), python_shell=False) + self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.group_acl_cmd] + self.quoted_files), python_shell=False, raise_err=False) def test_modfacl__group_w_single_arg(self): linux_acl.modfacl(*(self.group_acl + [self.file])) - self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.group_acl_cmd, self.quoted_file]), python_shell=False) + self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.group_acl_cmd, self.quoted_file]), python_shell=False, raise_err=False) def test_modfacl__group_w_multiple_args(self): linux_acl.modfacl(*(self.group_acl + self.files)) - self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.group_acl_cmd] + self.quoted_files), python_shell=False) + 
self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.group_acl_cmd] + self.quoted_files), python_shell=False, raise_err=False) def test_modfacl__d_u_w_single_arg(self): linux_acl.modfacl(*(self.d_u_acl + [self.file])) - self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.default_user_acl_cmd, self.quoted_file]), python_shell=False) + self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.default_user_acl_cmd, self.quoted_file]), python_shell=False, raise_err=False) def test_modfacl__d_u_w_multiple_args(self): linux_acl.modfacl(*(self.d_u_acl + self.files)) - self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.default_user_acl_cmd] + self.quoted_files), python_shell=False) + self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.default_user_acl_cmd] + self.quoted_files), python_shell=False, raise_err=False) def test_modfacl__d_user_w_single_arg(self): linux_acl.modfacl(*(self.d_user_acl + [self.file])) - self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.default_user_acl_cmd, self.quoted_file]), python_shell=False) + self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.default_user_acl_cmd, self.quoted_file]), python_shell=False, raise_err=False) def test_modfacl__d_user_w_multiple_args(self): linux_acl.modfacl(*(self.d_user_acl + self.files)) - self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.default_user_acl_cmd] + self.quoted_files), python_shell=False) + self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.default_user_acl_cmd] + self.quoted_files), python_shell=False, raise_err=False) def test_modfacl__default_user_w_single_arg(self): linux_acl.modfacl(*(self.default_user_acl + [self.file])) - self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.default_user_acl_cmd, self.quoted_file]), python_shell=False) + self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.default_user_acl_cmd, self.quoted_file]), python_shell=False, raise_err=False) def test_modfacl__default_user_w_multiple_args(self): linux_acl.modfacl(*(self.default_user_acl + self.files)) - self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.default_user_acl_cmd] + self.quoted_files), python_shell=False) + self.cmdrun.assert_called_once_with('setfacl -m ' + ' '.join([self.default_user_acl_cmd] + self.quoted_files), python_shell=False, raise_err=False) def test_modfacl__recursive_w_multiple_args(self): linux_acl.modfacl(*(self.user_acl + self.files), recursive=True) - self.cmdrun.assert_called_once_with('setfacl -R -m ' + ' '.join([self.user_acl_cmd] + self.quoted_files), python_shell=False) + self.cmdrun.assert_called_once_with('setfacl -R -m ' + ' '.join([self.user_acl_cmd] + self.quoted_files), python_shell=False, raise_err=False) + + def test_modfacl_raise_err(self): + mock = MagicMock(side_effect=CommandExecutionError('Custom err')) + with patch.dict(linux_acl.__salt__, {'cmd.run': mock}): + with self.assertRaises(CommandExecutionError) as excinfo: + linux_acl.modfacl(*(self.user_acl + self.files), raise_err=True) + self.assertEqual(excinfo.exception.strerror, 'Custom err') def test_delfacl_wo_args(self): for acl in [self.u_acl, self.user_acl, self.g_acl, self.group_acl]: diff --git a/tests/unit/modules/test_localemod.py b/tests/unit/modules/test_localemod.py index 0c9c9bed48..23c6bf7ea0 100644 --- a/tests/unit/modules/test_localemod.py +++ b/tests/unit/modules/test_localemod.py @@ -113,7 +113,7 @@ class LocalemodTestCase(TestCase, 
LoaderModuleMockMixin): Tests the return of gen_locale when the provided locale is not found ''' with patch.dict(localemod.__grains__, {'os': 'Debian'}), \ - patch('salt.utils.which', MagicMock(return_value='/some/dir/path')), \ + patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \ patch.dict(localemod.__salt__, {'file.search': MagicMock(return_value=False)}): self.assertFalse(localemod.gen_locale('foo')) @@ -124,7 +124,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): ''' ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337} with patch.dict(localemod.__grains__, {'os': 'Debian'}), \ - patch('salt.utils.which', MagicMock(return_value='/some/dir/path')), \ + patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \ patch.dict(localemod.__salt__, {'file.search': MagicMock(return_value=True), 'file.replace': MagicMock(return_value=True), @@ -146,7 +146,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337} with patch.dict(localemod.__grains__, {'os': 'Debian'}), \ - patch('salt.utils.which', MagicMock(return_value='/some/dir/path')), \ + patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \ patch.dict(localemod.__salt__, {'file.search': file_search, 'file.replace': MagicMock(return_value=True), @@ -163,7 +163,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): 'file.touch': MagicMock(return_value=None), 'file.append': MagicMock(return_value=None), 'cmd.run_all': MagicMock(return_value=ret)}), \ - patch('salt.utils.which', MagicMock(return_value='/some/dir/path')), \ + patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \ patch('os.listdir', MagicMock(return_value=['en_US'])), \ patch.dict(localemod.__grains__, {'os': 'Ubuntu'}): self.assertTrue(localemod.gen_locale('en_US.UTF-8')) @@ -174,7 +174,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): ''' ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337} with patch.dict(localemod.__grains__, {'os_family': 'Gentoo'}), \ - patch('salt.utils.which', MagicMock(return_value='/some/dir/path')), \ + patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \ patch('os.listdir', MagicMock(return_value=['en_US.UTF-8'])), \ patch.dict(localemod.__salt__, {'file.search': MagicMock(return_value=True), @@ -197,7 +197,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): ret = {'stdout': 'saltines', 'stderr': 'biscuits', 'retcode': 0, 'pid': 1337} with patch.dict(localemod.__grains__, {'os_family': 'Gentoo'}), \ - patch('salt.utils.which', MagicMock(return_value='/some/dir/path')), \ + patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \ patch('os.listdir', MagicMock(return_value=['en_US.UTF-8'])), \ patch.dict(localemod.__salt__, {'file.search': file_search, @@ -213,7 +213,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(localemod.__salt__, {'cmd.run_all': MagicMock(return_value=ret), 'file.replace': MagicMock()}), \ - patch('salt.utils.which', MagicMock(return_value='/some/dir/path')), \ + patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \ patch('os.listdir', MagicMock(return_value=['en_US'])): self.assertTrue(localemod.gen_locale('en_US.UTF-8')) @@ -225,7 +225,7 @@ class LocalemodTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(localemod.__salt__, {'cmd.run_all': MagicMock(return_value=ret), 
'file.replace': MagicMock()}), \ - patch('salt.utils.which', MagicMock(return_value='/some/dir/path')), \ + patch('salt.utils.path.which', MagicMock(return_value='/some/dir/path')), \ patch('os.listdir', MagicMock(return_value=['en_US'])): self.assertEqual(localemod.gen_locale('en_US.UTF-8', verbose=True), ret) diff --git a/tests/unit/modules/test_mdadm.py b/tests/unit/modules/test_mdadm.py index ae382e51df..ac62b67cfe 100644 --- a/tests/unit/modules/test_mdadm.py +++ b/tests/unit/modules/test_mdadm.py @@ -28,7 +28,7 @@ class MdadmTestCase(TestCase, LoaderModuleMockMixin): def test_create(self): mock = MagicMock(return_value='salt') with patch.dict(mdadm.__salt__, {'cmd.run': mock}), \ - patch('salt.utils.which', lambda exe: exe): + patch('salt.utils.path.which', lambda exe: exe): ret = mdadm.create( '/dev/md0', 5, devices=['/dev/sdb1', '/dev/sdc1', '/dev/sdd1'], diff --git a/tests/unit/modules/test_mod_random.py b/tests/unit/modules/test_mod_random.py index 12663d0507..1a80bd926d 100644 --- a/tests/unit/modules/test_mod_random.py +++ b/tests/unit/modules/test_mod_random.py @@ -21,7 +21,7 @@ import salt.utils.pycrypto from salt.exceptions import SaltInvocationError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six def _test_hashlib(): diff --git a/tests/unit/modules/test_mount.py b/tests/unit/modules/test_mount.py index 417c898959..ef7e6765e2 100644 --- a/tests/unit/modules/test_mount.py +++ b/tests/unit/modules/test_mount.py @@ -5,6 +5,7 @@ # Import Python libs from __future__ import absolute_import import os +import textwrap # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin @@ -18,8 +19,8 @@ from tests.support.mock import ( ) # Import Salt Libs -import salt.utils import salt.utils.files +import salt.utils.path from salt.exceptions import CommandExecutionError import salt.modules.mount as mount @@ -243,15 +244,30 @@ class MountTestCase(TestCase, LoaderModuleMockMixin): ''' Returns true if the command passed is a fuse mountable application ''' - with patch.object(salt.utils, 'which', return_value=None): + with patch.object(salt.utils.path, 'which', return_value=None): self.assertFalse(mount.is_fuse_exec('cmd')) - with patch.object(salt.utils, 'which', return_value=True): - self.assertFalse(mount.is_fuse_exec('cmd')) - - mock = MagicMock(side_effect=[1, 0]) - with patch.object(salt.utils, 'which', mock): - self.assertFalse(mount.is_fuse_exec('cmd')) + def _ldd_side_effect(cmd, *args, **kwargs): + ''' + Neither of these are full ldd output, but what is_fuse_exec is + looking for is 'libfuse' in the ldd output, so these examples + should be sufficient enough to test both the True and False cases. 
+ ''' + return { + 'ldd cmd1': textwrap.dedent('''\ + linux-vdso.so.1 (0x00007ffeaf5fb000) + libfuse3.so.3 => /usr/lib/libfuse3.so.3 (0x00007f91e66ac000) + '''), + 'ldd cmd2': textwrap.dedent('''\ + linux-vdso.so.1 (0x00007ffeaf5fb000) + ''') + }[cmd] + which_mock = MagicMock(side_effect=lambda x: x) + ldd_mock = MagicMock(side_effect=_ldd_side_effect) + with patch.object(salt.utils.path, 'which', which_mock): + with patch.dict(mount.__salt__, {'cmd.run': _ldd_side_effect}): + self.assertTrue(mount.is_fuse_exec('cmd1')) + self.assertFalse(mount.is_fuse_exec('cmd2')) def test_swaps(self): ''' diff --git a/tests/unit/modules/test_network.py b/tests/unit/modules/test_network.py index df58bf0347..9a190fab45 100644 --- a/tests/unit/modules/test_network.py +++ b/tests/unit/modules/test_network.py @@ -20,8 +20,9 @@ from tests.support.mock import ( ) # Import Salt Libs -import salt.ext.six as six -import salt.utils +from salt.ext import six +import salt.utils.network +import salt.utils.path import salt.modules.network as network from salt.exceptions import CommandExecutionError if six.PY2: @@ -118,7 +119,7 @@ class NetworkTestCase(TestCase, LoaderModuleMockMixin): ''' Test for Performs a traceroute to a 3rd party host ''' - with patch.object(salt.utils, 'which', side_effect=[False, True]): + with patch.object(salt.utils.path, 'which', side_effect=[False, True]): self.assertListEqual(network.traceroute('host'), []) with patch.object(salt.utils.network, 'sanitize_host', @@ -131,7 +132,7 @@ class NetworkTestCase(TestCase, LoaderModuleMockMixin): ''' Test for Performs a DNS lookup with dig ''' - with patch('salt.utils.which', MagicMock(return_value='dig')), \ + with patch('salt.utils.path.which', MagicMock(return_value='dig')), \ patch.object(salt.utils.network, 'sanitize_host', return_value='A'), \ patch.dict(network.__salt__, {'cmd.run': @@ -145,7 +146,7 @@ class NetworkTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(network.__salt__, {'cmd.run': MagicMock(return_value='A,B,C,D\nE,F,G,H\n')}), \ - patch('salt.utils.which', MagicMock(return_value='')): + patch('salt.utils.path.which', MagicMock(return_value='')): self.assertDictEqual(network.arp(), {}) def test_interfaces(self): @@ -230,7 +231,7 @@ class NetworkTestCase(TestCase, LoaderModuleMockMixin): ''' self.assertFalse(network.mod_hostname(None)) - with patch.object(salt.utils, 'which', return_value='hostname'): + with patch.object(salt.utils.path, 'which', return_value='hostname'): with patch.dict(network.__salt__, {'cmd.run': MagicMock(return_value=None)}): file_d = '\n'.join(['#', 'A B C D,E,F G H']) diff --git a/tests/unit/modules/test_nginx.py b/tests/unit/modules/test_nginx.py index daf3590a60..c28147cc25 100644 --- a/tests/unit/modules/test_nginx.py +++ b/tests/unit/modules/test_nginx.py @@ -30,7 +30,7 @@ class MockUrllibStatus(object): class NginxTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): - patcher = patch('salt.utils.which', Mock(return_value='/usr/bin/nginx')) + patcher = patch('salt.utils.path.which', Mock(return_value='/usr/bin/nginx')) patcher.start() self.addCleanup(patcher.stop) return {nginx: {'_urlopen': Mock(return_value=MockUrllibStatus())}} diff --git a/tests/unit/modules/test_openscap.py b/tests/unit/modules/test_openscap.py index 5f393bddd2..0bb330c804 100644 --- a/tests/unit/modules/test_openscap.py +++ b/tests/unit/modules/test_openscap.py @@ -18,7 +18,7 @@ from tests.support.mock import ( ) # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six 
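[editor's note] The is_fuse_exec rewrite in the test_mount.py hunks further up replaces bare call-count assertions with canned ldd output selected by a side_effect callable keyed on the exact command string. A self-contained sketch of that side_effect technique (uses_libfuse and run_cmd are invented names for illustration, not Salt APIs):

from unittest import TestCase
from unittest.mock import MagicMock


def uses_libfuse(cmd, run_cmd):
    # invented stand-in for mount.is_fuse_exec: run_cmd plays the role of
    # __salt__['cmd.run'] and returns the ldd output for 'ldd <cmd>'
    return 'libfuse' in run_cmd('ldd {0}'.format(cmd))


class UsesLibfuseTestCase(TestCase):
    def test_detects_fuse_linkage(self):
        canned = {
            'ldd cmd1': 'libfuse3.so.3 => /usr/lib/libfuse3.so.3 (0x00007f91e66ac000)\n',
            'ldd cmd2': 'linux-vdso.so.1 (0x00007ffeaf5fb000)\n',
        }
        # a dict-backed side_effect lets one mock answer differently per command
        run_mock = MagicMock(side_effect=lambda c: canned[c])
        self.assertTrue(uses_libfuse('cmd1', run_mock))
        self.assertFalse(uses_libfuse('cmd2', run_mock))
        run_mock.assert_any_call('ldd cmd1')

Keying the canned output on the full command string also makes the test fail loudly with a KeyError if the code under test ever builds a different ldd invocation.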
@skipIf(NO_MOCK, NO_MOCK_REASON) diff --git a/tests/unit/modules/test_openstack_config.py b/tests/unit/modules/test_openstack_config.py index 493f11e97a..c14a5513c3 100644 --- a/tests/unit/modules/test_openstack_config.py +++ b/tests/unit/modules/test_openstack_config.py @@ -27,7 +27,7 @@ class OpenstackConfigTestCase(TestCase, LoaderModuleMockMixin): Test cases for salt.modules.openstack_config ''' def setup_loader_modules(self): - patcher = patch('salt.utils.which', MagicMock(return_value=True)) + patcher = patch('salt.utils.path.which', MagicMock(return_value=True)) patcher.start() self.addCleanup(patcher.stop) return {openstack_config: {}} diff --git a/tests/unit/modules/test_parallels.py b/tests/unit/modules/test_parallels.py index 369cdf9d61..302a7e8930 100644 --- a/tests/unit/modules/test_parallels.py +++ b/tests/unit/modules/test_parallels.py @@ -14,7 +14,7 @@ from tests.support.unit import TestCase, skipIf from tests.support.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON # Import third party libs -import salt.ext.six as six +from salt.ext import six @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -33,7 +33,7 @@ class ParallelsTestCase(TestCase, LoaderModuleMockMixin): mock_false = MagicMock(return_value=False) # Validate false return - with patch('salt.utils.which', mock_false): + with patch('salt.utils.path.which', mock_false): ret = parallels.__virtual__() self.assertTrue(isinstance(ret, tuple)) self.assertEqual(len(ret), 2) @@ -41,7 +41,7 @@ class ParallelsTestCase(TestCase, LoaderModuleMockMixin): self.assertTrue(isinstance(ret[1], six.string_types)) # Validate true return - with patch('salt.utils.which', mock_true): + with patch('salt.utils.path.which', mock_true): ret = parallels.__virtual__() self.assertTrue(ret) self.assertEqual(ret, 'parallels') diff --git a/tests/unit/modules/test_parted.py b/tests/unit/modules/test_parted.py index 991c6787a2..1e7f89cc88 100644 --- a/tests/unit/modules/test_parted.py +++ b/tests/unit/modules/test_parted.py @@ -16,7 +16,7 @@ from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import skipIf, TestCase from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch -# Import salt libs +# Import Salt libs from salt.exceptions import CommandExecutionError import salt.modules.parted as parted @@ -42,28 +42,28 @@ class PartedTestCase(TestCase, LoaderModuleMockMixin): def test_virtual_bails_on_windows(self): '''If running windows, __virtual__ shouldn't register module''' - with patch('salt.utils.is_windows', lambda: True): + with patch('salt.utils.platform.is_windows', lambda: True): ret = parted.__virtual__() err = (False, 'The parted execution module failed to load Windows systems are not supported.') self.assertEqual(err, ret) def test_virtual_bails_without_parted(self): '''If parted not in PATH, __virtual__ shouldn't register module''' - with patch('salt.utils.which', lambda exe: not exe == "parted"): + with patch('salt.utils.path.which', lambda exe: not exe == "parted"): ret = parted.__virtual__() err = (False, 'The parted execution module failed to load parted binary is not in the path.') self.assertEqual(err, ret) def test_virtual_bails_without_lsblk(self): '''If lsblk not in PATH, __virtual__ shouldn't register module''' - with patch('salt.utils.which', lambda exe: not exe == "lsblk"): + with patch('salt.utils.path.which', lambda exe: not exe == "lsblk"): ret = parted.__virtual__() err = (False, 'The parted execution module failed to load lsblk binary is not in the path.') self.assertEqual(err, ret) def 
test_virtual_bails_without_partprobe(self): '''If partprobe not in PATH, __virtual__ shouldn't register module''' - with patch('salt.utils.which', lambda exe: not exe == "partprobe"): + with patch('salt.utils.path.which', lambda exe: not exe == "partprobe"): ret = parted.__virtual__() err = (False, 'The parted execution module failed to load partprobe binary is not in the path.') self.assertEqual(err, ret) @@ -71,8 +71,8 @@ class PartedTestCase(TestCase, LoaderModuleMockMixin): def test_virtual(self): '''On expected platform with correct utils in PATH, register "partition" module''' - with patch('salt.utils.is_windows', lambda: False), \ - patch('salt.utils.which', lambda exe: exe in ('parted', 'lsblk', 'partprobe')): + with patch('salt.utils.platform.is_windows', lambda: False), \ + patch('salt.utils.path.which', lambda exe: exe in ('parted', 'lsblk', 'partprobe')): ret = parted.__virtual__() expect = 'partition' self.assertEqual(ret, expect) diff --git a/tests/unit/modules/test_pillar.py b/tests/unit/modules/test_pillar.py index 85225f05b7..f952e375d0 100644 --- a/tests/unit/modules/test_pillar.py +++ b/tests/unit/modules/test_pillar.py @@ -14,7 +14,7 @@ from tests.support.mock import ( ) # Import Salt libs -import salt.ext.six as six +from salt.ext import six from salt.utils.odict import OrderedDict import salt.modules.pillar as pillarmod diff --git a/tests/unit/modules/test_pip.py b/tests/unit/modules/test_pip.py index 88b8f1a23e..9829f3b279 100644 --- a/tests/unit/modules/test_pip.py +++ b/tests/unit/modules/test_pip.py @@ -10,6 +10,7 @@ from tests.support.unit import skipIf, TestCase from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch # Import salt libs +import salt.utils import salt.modules.pip as pip from salt.exceptions import CommandExecutionError @@ -289,17 +290,23 @@ class PipTestCase(TestCase, LoaderModuleMockMixin): mock_path.isdir.return_value = True pkg = 'mock' - venv_path = '/test_env' def join(*args): - return '/'.join(args) + return os.sep.join(args) + mock_path.join = join mock = MagicMock(return_value={'retcode': 0, 'stdout': ''}) with patch.dict(pip.__salt__, {'cmd.run_all': mock}): + if salt.utils.is_windows(): + venv_path = 'c:\\test_env' + bin_path = os.path.join(venv_path, 'Scripts', 'pip.exe').encode('string-escape') + else: + venv_path = '/test_env' + bin_path = os.path.join(venv_path, 'bin', 'pip') pip.install(pkg, bin_env=venv_path) mock.assert_called_once_with( - [os.path.join(venv_path, 'bin', 'pip'), 'install', pkg], - env={'VIRTUAL_ENV': '/test_env'}, + [bin_path, 'install', pkg], + env={'VIRTUAL_ENV': venv_path}, saltenv='base', runas=None, use_vt=False, diff --git a/tests/unit/modules/test_pkg_resource.py b/tests/unit/modules/test_pkg_resource.py index 2e51fab6f5..e00d8cda77 100644 --- a/tests/unit/modules/test_pkg_resource.py +++ b/tests/unit/modules/test_pkg_resource.py @@ -20,7 +20,7 @@ from tests.support.mock import ( # Import Salt Libs import salt.utils import salt.modules.pkg_resource as pkg_resource -import salt.ext.six as six +from salt.ext import six @skipIf(NO_MOCK, NO_MOCK_REASON) diff --git a/tests/unit/modules/test_postgres.py b/tests/unit/modules/test_postgres.py index 326c2f1a68..c54f827903 100644 --- a/tests/unit/modules/test_postgres.py +++ b/tests/unit/modules/test_postgres.py @@ -53,7 +53,7 @@ test_privileges_list_group_csv = ( @skipIf(NO_MOCK, NO_MOCK_REASON) class PostgresTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): - patcher = patch('salt.utils.which', 
Mock(return_value='/usr/bin/pgsql')) + patcher = patch('salt.utils.path.which', Mock(return_value='/usr/bin/pgsql')) patcher.start() self.addCleanup(patcher.stop) return { diff --git a/tests/unit/modules/test_puppet.py b/tests/unit/modules/test_puppet.py index 190f305016..5a23ba7744 100644 --- a/tests/unit/modules/test_puppet.py +++ b/tests/unit/modules/test_puppet.py @@ -19,7 +19,7 @@ from tests.support.mock import ( ) # Import Salt Libs -import salt.utils +import salt.utils.args import salt.utils.files import salt.modules.puppet as puppet from salt.exceptions import CommandExecutionError @@ -38,7 +38,7 @@ class PuppetTestCase(TestCase, LoaderModuleMockMixin): Test to execute a puppet run ''' mock = MagicMock(return_value={"A": "B"}) - with patch.object(salt.utils, 'clean_kwargs', mock): + with patch.object(salt.utils.args, 'clean_kwargs', mock): mock = MagicMock(return_value={'retcode': 0}) mock_lst = MagicMock(return_value=[]) with patch.dict(puppet.__salt__, {'cmd.run_all': mock, diff --git a/tests/unit/modules/test_reg_win.py b/tests/unit/modules/test_reg_win.py index 960c00e5d0..e83d89b127 100644 --- a/tests/unit/modules/test_reg_win.py +++ b/tests/unit/modules/test_reg_win.py @@ -27,7 +27,7 @@ except ImportError: PY2 = sys.version_info[0] == 2 # The following used to make sure we are not # testing already existing data -# Note strftime retunrns a str, so we need to make it unicode +# Note strftime returns a str, so we need to make it unicode TIMEINT = int(time.time()) if PY2: diff --git a/tests/unit/modules/test_scsi.py b/tests/unit/modules/test_scsi.py index b382720055..dd327780f5 100644 --- a/tests/unit/modules/test_scsi.py +++ b/tests/unit/modules/test_scsi.py @@ -19,7 +19,7 @@ from tests.support.mock import ( # Import Salt Libs import salt.modules.scsi as scsi -import salt.utils +import salt.utils.path @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -60,7 +60,7 @@ class ScsiTestCase(TestCase, LoaderModuleMockMixin): result_size['[0:0:0:0]']['size'] = '1.20TB' mock = MagicMock(return_value='/usr/bin/lsscsi') - with patch.object(salt.utils, 'which', mock): + with patch.object(salt.utils.path, 'which', mock): # get_size = True cmd_mock = MagicMock(return_value=lsscsi_size) @@ -77,7 +77,7 @@ class ScsiTestCase(TestCase, LoaderModuleMockMixin): self.assertDictEqual(scsi.ls_(get_size=False), result) mock = MagicMock(return_value=None) - with patch.object(salt.utils, 'which', mock): + with patch.object(salt.utils.path, 'which', mock): self.assertEqual(scsi.ls_(), 'scsi.ls not available - lsscsi command not found') def test_rescan_all(self): diff --git a/tests/unit/modules/test_shadow.py b/tests/unit/modules/test_shadow.py index 0e2c98a3ea..e152d59b9a 100644 --- a/tests/unit/modules/test_shadow.py +++ b/tests/unit/modules/test_shadow.py @@ -7,7 +7,7 @@ from __future__ import absolute_import # Import Salt Testing libs -from salt.utils import is_linux +import salt.utils.platform from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf @@ -19,7 +19,7 @@ except ImportError: HAS_SHADOW = False # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six _PASSWORD = 'lamepassword' @@ -41,7 +41,7 @@ _HASHES = dict( ) -@skipIf(not is_linux(), 'minion is not Linux') +@skipIf(not salt.utils.platform.is_linux(), 'minion is not Linux') class LinuxShadowTest(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): diff --git a/tests/unit/modules/test_snapper.py b/tests/unit/modules/test_snapper.py index 289d7dd1a3..18fded6f73 100644 --- 
a/tests/unit/modules/test_snapper.py +++ b/tests/unit/modules/test_snapper.py @@ -22,7 +22,7 @@ from tests.support.mock import ( ) # Import Salt libs -import salt.ext.six as six +from salt.ext import six from salt.exceptions import CommandExecutionError import salt.modules.snapper as snapper diff --git a/tests/unit/modules/test_ssh.py b/tests/unit/modules/test_ssh.py index 2353232df1..38aa23fbec 100644 --- a/tests/unit/modules/test_ssh.py +++ b/tests/unit/modules/test_ssh.py @@ -15,8 +15,8 @@ from tests.support.mock import ( ) # Import Salt Libs -import salt.utils import salt.utils.files +import salt.utils.platform import salt.modules.ssh as ssh from salt.exceptions import CommandExecutionError @@ -94,7 +94,7 @@ class SSHAuthKeyTestCase(TestCase, LoaderModuleMockMixin): comment_line = '# this is a comment \n' # Write out the authorized key to a temporary file - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): temp_file = tempfile.NamedTemporaryFile(delete=False) else: temp_file = tempfile.NamedTemporaryFile(delete=False, mode='w+') diff --git a/tests/unit/modules/test_state.py b/tests/unit/modules/test_state.py index 0b99657768..26162a8705 100644 --- a/tests/unit/modules/test_state.py +++ b/tests/unit/modules/test_state.py @@ -21,6 +21,7 @@ from tests.support.mock import ( # Import Salt Libs import salt.utils import salt.utils.odict +import salt.utils.platform import salt.modules.state as state from salt.exceptions import SaltInvocationError @@ -824,7 +825,7 @@ class StateTestCase(TestCase, LoaderModuleMockMixin): with patch.object(state, 'check_request', mock): mock = MagicMock(return_value=True) with patch.object(os, 'umask', mock): - with patch.object(salt.utils, 'is_windows', mock): + with patch.object(salt.utils.platform, 'is_windows', mock): with patch.dict(state.__salt__, {'cmd.run': mock}): with patch('salt.utils.files.fopen', mock_open()): mock = MagicMock(return_value=True) @@ -935,7 +936,7 @@ class StateTestCase(TestCase, LoaderModuleMockMixin): with patch.object(os.path, 'join', mock): with patch.object(os, 'umask', mock): mock = MagicMock(return_value=False) - with patch.object(salt.utils, 'is_windows', mock): + with patch.object(salt.utils.platform, 'is_windows', mock): mock = MagicMock(return_value=True) with patch.object(os, 'umask', mock): with patch.object(state, '_set_retcode', mock): diff --git a/tests/unit/modules/test_status.py b/tests/unit/modules/test_status.py index c7e2b58664..832ca427bb 100644 --- a/tests/unit/modules/test_status.py +++ b/tests/unit/modules/test_status.py @@ -4,7 +4,7 @@ from __future__ import absolute_import # Import Salt Libs -import salt.utils +import salt.utils.platform import salt.modules.status as status from salt.exceptions import CommandExecutionError @@ -72,7 +72,7 @@ class StatusTestCase(TestCase, LoaderModuleMockMixin): ''' m = self._set_up_test_uptime() - with patch.multiple(salt.utils, + with patch.multiple(salt.utils.platform, is_linux=MagicMock(return_value=True), is_sunos=MagicMock(return_value=False), is_darwin=MagicMock(return_value=False), @@ -97,7 +97,7 @@ class StatusTestCase(TestCase, LoaderModuleMockMixin): ''' m = self._set_up_test_uptime() m2 = self._set_up_test_uptime_sunos() - with patch.multiple(salt.utils, + with patch.multiple(salt.utils.platform, is_linux=MagicMock(return_value=False), is_sunos=MagicMock(return_value=True), is_darwin=MagicMock(return_value=False), @@ -119,7 +119,7 @@ class StatusTestCase(TestCase, LoaderModuleMockMixin): kern_boottime = ('{{ sec = {0}, usec = {1:0<6} }} Mon Oct 03 
03:09:18.23 2016' ''.format(*str(m.now - m.ut).split('.'))) - with patch.multiple(salt.utils, + with patch.multiple(salt.utils.platform, is_linux=MagicMock(return_value=False), is_sunos=MagicMock(return_value=False), is_darwin=MagicMock(return_value=True), @@ -140,7 +140,7 @@ class StatusTestCase(TestCase, LoaderModuleMockMixin): ''' Test modules.status.uptime function for other platforms ''' - with patch.multiple(salt.utils, + with patch.multiple(salt.utils.platform, is_linux=MagicMock(return_value=False), is_sunos=MagicMock(return_value=False), is_darwin=MagicMock(return_value=False), diff --git a/tests/unit/modules/test_syslog_ng.py b/tests/unit/modules/test_syslog_ng.py index 7a53dc5b09..27f8550204 100644 --- a/tests/unit/modules/test_syslog_ng.py +++ b/tests/unit/modules/test_syslog_ng.py @@ -13,7 +13,7 @@ from tests.support.unit import skipIf, TestCase from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch # Import Salt libs -import salt +import salt.utils.path import salt.modules.syslog_ng as syslog_ng _VERSION = "3.6.0alpha0" @@ -261,7 +261,7 @@ class SyslogNGTestCase(TestCase, LoaderModuleMockMixin): function_args = {} installed = True - if not salt.utils.which("syslog-ng"): + if not salt.utils.path.which("syslog-ng"): installed = False if "syslog-ng-ctl" in mock_function_args: expected_output = _SYSLOG_NG_CTL_NOT_INSTALLED_RETURN_VALUE diff --git a/tests/unit/modules/test_timezone.py b/tests/unit/modules/test_timezone.py index 37a97297de..e33476f3d8 100644 --- a/tests/unit/modules/test_timezone.py +++ b/tests/unit/modules/test_timezone.py @@ -19,8 +19,10 @@ from tests.support.mock import ( # Import Salt Libs from salt.exceptions import CommandExecutionError, SaltInvocationError import salt.modules.timezone as timezone -import salt.ext.six as six +from salt.ext import six import salt.utils +import salt.utils.platform +import salt.utils.stringutils GET_ZONE_FILE = 'salt.modules.timezone._get_zone_file' GET_ETC_LOCALTIME_PATH = 'salt.modules.timezone._get_etc_localtime_path' @@ -77,7 +79,7 @@ class TimezoneTestCase(TestCase, LoaderModuleMockMixin): def create_tempfile_with_contents(self, contents): temp = NamedTemporaryFile(delete=False) if six.PY3: - temp.write(salt.utils.to_bytes(contents)) + temp.write(salt.utils.stringutils.to_bytes(contents)) else: temp.write(contents) temp.close() @@ -98,7 +100,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): 'cmd.run': MagicMock(), 'cmd.retcode': MagicMock(return_value=0)}}} - @patch('salt.utils.which', MagicMock(return_value=False)) + @patch('salt.utils.path.which', MagicMock(return_value=False)) def test_get_zone_centos(self): ''' Test CentOS is recognized @@ -108,7 +110,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.modules.timezone._get_zone_etc_localtime', MagicMock(return_value=self.TEST_TZ)): assert timezone.get_zone() == self.TEST_TZ - @patch('salt.utils.which', MagicMock(return_value=False)) + @patch('salt.utils.path.which', MagicMock(return_value=False)) def test_get_zone_os_family_rh_suse(self): ''' Test RedHat and Suse are recognized @@ -119,7 +121,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.modules.timezone._get_zone_sysconfig', MagicMock(return_value=self.TEST_TZ)): assert timezone.get_zone() == self.TEST_TZ - @patch('salt.utils.which', MagicMock(return_value=False)) + @patch('salt.utils.path.which', MagicMock(return_value=False)) def test_get_zone_os_family_debian_gentoo(self): ''' Test Debian and Gentoo are 
recognized @@ -130,7 +132,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.modules.timezone._get_zone_etc_timezone', MagicMock(return_value=self.TEST_TZ)): assert timezone.get_zone() == self.TEST_TZ - @patch('salt.utils.which', MagicMock(return_value=False)) + @patch('salt.utils.path.which', MagicMock(return_value=False)) def test_get_zone_os_family_allbsd_nilinuxrt(self): ''' Test *BSD and NILinuxRT are recognized @@ -141,7 +143,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.modules.timezone._get_zone_etc_localtime', MagicMock(return_value=self.TEST_TZ)): assert timezone.get_zone() == self.TEST_TZ - @patch('salt.utils.which', MagicMock(return_value=False)) + @patch('salt.utils.path.which', MagicMock(return_value=False)) def test_get_zone_os_family_slowlaris(self): ''' Test Slowlaris is recognized @@ -151,7 +153,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.modules.timezone._get_zone_solaris', MagicMock(return_value=self.TEST_TZ)): assert timezone.get_zone() == self.TEST_TZ - @patch('salt.utils.which', MagicMock(return_value=False)) + @patch('salt.utils.path.which', MagicMock(return_value=False)) def test_get_zone_os_family_aix(self): ''' Test IBM AIX is recognized @@ -161,7 +163,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.modules.timezone._get_zone_aix', MagicMock(return_value=self.TEST_TZ)): assert timezone.get_zone() == self.TEST_TZ - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -175,7 +178,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0] assert args == ('/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="UTC"') - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -189,7 +193,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0] assert args == ('/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="UTC"') - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -207,7 +212,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs = _fopen.return_value.__enter__.return_value.write.mock_calls[0] assert args == ('UTC',) - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -225,7 +231,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs =
_fopen.return_value.__enter__.return_value.write.mock_calls[0] assert args == ('UTC',) - @patch('salt.utils.which', MagicMock(return_value=True)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=True)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -239,7 +246,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.modules.timezone._timedatectl', MagicMock(return_value={'stdout': 'rtc in local tz:yes'})): assert timezone.get_hwclock() == 'localtime' - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -254,7 +262,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): assert args == (['tail', '-n', '1', '/etc/adjtime'],) assert kwarg == {'python_shell': False} - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -274,7 +283,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): Test get hwclock on Debian :return: ''' - with patch('salt.utils.which', MagicMock(return_value=False)): + with patch('salt.utils.path.which', MagicMock(return_value=False)): with patch('os.path.exists', MagicMock(return_value=True)): with patch('os.unlink', MagicMock()): with patch('os.symlink', MagicMock()): @@ -284,7 +293,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): assert args == (['tail', '-n', '1', '/etc/adjtime'],) assert kwarg == {'python_shell': False} - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -299,7 +309,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.utils.files.fopen', mock_open()): assert timezone.get_hwclock() == 'localtime' - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -315,7 +326,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(timezone.__grains__, {'os_family': ['AIX']}): assert timezone.get_hwclock() == hwclock - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -330,7 +342,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): assert timezone.set_hwclock('forty two') assert 
timezone.set_hwclock('UTC') - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -349,7 +362,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): assert args == (['rtc', '-z', 'GMT'],) assert kwargs == {'python_shell': False} - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -365,7 +379,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): assert args == (['timezonectl', 'set-local-rtc', 'false'],) assert kwargs == {'python_shell': False} - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -380,7 +395,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0] assert args == ('/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="TEST_TIMEZONE"') - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -395,7 +411,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0] assert args == ('/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="TEST_TIMEZONE"') - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) @@ -414,7 +431,8 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[1] assert args == ('/etc/default/rcS', '^UTC=.*', 'UTC=no') - @patch('salt.utils.which', MagicMock(return_value=False)) + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') + @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) @patch('os.symlink', MagicMock()) diff --git a/tests/unit/modules/test_twilio_notify.py b/tests/unit/modules/test_twilio_notify.py index 6aaaaae573..370b45d690 100644 --- a/tests/unit/modules/test_twilio_notify.py +++ b/tests/unit/modules/test_twilio_notify.py @@ -19,6 +19,17 @@ from tests.support.mock import ( # Import Salt Libs import salt.modules.twilio_notify as twilio_notify +HAS_LIBS = False +try: + import twilio + if int(twilio.__version__.split('.')[0]) > 5: + TWILIO_5 = False + else: + TWILIO_5 = True + HAS_LIBS = True +except ImportError: + pass + class
MockTwilioRestException(Exception): ''' @@ -75,9 +86,13 @@ class MockTwilioRestClient(object): Mock TwilioRestClient class ''' def __init__(self): - self.sms = MockSMS() + if TWILIO_5: + self.sms = MockSMS() + else: + self.messages = MockMessages() +@skipIf(not HAS_LIBS, 'twilio.rest is not available') @skipIf(NO_MOCK, NO_MOCK_REASON) class TwilioNotifyTestCase(TestCase, LoaderModuleMockMixin): ''' diff --git a/tests/unit/modules/test_useradd.py b/tests/unit/modules/test_useradd.py index 2bcbee38d1..7fd457425f 100644 --- a/tests/unit/modules/test_useradd.py +++ b/tests/unit/modules/test_useradd.py @@ -72,6 +72,7 @@ class UserAddTestCase(TestCase, LoaderModuleMockMixin): # 'getent' function tests: 2 + @skipIf(HAS_PWD is False, 'The pwd module is not available') def test_getent(self): ''' Test if user.getent already have a value @@ -359,6 +360,7 @@ class UserAddTestCase(TestCase, LoaderModuleMockMixin): # 'list_users' function tests: 1 + @skipIf(HAS_PWD is False, 'The pwd module is not available') def test_list_users(self): ''' Test if it returns a list of all users diff --git a/tests/unit/modules/test_uwsgi.py b/tests/unit/modules/test_uwsgi.py index 1957bd9728..f22cc5975e 100644 --- a/tests/unit/modules/test_uwsgi.py +++ b/tests/unit/modules/test_uwsgi.py @@ -16,7 +16,7 @@ import salt.modules.uwsgi as uwsgi class UwsgiTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): - patcher = patch('salt.utils.which', Mock(return_value='/usr/bin/uwsgi')) + patcher = patch('salt.utils.path.which', Mock(return_value='/usr/bin/uwsgi')) patcher.start() self.addCleanup(patcher.stop) return {uwsgi: {}} diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py index f616bb9391..52e7f27e0b 100644 --- a/tests/unit/modules/test_virt.py +++ b/tests/unit/modules/test_virt.py @@ -17,7 +17,7 @@ import salt.utils # Import third party libs import yaml -import salt.ext.six as six +from salt.ext import six @skipIf(NO_MOCK, NO_MOCK_REASON) diff --git a/tests/unit/modules/test_virtualenv.py b/tests/unit/modules/test_virtualenv.py index da49531dd9..25f511ef1b 100644 --- a/tests/unit/modules/test_virtualenv.py +++ b/tests/unit/modules/test_virtualenv.py @@ -28,7 +28,7 @@ class VirtualenvTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): base_virtualenv_mock = MagicMock() base_virtualenv_mock.__version__ = '1.9.1' - patcher = patch('salt.utils.which', lambda exe: exe) + patcher = patch('salt.utils.path.which', lambda exe: exe) patcher.start() self.addCleanup(patcher.stop) return { diff --git a/tests/unit/modules/test_vsphere.py b/tests/unit/modules/test_vsphere.py index fd8ab4abc8..e7ac51dfa0 100644 --- a/tests/unit/modules/test_vsphere.py +++ b/tests/unit/modules/test_vsphere.py @@ -619,6 +619,14 @@ class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin): 'mechanism': 'fake_mechanism', 'principal': 'fake_principal', 'domain': 'fake_domain'} + self.esxdatacenter_details = {'vcenter': 'fake_vcenter', + 'username': 'fake_username', + 'password': 'fake_password', + 'protocol': 'fake_protocol', + 'port': 'fake_port', + 'mechanism': 'fake_mechanism', + 'principal': 'fake_principal', + 'domain': 'fake_domain'} def tearDown(self): for attrname in ('esxi_host_details', 'esxi_vcenter_details'): @@ -638,6 +646,14 @@ class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin): 'fake_protocol', 'fake_port', 'fake_mechanism', 'fake_principal', 'fake_domain'), ret) + def test_esxdatacenter_proxy_details(self): + with 
patch('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value='esxdatacenter')): + with patch.dict(vsphere.__salt__, + {'esxdatacenter.get_details': MagicMock( + return_value=self.esxdatacenter_details)}): + ret = vsphere._get_proxy_connection_details() + def test_esxi_proxy_vcenter_details(self): with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value='esxi')): @@ -847,7 +863,7 @@ class GetServiceInstanceViaProxyTestCase(TestCase, LoaderModuleMockMixin): } def test_supported_proxes(self): - supported_proxies = ['esxi'] + supported_proxies = ['esxi', 'esxdatacenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -890,7 +906,7 @@ class DisconnectTestCase(TestCase, LoaderModuleMockMixin): } def test_supported_proxes(self): - supported_proxies = ['esxi'] + supported_proxies = ['esxi', 'esxdatacenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -931,7 +947,7 @@ class TestVcenterConnectionTestCase(TestCase, LoaderModuleMockMixin): } def test_supported_proxes(self): - supported_proxies = ['esxi'] + supported_proxies = ['esxi', 'esxdatacenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): diff --git a/tests/unit/modules/test_win_status.py b/tests/unit/modules/test_win_status.py index df317323ab..be46f428ba 100644 --- a/tests/unit/modules/test_win_status.py +++ b/tests/unit/modules/test_win_status.py @@ -6,7 +6,7 @@ import sys import types # Import Salt libs -import salt.ext.six as six +from salt.ext import six # Import Salt Testing libs from tests.support.unit import skipIf, TestCase diff --git a/tests/unit/modules/test_win_system.py b/tests/unit/modules/test_win_system.py index 3506a4ddf6..8b8a05356f 100644 --- a/tests/unit/modules/test_win_system.py +++ b/tests/unit/modules/test_win_system.py @@ -67,30 +67,31 @@ class WinSystemTestCase(TestCase, LoaderModuleMockMixin): ''' Test to reboot the system ''' - mock = MagicMock(return_value='salt') - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertEqual(win_system.reboot(), 'salt') - mock.assert_called_once_with(['shutdown', '/r', '/t', '300'], python_shell=False) + with patch('salt.modules.win_system.shutdown', + MagicMock(return_value=True)) as shutdown: + self.assertEqual(win_system.reboot(), True) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') def test_reboot_with_timeout_in_minutes(self): ''' Test to reboot the system with a timeout ''' - mock = MagicMock(return_value='salt') - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertEqual(win_system.reboot(5, in_seconds=False), 'salt') - mock.assert_called_once_with(['shutdown', '/r', '/t', '300'], python_shell=False) + with patch('salt.modules.win_system.shutdown', + MagicMock(return_value=True)) as shutdown: + self.assertEqual(win_system.reboot(5, in_seconds=False), True) + shutdown.assert_called_with(timeout=5, in_seconds=False, reboot=True, + only_on_pending_reboot=False) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') def test_reboot_with_timeout_in_seconds(self): ''' Test to reboot the system with a timeout ''' - mock = MagicMock(return_value='salt') - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertEqual(win_system.reboot(5, in_seconds=True), 'salt') - mock.assert_called_once_with(['shutdown', '/r', '/t', 
'5'], python_shell=False) + with patch('salt.modules.win_system.shutdown', + MagicMock(return_value=True)) as shutdown: + self.assertEqual(win_system.reboot(5, in_seconds=True), True) + shutdown.assert_called_with(timeout=5, in_seconds=True, reboot=True, + only_on_pending_reboot=False) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') def test_reboot_with_wait(self): @@ -98,50 +99,49 @@ class WinSystemTestCase(TestCase, LoaderModuleMockMixin): Test to reboot the system with a timeout and wait for it to finish ''' - mock = MagicMock(return_value='salt') - sleep_mock = MagicMock(return_value='salt') - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - with patch('time.sleep', sleep_mock): - self.assertEqual(win_system.reboot(wait_for_reboot=True), 'salt') - mock.assert_called_once_with(['shutdown', '/r', '/t', '300'], python_shell=False) - sleep_mock.assert_called_once_with(330) + with patch('salt.modules.win_system.shutdown', + MagicMock(return_value=True)), \ + patch('salt.modules.win_system.time.sleep', + MagicMock()) as time: + self.assertEqual(win_system.reboot(wait_for_reboot=True), True) + time.assert_called_with(330) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') def test_shutdown(self): ''' Test to shutdown a running system ''' - mock = MagicMock(return_value='salt') - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertEqual(win_system.shutdown(), 'salt') + with patch('salt.modules.win_system.win32api.InitiateSystemShutdown', + MagicMock()): + self.assertEqual(win_system.shutdown(), True) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') def test_shutdown_hard(self): ''' Test to shutdown a running system with no timeout or warning ''' - mock = MagicMock(return_value='salt') - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertEqual(win_system.shutdown_hard(), 'salt') + with patch('salt.modules.win_system.shutdown', + MagicMock(return_value=True)) as shutdown: + self.assertEqual(win_system.shutdown_hard(), True) + shutdown.assert_called_with(timeout=0) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') def test_set_computer_name(self): ''' Test to set the Windows computer name ''' - mock = MagicMock(side_effect=[{'Computer Name': {'Current': ""}, - 'ReturnValue = 0;': True}, - {'Computer Name': {'Current': 'salt'}}]) - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - mock = MagicMock(return_value='salt') - with patch.object(win_system, 'get_computer_name', mock): - mock = MagicMock(return_value=True) - with patch.object(win_system, - 'get_pending_computer_name', mock): - self.assertDictEqual(win_system.set_computer_name("salt"), + with patch('salt.modules.win_system.windll.kernel32.SetComputerNameExW', + MagicMock(return_value=True)): + with patch.object(win_system, 'get_computer_name', + MagicMock(return_value='salt')): + with patch.object(win_system, 'get_pending_computer_name', + MagicMock(return_value='salt_new')): + self.assertDictEqual(win_system.set_computer_name("salt_new"), {'Computer Name': {'Current': 'salt', - 'Pending': True}}) - + 'Pending': 'salt_new'}}) + # Test set_computer_name failure + with patch('salt.modules.win_system.windll.kernel32.SetComputerNameExW', + MagicMock(return_value=False)): self.assertFalse(win_system.set_computer_name("salt")) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') @@ -149,25 +149,25 @@ class WinSystemTestCase(TestCase, 
LoaderModuleMockMixin): ''' Test to get a pending computer name. ''' - mock = MagicMock(return_value='salt') - with patch.object(win_system, 'get_computer_name', mock): - mock = MagicMock(side_effect=['salt0', - 'ComputerName REG_SZ (salt)']) - with patch.dict(win_system.__salt__, {'cmd.run': mock}): + with patch.object(win_system, 'get_computer_name', + MagicMock(return_value='salt')): + reg_mock = MagicMock(return_value={'vdata': 'salt'}) + with patch.dict(win_system.__salt__, {'reg.read_value': reg_mock}): self.assertFalse(win_system.get_pending_computer_name()) + reg_mock = MagicMock(return_value={'vdata': 'salt_pending'}) + with patch.dict(win_system.__salt__, {'reg.read_value': reg_mock}): self.assertEqual(win_system.get_pending_computer_name(), - '(salt)') + 'salt_pending') @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') def test_get_computer_name(self): ''' Test to get the Windows computer name ''' - mock = MagicMock(side_effect=['Server Name Salt', 'Salt']) - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertEqual(win_system.get_computer_name(), 'Salt') - + with patch('salt.modules.win_system.win32api.GetComputerNameEx', + MagicMock(side_effect=['computer name', ''])): + self.assertEqual(win_system.get_computer_name(), 'computer name') self.assertFalse(win_system.get_computer_name()) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs the w32net library') @@ -189,10 +189,10 @@ class WinSystemTestCase(TestCase, LoaderModuleMockMixin): ''' Test to get the Windows computer description ''' - mock = MagicMock(side_effect=['Server Comment Salt', 'Salt']) - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertEqual(win_system.get_computer_desc(), 'Salt') - + with patch('salt.modules.win_system.get_system_info', + MagicMock(side_effect=[{'description': 'salt description'}, + {'description': None}])): + self.assertEqual(win_system.get_computer_desc(), 'salt description') self.assertFalse(win_system.get_computer_desc()) @skipIf(not win_system.HAS_WIN32NET_MODS, 'this test needs w32net and other windows libraries') @@ -200,17 +200,20 @@ class WinSystemTestCase(TestCase, LoaderModuleMockMixin): ''' Test to join a computer to an Active Directory domain ''' - mock = MagicMock(side_effect=[{'ReturnValue = 0;': True}, - {'Salt': True}]) - with patch.dict(win_system.__salt__, {'cmd.run': mock}): - self.assertDictEqual(win_system.join_domain("saltstack", - "salt", - "salt@123"), - {'Domain': 'saltstack'}) + with patch('salt.modules.win_system._join_domain', + MagicMock(return_value=0)): + with patch('salt.modules.win_system.get_domain_workgroup', + MagicMock(return_value={'Workgroup': 'Workgroup'})): + self.assertDictEqual( + win_system.join_domain( + "saltstack", "salt", "salt@123"), + {'Domain': 'saltstack', 'Restart': False}) - self.assertFalse(win_system.join_domain("saltstack", - "salt", - "salt@123")) + with patch('salt.modules.win_system.get_domain_workgroup', + MagicMock(return_value={'Domain': 'saltstack'})): + self.assertEqual( + win_system.join_domain("saltstack", "salt", "salt@123"), + 'Already joined to saltstack') def test_get_system_time(self): ''' @@ -230,13 +233,10 @@ class WinSystemTestCase(TestCase, LoaderModuleMockMixin): ''' Test to set system time ''' - mock = MagicMock(side_effect=[False, True]) - with patch.object(win_system, '_validate_time', mock): + with patch('salt.modules.win_system.set_system_date_time', + MagicMock(side_effect=[False, True])): 
self.assertFalse(win_system.set_system_time("11:31:15 AM")) - - mock = MagicMock(return_value=True) - with patch.dict(win_system.__salt__, {'cmd.retcode': mock}): - self.assertFalse(win_system.set_system_time("11:31:15 AM")) + self.assertTrue(win_system.set_system_time("11:31:15 AM")) def test_get_system_date(self): ''' @@ -250,13 +250,10 @@ class WinSystemTestCase(TestCase, LoaderModuleMockMixin): ''' Test to set system date ''' - mock = MagicMock(side_effect=[False, True]) - with patch.object(win_system, '_validate_date', mock): + with patch('salt.modules.win_system.set_system_date_time', + MagicMock(side_effect=[False, True])): self.assertFalse(win_system.set_system_date("03-28-13")) - - mock = MagicMock(return_value=True) - with patch.dict(win_system.__salt__, {'cmd.retcode': mock}): - self.assertFalse(win_system.set_system_date("03-28-13")) + self.assertTrue(win_system.set_system_date("03-28-13")) def test_start_time_service(self): ''' diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py new file mode 100644 index 0000000000..cf754d6289 --- /dev/null +++ b/tests/unit/modules/test_yumpkg.py @@ -0,0 +1,174 @@ +# -*- coding: utf-8 -*- + +# Import Python Libs +from __future__ import absolute_import +import os + +# Import Salt Testing Libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import TestCase, skipIf +from tests.support.mock import ( + MagicMock, + patch, + NO_MOCK, + NO_MOCK_REASON +) + +# Import Salt libs +import salt.modules.yumpkg as yumpkg +import salt.modules.pkg_resource as pkg_resource + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class YumTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test cases for salt.modules.yumpkg + ''' + def setup_loader_modules(self): + return {yumpkg: {'rpm': None}} + + def test_list_pkgs(self): + ''' + Test packages listing. 
+ + :return: + ''' + def _add_data(data, key, value): + data.setdefault(key, []).append(value) + + rpm_out = [ + 'python-urlgrabber_|-(none)_|-3.10_|-8.el7_|-noarch_|-(none)_|-1487838471', + 'alsa-lib_|-(none)_|-1.1.1_|-1.el7_|-x86_64_|-(none)_|-1487838475', + 'gnupg2_|-(none)_|-2.0.22_|-4.el7_|-x86_64_|-(none)_|-1487838477', + 'rpm-python_|-(none)_|-4.11.3_|-21.el7_|-x86_64_|-(none)_|-1487838477', + 'pygpgme_|-(none)_|-0.3_|-9.el7_|-x86_64_|-(none)_|-1487838478', + 'yum_|-(none)_|-3.4.3_|-150.el7.centos_|-noarch_|-(none)_|-1487838479', + 'lzo_|-(none)_|-2.06_|-8.el7_|-x86_64_|-(none)_|-1487838479', + 'qrencode-libs_|-(none)_|-3.4.1_|-3.el7_|-x86_64_|-(none)_|-1487838480', + 'ustr_|-(none)_|-1.0.4_|-16.el7_|-x86_64_|-(none)_|-1487838480', + 'shadow-utils_|-2_|-4.1.5.1_|-24.el7_|-x86_64_|-(none)_|-1487838481', + 'util-linux_|-(none)_|-2.23.2_|-33.el7_|-x86_64_|-(none)_|-1487838484', + 'openssh_|-(none)_|-6.6.1p1_|-33.el7_3_|-x86_64_|-(none)_|-1487838485', + 'virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486', + ] + with patch.dict(yumpkg.__grains__, {'osarch': 'x86_64'}), \ + patch.dict(yumpkg.__salt__, {'cmd.run': MagicMock(return_value=os.linesep.join(rpm_out))}), \ + patch.dict(yumpkg.__salt__, {'pkg_resource.add_pkg': _add_data}), \ + patch.dict(yumpkg.__salt__, {'pkg_resource.format_pkg_list': pkg_resource.format_pkg_list}), \ + patch.dict(yumpkg.__salt__, {'pkg_resource.stringify': MagicMock()}): + pkgs = yumpkg.list_pkgs(versions_as_list=True) + for pkg_name, pkg_version in { + 'python-urlgrabber': '3.10-8.el7', + 'alsa-lib': '1.1.1-1.el7', + 'gnupg2': '2.0.22-4.el7', + 'rpm-python': '4.11.3-21.el7', + 'pygpgme': '0.3-9.el7', + 'yum': '3.4.3-150.el7.centos', + 'lzo': '2.06-8.el7', + 'qrencode-libs': '3.4.1-3.el7', + 'ustr': '1.0.4-16.el7', + 'shadow-utils': '2:4.1.5.1-24.el7', + 'util-linux': '2.23.2-33.el7', + 'openssh': '6.6.1p1-33.el7_3', + 'virt-what': '1.13-8.el7'}.items(): + self.assertTrue(pkgs.get(pkg_name)) + self.assertEqual(pkgs[pkg_name], [pkg_version]) + + def test_list_pkgs_with_attr(self): + ''' + Test packages listing with the attr parameter + + :return: + ''' + def _add_data(data, key, value): + data.setdefault(key, []).append(value) + + rpm_out = [ + 'python-urlgrabber_|-(none)_|-3.10_|-8.el7_|-noarch_|-(none)_|-1487838471', + 'alsa-lib_|-(none)_|-1.1.1_|-1.el7_|-x86_64_|-(none)_|-1487838475', + 'gnupg2_|-(none)_|-2.0.22_|-4.el7_|-x86_64_|-(none)_|-1487838477', + 'rpm-python_|-(none)_|-4.11.3_|-21.el7_|-x86_64_|-(none)_|-1487838477', + 'pygpgme_|-(none)_|-0.3_|-9.el7_|-x86_64_|-(none)_|-1487838478', + 'yum_|-(none)_|-3.4.3_|-150.el7.centos_|-noarch_|-(none)_|-1487838479', + 'lzo_|-(none)_|-2.06_|-8.el7_|-x86_64_|-(none)_|-1487838479', + 'qrencode-libs_|-(none)_|-3.4.1_|-3.el7_|-x86_64_|-(none)_|-1487838480', + 'ustr_|-(none)_|-1.0.4_|-16.el7_|-x86_64_|-(none)_|-1487838480', + 'shadow-utils_|-2_|-4.1.5.1_|-24.el7_|-x86_64_|-(none)_|-1487838481', + 'util-linux_|-(none)_|-2.23.2_|-33.el7_|-x86_64_|-(none)_|-1487838484', + 'openssh_|-(none)_|-6.6.1p1_|-33.el7_3_|-x86_64_|-(none)_|-1487838485', + 'virt-what_|-(none)_|-1.13_|-8.el7_|-x86_64_|-(none)_|-1487838486', + ] + with patch.dict(yumpkg.__grains__, {'osarch': 'x86_64'}), \ + patch.dict(yumpkg.__salt__, {'cmd.run': MagicMock(return_value=os.linesep.join(rpm_out))}), \ + patch.dict(yumpkg.__salt__, {'pkg_resource.add_pkg': _add_data}), \ + patch.dict(yumpkg.__salt__, {'pkg_resource.format_pkg_list': pkg_resource.format_pkg_list}), \ + patch.dict(yumpkg.__salt__, {'pkg_resource.stringify': MagicMock()}): 
+ pkgs = yumpkg.list_pkgs(attr=['arch', 'install_date_time_t']) + for pkg_name, pkg_attr in { + 'python-urlgrabber': { + 'version': '3.10-8.el7', + 'arch': 'noarch', + 'install_date_time_t': 1487838471, + }, + 'alsa-lib': { + 'version': '1.1.1-1.el7', + 'arch': 'x86_64', + 'install_date_time_t': 1487838475, + }, + 'gnupg2': { + 'version': '2.0.22-4.el7', + 'arch': 'x86_64', + 'install_date_time_t': 1487838477, + }, + 'rpm-python': { + 'version': '4.11.3-21.el7', + 'arch': 'x86_64', + 'install_date_time_t': 1487838477, + }, + 'pygpgme': { + 'version': '0.3-9.el7', + 'arch': 'x86_64', + 'install_date_time_t': 1487838478, + }, + 'yum': { + 'version': '3.4.3-150.el7.centos', + 'arch': 'noarch', + 'install_date_time_t': 1487838479, + }, + 'lzo': { + 'version': '2.06-8.el7', + 'arch': 'x86_64', + 'install_date_time_t': 1487838479, + }, + 'qrencode-libs': { + 'version': '3.4.1-3.el7', + 'arch': 'x86_64', + 'install_date_time_t': 1487838480, + }, + 'ustr': { + 'version': '1.0.4-16.el7', + 'arch': 'x86_64', + 'install_date_time_t': 1487838480, + }, + 'shadow-utils': { + 'version': '2:4.1.5.1-24.el7', + 'arch': 'x86_64', + 'install_date_time_t': 1487838481, + }, + 'util-linux': { + 'version': '2.23.2-33.el7', + 'arch': 'x86_64', + 'install_date_time_t': 1487838484, + }, + 'openssh': { + 'version': '6.6.1p1-33.el7_3', + 'arch': 'x86_64', + 'install_date_time_t': 1487838485, + }, + 'virt-what': { + 'version': '1.13-8.el7', + 'arch': 'x86_64', + 'install_date_time_t': 1487838486, + }}.items(): + self.assertTrue(pkgs.get(pkg_name)) + self.assertEqual(pkgs[pkg_name], [pkg_attr]) diff --git a/tests/unit/modules/test_zcbuildout.py b/tests/unit/modules/test_zcbuildout.py index 9ec25dc6be..a8b66164cf 100644 --- a/tests/unit/modules/test_zcbuildout.py +++ b/tests/unit/modules/test_zcbuildout.py @@ -9,7 +9,7 @@ import shutil # Import 3rd-party libs # pylint: disable=import-error,no-name-in-module,redefined-builtin -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves.urllib.error import URLError from salt.ext.six.moves.urllib.request import urlopen # pylint: enable=import-error,no-name-in-module,redefined-builtin @@ -21,8 +21,8 @@ from tests.support.unit import TestCase, skipIf from tests.support.helpers import requires_network, skip_if_binaries_missing # Import Salt libs -import salt.utils import salt.utils.files +import salt.utils.path import salt.modules.zcbuildout as buildout import salt.modules.cmdmod as cmd @@ -88,7 +88,7 @@ class Base(TestCase, LoaderModuleMockMixin): '{0} --no-site-packages {1};' '{1}/bin/pip install -U setuptools; ' '{1}/bin/easy_install -U distribute;').format( - salt.utils.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES), + salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES), cls.ppy_st ) ) @@ -121,7 +121,7 @@ class Base(TestCase, LoaderModuleMockMixin): @skipIf(True, 'These tests are not running reliably') -@skipIf(salt.utils.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES) is None, +@skipIf(salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES) is None, 'The \'virtualenv\' packaged needs to be installed') @skip_if_binaries_missing(['tar']) class BuildoutTestCase(Base): @@ -313,7 +313,7 @@ class BuildoutTestCase(Base): self.assertEqual(time2, time3) -@skipIf(salt.utils.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES) is None, +@skipIf(salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES) is None, 'The \'virtualenv\' packaged needs to be installed') @skipIf(True, 'These tests are not running reliably') class BuildoutOnlineTestCase(Base): @@ -329,14 +329,14 @@ class 
BuildoutOnlineTestCase(Base): try: ret20 = buildout._Popen(( '{0} --no-site-packages --no-setuptools --no-pip {1}'.format( - salt.utils.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES), + salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES), cls.ppy_dis ) )) except buildout._BuildoutError: ret20 = buildout._Popen(( '{0} --no-site-packages {1}'.format( - salt.utils.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES), + salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES), cls.ppy_dis )) ) @@ -357,14 +357,14 @@ class BuildoutOnlineTestCase(Base): try: ret3 = buildout._Popen(( '{0} --no-site-packages --no-setuptools --no-pip {1}'.format( - salt.utils.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES), + salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES), cls.ppy_blank ) )) except buildout._BuildoutError: ret3 = buildout._Popen(( '{0} --no-site-packages {1}'.format( - salt.utils.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES), + salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES), cls.ppy_blank ) )) diff --git a/tests/unit/modules/test_zfs.py b/tests/unit/modules/test_zfs.py index 5d421b7288..3bc0c614f5 100644 --- a/tests/unit/modules/test_zfs.py +++ b/tests/unit/modules/test_zfs.py @@ -340,7 +340,7 @@ class ZfsTestCase(TestCase, LoaderModuleMockMixin): ''' Tests zfs bookmark success ''' - with patch('salt.utils.which', MagicMock(return_value='/usr/bin/man')): + with patch('salt.utils.path.which', MagicMock(return_value='/usr/bin/man')): res = {'myzpool/mydataset@yesterday': 'bookmarked as myzpool/mydataset#important'} ret = {'pid': 20990, 'retcode': 0, 'stderr': '', 'stdout': ''} mock_cmd = MagicMock(return_value=ret) diff --git a/tests/unit/modules/test_zypper.py b/tests/unit/modules/test_zypper.py index a736539a45..ae343ee7d6 100644 --- a/tests/unit/modules/test_zypper.py +++ b/tests/unit/modules/test_zypper.py @@ -23,11 +23,12 @@ from tests.support.mock import ( # Import Salt libs import salt.utils.files import salt.modules.zypper as zypper +import salt.modules.pkg_resource as pkg_resource from salt.exceptions import CommandExecutionError # Import 3rd-party libs from salt.ext.six.moves import configparser -import salt.ext.six as six +from salt.ext import six import salt.utils.pkg @@ -496,20 +497,20 @@ Repository 'DUMMY' not found by its alias, number, or URI. 
'jakarta-commons-discovery_|-0.4_|-129.686_|-noarch_|-_|-1498636511', 'susemanager-build-keys-web_|-12.0_|-5.1.develHead_|-noarch_|-_|-1498636510', ] - with patch.dict(zypper.__salt__, {'cmd.run': MagicMock(return_value=os.linesep.join(rpm_out))}): - with patch.dict(zypper.__salt__, {'pkg_resource.add_pkg': _add_data}): - with patch.dict(zypper.__salt__, {'pkg_resource.sort_pkglist': MagicMock()}): - with patch.dict(zypper.__salt__, {'pkg_resource.stringify': MagicMock()}): - pkgs = zypper.list_pkgs() - for pkg_name, pkg_version in { - 'jakarta-commons-discovery': '0.4-129.686', - 'yast2-ftp-server': '3.1.8-8.1', - 'protobuf-java': '2.6.1-3.1.develHead', - 'susemanager-build-keys-web': '12.0-5.1.develHead', - 'apache-commons-cli': '1.2-1.233', - 'jose4j': '0.4.4-2.1.develHead'}.items(): - self.assertTrue(pkgs.get(pkg_name)) - self.assertEqual(pkgs[pkg_name], [pkg_version]) + with patch.dict(zypper.__salt__, {'cmd.run': MagicMock(return_value=os.linesep.join(rpm_out))}), \ + patch.dict(zypper.__salt__, {'pkg_resource.add_pkg': _add_data}), \ + patch.dict(zypper.__salt__, {'pkg_resource.format_pkg_list': pkg_resource.format_pkg_list}), \ + patch.dict(zypper.__salt__, {'pkg_resource.stringify': MagicMock()}): + pkgs = zypper.list_pkgs(versions_as_list=True) + for pkg_name, pkg_version in { + 'jakarta-commons-discovery': '0.4-129.686', + 'yast2-ftp-server': '3.1.8-8.1', + 'protobuf-java': '2.6.1-3.1.develHead', + 'susemanager-build-keys-web': '12.0-5.1.develHead', + 'apache-commons-cli': '1.2-1.233', + 'jose4j': '0.4.4-2.1.develHead'}.items(): + self.assertTrue(pkgs.get(pkg_name)) + self.assertEqual(pkgs[pkg_name], [pkg_version]) def test_list_pkgs_with_attr(self): ''' @@ -528,42 +529,44 @@ Repository 'DUMMY' not found by its alias, number, or URI. 'jakarta-commons-discovery_|-0.4_|-129.686_|-noarch_|-_|-1498636511', 'susemanager-build-keys-web_|-12.0_|-5.1.develHead_|-noarch_|-_|-1498636510', ] - with patch.dict(zypper.__salt__, {'cmd.run': MagicMock(return_value=os.linesep.join(rpm_out))}): - with patch.dict(zypper.__salt__, {'pkg_resource.add_pkg': _add_data}): - pkgs = zypper.list_pkgs(attr=['arch', 'install_date_time_t']) - for pkg_name, pkg_attr in { - 'jakarta-commons-discovery': { - 'version': '0.4-129.686', - 'arch': 'noarch', - 'install_date_time_t': 1498636511, - }, - 'yast2-ftp-server': { - 'version': '3.1.8-8.1', - 'arch': 'x86_64', - 'install_date_time_t': 1499257798, - }, - 'protobuf-java': { - 'version': '2.6.1-3.1.develHead', - 'arch': 'noarch', - 'install_date_time_t': 1499257756, - }, - 'susemanager-build-keys-web': { - 'version': '12.0-5.1.develHead', - 'arch': 'noarch', - 'install_date_time_t': 1498636510, - }, - 'apache-commons-cli': { - 'version': '1.2-1.233', - 'arch': 'noarch', - 'install_date_time_t': 1498636510, - }, - 'jose4j': { - 'version': '0.4.4-2.1.develHead', - 'arch': 'noarch', - 'install_date_time_t': 1499257756, - }}.items(): - self.assertTrue(pkgs.get(pkg_name)) - self.assertEqual(pkgs[pkg_name], [pkg_attr]) + with patch.dict(zypper.__salt__, {'cmd.run': MagicMock(return_value=os.linesep.join(rpm_out))}), \ + patch.dict(zypper.__salt__, {'pkg_resource.add_pkg': _add_data}), \ + patch.dict(zypper.__salt__, {'pkg_resource.format_pkg_list': pkg_resource.format_pkg_list}), \ + patch.dict(zypper.__salt__, {'pkg_resource.stringify': MagicMock()}): + pkgs = zypper.list_pkgs(attr=['arch', 'install_date_time_t']) + for pkg_name, pkg_attr in { + 'jakarta-commons-discovery': { + 'version': '0.4-129.686', + 'arch': 'noarch', + 'install_date_time_t': 1498636511, + }, + 
'yast2-ftp-server': { + 'version': '3.1.8-8.1', + 'arch': 'x86_64', + 'install_date_time_t': 1499257798, + }, + 'protobuf-java': { + 'version': '2.6.1-3.1.develHead', + 'arch': 'noarch', + 'install_date_time_t': 1499257756, + }, + 'susemanager-build-keys-web': { + 'version': '12.0-5.1.develHead', + 'arch': 'noarch', + 'install_date_time_t': 1498636510, + }, + 'apache-commons-cli': { + 'version': '1.2-1.233', + 'arch': 'noarch', + 'install_date_time_t': 1498636510, + }, + 'jose4j': { + 'version': '0.4.4-2.1.develHead', + 'arch': 'noarch', + 'install_date_time_t': 1499257756, + }}.items(): + self.assertTrue(pkgs.get(pkg_name)) + self.assertEqual(pkgs[pkg_name], [pkg_attr]) def test_list_patches(self): ''' @@ -1151,7 +1154,7 @@ Repository 'DUMMY' not found by its alias, number, or URI. """ _zpr = MagicMock() _zpr.nolock.xml.call = MagicMock(return_value=minidom.parseString(xmldoc)) - assert isinstance(zypper.Wildcard(_zpr)('libzypp', '*.1'), str) + assert isinstance(zypper.Wildcard(_zpr)('libzypp', '*.1'), six.string_types) def test_wildcard_to_query_condition_preservation(self): ''' diff --git a/tests/unit/netapi/rest_tornado/test_handlers.py b/tests/unit/netapi/rest_tornado/test_handlers.py index 555528e076..23bf5188ca 100644 --- a/tests/unit/netapi/rest_tornado/test_handlers.py +++ b/tests/unit/netapi/rest_tornado/test_handlers.py @@ -38,7 +38,7 @@ except ImportError: class AsyncHTTPTestCase(object): pass -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves.urllib.parse import urlencode, urlparse # pylint: disable=no-name-in-module # pylint: enable=import-error diff --git a/tests/unit/netapi/rest_tornado/test_utils.py b/tests/unit/netapi/rest_tornado/test_utils.py index 76b5d66076..5df66cb2d1 100644 --- a/tests/unit/netapi/rest_tornado/test_utils.py +++ b/tests/unit/netapi/rest_tornado/test_utils.py @@ -31,7 +31,8 @@ except ImportError: HAS_TORNADO = False # Import utility lib from tests -from tests.unit.utils.test_event import eventpublisher_process, event, SOCK_DIR # pylint: disable=import-error +import salt.utils.event +from tests.unit.utils.test_event import eventpublisher_process, SOCK_DIR # pylint: disable=import-error @skipIf(HAS_TORNADO is False, 'The tornado package needs to be installed') @@ -85,7 +86,7 @@ class TestEventListener(AsyncTestCase): Test getting a few events ''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR) + me = salt.utils.event.MasterEvent(SOCK_DIR) event_listener = saltnado.EventListener({}, # we don't use mod_opts, don't save? {'sock_dir': SOCK_DIR, 'transport': 'zeromq'}) @@ -105,7 +106,7 @@ class TestEventListener(AsyncTestCase): Test subscribing events using set_event_handler ''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR) + me = salt.utils.event.MasterEvent(SOCK_DIR) event_listener = saltnado.EventListener({}, # we don't use mod_opts, don't save? 
{'sock_dir': SOCK_DIR, 'transport': 'zeromq'}) diff --git a/tests/unit/output/test_highstate_out.py b/tests/unit/output/test_highstate_out.py index b66c21311b..d02f1076fa 100644 --- a/tests/unit/output/test_highstate_out.py +++ b/tests/unit/output/test_highstate_out.py @@ -12,10 +12,11 @@ from tests.support.unit import TestCase # Import Salt Libs import salt.utils +import salt.utils.stringutils import salt.output.highstate as highstate # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six class JsonTestCase(TestCase, LoaderModuleMockMixin): @@ -100,9 +101,9 @@ class JsonTestCase(TestCase, LoaderModuleMockMixin): continue entry = entry[key] if six.PY2: - entry['comment'] = salt.utils.to_unicode(entry['comment']) + entry['comment'] = salt.utils.stringutils.to_unicode(entry['comment']) else: - entry['comment'] = salt.utils.to_bytes(entry['comment']) + entry['comment'] = salt.utils.stringutils.to_bytes(entry['comment']) ret = highstate.output(self.data) self.assertIn('Succeeded: 1 (changed=1)', ret) self.assertIn('Failed: 0', ret) diff --git a/tests/unit/output/test_json_out.py b/tests/unit/output/test_json_out.py index 13c9659607..0e0be5f4fa 100644 --- a/tests/unit/output/test_json_out.py +++ b/tests/unit/output/test_json_out.py @@ -16,7 +16,7 @@ from tests.support.mock import patch import salt.output.json_out as json_out # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six class JsonTestCase(TestCase, LoaderModuleMockMixin): diff --git a/tests/unit/pillar/test_consul.py b/tests/unit/pillar/test_consul.py index efe8d628e0..6d26b75d32 100644 --- a/tests/unit/pillar/test_consul.py +++ b/tests/unit/pillar/test_consul.py @@ -11,6 +11,9 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch # Import Salt Libs import salt.pillar.consul_pillar as consul_pillar +# Import 3rd-party libs +from salt.ext import six + OPTS = {'consul_config': {'consul.port': 8500, 'consul.host': '172.17.0.15'}} PILLAR_DATA = [ @@ -66,7 +69,7 @@ class ConsulPillarTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(consul_pillar.__salt__, {'grains.get': MagicMock(return_value=({}))}): with patch.object(consul_pillar, 'consul_fetch', MagicMock(return_value=('2232', PILLAR_DATA))): pillar_data = consul_pillar.ext_pillar('testminion', {}, 'consul_config root=test-shared/') - assert isinstance(pillar_data[u'user'][u'dontsplit'], str) + assert isinstance(pillar_data[u'user'][u'dontsplit'], six.string_types) def test_dict_merge(self): test_dict = {} diff --git a/tests/unit/proxy/__init__.py b/tests/unit/proxy/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/tests/unit/proxy/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/tests/unit/proxy/test_esxdatacenter.py b/tests/unit/proxy/test_esxdatacenter.py new file mode 100644 index 0000000000..fb44851a9f --- /dev/null +++ b/tests/unit/proxy/test_esxdatacenter.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Alexandru Bleotu ` + + Tests for esxdatacenter proxy +''' + +# Import Python Libs +from __future__ import absolute_import + +# Import external libs +try: + import jsonschema + HAS_JSONSCHEMA = True +except ImportError: + HAS_JSONSCHEMA = False + +# Import Salt Libs +import salt.proxy.esxdatacenter as esxdatacenter +import salt.exceptions +from salt.config.schemas.esxdatacenter import EsxdatacenterProxySchema + +# Import Salt Testing Libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import TestCase, 
skipIf +from tests.support.mock import ( + MagicMock, + patch, + NO_MOCK, + NO_MOCK_REASON +) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_JSONSCHEMA, 'jsonschema is required') +class InitTestCase(TestCase, LoaderModuleMockMixin): + '''Tests for salt.proxy.esxdatacenter.init''' + def setup_loader_modules(self): + return {esxdatacenter: {'__virtual__': + MagicMock(return_value='esxdatacenter'), + 'DETAILS': {}}} + + def setUp(self): + self.opts_userpass = {'proxy': {'proxytype': 'esxdatacenter', + 'vcenter': 'fake_vcenter', + 'datacenter': 'fake_dc', + 'mechanism': 'userpass', + 'username': 'fake_username', + 'passwords': ['fake_password'], + 'protocol': 'fake_protocol', + 'port': 100}} + self.opts_sspi = {'proxy': {'proxytype': 'esxdatacenter', + 'vcenter': 'fake_vcenter', + 'datacenter': 'fake_dc', + 'mechanism': 'sspi', + 'domain': 'fake_domain', + 'principal': 'fake_principal', + 'protocol': 'fake_protocol', + 'port': 100}} + + def test_esxdatacenter_schema(self): + mock_json_validate = MagicMock() + serialized_schema = EsxdatacenterProxySchema().serialize() + with patch('salt.proxy.esxdatacenter.jsonschema.validate', + mock_json_validate): + esxdatacenter.init(self.opts_sspi) + mock_json_validate.assert_called_once_with( + self.opts_sspi['proxy'], serialized_schema) + + def test_invalid_proxy_input_error(self): + with patch('salt.proxy.esxdatacenter.jsonschema.validate', + MagicMock(side_effect=jsonschema.exceptions.ValidationError( + 'Validation Error'))): + with self.assertRaises(salt.exceptions.InvalidConfigError) as \ + excinfo: + esxdatacenter.init(self.opts_userpass) + self.assertEqual(excinfo.exception.strerror.message, + 'Validation Error') + + def test_no_username(self): + opts = self.opts_userpass.copy() + del opts['proxy']['username'] + with self.assertRaises(salt.exceptions.InvalidConfigError) as \ + excinfo: + esxdatacenter.init(opts) + self.assertEqual(excinfo.exception.strerror, + 'Mechanism is set to \'userpass\', but no ' + '\'username\' key found in proxy config.') + + def test_no_passwords(self): + opts = self.opts_userpass.copy() + del opts['proxy']['passwords'] + with self.assertRaises(salt.exceptions.InvalidConfigError) as \ + excinfo: + esxdatacenter.init(opts) + self.assertEqual(excinfo.exception.strerror, + 'Mechanism is set to \'userpass\', but no ' + '\'passwords\' key found in proxy config.') + + def test_no_domain(self): + opts = self.opts_sspi.copy() + del opts['proxy']['domain'] + with self.assertRaises(salt.exceptions.InvalidConfigError) as \ + excinfo: + esxdatacenter.init(opts) + self.assertEqual(excinfo.exception.strerror, + 'Mechanism is set to \'sspi\', but no ' + '\'domain\' key found in proxy config.') + + def test_no_principal(self): + opts = self.opts_sspi.copy() + del opts['proxy']['principal'] + with self.assertRaises(salt.exceptions.InvalidConfigError) as \ + excinfo: + esxdatacenter.init(opts) + self.assertEqual(excinfo.exception.strerror, + 'Mechanism is set to \'sspi\', but no ' + '\'principal\' key found in proxy config.') + + def test_find_credentials(self): + mock_find_credentials = MagicMock(return_value=('fake_username', + 'fake_password')) + with patch('salt.proxy.esxdatacenter.find_credentials', + mock_find_credentials): + esxdatacenter.init(self.opts_userpass) + mock_find_credentials.assert_called_once_with() + + def test_details_userpass(self): + mock_find_credentials = MagicMock(return_value=('fake_username', + 'fake_password')) + with patch('salt.proxy.esxdatacenter.find_credentials', + mock_find_credentials): + 
esxdatacenter.init(self.opts_userpass) + self.assertDictEqual(esxdatacenter.DETAILS, + {'vcenter': 'fake_vcenter', + 'datacenter': 'fake_dc', + 'mechanism': 'userpass', + 'username': 'fake_username', + 'password': 'fake_password', + 'passwords': ['fake_password'], + 'protocol': 'fake_protocol', + 'port': 100}) + + def test_details_sspi(self): + esxdatacenter.init(self.opts_sspi) + self.assertDictEqual(esxdatacenter.DETAILS, + {'vcenter': 'fake_vcenter', + 'datacenter': 'fake_dc', + 'mechanism': 'sspi', + 'domain': 'fake_domain', + 'principal': 'fake_principal', + 'protocol': 'fake_protocol', + 'port': 100}) diff --git a/tests/unit/renderers/test_gpg.py b/tests/unit/renderers/test_gpg.py index 5890e6d8d8..b4b26e3828 100644 --- a/tests/unit/renderers/test_gpg.py +++ b/tests/unit/renderers/test_gpg.py @@ -32,10 +32,10 @@ class GPGTestCase(TestCase, LoaderModuleMockMixin): ''' gpg_exec = '/bin/gpg' - with patch('salt.utils.which', MagicMock(return_value=gpg_exec)): + with patch('salt.utils.path.which', MagicMock(return_value=gpg_exec)): self.assertEqual(gpg._get_gpg_exec(), gpg_exec) - with patch('salt.utils.which', MagicMock(return_value=False)): + with patch('salt.utils.path.which', MagicMock(return_value=False)): self.assertRaises(SaltRenderError, gpg._get_gpg_exec) def test__decrypt_ciphertext(self): @@ -55,7 +55,7 @@ class GPGTestCase(TestCase, LoaderModuleMockMixin): return [None, 'decrypt error'] with patch('salt.renderers.gpg._get_key_dir', MagicMock(return_value=key_dir)), \ - patch('salt.utils.which', MagicMock()): + patch('salt.utils.path.which', MagicMock()): with patch('salt.renderers.gpg.Popen', MagicMock(return_value=GPGDecrypt())): self.assertEqual(gpg._decrypt_ciphertext(crypted), secret) with patch('salt.renderers.gpg.Popen', MagicMock(return_value=GPGNotDecrypt())): diff --git a/tests/unit/returners/test_local_cache.py b/tests/unit/returners/test_local_cache.py index 62abf40ff1..ad9ff53e07 100644 --- a/tests/unit/returners/test_local_cache.py +++ b/tests/unit/returners/test_local_cache.py @@ -26,12 +26,12 @@ from tests.support.mock import ( ) # Import Salt libs -import salt.utils import salt.utils.files import salt.utils.jid import salt.utils.job +import salt.utils.platform import salt.returners.local_cache as local_cache -import salt.ext.six as six +from salt.ext import six log = logging.getLogger(__name__) @@ -280,7 +280,7 @@ class Local_CacheTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleM # This needed due to a race condition in Windows # `os.makedirs` hasn't released the handle before # `local_cache.clean_old_jobs` tries to delete the new_jid_dir - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): import time lock_dir = new_jid_dir + '.lckchk' tries = 0 diff --git a/tests/unit/runners/test_vault.py b/tests/unit/runners/test_vault.py index 1231caafce..e9f751fa99 100644 --- a/tests/unit/runners/test_vault.py +++ b/tests/unit/runners/test_vault.py @@ -18,7 +18,7 @@ from tests.support.mock import ( ) # Import salt libs -import salt.ext.six as six +from salt.ext import six import salt.runners.vault as vault log = logging.getLogger(__name__) diff --git a/tests/unit/serializers/test_serializers.py b/tests/unit/serializers/test_serializers.py index 2520a462d8..4f4890e06e 100644 --- a/tests/unit/serializers/test_serializers.py +++ b/tests/unit/serializers/test_serializers.py @@ -9,7 +9,7 @@ from tests.support.unit import skipIf, TestCase # Import 3rd party libs import jinja2 -import salt.ext.six as six +from salt.ext import six # Import salt libs 
import salt.serializers.configparser as configparser diff --git a/tests/unit/ssh/test_ssh_single.py b/tests/unit/ssh/test_ssh_single.py index 25d89f6eca..68970ac093 100644 --- a/tests/unit/ssh/test_ssh_single.py +++ b/tests/unit/ssh/test_ssh_single.py @@ -14,8 +14,8 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON # Import Salt libs import tests.integration as integration +import salt.utils.thin as thin from salt.client import ssh -from salt.utils import thin @skipIf(NO_MOCK, NO_MOCK_REASON) diff --git a/tests/unit/states/test_archive.py b/tests/unit/states/test_archive.py index 2823711fe7..30d256773d 100644 --- a/tests/unit/states/test_archive.py +++ b/tests/unit/states/test_archive.py @@ -102,16 +102,17 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): 'cmd.run_all': mock_run, 'archive.list': list_mock, 'file.source_list': mock_source_list}): - with patch.object(os.path, 'isfile', isfile_mock): - for test_opts, ret_opts in zip(test_tar_opts, ret_tar_opts): - ret = archive.extracted(tmp_dir, - source, - options=test_opts, - enforce_toplevel=False) - ret_opts.append(source) - mock_run.assert_called_with(ret_opts, - cwd=tmp_dir + os.sep, - python_shell=False) + with patch.dict(archive.__states__, {'file.directory': mock_true}): + with patch.object(os.path, 'isfile', isfile_mock): + for test_opts, ret_opts in zip(test_tar_opts, ret_tar_opts): + ret = archive.extracted(tmp_dir, + source, + options=test_opts, + enforce_toplevel=False) + ret_opts.append(source) + mock_run.assert_called_with(ret_opts, + cwd=tmp_dir + os.sep, + python_shell=False) def test_tar_gnutar(self): ''' @@ -142,13 +143,14 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): 'cmd.run_all': run_all, 'archive.list': list_mock, 'file.source_list': mock_source_list}): - with patch.object(os.path, 'isfile', isfile_mock): - ret = archive.extracted('/tmp/out', - source, - options='xvzf', - enforce_toplevel=False, - keep=True) - self.assertEqual(ret['changes']['extracted_files'], 'stdout') + with patch.dict(archive.__states__, {'file.directory': mock_true}): + with patch.object(os.path, 'isfile', isfile_mock): + ret = archive.extracted('/tmp/out', + source, + options='xvzf', + enforce_toplevel=False, + keep=True) + self.assertEqual(ret['changes']['extracted_files'], 'stdout') def test_tar_bsdtar(self): ''' @@ -179,10 +181,11 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin): 'cmd.run_all': run_all, 'archive.list': list_mock, 'file.source_list': mock_source_list}): - with patch.object(os.path, 'isfile', isfile_mock): - ret = archive.extracted('/tmp/out', - source, - options='xvzf', - enforce_toplevel=False, - keep=True) - self.assertEqual(ret['changes']['extracted_files'], 'stderr') + with patch.dict(archive.__states__, {'file.directory': mock_true}): + with patch.object(os.path, 'isfile', isfile_mock): + ret = archive.extracted('/tmp/out', + source, + options='xvzf', + enforce_toplevel=False, + keep=True) + self.assertEqual(ret['changes']['extracted_files'], 'stderr') diff --git a/tests/unit/states/test_beacon.py b/tests/unit/states/test_beacon.py new file mode 100644 index 0000000000..f3b716b2c1 --- /dev/null +++ b/tests/unit/states/test_beacon.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Jayesh Kariya ` +''' +# Import Python libs +from __future__ import absolute_import +import os + +# Import Salt Testing Libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.paths import TMP +from tests.support.unit import skipIf, TestCase +from 
tests.support.mock import ( + NO_MOCK, + NO_MOCK_REASON, + MagicMock, + patch +) + +# Import Salt Libs +import salt.states.beacon as beacon + +SOCK_DIR = os.path.join(TMP, 'test-socks') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class BeaconTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test cases for salt.states.beacon + ''' + def setup_loader_modules(self): + return {beacon: {}} + + # 'present' function tests: 1 + + def test_present(self): + ''' + Test to ensure a job is present in the beacon. + ''' + beacon_name = 'ps' + + ret = {'name': beacon_name, + 'changes': {}, + 'result': False, + 'comment': ''} + + mock_dict = MagicMock(side_effect=[ret, []]) + mock_mod = MagicMock(return_value=ret) + mock_lst = MagicMock(side_effect=[{beacon_name: {}}, + {beacon_name: {}}, + {}, + {}]) + with patch.dict(beacon.__salt__, + {'beacons.list': mock_lst, + 'beacons.modify': mock_mod, + 'beacons.add': mock_mod}): + self.assertDictEqual(beacon.present(beacon_name), ret) + + with patch.dict(beacon.__opts__, {'test': False}): + self.assertDictEqual(beacon.present(beacon_name), ret) + + self.assertDictEqual(beacon.present(beacon_name), ret) + + with patch.dict(beacon.__opts__, {'test': True}): + ret.update({'result': True}) + self.assertDictEqual(beacon.present(beacon_name), ret) + + # 'absent' function tests: 1 + + def test_absent(self): + ''' + Test to ensure a job is absent from the schedule. + ''' + beacon_name = 'ps' + + ret = {'name': beacon_name, + 'changes': {}, + 'result': False, + 'comment': ''} + + mock_mod = MagicMock(return_value=ret) + mock_lst = MagicMock(side_effect=[{beacon_name: {}}, {}]) + with patch.dict(beacon.__salt__, + {'beacons.list': mock_lst, + 'beacons.delete': mock_mod}): + with patch.dict(beacon.__opts__, {'test': False}): + self.assertDictEqual(beacon.absent(beacon_name), ret) + + with patch.dict(beacon.__opts__, {'test': True}): + comt = ('ps not configured in beacons') + ret.update({'comment': comt, 'result': True}) + self.assertDictEqual(beacon.absent(beacon_name), ret) diff --git a/tests/unit/states/test_blockdev.py b/tests/unit/states/test_blockdev.py index e5899f1c70..9cf8b1db27 100644 --- a/tests/unit/states/test_blockdev.py +++ b/tests/unit/states/test_blockdev.py @@ -17,7 +17,7 @@ from tests.support.mock import ( # Import Salt Libs import salt.states.blockdev as blockdev -import salt.utils +import salt.utils.path @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -85,7 +85,7 @@ class BlockdevTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(blockdev.__salt__, {'cmd.run': MagicMock(return_value='')}): ret.update({'comment': 'Invalid fs_type: foo-bar', 'result': False}) - with patch.object(salt.utils, 'which', + with patch.object(salt.utils.path, 'which', MagicMock(return_value=False)): self.assertDictEqual(blockdev.formatted(name, fs_type='foo-bar'), ret) @@ -93,7 +93,7 @@ class BlockdevTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(blockdev.__salt__, {'cmd.run': MagicMock(return_value='new-thing')}): comt = ('Changes to {0} will be applied '.format(name)) ret.update({'comment': comt, 'result': None}) - with patch.object(salt.utils, 'which', + with patch.object(salt.utils.path, 'which', MagicMock(return_value=True)): with patch.dict(blockdev.__opts__, {'test': True}): self.assertDictEqual(blockdev.formatted(name), ret) @@ -103,7 +103,7 @@ class BlockdevTestCase(TestCase, LoaderModuleMockMixin): 'disk.format_': MagicMock(return_value=True)}): comt = ('Failed to format {0}'.format(name)) ret.update({'comment': comt, 'result': False}) - with 
patch.object(salt.utils, 'which', + with patch.object(salt.utils.path, 'which', MagicMock(return_value=True)): with patch.dict(blockdev.__opts__, {'test': False}): self.assertDictEqual(blockdev.formatted(name), ret) diff --git a/tests/unit/states/test_boto_elasticsearch_domain.py b/tests/unit/states/test_boto_elasticsearch_domain.py index 2b9f1ee56a..a8266c06aa 100644 --- a/tests/unit/states/test_boto_elasticsearch_domain.py +++ b/tests/unit/states/test_boto_elasticsearch_domain.py @@ -12,7 +12,7 @@ from tests.support.unit import skipIf, TestCase from tests.support.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.loader from salt.utils.versions import LooseVersion import salt.states.boto_elasticsearch_domain as boto_elasticsearch_domain diff --git a/tests/unit/states/test_boto_s3_bucket.py b/tests/unit/states/test_boto_s3_bucket.py index e98166d7bf..5bebb36039 100644 --- a/tests/unit/states/test_boto_s3_bucket.py +++ b/tests/unit/states/test_boto_s3_bucket.py @@ -13,7 +13,7 @@ from tests.support.unit import skipIf, TestCase from tests.support.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.loader from salt.utils.versions import LooseVersion import salt.states.boto_s3_bucket as boto_s3_bucket diff --git a/tests/unit/states/test_boto_sqs.py b/tests/unit/states/test_boto_sqs.py index 1ee4c9820e..4c50a5449f 100644 --- a/tests/unit/states/test_boto_sqs.py +++ b/tests/unit/states/test_boto_sqs.py @@ -4,6 +4,7 @@ ''' # Import Python libs from __future__ import absolute_import +import textwrap # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin @@ -11,6 +12,8 @@ from tests.support.unit import skipIf, TestCase from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch # Import Salt Libs +import salt.config +import salt.loader import salt.states.boto_sqs as boto_sqs @@ -20,7 +23,24 @@ class BotoSqsTestCase(TestCase, LoaderModuleMockMixin): Test cases for salt.states.boto_sqs ''' def setup_loader_modules(self): - return {boto_sqs: {}} + utils = salt.loader.utils( + self.opts, + whitelist=['boto3', 'yamldumper'], + context={} + ) + return { + boto_sqs: { + '__utils__': utils, + } + } + + @classmethod + def setUpClass(cls): + cls.opts = salt.config.DEFAULT_MINION_OPTS + + @classmethod + def tearDownClass(cls): + del cls.opts # 'present' function tests: 1 @@ -29,37 +49,55 @@ class BotoSqsTestCase(TestCase, LoaderModuleMockMixin): Test to ensure the SQS queue exists. 
''' name = 'mysqs' - attributes = {'ReceiveMessageWaitTimeSeconds': 20} + attributes = {'DelaySeconds': 20} + base_ret = {'name': name, 'changes': {}} - ret = {'name': name, - 'result': False, - 'changes': {}, - 'comment': ''} - - mock = MagicMock(side_effect=[False, False, True, True]) - mock_bool = MagicMock(return_value=False) - mock_attr = MagicMock(return_value={}) + mock = MagicMock( + side_effect=[{'result': b} for b in [False, False, True, True]], + ) + mock_bool = MagicMock(return_value={'error': 'create error'}) + mock_attr = MagicMock(return_value={'result': {}}) with patch.dict(boto_sqs.__salt__, {'boto_sqs.exists': mock, 'boto_sqs.create': mock_bool, 'boto_sqs.get_attributes': mock_attr}): with patch.dict(boto_sqs.__opts__, {'test': False}): - comt = ('Failed to create {0} AWS queue'.format(name)) - ret.update({'comment': comt}) + comt = 'Failed to create SQS queue {0}: create error'.format( + name, + ) + ret = base_ret.copy() + ret.update({'result': False, 'comment': comt}) self.assertDictEqual(boto_sqs.present(name), ret) with patch.dict(boto_sqs.__opts__, {'test': True}): - comt = ('AWS SQS queue {0} is set to be created.'.format(name)) - ret.update({'comment': comt, 'result': None}) + comt = 'SQS queue {0} is set to be created.'.format(name) + ret = base_ret.copy() + ret.update({ + 'result': None, + 'comment': comt, + 'pchanges': {'old': None, 'new': 'mysqs'}, + }) self.assertDictEqual(boto_sqs.present(name), ret) - - comt = ('Attribute(s) ReceiveMessageWaitTimeSeconds' - ' to be set on mysqs.') - ret.update({'comment': comt}) + diff = textwrap.dedent('''\ + --- + +++ + @@ -1 +1 @@ + -{} + +DelaySeconds: 20 + ''') + comt = textwrap.dedent('''\ + SQS queue mysqs present. + Attribute(s) DelaySeconds set to be updated: + ''') + diff + ret.update({ + 'comment': comt, + 'pchanges': {'attributes': {'diff': diff}}, + }) self.assertDictEqual(boto_sqs.present(name, attributes), ret) - comt = ('mysqs present. Attributes set.') - ret.update({'comment': comt, 'result': True}) + comt = ('SQS queue mysqs present.') + ret = base_ret.copy() + ret.update({'result': True, 'comment': comt}) self.assertDictEqual(boto_sqs.present(name), ret) # 'absent' function tests: 1 @@ -69,20 +107,22 @@ class BotoSqsTestCase(TestCase, LoaderModuleMockMixin): Test to ensure the named sqs queue is deleted. ''' name = 'test.example.com.' 
+ base_ret = {'name': name, 'changes': {}} - ret = {'name': name, - 'result': True, - 'changes': {}, - 'comment': ''} - - mock = MagicMock(side_effect=[False, True]) + mock = MagicMock(side_effect=[{'result': False}, {'result': True}]) with patch.dict(boto_sqs.__salt__, {'boto_sqs.exists': mock}): - comt = ('{0} does not exist in None.'.format(name)) - ret.update({'comment': comt}) + comt = ('SQS queue {0} does not exist in None.'.format(name)) + ret = base_ret.copy() + ret.update({'result': True, 'comment': comt}) self.assertDictEqual(boto_sqs.absent(name), ret) with patch.dict(boto_sqs.__opts__, {'test': True}): - comt = ('AWS SQS queue {0} is set to be removed.'.format(name)) - ret.update({'comment': comt, 'result': None}) + comt = ('SQS queue {0} is set to be removed.'.format(name)) + ret = base_ret.copy() + ret.update({ + 'result': None, + 'comment': comt, + 'pchanges': {'old': name, 'new': None}, + }) self.assertDictEqual(boto_sqs.absent(name), ret) diff --git a/tests/unit/states/test_boto_vpc.py b/tests/unit/states/test_boto_vpc.py index e632b1ec2e..1d5e334a2f 100644 --- a/tests/unit/states/test_boto_vpc.py +++ b/tests/unit/states/test_boto_vpc.py @@ -11,14 +11,11 @@ from tests.support.unit import skipIf, TestCase from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch # Import Salt libs -import salt.config -import salt.loader import salt.utils.boto +from salt.ext import six from salt.utils.versions import LooseVersion import salt.states.boto_vpc as boto_vpc -# Import test suite libs - # pylint: disable=import-error,unused-import from tests.unit.modules.test_boto_vpc import BotoVpcTestCaseMixin @@ -34,18 +31,18 @@ except ImportError: HAS_BOTO = False try: - from moto import mock_ec2 + from moto import mock_ec2_deprecated HAS_MOTO = True except ImportError: HAS_MOTO = False - def mock_ec2(self): + def mock_ec2_deprecated(self): ''' - if the mock_ec2 function is not available due to import failure + if the mock_ec2_deprecated function is not available due to import failure this replaces the decorated function with stub_function. - Allows boto_vpc unit tests to use the @mock_ec2 decorator - without a "NameError: name 'mock_ec2' is not defined" error. + Allows boto_vpc unit tests to use the @mock_ec2_deprecated decorator + without a "NameError: name 'mock_ec2_deprecated' is not defined" error. ''' def stub_function(self): @@ -131,7 +128,7 @@ class BotoVpcTestCase(BotoVpcStateTestCaseBase, BotoVpcTestCaseMixin): TestCase for salt.states.boto_vpc state.module ''' - @mock_ec2 + @mock_ec2_deprecated def test_present_when_vpc_does_not_exist(self): ''' Tests present on a VPC that does not exist. 
@@ -142,14 +139,14 @@ class BotoVpcTestCase(BotoVpcStateTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_present_result['result']) self.assertEqual(vpc_present_result['changes']['new']['vpc']['state'], 'available') - @mock_ec2 + @mock_ec2_deprecated def test_present_when_vpc_exists(self): vpc = self._create_vpc(name='test') vpc_present_result = self.salt_states['boto_vpc.present']('test', cidr_block) self.assertTrue(vpc_present_result['result']) self.assertEqual(vpc_present_result['changes'], {}) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_present_with_failure(self): with patch('moto.ec2.models.VPCBackend.create_vpc', side_effect=BotoServerError(400, 'Mocked error')): @@ -157,7 +154,7 @@ class BotoVpcTestCase(BotoVpcStateTestCaseBase, BotoVpcTestCaseMixin): self.assertFalse(vpc_present_result['result']) self.assertTrue('Mocked error' in vpc_present_result['comment']) - @mock_ec2 + @mock_ec2_deprecated def test_absent_when_vpc_does_not_exist(self): ''' Tests absent on a VPC that does not exist. @@ -167,7 +164,7 @@ class BotoVpcTestCase(BotoVpcStateTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_absent_result['result']) self.assertEqual(vpc_absent_result['changes'], {}) - @mock_ec2 + @mock_ec2_deprecated def test_absent_when_vpc_exists(self): vpc = self._create_vpc(name='test') with patch.dict(salt.utils.boto.__salt__, self.funcs): @@ -175,7 +172,7 @@ class BotoVpcTestCase(BotoVpcStateTestCaseBase, BotoVpcTestCaseMixin): self.assertTrue(vpc_absent_result['result']) self.assertEqual(vpc_absent_result['changes']['new']['vpc'], None) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_absent_with_failure(self): vpc = self._create_vpc(name='test') @@ -195,7 +192,7 @@ class BotoVpcResourceTestCaseMixin(BotoVpcTestCaseMixin): _create = getattr(self, '_create_' + self.resource_type) _create(vpc_id=vpc_id, name=name, **self.extra_kwargs) - @mock_ec2 + @mock_ec2_deprecated def test_present_when_resource_does_not_exist(self): ''' Tests present on a resource that does not exist. @@ -210,7 +207,7 @@ class BotoVpcResourceTestCaseMixin(BotoVpcTestCaseMixin): exists = self.funcs['boto_vpc.resource_exists'](self.resource_type, 'test').get('exists') self.assertTrue(exists) - @mock_ec2 + @mock_ec2_deprecated def test_present_when_resource_exists(self): vpc = self._create_vpc(name='test') resource = self._create_resource(vpc_id=vpc.id, name='test') @@ -220,7 +217,7 @@ class BotoVpcResourceTestCaseMixin(BotoVpcTestCaseMixin): self.assertTrue(resource_present_result['result']) self.assertEqual(resource_present_result['changes'], {}) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_present_with_failure(self): vpc = self._create_vpc(name='test') @@ -231,7 +228,7 @@ class BotoVpcResourceTestCaseMixin(BotoVpcTestCaseMixin): self.assertFalse(resource_present_result['result']) self.assertTrue('Mocked error' in resource_present_result['comment']) - @mock_ec2 + @mock_ec2_deprecated def test_absent_when_resource_does_not_exist(self): ''' Tests absent on a resource that does not exist. 
@@ -241,7 +238,7 @@ class BotoVpcResourceTestCaseMixin(BotoVpcTestCaseMixin): self.assertTrue(resource_absent_result['result']) self.assertEqual(resource_absent_result['changes'], {}) - @mock_ec2 + @mock_ec2_deprecated def test_absent_when_resource_exists(self): vpc = self._create_vpc(name='test') self._create_resource(vpc_id=vpc.id, name='test') @@ -253,7 +250,7 @@ class BotoVpcResourceTestCaseMixin(BotoVpcTestCaseMixin): exists = self.funcs['boto_vpc.resource_exists'](self.resource_type, 'test').get('exists') self.assertFalse(exists) - @mock_ec2 + @mock_ec2_deprecated @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493') def test_absent_with_failure(self): vpc = self._create_vpc(name='test') @@ -291,6 +288,9 @@ class BotoVpcInternetGatewayTestCase(BotoVpcStateTestCaseBase, BotoVpcResourceTe @skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(six.PY3, 'Disabled for Python 3 due to upstream bugs: ' + 'https://github.com/spulec/moto/issues/548 and ' + 'https://github.com/gabrielfalcao/HTTPretty/issues/325') @skipIf(HAS_BOTO is False, 'The boto module must be installed.') @skipIf(HAS_MOTO is False, 'The moto module must be installed.') @skipIf(_has_required_boto() is False, 'The boto module must be greater than' @@ -301,7 +301,7 @@ class BotoVpcRouteTableTestCase(BotoVpcStateTestCaseBase, BotoVpcResourceTestCas backend_create = 'RouteTableBackend.create_route_table' backend_delete = 'RouteTableBackend.delete_route_table' - @mock_ec2 + @mock_ec2_deprecated def test_present_with_subnets(self): vpc = self._create_vpc(name='test') subnet1 = self._create_subnet(vpc_id=vpc.id, name='test1') @@ -326,7 +326,7 @@ class BotoVpcRouteTableTestCase(BotoVpcStateTestCaseBase, BotoVpcResourceTestCas new_subnets = changes['new']['subnets_associations'] self.assertEqual(new_subnets[0]['subnet_id'], subnet2.id) - @mock_ec2 + @mock_ec2_deprecated def test_present_with_routes(self): vpc = self._create_vpc(name='test') igw = self._create_internet_gateway(name='test', vpc_id=vpc.id) diff --git a/tests/unit/states/test_docker_network.py b/tests/unit/states/test_docker_network.py index 172deae7ac..c1d885c976 100644 --- a/tests/unit/states/test_docker_network.py +++ b/tests/unit/states/test_docker_network.py @@ -43,10 +43,18 @@ class DockerNetworkTestCase(TestCase, LoaderModuleMockMixin): docker_create_network = Mock(return_value='created') docker_connect_container_to_network = Mock(return_value='connected') docker_inspect_container = Mock(return_value={'Id': 'abcd'}) + # Get docker.networks to return a network with a name which is a superset of the name of + # the network which is to be created, despite this network existing we should still expect + # that the new network will be created. + # Regression test for #41982. 
+ docker_networks = Mock(return_value=[{ + 'Name': 'network_foobar', + 'Containers': {'container': {}} + }]) __salt__ = {'docker.create_network': docker_create_network, 'docker.inspect_container': docker_inspect_container, 'docker.connect_container_to_network': docker_connect_container_to_network, - 'docker.networks': Mock(return_value=[]), + 'docker.networks': docker_networks, } with patch.dict(docker_state.__dict__, {'__salt__': __salt__}): @@ -72,10 +80,14 @@ class DockerNetworkTestCase(TestCase, LoaderModuleMockMixin): ''' docker_remove_network = Mock(return_value='removed') docker_disconnect_container_from_network = Mock(return_value='disconnected') + docker_networks = Mock(return_value=[{ + 'Name': 'network_foo', + 'Containers': {'container': {}} + }]) __salt__ = { 'docker.remove_network': docker_remove_network, 'docker.disconnect_container_from_network': docker_disconnect_container_from_network, - 'docker.networks': Mock(return_value=[{'Containers': {'container': {}}}]), + 'docker.networks': docker_networks, } with patch.dict(docker_state.__dict__, {'__salt__': __salt__}): @@ -88,3 +100,32 @@ class DockerNetworkTestCase(TestCase, LoaderModuleMockMixin): 'changes': {'disconnected': 'disconnected', 'removed': 'removed'}, 'result': True}) + + def test_absent_with_matching_network(self): + ''' + Test docker_network.absent when the specified network does not exist, + but another network with a name which is a superset of the specified + name does exist. In this case we expect there to be no attempt to remove + any network. + Regression test for #41982. + ''' + docker_remove_network = Mock(return_value='removed') + docker_disconnect_container_from_network = Mock(return_value='disconnected') + docker_networks = Mock(return_value=[{ + 'Name': 'network_foobar', + 'Containers': {'container': {}} + }]) + __salt__ = { + 'docker.remove_network': docker_remove_network, + 'docker.disconnect_container_from_network': docker_disconnect_container_from_network, + 'docker.networks': docker_networks, + } + with patch.dict(docker_state.__dict__, + {'__salt__': __salt__}): + ret = docker_state.absent('network_foo') + docker_disconnect_container_from_network.assert_not_called() + docker_remove_network.assert_not_called() + self.assertEqual(ret, {'name': 'network_foo', + 'comment': 'Network \'network_foo\' already absent', + 'changes': {}, + 'result': True}) diff --git a/tests/unit/states/test_elasticsearch.py b/tests/unit/states/test_elasticsearch.py index 112ee869bb..0c2a5da022 100644 --- a/tests/unit/states/test_elasticsearch.py +++ b/tests/unit/states/test_elasticsearch.py @@ -17,8 +17,8 @@ from tests.support.mock import ( from salt.exceptions import CommandExecutionError # Import Salt Libs +import salt.utils.dictdiffer as dictdiffer from salt.states import elasticsearch -from salt.utils import dictdiffer @skipIf(NO_MOCK, NO_MOCK_REASON) diff --git a/tests/unit/states/test_environ.py b/tests/unit/states/test_environ.py index 45a8e5fb1a..290015a503 100644 --- a/tests/unit/states/test_environ.py +++ b/tests/unit/states/test_environ.py @@ -49,7 +49,7 @@ class TestEnvironState(TestCase, LoaderModuleMockMixin): def test_setenv_permanent(self): with patch.dict(envmodule.__salt__, {'reg.set_value': MagicMock(), 'reg.delete_value': MagicMock()}), \ - patch('salt.utils.is_windows', MagicMock(return_value=True)): + patch('salt.utils.platform.is_windows', MagicMock(return_value=True)): ret = envstate.setenv('test', 'value', permanent=True) self.assertEqual(ret['changes'], {'test': 'value'}) 
envmodule.__salt__['reg.set_value'].assert_called_with("HKCU", "Environment", 'test', 'value') diff --git a/tests/unit/states/test_file.py b/tests/unit/states/test_file.py index 492eec253c..4febdd57be 100644 --- a/tests/unit/states/test_file.py +++ b/tests/unit/states/test_file.py @@ -31,9 +31,9 @@ from tests.support.mock import ( import yaml # Import salt libs -import salt import salt.utils import salt.utils.files +import salt.utils.platform import salt.states.file as filestate import salt.serializers.yaml as yamlserializer import salt.serializers.json as jsonserializer @@ -100,7 +100,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin): def test_contents_pillar_doesnt_add_more_newlines(self): # make sure the newline - pillar_value = 'i am the pillar value\n' + pillar_value = 'i am the pillar value{0}'.format(os.linesep) self.run_contents_pillar(pillar_value, expected=pillar_value) @@ -140,7 +140,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin): test_dir = '/tmp' user = 'salt' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): group = 'salt' else: group = 'saltstack' @@ -167,7 +167,9 @@ class TestFileState(TestCase, LoaderModuleMockMixin): with patch.dict(filestate.__salt__, {'config.manage_mode': mock_t, 'file.user_to_uid': mock_empty, - 'file.group_to_gid': mock_empty}): + 'file.group_to_gid': mock_empty, + 'user.info': mock_empty, + 'user.current': mock_user}): comt = ('User {0} does not exist. Group {1} does not exist.'.format(user, group)) ret.update({'comment': comt, 'name': name}) self.assertDictEqual(filestate.symlink(name, target, user=user, @@ -176,7 +178,9 @@ class TestFileState(TestCase, LoaderModuleMockMixin): with patch.dict(filestate.__salt__, {'config.manage_mode': mock_t, 'file.user_to_uid': mock_uid, 'file.group_to_gid': mock_gid, - 'file.is_link': mock_f}): + 'file.is_link': mock_f, + 'user.info': mock_empty, + 'user.current': mock_user}): with patch.dict(filestate.__opts__, {'test': True}): with patch.object(os.path, 'exists', mock_f): comt = ('Symlink {0} to {1}' @@ -191,7 +195,9 @@ class TestFileState(TestCase, LoaderModuleMockMixin): with patch.dict(filestate.__salt__, {'config.manage_mode': mock_t, 'file.user_to_uid': mock_uid, 'file.group_to_gid': mock_gid, - 'file.is_link': mock_f}): + 'file.is_link': mock_f, + 'user.info': mock_empty, + 'user.current': mock_user}): with patch.dict(filestate.__opts__, {'test': False}): with patch.object(os.path, 'isdir', mock_f): with patch.object(os.path, 'exists', mock_f): @@ -207,7 +213,9 @@ class TestFileState(TestCase, LoaderModuleMockMixin): 'file.user_to_uid': mock_uid, 'file.group_to_gid': mock_gid, 'file.is_link': mock_t, - 'file.readlink': mock_target}): + 'file.readlink': mock_target, + 'user.info': mock_empty, + 'user.current': mock_user}): with patch.dict(filestate.__opts__, {'test': False}): with patch.object(os.path, 'isdir', mock_t): with patch.object(salt.states.file, '_check_symlink_ownership', mock_t): @@ -224,7 +232,9 @@ class TestFileState(TestCase, LoaderModuleMockMixin): 'file.user_to_uid': mock_uid, 'file.group_to_gid': mock_gid, 'file.is_link': mock_f, - 'file.readlink': mock_target}): + 'file.readlink': mock_target, + 'user.info': mock_empty, + 'user.current': mock_user}): with patch.dict(filestate.__opts__, {'test': False}): with patch.object(os.path, 'isdir', mock_t): with patch.object(os.path, 'exists', mock_f): @@ -243,7 +253,9 @@ class TestFileState(TestCase, LoaderModuleMockMixin): 'file.user_to_uid': mock_uid, 'file.group_to_gid': mock_gid, 'file.is_link': mock_f, - 
'file.readlink': mock_target}): + 'file.readlink': mock_target, + 'user.info': mock_empty, + 'user.current': mock_user}): with patch.dict(filestate.__opts__, {'test': False}): with patch.object(os.path, 'isdir', mock_t): with patch.object(os.path, 'exists', mock_f): @@ -538,7 +550,10 @@ class TestFileState(TestCase, LoaderModuleMockMixin): 'G12', 'G12', 'G12', 'G12', 'G12']) mock_if = MagicMock(side_effect=[True, False, False, False, False, False, False, False]) - mock_ret = MagicMock(return_value=(ret, None)) + if salt.utils.is_windows(): + mock_ret = MagicMock(return_value=ret) + else: + mock_ret = MagicMock(return_value=(ret, None)) mock_dict = MagicMock(return_value={}) mock_cp = MagicMock(side_effect=[Exception, True]) mock_ex = MagicMock(side_effect=[Exception, {'changes': {name: name}}, @@ -573,8 +588,14 @@ class TestFileState(TestCase, LoaderModuleMockMixin): self.assertDictEqual(filestate.managed(name, create=False), ret) - comt = ('User salt is not available Group saltstack' - ' is not available') + # Group argument is ignored on Windows systems. Group is set to + # user + if salt.utils.is_windows(): + comt = ('User salt is not available Group salt' + ' is not available') + else: + comt = ('User salt is not available Group saltstack' + ' is not available') ret.update({'comment': comt, 'result': False}) self.assertDictEqual(filestate.managed(name, user=user, group=group), ret) @@ -711,26 +732,37 @@ class TestFileState(TestCase, LoaderModuleMockMixin): mock_t = MagicMock(return_value=True) mock_f = MagicMock(return_value=False) - mock_perms = MagicMock(return_value=(ret, '')) + if salt.utils.is_windows(): + mock_perms = MagicMock(return_value=ret) + else: + mock_perms = MagicMock(return_value=(ret, '')) mock_uid = MagicMock(side_effect=['', 'U12', 'U12', 'U12', 'U12', 'U12', 'U12', 'U12', 'U12', 'U12', 'U12']) mock_gid = MagicMock(side_effect=['', 'G12', 'G12', 'G12', 'G12', 'G12', 'G12', 'G12', 'G12', 'G12', 'G12']) + mock_check = MagicMock(return_value=( + None, + 'The directory "{0}" will be changed'.format(name), + {'directory': 'new'})) + mock_error = CommandExecutionError with patch.dict(filestate.__salt__, {'config.manage_mode': mock_t, 'file.user_to_uid': mock_uid, 'file.group_to_gid': mock_gid, 'file.stats': mock_f, 'file.check_perms': mock_perms, - 'file.mkdir': mock_t}): - if salt.utils.is_windows(): + 'file.mkdir': mock_t}), \ + patch('salt.utils.win_dacl.get_sid', mock_error), \ + patch('os.path.isdir', mock_t), \ + patch('salt.states.file._check_directory_win', mock_check): + if salt.utils.platform.is_windows(): comt = ('User salt is not available Group salt' ' is not available') else: comt = ('User salt is not available Group saltstack' ' is not available') ret.update({'comment': comt, 'name': name}) - self.assertDictEqual(filestate.directory(name, user=user, - group=group), ret) + self.assertDictEqual( + filestate.directory(name, user=user, group=group), ret) with patch.object(os.path, 'isabs', mock_f): comt = ('Specified file {0} is not an absolute path' @@ -771,9 +803,19 @@ class TestFileState(TestCase, LoaderModuleMockMixin): with patch.object(os.path, 'isfile', mock_f): with patch.dict(filestate.__opts__, {'test': True}): - comt = ('The following files will be changed:\n{0}:' - ' directory - new\n'.format(name)) - ret.update({'comment': comt, 'result': None, 'pchanges': {'/etc/grub.conf': {'directory': 'new'}}}) + if salt.utils.is_windows(): + comt = 'The directory "{0}" will be changed' \ + ''.format(name) + p_chg = {'directory': 'new'} + else: + comt = ('The 
following files will be changed:\n{0}:' + ' directory - new\n'.format(name)) + p_chg = {'/etc/grub.conf': {'directory': 'new'}} + ret.update({ + 'comment': comt, + 'result': None, + 'pchanges': p_chg + }) self.assertDictEqual(filestate.directory(name, user=user, group=group), @@ -845,8 +887,14 @@ class TestFileState(TestCase, LoaderModuleMockMixin): 'file.source_list': mock_lst, 'cp.list_master_dirs': mock_emt, 'cp.list_master': mock_l}): - comt = ('User salt is not available Group saltstack' - ' is not available') + + # Group argument is ignored on Windows systems. Group is set to user + if salt.utils.is_windows(): + comt = ('User salt is not available Group salt' + ' is not available') + else: + comt = ('User salt is not available Group saltstack' + ' is not available') ret.update({'comment': comt}) self.assertDictEqual(filestate.recurse(name, source, user=user, group=group), ret) @@ -954,7 +1002,8 @@ class TestFileState(TestCase, LoaderModuleMockMixin): ret.update({'comment': comt, 'name': name}) self.assertDictEqual(filestate.blockreplace(name), ret) - with patch.object(os.path, 'isabs', mock_t): + with patch.object(os.path, 'isabs', mock_t), \ + patch.object(os.path, 'exists', mock_t): with patch.dict(filestate.__salt__, {'file.blockreplace': mock_t}): with patch.dict(filestate.__opts__, {'test': True}): comt = ('Changes would be made') @@ -970,7 +1019,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin): Test to comment out specified lines in a file. ''' with patch.object(os.path, 'exists', MagicMock(return_value=True)): - name = '/etc/aliases' if salt.utils.is_darwin() else '/etc/fstab' + name = '/etc/aliases' if salt.utils.platform.is_darwin() else '/etc/fstab' regex = 'bind 127.0.0.1' ret = {'name': name, @@ -1025,7 +1074,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin): Test to uncomment specified commented lines in a file ''' with patch.object(os.path, 'exists', MagicMock(return_value=True)): - name = '/etc/aliases' if salt.utils.is_darwin() else '/etc/fstab' + name = '/etc/aliases' if salt.utils.platform.is_darwin() else '/etc/fstab' regex = 'bind 127.0.0.1' ret = {'name': name, @@ -1312,8 +1361,15 @@ class TestFileState(TestCase, LoaderModuleMockMixin): 'file.get_group': mock_grp, 'file.get_mode': mock_grp, 'file.check_perms': mock_t}): - comt = ('User salt is not available Group ' - 'saltstack is not available') + + # Group argument is ignored on Windows systems. Group is set + # to user + if salt.utils.is_windows(): + comt = ('User salt is not available Group salt' + ' is not available') + else: + comt = ('User salt is not available Group saltstack' + ' is not available') ret.update({'comment': comt, 'result': False}) self.assertDictEqual(filestate.copy(name, source, user=user, group=group), ret) diff --git a/tests/unit/states/test_ldap.py b/tests/unit/states/test_ldap.py index 51d794c91c..cb7cf372c5 100644 --- a/tests/unit/states/test_ldap.py +++ b/tests/unit/states/test_ldap.py @@ -12,8 +12,9 @@ with something sensible. 
from __future__ import absolute_import import copy -import salt.ext.six as six +from salt.ext import six import salt.states.ldap +from salt.utils.oset import OrderedSet from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import skipIf, TestCase @@ -34,20 +35,20 @@ def _init_db(newdb=None): def _complex_db(): return { 'dnfoo': { - 'attrfoo1': set(( + 'attrfoo1': OrderedSet(( 'valfoo1.1', 'valfoo1.2', )), - 'attrfoo2': set(( + 'attrfoo2': OrderedSet(( 'valfoo2.1', )), }, 'dnbar': { - 'attrbar1': set(( + 'attrbar1': OrderedSet(( 'valbar1.1', 'valbar1.2', )), - 'attrbar2': set(( + 'attrbar2': OrderedSet(( 'valbar2.1', )), }, @@ -72,7 +73,7 @@ def _dummy_connect(connect_spec): def _dummy_search(connect_spec, base, scope): if base not in db: return {} - return {base: dict(((attr, sorted(db[base][attr])) + return {base: dict(((attr, list(db[base][attr])) for attr in db[base] if len(db[base][attr])))} @@ -83,7 +84,7 @@ def _dummy_add(connect_spec, dn, attributes): db[dn] = {} for attr, vals in six.iteritems(attributes): assert len(vals) - db[dn][attr] = set(vals) + db[dn][attr] = OrderedSet(vals) return True @@ -100,7 +101,7 @@ def _dummy_change(connect_spec, dn, before, after): assert dn in db e = db[dn] assert e == before - all_attrs = set() + all_attrs = OrderedSet() all_attrs.update(before) all_attrs.update(after) directives = [] @@ -131,7 +132,7 @@ def _dummy_modify(connect_spec, dn, directives): for op, attr, vals in directives: if op == 'add': assert len(vals) - existing_vals = e.setdefault(attr, set()) + existing_vals = e.setdefault(attr, OrderedSet()) for val in vals: assert val not in existing_vals existing_vals.add(val) @@ -149,7 +150,7 @@ def _dummy_modify(connect_spec, dn, directives): del e[attr] elif op == 'replace': e.pop(attr, None) - e[attr] = set(vals) + e[attr] = OrderedSet(vals) else: raise ValueError() return True @@ -158,7 +159,7 @@ def _dummy_modify(connect_spec, dn, directives): def _dump_db(d=None): if d is None: d = db - return dict(((dn, dict(((attr, sorted(d[dn][attr])) + return dict(((dn, dict(((attr, list(d[dn][attr])) for attr in d[dn]))) for dn in d)) @@ -186,8 +187,8 @@ class LDAPTestCase(TestCase, LoaderModuleMockMixin): for dn, attrs in six.iteritems(replace): for attr, vals in six.iteritems(attrs): if len(vals): - new.setdefault(dn, {})[attr] = sorted(set(vals)) - expected_db.setdefault(dn, {})[attr] = set(vals) + new.setdefault(dn, {})[attr] = list(OrderedSet(vals)) + expected_db.setdefault(dn, {})[attr] = OrderedSet(vals) elif dn in expected_db: new[dn].pop(attr, None) expected_db[dn].pop(attr, None) @@ -195,10 +196,10 @@ class LDAPTestCase(TestCase, LoaderModuleMockMixin): new.pop(dn, None) expected_db.pop(dn, None) if delete_others: - dn_to_delete = set() + dn_to_delete = OrderedSet() for dn, attrs in six.iteritems(expected_db): if dn in replace: - to_delete = set() + to_delete = OrderedSet() for attr, vals in six.iteritems(attrs): if attr not in replace[dn]: to_delete.add(attr) @@ -305,7 +306,7 @@ class LDAPTestCase(TestCase, LoaderModuleMockMixin): def test_managed_no_net_change(self): self._test_helper_nochange( _complex_db(), - {'dnfoo': {'attrfoo1': ['valfoo1.2', 'valfoo1.1']}}) + {'dnfoo': {'attrfoo1': ['valfoo1.1', 'valfoo1.2']}}) def test_managed_repeated_values(self): self._test_helper_success( diff --git a/tests/unit/states/test_libvirt.py b/tests/unit/states/test_libvirt.py index 9301ac5332..7be24eba27 100644 --- a/tests/unit/states/test_libvirt.py +++ b/tests/unit/states/test_libvirt.py @@ -20,7 +20,7 @@ from 
tests.support.mock import ( # Import Salt Libs import salt.states.virt as virt -import salt.utils +import salt.utils.files @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -67,7 +67,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): self.assertDictEqual(virt.keys(name, basepath=self.pki_dir), ret) with patch.dict(virt.__opts__, {'test': False}): - with patch.object(salt.utils, 'fopen', MagicMock(mock_open())): + with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())): comt = ('Updated libvirt certs and keys') ret.update({'comment': comt, 'result': True, 'changes': {'servercert': 'new'}}) @@ -102,7 +102,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): expiration_days=700), ret) with patch.dict(virt.__opts__, {'test': False}): - with patch.object(salt.utils, 'fopen', MagicMock(mock_open())): + with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())): comt = ('Updated libvirt certs and keys') ret.update({'comment': comt, 'result': True, 'changes': {'servercert': 'new'}}) @@ -139,7 +139,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): st='California'), ret) with patch.dict(virt.__opts__, {'test': False}): - with patch.object(salt.utils, 'fopen', MagicMock(mock_open())): + with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())): comt = ('Updated libvirt certs and keys') ret.update({'comment': comt, 'result': True, 'changes': {'servercert': 'new'}}) @@ -184,7 +184,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): expiration_days=700), ret) with patch.dict(virt.__opts__, {'test': False}): - with patch.object(salt.utils, 'fopen', MagicMock(mock_open())): + with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())): comt = ('Updated libvirt certs and keys') ret.update({'comment': comt, 'result': True, 'changes': {'servercert': 'new'}}) diff --git a/tests/unit/states/test_linux_acl.py b/tests/unit/states/test_linux_acl.py index 3ade701b82..f5488d2ae4 100644 --- a/tests/unit/states/test_linux_acl.py +++ b/tests/unit/states/test_linux_acl.py @@ -15,8 +15,10 @@ from tests.support.mock import ( MagicMock, patch) + # Import Salt Libs import salt.states.linux_acl as linux_acl +from salt.exceptions import CommandExecutionError @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -34,34 +36,120 @@ class LinuxAclTestCase(TestCase, LoaderModuleMockMixin): ''' Test to ensure a Linux ACL is present ''' + self.maxDiff = None name = '/root' acl_type = 'users' acl_name = 'damian' perms = 'rwx' - ret = {'name': name, - 'result': None, - 'comment': '', - 'changes': {}} - mock = MagicMock(side_effect=[{name: {acl_type: [{acl_name: - {'octal': 'A'}}]}}, + {'octal': 'A'}}]}}, + {name: {acl_type: [{acl_name: + {'octal': 'A'}}]}}, + {name: {acl_type: [{acl_name: + {'octal': 'A'}}]}}, + {name: {acl_type: [{}]}}, + {name: {acl_type: [{}]}}, {name: {acl_type: [{}]}}, {name: {acl_type: ''}}]) + mock_modfacl = MagicMock(return_value=True) + with patch.dict(linux_acl.__salt__, {'acl.getfacl': mock}): + # Update - test=True with patch.dict(linux_acl.__opts__, {'test': True}): - comt = ('Permissions have been updated') - ret.update({'comment': comt}) + comt = ('Updated permissions will be applied for {0}: A -> {1}' + ''.format(acl_name, perms)) + ret = {'name': name, + 'comment': comt, + 'changes': {}, + 'pchanges': {'new': {'acl_name': acl_name, + 'acl_type': acl_type, + 'perms': perms}, + 'old': {'acl_name': acl_name, + 'acl_type': acl_type, + 'perms': 'A'}}, + 'result': None} + self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, perms), ret) - - comt 
= ('Permissions will be applied') - ret.update({'comment': comt}) - self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, - perms), ret) - + # Update - test=False + with patch.dict(linux_acl.__salt__, {'acl.modfacl': mock_modfacl}): + with patch.dict(linux_acl.__opts__, {'test': False}): + comt = ('Updated permissions for {0}'.format(acl_name)) + ret = {'name': name, + 'comment': comt, + 'changes': {'new': {'acl_name': acl_name, + 'acl_type': acl_type, + 'perms': perms}, + 'old': {'acl_name': acl_name, + 'acl_type': acl_type, + 'perms': 'A'}}, + 'pchanges': {}, + 'result': True} + self.assertDictEqual(linux_acl.present(name, acl_type, + acl_name, perms), + ret) + # Update - modfacl error + with patch.dict(linux_acl.__salt__, {'acl.modfacl': MagicMock( + side_effect=CommandExecutionError('Custom err'))}): + with patch.dict(linux_acl.__opts__, {'test': False}): + comt = ('Error updating permissions for {0}: Custom err' + ''.format(acl_name)) + ret = {'name': name, + 'comment': comt, + 'changes': {}, + 'pchanges': {}, + 'result': False} + self.assertDictEqual(linux_acl.present(name, acl_type, + acl_name, perms), + ret) + # New - test=True + with patch.dict(linux_acl.__salt__, {'acl.modfacl': mock_modfacl}): + with patch.dict(linux_acl.__opts__, {'test': True}): + comt = ('New permissions will be applied ' + 'for {0}: {1}'.format(acl_name, perms)) + ret = {'name': name, + 'comment': comt, + 'changes': {}, + 'pchanges': {'new': {'acl_name': acl_name, + 'acl_type': acl_type, + 'perms': perms}}, + 'result': None} + self.assertDictEqual(linux_acl.present(name, acl_type, + acl_name, perms), + ret) + # New - test=False + with patch.dict(linux_acl.__salt__, {'acl.modfacl': mock_modfacl}): + with patch.dict(linux_acl.__opts__, {'test': False}): + comt = ('Applied new permissions for {0}'.format(acl_name)) + ret = {'name': name, + 'comment': comt, + 'changes': {'new': {'acl_name': acl_name, + 'acl_type': acl_type, + 'perms': perms}}, + 'pchanges': {}, + 'result': True} + self.assertDictEqual(linux_acl.present(name, acl_type, + acl_name, perms), + ret) + # New - modfacl error + with patch.dict(linux_acl.__salt__, {'acl.modfacl': MagicMock( + side_effect=CommandExecutionError('Custom err'))}): + with patch.dict(linux_acl.__opts__, {'test': False}): + comt = ('Error updating permissions for {0}: Custom err' + ''.format(acl_name)) + ret = {'name': name, + 'comment': comt, + 'changes': {}, + 'pchanges': {}, + 'result': False} + self.assertDictEqual(linux_acl.present(name, acl_type, + acl_name, perms), + ret) + # No acl type comt = ('ACL Type does not exist') - ret.update({'comment': comt, 'result': False}) + ret = {'name': name, 'comment': comt, 'result': False, + 'changes': {}, 'pchanges': {}} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, perms), ret) diff --git a/tests/unit/states/test_lxc.py b/tests/unit/states/test_lxc.py index 868636d196..a940839cd5 100644 --- a/tests/unit/states/test_lxc.py +++ b/tests/unit/states/test_lxc.py @@ -16,7 +16,7 @@ from tests.support.mock import ( # Import Salt Libs import salt.states.lxc as lxc -import salt.utils +import salt.utils.versions @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -246,7 +246,7 @@ class LxcTestCase(TestCase, LoaderModuleMockMixin): 'comment': comment, 'changes': {}} - with patch.object(salt.utils, 'warn_until', MagicMock()): + with patch.object(salt.utils.versions, 'warn_until', MagicMock()): with patch.dict(lxc.__opts__, {'test': True}): self.assertDictEqual(lxc.edited_conf(name), ret) diff --git a/tests/unit/states/test_modjk.py 
b/tests/unit/states/test_modjk.py index 1dcfe00691..c91180fe18 100644 --- a/tests/unit/states/test_modjk.py +++ b/tests/unit/states/test_modjk.py @@ -13,7 +13,7 @@ from tests.support.mock import ( # Import Salt Libs import salt.states.modjk as modjk -import salt.ext.six as six +from salt.ext import six if six.PY2: diff --git a/tests/unit/states/test_module.py b/tests/unit/states/test_module.py index 54f3768d37..4588e20ca8 100644 --- a/tests/unit/states/test_module.py +++ b/tests/unit/states/test_module.py @@ -122,7 +122,7 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin): with patch.dict(module.__salt__, {CMD: _mocked_func_named}): with patch.dict(module.__opts__, {'use_superseded': ['module.run']}): ret = module.run(**{CMD: None}) - assert ret['comment'] == "'{0}' failed: Missing arguments: name".format(CMD) + assert ret['comment'] == "'{0}' failed: Function expects 1 parameters, got only 0".format(CMD) def test_run_correct_arg(self): ''' @@ -131,7 +131,7 @@ class ModuleStateTest(TestCase, LoaderModuleMockMixin): ''' with patch.dict(module.__salt__, {CMD: _mocked_func_named}): with patch.dict(module.__opts__, {'use_superseded': ['module.run']}): - ret = module.run(**{CMD: [{'name': 'Fred'}]}) + ret = module.run(**{CMD: ['Fred']}) assert ret['comment'] == '{0}: Success'.format(CMD) assert ret['result'] diff --git a/tests/unit/states/test_postgres.py b/tests/unit/states/test_postgres.py index 7950c2fdf6..a1a24278cc 100644 --- a/tests/unit/states/test_postgres.py +++ b/tests/unit/states/test_postgres.py @@ -21,7 +21,7 @@ import salt.states.postgres_schema as postgres_schema class PostgresUserTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): - patcher = patch('salt.utils.which', Mock(return_value='/usr/bin/pgsql')) + patcher = patch('salt.utils.path.which', Mock(return_value='/usr/bin/pgsql')) patcher.start() self.addCleanup(patcher.stop) self.salt_stub = { @@ -166,7 +166,7 @@ class PostgresUserTestCase(TestCase, LoaderModuleMockMixin): class PostgresGroupTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): - patcher = patch('salt.utils.which', Mock(return_value='/usr/bin/pgsql')) + patcher = patch('salt.utils.path.which', Mock(return_value='/usr/bin/pgsql')) patcher.start() self.addCleanup(patcher.stop) self.salt_stub = { @@ -311,7 +311,7 @@ class PostgresGroupTestCase(TestCase, LoaderModuleMockMixin): class PostgresExtensionTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): - patcher = patch('salt.utils.which', Mock(return_value='/usr/bin/pgsql')) + patcher = patch('salt.utils.path.which', Mock(return_value='/usr/bin/pgsql')) patcher.start() self.addCleanup(patcher.stop) self.salt_stub = { @@ -500,7 +500,7 @@ class PostgresExtensionTestCase(TestCase, LoaderModuleMockMixin): class PostgresSchemaTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): - patcher = patch('salt.utils.which', Mock(return_value='/usr/bin/pgsql')) + patcher = patch('salt.utils.path.which', Mock(return_value='/usr/bin/pgsql')) patcher.start() self.addCleanup(patcher.stop) self.salt_stub = { diff --git a/tests/unit/states/test_proxy.py b/tests/unit/states/test_proxy.py index 30990f614e..ceba58e876 100644 --- a/tests/unit/states/test_proxy.py +++ b/tests/unit/states/test_proxy.py @@ -15,7 +15,7 @@ from tests.support.mock import ( ) # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six # Import Salt Libs import salt.states.proxy as proxy diff --git a/tests/unit/states/test_rbenv.py 
b/tests/unit/states/test_rbenv.py index d368a54e52..7ab9c1f1fd 100644 --- a/tests/unit/states/test_rbenv.py +++ b/tests/unit/states/test_rbenv.py @@ -33,36 +33,71 @@ class RbenvTestCase(TestCase, LoaderModuleMockMixin): ''' Test to verify that the specified ruby is installed with rbenv. ''' - name = 'rbenv-deps' + # rbenv.is_installed is used wherever test is False. + mock_is = MagicMock(side_effect=[False, True, True, True, True]) - ret = {'name': name, - 'changes': {}, - 'result': True, - 'comment': ''} + # rbenv.install is only called when an action is attempted + # (ie. Successfully... or Failed...) + mock_i = MagicMock(side_effect=[False, False, False]) - mock_t = MagicMock(side_effect=[False, True, True]) - mock_f = MagicMock(return_value=False) - mock_def = MagicMock(return_value='2.7') - mock_ver = MagicMock(return_value=['2.7']) + # rbenv.install_ruby is only called when rbenv is successfully + # installed and an attempt to install a version of Ruby is + # made. + mock_ir = MagicMock(side_effect=[True, False]) + mock_def = MagicMock(return_value='2.3.4') + mock_ver = MagicMock(return_value=['2.3.4', '2.4.1']) with patch.dict(rbenv.__salt__, - {'rbenv.is_installed': mock_f, - 'rbenv.install': mock_t, + {'rbenv.is_installed': mock_is, + 'rbenv.install': mock_i, 'rbenv.default': mock_def, 'rbenv.versions': mock_ver, - 'rbenv.install_ruby': mock_t}): + 'rbenv.install_ruby': mock_ir}): with patch.dict(rbenv.__opts__, {'test': True}): - comt = ('Ruby rbenv-deps is set to be installed') - ret.update({'comment': comt, 'result': None}) + name = '1.9.3-p551' + comt = 'Ruby {0} is set to be installed'.format(name) + ret = {'name': name, 'changes': {}, 'comment': comt, + 'result': None} + self.assertDictEqual(rbenv.installed(name), ret) + + name = '2.4.1' + comt = 'Ruby {0} is already installed'.format(name) + ret = {'name': name, 'changes': {}, 'comment': comt, + 'default': False, 'result': True} + self.assertDictEqual(rbenv.installed(name), ret) + + name = '2.3.4' + comt = 'Ruby {0} is already installed'.format(name) + ret = {'name': name, 'changes': {}, 'comment': comt, + 'default': True, 'result': True} self.assertDictEqual(rbenv.installed(name), ret) with patch.dict(rbenv.__opts__, {'test': False}): - comt = ('Rbenv failed to install') - ret.update({'comment': comt, 'result': False}) + name = '2.4.1' + comt = 'Rbenv failed to install' + ret = {'name': name, 'changes': {}, 'comment': comt, + 'result': False} self.assertDictEqual(rbenv.installed(name), ret) - comt = ('Successfully installed ruby') - ret.update({'comment': comt, 'result': True, 'default': False, - 'changes': {name: 'Installed'}}) + comt = 'Requested ruby exists' + ret = {'name': name, 'comment': comt, 'default': False, + 'changes': {}, 'result': True} + self.assertDictEqual(rbenv.installed(name), ret) + + name = '2.3.4' + comt = 'Requested ruby exists' + ret = {'name': name, 'comment': comt, 'default': True, + 'changes': {}, 'result': True} + self.assertDictEqual(rbenv.installed(name), ret) + + name = '1.9.3-p551' + comt = 'Successfully installed ruby' + ret = {'name': name, 'comment': comt, 'default': False, + 'changes': {name: 'Installed'}, 'result': True} + self.assertDictEqual(rbenv.installed(name), ret) + + comt = 'Failed to install ruby' + ret = {'name': name, 'comment': comt, + 'changes': {}, 'result': False} self.assertDictEqual(rbenv.installed(name), ret) # 'absent' function tests: 1 @@ -71,32 +106,76 @@ class RbenvTestCase(TestCase, LoaderModuleMockMixin): ''' Test to verify that the specified ruby is not 
installed with rbenv. ''' - name = 'myqueue' - - ret = {'name': name, - 'changes': {}, - 'result': True, - 'comment': ''} - - mock = MagicMock(side_effect=[False, True]) - mock_def = MagicMock(return_value='2.7') - mock_ver = MagicMock(return_value=['2.7']) + # rbenv.is_installed is used for all tests here. + mock_is = MagicMock(side_effect=[False, True, True, True, False, + True, True, True, True, True]) + # rbenv.uninstall_ruby is only called when an action is + # attempted (ie. Successfully... or Failed...) + mock_uninstalled = MagicMock(side_effect=[True, False, False, True]) + mock_def = MagicMock(return_value='2.3.4') + mock_ver = MagicMock(return_value=['2.3.4', '2.4.1']) with patch.dict(rbenv.__salt__, - {'rbenv.is_installed': mock, + {'rbenv.is_installed': mock_is, 'rbenv.default': mock_def, - 'rbenv.versions': mock_ver}): + 'rbenv.versions': mock_ver, + 'rbenv.uninstall_ruby': mock_uninstalled}): + with patch.dict(rbenv.__opts__, {'test': True}): - comt = ('Ruby myqueue is set to be uninstalled') - ret.update({'comment': comt, 'result': None}) + name = '1.9.3-p551' + comt = 'Rbenv not installed, {0} not either'.format(name) + ret = {'name': name, 'changes': {}, 'comment': comt, + 'result': True} self.assertDictEqual(rbenv.absent(name), ret) + comt = 'Ruby {0} is already uninstalled'.format(name) + ret = {'name': name, 'changes': {}, 'comment': comt, + 'result': True} + self.assertDictEqual(rbenv.absent(name), ret) + + name = '2.3.4' + comt = 'Ruby {0} is set to be uninstalled'.format(name) + ret = {'name': name, 'changes': {}, 'comment': comt, + 'default': True, 'result': None} + self.assertDictEqual(rbenv.absent('2.3.4'), ret) + + name = '2.4.1' + comt = 'Ruby {0} is set to be uninstalled'.format(name) + ret = {'name': name, 'changes': {}, 'comment': comt, + 'default': False, 'result': None} + self.assertDictEqual(rbenv.absent('2.4.1'), ret) + with patch.dict(rbenv.__opts__, {'test': False}): - comt = ('Rbenv not installed, myqueue not either') - ret.update({'comment': comt, 'result': True}) + name = '1.9.3-p551' + comt = 'Rbenv not installed, {0} not either'.format(name) + ret = {'name': name, 'changes': {}, 'comment': comt, + 'result': True} self.assertDictEqual(rbenv.absent(name), ret) - comt = ('Ruby myqueue is already absent') - ret.update({'comment': comt, 'result': True}) + comt = 'Ruby {0} is already absent'.format(name) + ret = {'name': name, 'changes': {}, 'comment': comt, + 'result': True} + self.assertDictEqual(rbenv.absent(name), ret) + + name = '2.3.4' + comt = 'Successfully removed ruby' + ret = {'name': name, 'changes': {name: 'Uninstalled'}, + 'comment': comt, 'default': True, 'result': True} + self.assertDictEqual(rbenv.absent(name), ret) + + comt = 'Failed to uninstall ruby' + ret = {'name': name, 'changes': {}, 'comment': comt, + 'default': True, 'result': False} + self.assertDictEqual(rbenv.absent(name), ret) + + name = '2.4.1' + comt = 'Failed to uninstall ruby' + ret = {'name': name, 'changes': {}, 'comment': comt, + 'default': False, 'result': False} + self.assertDictEqual(rbenv.absent(name), ret) + + comt = 'Successfully removed ruby' + ret = {'name': name, 'changes': {name: 'Uninstalled'}, + 'comment': comt, 'default': False, 'result': True} self.assertDictEqual(rbenv.absent(name), ret) # 'install_rbenv' function tests: 1 @@ -112,16 +191,30 @@ class RbenvTestCase(TestCase, LoaderModuleMockMixin): 'result': True, 'comment': ''} - with patch.dict(rbenv.__opts__, {'test': True}): - comt = ('Rbenv is set to be installed') - ret.update({'comment': comt, 
'result': None}) - self.assertDictEqual(rbenv.install_rbenv(name), ret) + mock_is = MagicMock(side_effect=[False, True, True, False, False]) + mock_i = MagicMock(side_effect=[False, True]) + with patch.dict(rbenv.__salt__, + {'rbenv.is_installed': mock_is, + 'rbenv.install': mock_i}): - with patch.dict(rbenv.__opts__, {'test': False}): - mock = MagicMock(side_effect=[False, True]) - with patch.dict(rbenv.__salt__, - {'rbenv.is_installed': mock, - 'rbenv.install': mock}): - comt = ('Rbenv installed') + with patch.dict(rbenv.__opts__, {'test': True}): + comt = 'Rbenv is set to be installed' + ret.update({'comment': comt, 'result': None}) + self.assertDictEqual(rbenv.install_rbenv(name), ret) + + comt = 'Rbenv is already installed' + ret.update({'comment': comt, 'result': True}) + self.assertDictEqual(rbenv.install_rbenv(name), ret) + + with patch.dict(rbenv.__opts__, {'test': False}): + comt = 'Rbenv is already installed' + ret.update({'comment': comt, 'result': True}) + self.assertDictEqual(rbenv.install_rbenv(name), ret) + + comt = 'Rbenv failed to install' + ret.update({'comment': comt, 'result': False}) + self.assertDictEqual(rbenv.install_rbenv(name), ret) + + comt = 'Rbenv installed' ret.update({'comment': comt, 'result': True}) self.assertDictEqual(rbenv.install_rbenv(name), ret) diff --git a/tests/unit/states/test_rvm.py b/tests/unit/states/test_rvm.py index a57f8ce565..0ccd62062c 100644 --- a/tests/unit/states/test_rvm.py +++ b/tests/unit/states/test_rvm.py @@ -12,7 +12,7 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch import salt.states.rvm as rvm # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six @skipIf(NO_MOCK, NO_MOCK_REASON) diff --git a/tests/unit/states/test_selinux.py b/tests/unit/states/test_selinux.py index 36474b95c4..f6ce3bfb8f 100644 --- a/tests/unit/states/test_selinux.py +++ b/tests/unit/states/test_selinux.py @@ -118,9 +118,11 @@ class SelinuxTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(selinux.__opts__, {'test': False}): comt = ('Boolean samba_create_home_dirs has been set to on') ret.update({'comment': comt, 'result': True}) + ret.update({'changes': {'State': {'old': 'off', 'new': 'on'}}}) self.assertDictEqual(selinux.boolean(name, value), ret) comt = ('Failed to set the boolean ' 'samba_create_home_dirs to on') - ret.update({'comment': comt, 'result': True}) + ret.update({'comment': comt, 'result': False}) + ret.update({'changes': {}}) self.assertDictEqual(selinux.boolean(name, value), ret) diff --git a/tests/unit/states/test_win_snmp.py b/tests/unit/states/test_win_snmp.py index 3618d01ab0..69ffad1c6a 100644 --- a/tests/unit/states/test_win_snmp.py +++ b/tests/unit/states/test_win_snmp.py @@ -11,7 +11,7 @@ from __future__ import absolute_import # Import Salt Libs import salt.states.win_snmp as win_snmp -import salt.ext.six as six +from salt.ext import six # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin diff --git a/tests/unit/states/test_zcbuildout.py b/tests/unit/states/test_zcbuildout.py index d4e6ec3026..a1e0a2bdef 100644 --- a/tests/unit/states/test_zcbuildout.py +++ b/tests/unit/states/test_zcbuildout.py @@ -10,7 +10,7 @@ from tests.support.unit import skipIf from tests.support.helpers import requires_network # Import Salt libs -import salt.utils +import salt.utils.path from tests.unit.modules.test_zcbuildout import Base, KNOWN_VIRTUALENV_BINARY_NAMES import salt.modules.zcbuildout as modbuildout import salt.states.zcbuildout as buildout @@ -19,7 +19,7 @@ 
import salt.modules.cmdmod as cmd ROOT = os.path.join(FILES, 'file/base/buildout') -@skipIf(salt.utils.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES) is None, +@skipIf(salt.utils.path.which_bin(KNOWN_VIRTUALENV_BINARY_NAMES) is None, 'The \'virtualenv\' packaged needs to be installed') class BuildoutTestCase(Base): diff --git a/tests/unit/templates/test_jinja.py b/tests/unit/templates/test_jinja.py index acbd55272c..146e230747 100644 --- a/tests/unit/templates/test_jinja.py +++ b/tests/unit/templates/test_jinja.py @@ -19,14 +19,13 @@ from tests.support.paths import TMP_CONF_DIR # Import salt libs import salt.config -import salt.ext.six as six +from salt.ext import six import salt.loader -import salt.utils import salt.utils.files +from salt.utils import get_context from salt.exceptions import SaltRenderError from salt.ext.six.moves import builtins -from salt.utils import get_context -from salt.utils.decorators import JinjaFilter +from salt.utils.decorators.jinja import JinjaFilter from salt.utils.jinja import ( SaltCacheLoader, SerializerExtension, @@ -537,7 +536,7 @@ class TestCustomExtensions(TestCase): env.filters.update(JinjaFilter.salt_jinja_filters) if six.PY3: rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset).strip("'{}").split("', '") - self.assertEqual(rendered, list(unique)) + self.assertEqual(sorted(rendered), sorted(list(unique))) else: rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset) self.assertEqual(rendered, u"{0}".format(unique)) diff --git a/tests/unit/test_crypt.py b/tests/unit/test_crypt.py index 7d53f22522..9e3708402c 100644 --- a/tests/unit/test_crypt.py +++ b/tests/unit/test_crypt.py @@ -89,13 +89,13 @@ SIG = ( class CryptTestCase(TestCase): def test_gen_keys(self): - with patch.multiple(os, umask=MagicMock(), chmod=MagicMock(), chown=MagicMock, + with patch.multiple(os, umask=MagicMock(), chmod=MagicMock(), access=MagicMock(return_value=True)): with patch('salt.utils.files.fopen', mock_open()): - open_priv_wb = call('/keydir/keyname.pem', 'wb+') - open_pub_wb = call('/keydir/keyname.pub', 'wb+') + open_priv_wb = call('/keydir{0}keyname.pem'.format(os.sep), 'wb+') + open_pub_wb = call('/keydir{0}keyname.pub'.format(os.sep), 'wb+') with patch('os.path.isfile', return_value=True): - self.assertEqual(crypt.gen_keys('/keydir', 'keyname', 2048), '/keydir/keyname.pem') + self.assertEqual(crypt.gen_keys('/keydir', 'keyname', 2048), '/keydir{0}keyname.pem'.format(os.sep)) self.assertNotIn(open_priv_wb, salt.utils.files.fopen.mock_calls) self.assertNotIn(open_pub_wb, salt.utils.files.fopen.mock_calls) with patch('os.path.isfile', return_value=False): @@ -103,11 +103,33 @@ class CryptTestCase(TestCase): crypt.gen_keys('/keydir', 'keyname', 2048) salt.utils.files.fopen.assert_has_calls([open_priv_wb, open_pub_wb], any_order=True) + @patch('os.umask', MagicMock()) + @patch('os.chmod', MagicMock()) + @patch('os.chown', MagicMock()) + @patch('os.access', MagicMock(return_value=True)) + def test_gen_keys_with_passphrase(self): + with patch('salt.utils.files.fopen', mock_open()): + open_priv_wb = call('/keydir/keyname.pem', 'wb+') + open_pub_wb = call('/keydir/keyname.pub', 'wb+') + with patch('os.path.isfile', return_value=True): + self.assertEqual(crypt.gen_keys('/keydir', 'keyname', 2048, passphrase='password'), '/keydir/keyname.pem') + self.assertNotIn(open_priv_wb, salt.utils.files.fopen.mock_calls) + self.assertNotIn(open_pub_wb, salt.utils.files.fopen.mock_calls) + with patch('os.path.isfile', return_value=False): + with 
patch('salt.utils.files.fopen', mock_open()): + crypt.gen_keys('/keydir', 'keyname', 2048) + salt.utils.files.fopen.assert_has_calls([open_priv_wb, open_pub_wb], any_order=True) + def test_sign_message(self): key = RSA.importKey(PRIVKEY_DATA) with patch('salt.crypt._get_rsa_key', return_value=key): self.assertEqual(SIG, salt.crypt.sign_message('/keydir/keyname.pem', MSG)) + def test_sign_message_with_passphrase(self): + key = RSA.importKey(PRIVKEY_DATA) + with patch('salt.crypt._get_rsa_key', return_value=key): + self.assertEqual(SIG, crypt.sign_message('/keydir/keyname.pem', MSG, passphrase='password')) + def test_verify_signature(self): with patch('salt.utils.files.fopen', mock_open(read_data=PUBKEY_DATA)): self.assertTrue(crypt.verify_signature('/keydir/keyname.pub', MSG, SIG)) diff --git a/tests/unit/test_fileclient.py b/tests/unit/test_fileclient.py index 077630693a..b6bd2207e0 100644 --- a/tests/unit/test_fileclient.py +++ b/tests/unit/test_fileclient.py @@ -6,6 +6,7 @@ # Import Python libs from __future__ import absolute_import import errno +import os # Import Salt Testing libs from tests.support.mock import patch, Mock @@ -38,7 +39,7 @@ class FileclientTestCase(TestCase): for exists in range(2): with patch('os.makedirs', self._fake_makedir()): with Client(self.opts)._cache_loc('testfile') as c_ref_itr: - assert c_ref_itr == '/__test__/files/base/testfile' + assert c_ref_itr == os.sep + os.sep.join(['__test__', 'files', 'base', 'testfile']) def test_cache_raises_exception_on_non_eexist_ioerror(self): ''' diff --git a/tests/unit/test_files.py b/tests/unit/test_files.py index 6b286820b9..bb425b7150 100644 --- a/tests/unit/test_files.py +++ b/tests/unit/test_files.py @@ -14,7 +14,7 @@ import tempfile from tests.support.unit import TestCase # Import Salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.files diff --git a/tests/unit/test_minion.py b/tests/unit/test_minion.py index 535dfeedfc..e60e08edf3 100644 --- a/tests/unit/test_minion.py +++ b/tests/unit/test_minion.py @@ -13,8 +13,8 @@ from tests.support.unit import TestCase, skipIf from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock from tests.support.helpers import skip_if_not_root # Import salt libs -from salt import minion -from salt.utils import event +import salt.minion as minion +import salt.utils.event as event from salt.exceptions import SaltSystemExit import salt.syspaths import tornado diff --git a/tests/unit/test_payload.py b/tests/unit/test_payload.py index d41b62695f..21f7953923 100644 --- a/tests/unit/test_payload.py +++ b/tests/unit/test_payload.py @@ -26,7 +26,7 @@ import salt.exceptions # Import 3rd-party libs import msgpack import zmq -import salt.ext.six as six +from salt.ext import six import logging diff --git a/tests/unit/test_pillar.py b/tests/unit/test_pillar.py index 4cfaa6ef3b..41a5f4efdc 100644 --- a/tests/unit/test_pillar.py +++ b/tests/unit/test_pillar.py @@ -18,6 +18,7 @@ from tests.support.paths import TMP # Import salt libs import salt.pillar +import salt.utils.stringutils @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -213,7 +214,7 @@ base: - ssh.minion - generic.minion '''.format(nodegroup_order=nodegroup_order, glob_order=glob_order) - self.top_file.write(salt.utils.to_bytes(s)) + self.top_file.write(salt.utils.stringutils.to_bytes(s)) self.top_file.flush() self.ssh_file = tempfile.NamedTemporaryFile(dir=TMP) self.ssh_file.write(b''' diff --git a/tests/unit/test_pydsl.py b/tests/unit/test_pydsl.py index 3334983bbf..20087ad17a 100644 --- 
a/tests/unit/test_pydsl.py +++ b/tests/unit/test_pydsl.py @@ -16,14 +16,14 @@ from tests.support.paths import TMP # Import Salt libs import salt.loader import salt.config -import salt.utils import salt.utils.files +import salt.utils.versions from salt.state import HighState from salt.utils.pydsl import PyDslError # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import StringIO @@ -91,7 +91,7 @@ class PyDSLRendererTestCase(CommonTestCaseBoilerplate): def render_sls(self, content, sls='', saltenv='base', **kws): if 'env' in kws: - salt.utils.warn_until( + salt.utils.versions.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' diff --git a/tests/unit/test_stateconf.py b/tests/unit/test_stateconf.py index 5396d0dac0..ea7e9754d3 100644 --- a/tests/unit/test_stateconf.py +++ b/tests/unit/test_stateconf.py @@ -17,7 +17,7 @@ from salt.exceptions import SaltRenderError from salt.ext.six.moves import StringIO # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six REQUISITES = ['require', 'require_in', 'use', 'use_in', 'watch', 'watch_in'] diff --git a/tests/unit/transport/mixins.py b/tests/unit/transport/mixins.py index a42c2dabc8..43333050e7 100644 --- a/tests/unit/transport/mixins.py +++ b/tests/unit/transport/mixins.py @@ -7,7 +7,7 @@ from __future__ import absolute_import import salt.transport.client # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six class ReqChannelMixin(object): diff --git a/tests/unit/transport/test_tcp.py b/tests/unit/transport/test_tcp.py index a3b1c89eb9..fb8e6701ad 100644 --- a/tests/unit/transport/test_tcp.py +++ b/tests/unit/transport/test_tcp.py @@ -13,8 +13,9 @@ import tornado.concurrent from tornado.testing import AsyncTestCase, gen_test import salt.config -import salt.ext.six as six -import salt.utils +from salt.ext import six +import salt.utils.platform +import salt.utils.process import salt.transport.server import salt.transport.client import salt.exceptions @@ -100,7 +101,7 @@ class BaseTCPReqCase(TestCase, AdaptedConfigurationTestCaseMixin): raise tornado.gen.Return((payload, {'fun': 'send_clear'})) -@skipIf(salt.utils.is_darwin(), 'hanging test suite on MacOS') +@skipIf(salt.utils.platform.is_darwin(), 'hanging test suite on MacOS') class ClearReqTestCases(BaseTCPReqCase, ReqChannelMixin): ''' Test all of the clear msg stuff @@ -120,7 +121,7 @@ class ClearReqTestCases(BaseTCPReqCase, ReqChannelMixin): raise tornado.gen.Return((payload, {'fun': 'send_clear'})) -@skipIf(salt.utils.is_darwin(), 'hanging test suite on MacOS') +@skipIf(salt.utils.platform.is_darwin(), 'hanging test suite on MacOS') class AESReqTestCases(BaseTCPReqCase, ReqChannelMixin): def setUp(self): self.channel = salt.transport.client.ReqChannel.factory(self.minion_config) diff --git a/tests/unit/transport/test_zeromq.py b/tests/unit/transport/test_zeromq.py index 651cf11cdb..fee8002322 100644 --- a/tests/unit/transport/test_zeromq.py +++ b/tests/unit/transport/test_zeromq.py @@ -25,7 +25,7 @@ import tornado.gen # Import Salt libs import salt.config -import salt.ext.six as six +from salt.ext import six import salt.utils import salt.transport.server import salt.transport.client diff --git a/tests/unit/utils/cache_mods/cache_mod.py b/tests/unit/utils/cache_mods/cache_mod.py index 40032a5832..91fe9ee687 100644 --- a/tests/unit/utils/cache_mods/cache_mod.py +++ 
b/tests/unit/utils/cache_mods/cache_mod.py @@ -1,14 +1,15 @@ +# -*- coding: utf-8 -*- +''' +This is a module used in unit.utils.cache to test the context wrapper functions +''' + import salt.utils.cache -''' -This is a module used in -unit.utils.cache to test -the context wrapper functions. -''' def __virtual__(): return True + @salt.utils.cache.context_cache def test_context_module(): if 'called' in __context__: diff --git a/tests/unit/utils/test_args.py b/tests/unit/utils/test_args.py index 928a9f60cd..a92851a025 100644 --- a/tests/unit/utils/test_args.py +++ b/tests/unit/utils/test_args.py @@ -2,16 +2,15 @@ # Import python libs from __future__ import absolute_import +from collections import namedtuple # Import Salt Libs -from salt.utils import args +import salt.utils.args # Import Salt Testing Libs -from tests.support.unit import TestCase, skipIf -from tests.support.mock import NO_MOCK, NO_MOCK_REASON +from tests.support.unit import TestCase -@skipIf(NO_MOCK, NO_MOCK_REASON) class ArgsTestCase(TestCase): ''' TestCase for salt.utils.args module @@ -21,5 +20,28 @@ class ArgsTestCase(TestCase): ''' Test passing a jid on the command line ''' - cmd = args.condition_input(['*', 'foo.bar', 20141020201325675584], None) + cmd = salt.utils.args.condition_input(['*', 'foo.bar', 20141020201325675584], None) self.assertIsInstance(cmd[2], str) + + def test_clean_kwargs(self): + self.assertDictEqual(salt.utils.args.clean_kwargs(foo='bar'), {'foo': 'bar'}) + self.assertDictEqual(salt.utils.args.clean_kwargs(__pub_foo='bar'), {}) + self.assertDictEqual(salt.utils.args.clean_kwargs(__foo_bar='gwar'), {}) + self.assertDictEqual(salt.utils.args.clean_kwargs(foo_bar='gwar'), {'foo_bar': 'gwar'}) + + def test_get_function_argspec(self): + def dummy_func(first, second, third, fourth='fifth'): + pass + + expected_argspec = namedtuple('ArgSpec', 'args varargs keywords defaults')( + args=['first', 'second', 'third', 'fourth'], varargs=None, keywords=None, defaults=('fifth',)) + ret = salt.utils.args.get_function_argspec(dummy_func) + + self.assertEqual(ret, expected_argspec) + + def test_parse_kwarg(self): + ret = salt.utils.args.parse_kwarg('foo=bar') + self.assertEqual(ret, ('foo', 'bar')) + + ret = salt.utils.args.parse_kwarg('foobar') + self.assertEqual(ret, (None, None)) diff --git a/tests/unit/utils/test_async.py b/tests/unit/utils/test_async.py index fc6b2b2106..bea7a95a74 100644 --- a/tests/unit/utils/test_async.py +++ b/tests/unit/utils/test_async.py @@ -8,7 +8,7 @@ import tornado.testing import tornado.gen from tornado.testing import AsyncTestCase -from salt.utils import async +import salt.utils.async as async class HelperA(object): diff --git a/tests/unit/utils/test_cache.py b/tests/unit/utils/test_cache.py index 88b9d07ac9..649bb2abc7 100644 --- a/tests/unit/utils/test_cache.py +++ b/tests/unit/utils/test_cache.py @@ -19,7 +19,7 @@ from tests.support.unit import TestCase # Import salt libs import salt.config import salt.loader -from salt.utils import cache +import salt.utils.cache as cache class CacheDictTestCase(TestCase): diff --git a/tests/unit/utils/test_cloud.py b/tests/unit/utils/test_cloud.py index f8cb0217f7..93ae570253 100644 --- a/tests/unit/utils/test_cloud.py +++ b/tests/unit/utils/test_cloud.py @@ -19,7 +19,7 @@ from tests.support.unit import TestCase, skipIf from tests.support.paths import TMP, CODE_DIR # Import salt libs -from salt.utils import cloud +import salt.utils.cloud as cloud GPG_KEYDIR = os.path.join(TMP, 'gpg-keydir') diff --git a/tests/unit/utils/test_configcomparer.py 
b/tests/unit/utils/test_configcomparer.py index 2cc17e38d2..ab0e158194 100644 --- a/tests/unit/utils/test_configcomparer.py +++ b/tests/unit/utils/test_configcomparer.py @@ -8,7 +8,7 @@ import copy from tests.support.unit import TestCase # Import Salt libs -from salt.utils import configcomparer +import salt.utils.configcomparer as configcomparer class UtilConfigcomparerTestCase(TestCase): diff --git a/tests/unit/utils/test_decorators.py b/tests/unit/utils/test_decorators.py index a729ec024c..f2884469d9 100644 --- a/tests/unit/utils/test_decorators.py +++ b/tests/unit/utils/test_decorators.py @@ -7,12 +7,12 @@ # Import Python libs from __future__ import absolute_import -# Import Salt Testing libs -from tests.support.unit import skipIf, TestCase -from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch -from salt.utils import decorators +# Import Salt libs +import salt.utils.decorators as decorators from salt.version import SaltStackVersion from salt.exceptions import CommandExecutionError, SaltConfigurationError +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch class DummyLogger(object): @@ -59,6 +59,7 @@ class DecoratorsTest(TestCase): self.globs = { '__virtualname__': 'test', '__opts__': {}, + '__pillar__': {}, 'old_function': self.old_function, 'new_function': self.new_function, '_new_function': self._new_function, @@ -149,6 +150,23 @@ class DecoratorsTest(TestCase): ['The function "test.new_function" is using its deprecated ' 'version and will expire in version "Beryllium".']) + def test_with_deprecated_notfound_in_pillar(self): + ''' + Test with_deprecated should raise an exception, if a same name + function with the "_" prefix not implemented. + + :return: + ''' + del self.globs['_new_function'] + self.globs['__pillar__']['use_deprecated'] = ['test.new_function'] + depr = decorators.with_deprecated(self.globs, "Beryllium") + depr._curr_version = self._mk_version("Helium")[1] + with self.assertRaises(CommandExecutionError): + depr(self.new_function)() + self.assertEqual(self.messages, + ['The function "test.new_function" is using its deprecated ' + 'version and will expire in version "Beryllium".']) + def test_with_deprecated_found(self): ''' Test with_deprecated should not raise an exception, if a same name @@ -166,6 +184,23 @@ class DecoratorsTest(TestCase): 'and will expire in version "Beryllium".'] self.assertEqual(self.messages, log_msg) + def test_with_deprecated_found_in_pillar(self): + ''' + Test with_deprecated should not raise an exception, if a same name + function with the "_" prefix is implemented, but should use + an old version instead, if "use_deprecated" is requested. + + :return: + ''' + self.globs['__pillar__']['use_deprecated'] = ['test.new_function'] + self.globs['_new_function'] = self.old_function + depr = decorators.with_deprecated(self.globs, "Beryllium") + depr._curr_version = self._mk_version("Helium")[1] + self.assertEqual(depr(self.new_function)(), self.old_function()) + log_msg = ['The function "test.new_function" is using its deprecated version ' + 'and will expire in version "Beryllium".'] + self.assertEqual(self.messages, log_msg) + def test_with_deprecated_found_eol(self): ''' Test with_deprecated should raise an exception, if a same name @@ -185,6 +220,25 @@ class DecoratorsTest(TestCase): 'is configured as its deprecated version. The lifetime of the function ' '"new_function" expired. 
Please use its successor "new_function" instead.']) + def test_with_deprecated_found_eol_in_pillar(self): + ''' + Test with_deprecated should raise an exception, if a same name + function with the "_" prefix is implemented, "use_deprecated" is requested + and EOL is reached. + + :return: + ''' + self.globs['__pillar__']['use_deprecated'] = ['test.new_function'] + self.globs['_new_function'] = self.old_function + depr = decorators.with_deprecated(self.globs, "Helium") + depr._curr_version = self._mk_version("Beryllium")[1] + with self.assertRaises(CommandExecutionError): + depr(self.new_function)() + self.assertEqual(self.messages, + ['Although function "new_function" is called, an alias "new_function" ' + 'is configured as its deprecated version. The lifetime of the function ' + '"new_function" expired. Please use its successor "new_function" instead.']) + def test_with_deprecated_no_conf(self): ''' Test with_deprecated should not raise an exception, if a same name @@ -260,6 +314,19 @@ class DecoratorsTest(TestCase): assert depr(self.new_function)() == self.new_function() assert not self.messages + def test_with_deprecated_opt_in_use_superseded_in_pillar(self): + ''' + Test with_deprecated using opt-in policy, + where newer function is used as per configuration. + + :return: + ''' + self.globs['__pillar__']['use_superseded'] = ['test.new_function'] + depr = decorators.with_deprecated(self.globs, "Beryllium", policy=decorators._DeprecationDecorator.OPT_IN) + depr._curr_version = self._mk_version("Helium")[1] + assert depr(self.new_function)() == self.new_function() + assert not self.messages + def test_with_deprecated_opt_in_use_superseded_and_deprecated(self): ''' Test with_deprecated misconfiguration. @@ -272,3 +339,16 @@ class DecoratorsTest(TestCase): depr._curr_version = self._mk_version("Helium")[1] with self.assertRaises(SaltConfigurationError): assert depr(self.new_function)() == self.new_function() + + def test_with_deprecated_opt_in_use_superseded_and_deprecated_in_pillar(self): + ''' + Test with_deprecated misconfiguration. 
+ + :return: + ''' + self.globs['__pillar__']['use_deprecated'] = ['test.new_function'] + self.globs['__pillar__']['use_superseded'] = ['test.new_function'] + depr = decorators.with_deprecated(self.globs, "Beryllium") + depr._curr_version = self._mk_version("Helium")[1] + with self.assertRaises(SaltConfigurationError): + assert depr(self.new_function)() == self.new_function() diff --git a/tests/unit/utils/test_dictupdate.py b/tests/unit/utils/test_dictupdate.py index df482970f7..892c7b03df 100644 --- a/tests/unit/utils/test_dictupdate.py +++ b/tests/unit/utils/test_dictupdate.py @@ -8,7 +8,7 @@ import copy from tests.support.unit import TestCase # Import Salt libs -from salt.utils import dictupdate +import salt.utils.dictupdate as dictupdate class UtilDictupdateTestCase(TestCase): diff --git a/tests/unit/utils/test_disk_cache.py b/tests/unit/utils/test_disk_cache.py index 2d37afc98b..1180cf7f52 100644 --- a/tests/unit/utils/test_disk_cache.py +++ b/tests/unit/utils/test_disk_cache.py @@ -17,7 +17,7 @@ import time from tests.support.unit import TestCase # Import salt libs -from salt.utils import cache +import salt.utils.cache as cache class CacheDiskTestCase(TestCase): diff --git a/tests/unit/utils/test_docker.py b/tests/unit/utils/test_docker.py index 5b0587f18c..baec4476e1 100644 --- a/tests/unit/utils/test_docker.py +++ b/tests/unit/utils/test_docker.py @@ -18,6 +18,7 @@ from tests.support.unit import TestCase # Import salt libs import salt.config import salt.loader +import salt.utils.platform import salt.utils.docker as docker_utils import salt.utils.docker.translate as translate_funcs @@ -29,7 +30,7 @@ def __test_stringlist(testcase, name): alias = salt.utils.docker.ALIASES_REVMAP.get(name) # Using file paths here because "volumes" must be passed through this # set of assertions and it requires absolute paths. - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): data = [r'c:\foo', r'c:\bar', r'c:\baz'] else: data = ['/foo', '/bar', '/baz'] @@ -189,7 +190,7 @@ def assert_string(func): alias = salt.utils.docker.ALIASES_REVMAP.get(name) # Using file paths here because "working_dir" must be passed through # this set of assertions and it requires absolute paths. 
- if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): data = r'c:\foo' else: data = '/foo' @@ -384,7 +385,7 @@ def assert_device_rates(func): continue # Error case: Not an absolute path - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): path = r'foo\bar\baz' else: path = 'foo/bar/baz' @@ -820,7 +821,7 @@ class TranslateInputTestCase(TestCase): expected ) - @assert_stringlist + @assert_string def test_domainname(self): ''' Should be a list of strings or converted to one @@ -1709,7 +1710,7 @@ class TranslateInputTestCase(TestCase): Should be a list of absolute paths ''' # Error case: Not an absolute path - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): path = r'foo\bar\baz' else: path = 'foo/bar/baz' @@ -1735,7 +1736,7 @@ class TranslateInputTestCase(TestCase): Should be a single absolute path ''' # Error case: Not an absolute path - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): path = r'foo\bar\baz' else: path = 'foo/bar/baz' diff --git a/tests/unit/utils/test_etcd_util.py b/tests/unit/utils/test_etcd_util.py index 98072e8864..f80c7bbdaa 100644 --- a/tests/unit/utils/test_etcd_util.py +++ b/tests/unit/utils/test_etcd_util.py @@ -16,7 +16,7 @@ from tests.support.mock import ( ) # Import Salt Libs -from salt.utils import etcd_util +import salt.utils.etcd_util as etcd_util try: from urllib3.exceptions import ReadTimeoutError, MaxRetryError HAS_URLLIB3 = True diff --git a/tests/unit/utils/test_event.py b/tests/unit/utils/test_event.py index c65b397d27..722f2b4b14 100644 --- a/tests/unit/utils/test_event.py +++ b/tests/unit/utils/test_event.py @@ -25,9 +25,10 @@ from multiprocessing import Process from tests.support.unit import expectedFailure, skipIf, TestCase # Import salt libs +import salt.utils.event +import salt.utils.stringutils import tests.integration as integration from salt.utils.process import clean_proc -from salt.utils import event, to_bytes # Import 3rd-+arty libs from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin @@ -41,7 +42,7 @@ if getattr(zmq, 'IPC_PATH_MAX_LEN', 103) <= 103: @contextmanager def eventpublisher_process(): - proc = event.EventPublisher({'sock_dir': SOCK_DIR}) + proc = salt.utils.event.EventPublisher({'sock_dir': SOCK_DIR}) proc.start() try: if os.environ.get('TRAVIS_PYTHON_VERSION', None) is not None: @@ -62,7 +63,7 @@ class EventSender(Process): self.wait = wait def run(self): - me = event.MasterEvent(SOCK_DIR, listen=False) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=False) time.sleep(self.wait) me.fire_event(self.data, self.tag) # Wait a few seconds before tearing down the zmq context @@ -98,7 +99,7 @@ class TestSaltEvent(TestCase): self.assertEqual(data[key], evt[key], assertMsg) def test_master_event(self): - me = event.MasterEvent(SOCK_DIR, listen=False) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=False) self.assertEqual( me.puburi, '{0}'.format( os.path.join(SOCK_DIR, 'master_event_pub.ipc') @@ -113,8 +114,8 @@ class TestSaltEvent(TestCase): def test_minion_event(self): opts = dict(id='foo', sock_dir=SOCK_DIR) - id_hash = hashlib.sha256(to_bytes(opts['id'])).hexdigest()[:10] - me = event.MinionEvent(opts, listen=False) + id_hash = hashlib.sha256(salt.utils.stringutils.to_bytes(opts['id'])).hexdigest()[:10] + me = salt.utils.event.MinionEvent(opts, listen=False) self.assertEqual( me.puburi, '{0}'.format( @@ -134,13 +135,13 @@ class TestSaltEvent(TestCase): def test_minion_event_tcp_ipc_mode(self): opts = dict(id='foo', 
ipc_mode='tcp') - me = event.MinionEvent(opts, listen=False) + me = salt.utils.event.MinionEvent(opts, listen=False) self.assertEqual(me.puburi, 4510) self.assertEqual(me.pulluri, 4511) def test_minion_event_no_id(self): - me = event.MinionEvent(dict(sock_dir=SOCK_DIR), listen=False) - id_hash = hashlib.sha256(to_bytes('')).hexdigest()[:10] + me = salt.utils.event.MinionEvent(dict(sock_dir=SOCK_DIR), listen=False) + id_hash = hashlib.sha256(salt.utils.stringutils.to_bytes('')).hexdigest()[:10] self.assertEqual( me.puburi, '{0}'.format( @@ -161,7 +162,7 @@ class TestSaltEvent(TestCase): def test_event_single(self): '''Test a single event is received''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) me.fire_event({'data': 'foo1'}, 'evt1') evt1 = me.get_event(tag='evt1') self.assertGotEvent(evt1, {'data': 'foo1'}) @@ -169,7 +170,7 @@ class TestSaltEvent(TestCase): def test_event_single_no_block(self): '''Test a single event is received, no block''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) start = time.time() finish = start + 5 evt1 = me.get_event(wait=0, tag='evt1', no_block=True) @@ -185,7 +186,7 @@ class TestSaltEvent(TestCase): def test_event_single_wait_0_no_block_False(self): '''Test a single event is received with wait=0 and no_block=False and doesn't spin the while loop''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) me.fire_event({'data': 'foo1'}, 'evt1') # This is too fast and will be None but assures we're not blocking evt1 = me.get_event(wait=0, tag='evt1', no_block=False) @@ -194,7 +195,7 @@ class TestSaltEvent(TestCase): def test_event_timeout(self): '''Test no event is received if the timeout is reached''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) me.fire_event({'data': 'foo1'}, 'evt1') evt1 = me.get_event(tag='evt1') self.assertGotEvent(evt1, {'data': 'foo1'}) @@ -204,7 +205,7 @@ class TestSaltEvent(TestCase): def test_event_no_timeout(self): '''Test no wait timeout, we should block forever, until we get one ''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) with eventsender_process({'data': 'foo2'}, 'evt2', 5): evt = me.get_event(tag='evt2', wait=0, no_block=False) self.assertGotEvent(evt, {'data': 'foo2'}) @@ -212,7 +213,7 @@ class TestSaltEvent(TestCase): def test_event_matching(self): '''Test a startswith match''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) me.fire_event({'data': 'foo1'}, 'evt1') evt1 = me.get_event(tag='ev') self.assertGotEvent(evt1, {'data': 'foo1'}) @@ -220,7 +221,7 @@ class TestSaltEvent(TestCase): def test_event_matching_regex(self): '''Test a regex match''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) me.fire_event({'data': 'foo1'}, 'evt1') evt1 = me.get_event(tag='^ev', match_type='regex') self.assertGotEvent(evt1, {'data': 'foo1'}) @@ -228,7 +229,7 @@ class TestSaltEvent(TestCase): def test_event_matching_all(self): '''Test an all match''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, 
listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) me.fire_event({'data': 'foo1'}, 'evt1') evt1 = me.get_event(tag='') self.assertGotEvent(evt1, {'data': 'foo1'}) @@ -236,7 +237,7 @@ class TestSaltEvent(TestCase): def test_event_matching_all_when_tag_is_None(self): '''Test event matching all when not passing a tag''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) me.fire_event({'data': 'foo1'}, 'evt1') evt1 = me.get_event() self.assertGotEvent(evt1, {'data': 'foo1'}) @@ -244,7 +245,7 @@ class TestSaltEvent(TestCase): def test_event_not_subscribed(self): '''Test get_event drops non-subscribed events''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) me.fire_event({'data': 'foo1'}, 'evt1') me.fire_event({'data': 'foo2'}, 'evt2') evt2 = me.get_event(tag='evt2') @@ -255,7 +256,7 @@ class TestSaltEvent(TestCase): def test_event_subscription_cache(self): '''Test subscriptions cache a message until requested''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) me.subscribe('evt1') me.fire_event({'data': 'foo1'}, 'evt1') me.fire_event({'data': 'foo2'}, 'evt2') @@ -267,7 +268,7 @@ class TestSaltEvent(TestCase): def test_event_subscriptions_cache_regex(self): '''Test regex subscriptions cache a message until requested''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) me.subscribe('e..1$', 'regex') me.fire_event({'data': 'foo1'}, 'evt1') me.fire_event({'data': 'foo2'}, 'evt2') @@ -279,8 +280,8 @@ class TestSaltEvent(TestCase): def test_event_multiple_clients(self): '''Test event is received by multiple clients''' with eventpublisher_process(): - me1 = event.MasterEvent(SOCK_DIR, listen=True) - me2 = event.MasterEvent(SOCK_DIR, listen=True) + me1 = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) + me2 = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) # We need to sleep here to avoid a race condition wherein # the second socket may not be connected by the time the first socket # sends the event. 
@@ -296,7 +297,7 @@ class TestSaltEvent(TestCase): '''Test nested event subscriptions do not drop events, get event for all tags''' # Show why not to call get_event(tag='') with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) me.fire_event({'data': 'foo1'}, 'evt1') me.fire_event({'data': 'foo2'}, 'evt2') evt2 = me.get_event(tag='') @@ -307,7 +308,7 @@ class TestSaltEvent(TestCase): def test_event_many(self): '''Test a large number of events, one at a time''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) for i in range(500): me.fire_event({'data': '{0}'.format(i)}, 'testevents') evt = me.get_event(tag='testevents') @@ -316,7 +317,7 @@ class TestSaltEvent(TestCase): def test_event_many_backlog(self): '''Test a large number of events, send all then recv all''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) # Must not exceed zmq HWM for i in range(500): me.fire_event({'data': '{0}'.format(i)}, 'testevents') @@ -329,7 +330,7 @@ class TestSaltEvent(TestCase): def test_send_master_event(self): '''Tests that sending an event through fire_master generates expected event''' with eventpublisher_process(): - me = event.MasterEvent(SOCK_DIR, listen=True) + me = salt.utils.event.MasterEvent(SOCK_DIR, listen=True) data = {'data': 'foo1'} me.fire_master(data, 'test_master') @@ -344,21 +345,21 @@ class TestAsyncEventPublisher(AsyncTestCase): def setUp(self): super(TestAsyncEventPublisher, self).setUp() self.opts = {'sock_dir': SOCK_DIR} - self.publisher = event.AsyncEventPublisher( + self.publisher = salt.utils.event.AsyncEventPublisher( self.opts, self.io_loop, ) - self.event = event.get_event('minion', opts=self.opts, io_loop=self.io_loop) + self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop) self.event.subscribe('') self.event.set_event_handler(self._handle_publish) def _handle_publish(self, raw): - self.tag, self.data = event.SaltEvent.unpack(raw) + self.tag, self.data = salt.utils.event.SaltEvent.unpack(raw) self.stop() def test_event_subscription(self): '''Test a single event is received''' - me = event.MinionEvent(self.opts, listen=True) + me = salt.utils.event.MinionEvent(self.opts, listen=True) me.fire_event({'data': 'foo1'}, 'evt1') self.wait() evt1 = me.get_event(tag='evt1') diff --git a/tests/unit/utils/test_find.py b/tests/unit/utils/test_find.py index 0dbf84c50a..760c9d5ddc 100644 --- a/tests/unit/utils/test_find.py +++ b/tests/unit/utils/test_find.py @@ -13,7 +13,7 @@ from tests.support.unit import skipIf, TestCase from tests.support.paths import TMP # Import salt libs -import salt.ext.six as six +from salt.ext import six import salt.utils.files import salt.utils.find diff --git a/tests/unit/utils/test_http.py b/tests/unit/utils/test_http.py index 993e7975e8..a6d24b021c 100644 --- a/tests/unit/utils/test_http.py +++ b/tests/unit/utils/test_http.py @@ -11,7 +11,7 @@ from tests.support.unit import TestCase, skipIf from tests.support.mock import NO_MOCK, NO_MOCK_REASON # Import Salt Libs -from salt.utils import http +import salt.utils.http as http @skipIf(NO_MOCK, NO_MOCK_REASON) diff --git a/tests/unit/utils/test_immutabletypes.py b/tests/unit/utils/test_immutabletypes.py index 4626866b13..f684296324 100644 --- a/tests/unit/utils/test_immutabletypes.py +++ 
b/tests/unit/utils/test_immutabletypes.py @@ -16,7 +16,7 @@ from __future__ import absolute_import from tests.support.unit import TestCase # Import salt libs -from salt.utils import immutabletypes +import salt.utils.immutabletypes as immutabletypes class ImmutableTypesTestCase(TestCase): diff --git a/tests/unit/utils/test_locales.py b/tests/unit/utils/test_locales.py index 45232544ce..60e2bd8151 100644 --- a/tests/unit/utils/test_locales.py +++ b/tests/unit/utils/test_locales.py @@ -1,16 +1,17 @@ # coding: utf-8 -# python libs +# Import Python libs from __future__ import absolute_import -# salt testing libs + +# Import Salt libs +import salt.utils.locales as locales from tests.support.unit import TestCase, skipIf from tests.support.mock import patch, NO_MOCK, NO_MOCK_REASON -# salt libs -import salt.ext.six as six +# Import 3rd-part libs +from salt.ext import six from salt.ext.six.moves import reload_module -from salt.utils import locales @skipIf(NO_MOCK, NO_MOCK_REASON) diff --git a/tests/unit/utils/test_mac_utils.py b/tests/unit/utils/test_mac_utils.py index 3b7bc32d33..4c60e928ed 100644 --- a/tests/unit/utils/test_mac_utils.py +++ b/tests/unit/utils/test_mac_utils.py @@ -9,12 +9,14 @@ from __future__ import absolute_import # Import Salt Testing Libs from tests.support.unit import TestCase, skipIf from tests.support.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON -from salt.ext.six.moves import range -# Import Salt Libs -from salt.utils import mac_utils +# Import Salt libs +import salt.utils.mac_utils as mac_utils from salt.exceptions import SaltInvocationError, CommandExecutionError +# Import 3rd-party libs +from salt.ext.six.moves import range + @skipIf(NO_MOCK, NO_MOCK_REASON) class MacUtilsTestCase(TestCase): diff --git a/tests/unit/utils/test_minions.py b/tests/unit/utils/test_minions.py index 3a025960cc..0ee0dd1f7f 100644 --- a/tests/unit/utils/test_minions.py +++ b/tests/unit/utils/test_minions.py @@ -4,7 +4,7 @@ from __future__ import absolute_import # Import Salt Libs -from salt.utils import minions +import salt.utils.minions as minions # Import Salt Testing Libs from tests.support.unit import TestCase diff --git a/tests/unit/utils/test_network.py b/tests/unit/utils/test_network.py index 75a43a333e..068fa8b908 100644 --- a/tests/unit/utils/test_network.py +++ b/tests/unit/utils/test_network.py @@ -9,7 +9,7 @@ from tests.support.unit import TestCase from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock # Import salt libs -from salt.utils import network +import salt.utils.network as network LINUX = '''\ eth0 Link encap:Ethernet HWaddr e0:3f:49:85:6a:af @@ -227,7 +227,7 @@ class NetworkTestCase(TestCase): ) def test_interfaces_ifconfig_solaris(self): - with patch('salt.utils.is_sunos', lambda: True): + with patch('salt.utils.platform.is_sunos', lambda: True): interfaces = network._interfaces_ifconfig(SOLARIS) expected_interfaces = {'ilbint0': {'inet6': [], @@ -262,16 +262,16 @@ class NetworkTestCase(TestCase): self.assertEqual(interfaces, expected_interfaces) def test_freebsd_remotes_on(self): - with patch('salt.utils.is_sunos', lambda: False): - with patch('salt.utils.is_freebsd', lambda: True): + with patch('salt.utils.platform.is_sunos', lambda: False): + with patch('salt.utils.platform.is_freebsd', lambda: True): with patch('subprocess.check_output', return_value=FREEBSD_SOCKSTAT): remotes = network._freebsd_remotes_on('4506', 'remote') self.assertEqual(remotes, set(['127.0.0.1'])) def test_freebsd_remotes_on_with_fat_pid(self): - with 
patch('salt.utils.is_sunos', lambda: False): - with patch('salt.utils.is_freebsd', lambda: True): + with patch('salt.utils.platform.is_sunos', lambda: False): + with patch('salt.utils.platform.is_freebsd', lambda: True): with patch('subprocess.check_output', return_value=FREEBSD_SOCKSTAT_WITH_FAT_PID): remotes = network._freebsd_remotes_on('4506', 'remote') diff --git a/tests/unit/utils/test_path_join.py b/tests/unit/utils/test_path.py similarity index 74% rename from tests/unit/utils/test_path_join.py rename to tests/unit/utils/test_path.py index ddba151e43..c9bc6761a3 100644 --- a/tests/unit/utils/test_path_join.py +++ b/tests/unit/utils/test_path.py @@ -3,11 +3,11 @@ :codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)` - tests.unit.utils.path_join_test + tests.unit.utils.salt.utils.path.join_test ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import os import sys @@ -17,14 +17,15 @@ import platform import tempfile # Import Salt Testing libs -import salt.utils from tests.support.unit import TestCase, skipIf +from tests.support.mock import patch, NO_MOCK, NO_MOCK_REASON -# Import salt libs -from salt.utils import path_join +# Import Salt libs +import salt.utils.path +import salt.utils.platform # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six class PathJoinTestCase(TestCase): @@ -52,10 +53,10 @@ class PathJoinTestCase(TestCase): def test_nix_paths(self): if platform.system().lower() == "windows": self.skipTest( - "Windows platform found. not running *nix path_join tests" + "Windows platform found. not running *nix salt.utils.path.join tests" ) for idx, (parts, expected) in enumerate(self.NIX_PATHS): - path = path_join(*parts) + path = salt.utils.path.join(*parts) self.assertEqual( '{0}: {1}'.format(idx, path), '{0}: {1}'.format(idx, expected) @@ -66,11 +67,11 @@ class PathJoinTestCase(TestCase): if platform.system().lower() != "windows": self.skipTest( 'Non windows platform found. not running non patched os.path ' - 'path_join tests' + 'salt.utils.path.join tests' ) for idx, (parts, expected) in enumerate(self.WIN_PATHS): - path = path_join(*parts) + path = salt.utils.path.join(*parts) self.assertEqual( '{0}: {1}'.format(idx, path), '{0}: {1}'.format(idx, expected) @@ -81,13 +82,13 @@ class PathJoinTestCase(TestCase): if platform.system().lower() == "windows": self.skipTest( 'Windows platform found. 
not running patched os.path ' - 'path_join tests' + 'salt.utils.path.join tests' ) self.__patch_path() for idx, (parts, expected) in enumerate(self.WIN_PATHS): - path = path_join(*parts) + path = salt.utils.path.join(*parts) self.assertEqual( '{0}: {1}'.format(idx, path), '{0}: {1}'.format(idx, expected) @@ -95,7 +96,7 @@ class PathJoinTestCase(TestCase): self.__unpatch_path() - @skipIf(salt.utils.is_windows(), '*nix-only test') + @skipIf(salt.utils.platform.is_windows(), '*nix-only test') def test_mixed_unicode_and_binary(self): ''' This tests joining paths that contain a mix of components with unicode @@ -109,7 +110,7 @@ class PathJoinTestCase(TestCase): a = u'/foo/bar' b = 'Д' expected = u'/foo/bar/\u0414' - actual = path_join(a, b) + actual = salt.utils.path.join(a, b) self.assertEqual(actual, expected) def __patch_path(self): @@ -136,3 +137,22 @@ class PathJoinTestCase(TestCase): for module in (posixpath, os, os.path, tempfile, platform): reload(module) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class WhichTestCase(TestCase): + def test_which_bin(self): + ret = salt.utils.path.which_bin('str') + self.assertIs(None, ret) + + test_exes = ['ls', 'echo'] + with patch('salt.utils.path.which', return_value='/tmp/dummy_path'): + ret = salt.utils.path.which_bin(test_exes) + self.assertEqual(ret, '/tmp/dummy_path') + + ret = salt.utils.path.which_bin([]) + self.assertIs(None, ret) + + with patch('salt.utils.path.which', return_value=''): + ret = salt.utils.path.which_bin(test_exes) + self.assertIs(None, ret) diff --git a/tests/unit/utils/test_process.py b/tests/unit/utils/test_process.py index 72215cd98a..25be0b7a1b 100644 --- a/tests/unit/utils/test_process.py +++ b/tests/unit/utils/test_process.py @@ -16,7 +16,7 @@ import salt.utils import salt.utils.process # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin diff --git a/tests/unit/utils/test_schema.py b/tests/unit/utils/test_schema.py index 3319839c36..a8181ffe86 100644 --- a/tests/unit/utils/test_schema.py +++ b/tests/unit/utils/test_schema.py @@ -13,7 +13,7 @@ import yaml from tests.support.unit import TestCase, skipIf # Import Salt Libs -from salt.utils import schema +import salt.utils.schema as schema from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs diff --git a/tests/unit/utils/test_stringutils.py b/tests/unit/utils/test_stringutils.py new file mode 100644 index 0000000000..4c9249b7f4 --- /dev/null +++ b/tests/unit/utils/test_stringutils.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +# Import Salt libs +from tests.support.unit import TestCase +from tests.unit.utils.test_utils import LOREM_IPSUM +import salt.utils.stringutils + +# Import 3rd-party libs +from salt.ext import six +from salt.ext.six.moves import range # pylint: disable=redefined-builtin + + +class StringutilsTestCase(TestCase): + def test_contains_whitespace(self): + does_contain_whitespace = 'A brown fox jumped over the red hen.' + does_not_contain_whitespace = 'Abrownfoxjumpedovertheredhen.' 
+ + self.assertTrue(salt.utils.stringutils.contains_whitespace(does_contain_whitespace)) + self.assertFalse(salt.utils.stringutils.contains_whitespace(does_not_contain_whitespace)) + + def test_to_num(self): + self.assertEqual(7, salt.utils.stringutils.to_num('7')) + self.assertIsInstance(salt.utils.stringutils.to_num('7'), int) + self.assertEqual(7, salt.utils.stringutils.to_num('7.0')) + self.assertIsInstance(salt.utils.stringutils.to_num('7.0'), float) + self.assertEqual(salt.utils.stringutils.to_num('Seven'), 'Seven') + self.assertIsInstance(salt.utils.stringutils.to_num('Seven'), str) + + def test_is_binary(self): + self.assertFalse(salt.utils.stringutils.is_binary(LOREM_IPSUM)) + + zero_str = '{0}{1}'.format(LOREM_IPSUM, '\0') + self.assertTrue(salt.utils.stringutils.is_binary(zero_str)) + + # To to ensure safe exit if str passed doesn't evaluate to True + self.assertFalse(salt.utils.stringutils.is_binary('')) + + nontext = 3 * (''.join([chr(x) for x in range(1, 32) if x not in (8, 9, 10, 12, 13)])) + almost_bin_str = '{0}{1}'.format(LOREM_IPSUM[:100], nontext[:42]) + self.assertFalse(salt.utils.stringutils.is_binary(almost_bin_str)) + + bin_str = almost_bin_str + '\x01' + self.assertTrue(salt.utils.stringutils.is_binary(bin_str)) + + def test_to_str(self): + for x in (123, (1, 2, 3), [1, 2, 3], {1: 23}, None): + self.assertRaises(TypeError, salt.utils.stringutils.to_str, x) + if six.PY3: + self.assertEqual(salt.utils.stringutils.to_str('plugh'), 'plugh') + self.assertEqual(salt.utils.stringutils.to_str('áéíóúý', 'utf-8'), 'áéíóúý') + un = '\u4e2d\u56fd\u8a9e (\u7e41\u4f53)' # pylint: disable=anomalous-unicode-escape-in-string + ut = bytes((0xe4, 0xb8, 0xad, 0xe5, 0x9b, 0xbd, 0xe8, 0xaa, 0x9e, 0x20, 0x28, 0xe7, 0xb9, 0x81, 0xe4, 0xbd, 0x93, 0x29)) + self.assertEqual(salt.utils.stringutils.to_str(ut, 'utf-8'), un) + self.assertEqual(salt.utils.stringutils.to_str(bytearray(ut), 'utf-8'), un) + # Test situation when a minion returns incorrect utf-8 string because of... 
million reasons + ut2 = b'\x9c' + self.assertEqual(salt.utils.stringutils.to_str(ut2, 'utf-8'), u'\ufffd') + self.assertEqual(salt.utils.stringutils.to_str(bytearray(ut2), 'utf-8'), u'\ufffd') + else: + self.assertEqual(salt.utils.stringutils.to_str('plugh'), 'plugh') + self.assertEqual(salt.utils.stringutils.to_str(u'áéíóúý', 'utf-8'), 'áéíóúý') + un = u'\u4e2d\u56fd\u8a9e (\u7e41\u4f53)' + ut = '\xe4\xb8\xad\xe5\x9b\xbd\xe8\xaa\x9e (\xe7\xb9\x81\xe4\xbd\x93)' + self.assertEqual(salt.utils.stringutils.to_str(un, 'utf-8'), ut) + self.assertEqual(salt.utils.stringutils.to_str(bytearray(ut), 'utf-8'), ut) + + def test_to_bytes(self): + for x in (123, (1, 2, 3), [1, 2, 3], {1: 23}, None): + self.assertRaises(TypeError, salt.utils.stringutils.to_bytes, x) + if six.PY3: + self.assertEqual(salt.utils.stringutils.to_bytes('xyzzy'), b'xyzzy') + ut = bytes((0xe4, 0xb8, 0xad, 0xe5, 0x9b, 0xbd, 0xe8, 0xaa, 0x9e, 0x20, 0x28, 0xe7, 0xb9, 0x81, 0xe4, 0xbd, 0x93, 0x29)) + un = '\u4e2d\u56fd\u8a9e (\u7e41\u4f53)' # pylint: disable=anomalous-unicode-escape-in-string + self.assertEqual(salt.utils.stringutils.to_bytes(ut), ut) + self.assertEqual(salt.utils.stringutils.to_bytes(bytearray(ut)), ut) + self.assertEqual(salt.utils.stringutils.to_bytes(un, 'utf-8'), ut) + else: + self.assertEqual(salt.utils.stringutils.to_bytes('xyzzy'), 'xyzzy') + ut = ''.join([chr(x) for x in (0xe4, 0xb8, 0xad, 0xe5, 0x9b, 0xbd, 0xe8, 0xaa, 0x9e, 0x20, 0x28, 0xe7, 0xb9, 0x81, 0xe4, 0xbd, 0x93, 0x29)]) + un = u'\u4e2d\u56fd\u8a9e (\u7e41\u4f53)' # pylint: disable=anomalous-unicode-escape-in-string + self.assertEqual(salt.utils.stringutils.to_bytes(ut), ut) + self.assertEqual(salt.utils.stringutils.to_bytes(bytearray(ut)), ut) + self.assertEqual(salt.utils.stringutils.to_bytes(un, 'utf-8'), ut) + + def test_to_unicode(self): + if six.PY3: + self.assertEqual(salt.utils.stringutils.to_unicode('plugh'), 'plugh') + self.assertEqual(salt.utils.stringutils.to_unicode('áéíóúý'), 'áéíóúý') + un = '\u4e2d\u56fd\u8a9e (\u7e41\u4f53)' # pylint: disable=anomalous-unicode-escape-in-string + ut = bytes((0xe4, 0xb8, 0xad, 0xe5, 0x9b, 0xbd, 0xe8, 0xaa, 0x9e, 0x20, 0x28, 0xe7, 0xb9, 0x81, 0xe4, 0xbd, 0x93, 0x29)) + self.assertEqual(salt.utils.stringutils.to_unicode(ut, 'utf-8'), un) + self.assertEqual(salt.utils.stringutils.to_unicode(bytearray(ut), 'utf-8'), un) + else: + self.assertEqual(salt.utils.stringutils.to_unicode('xyzzy', 'utf-8'), u'xyzzy') + ut = '\xe4\xb8\xad\xe5\x9b\xbd\xe8\xaa\x9e (\xe7\xb9\x81\xe4\xbd\x93)' + un = u'\u4e2d\u56fd\u8a9e (\u7e41\u4f53)' + self.assertEqual(salt.utils.stringutils.to_unicode(ut, 'utf-8'), un) diff --git a/tests/unit/utils/test_systemd.py b/tests/unit/utils/test_systemd.py index e3ea053ced..8f3fce9660 100644 --- a/tests/unit/utils/test_systemd.py +++ b/tests/unit/utils/test_systemd.py @@ -10,8 +10,8 @@ from tests.support.unit import TestCase, skipIf from tests.support.mock import Mock, patch, NO_MOCK, NO_MOCK_REASON # Import Salt libs +import salt.utils.systemd as _systemd from salt.exceptions import SaltInvocationError -from salt.utils import systemd as _systemd def _booted_effect(path): diff --git a/tests/unit/utils/test_url.py b/tests/unit/utils/test_url.py index 95caa0253e..0556848a4e 100644 --- a/tests/unit/utils/test_url.py +++ b/tests/unit/utils/test_url.py @@ -81,7 +81,7 @@ class UrlTestCase(TestCase): ''' url = 'salt://dir/file.ini' - with patch('salt.utils.is_windows', MagicMock(return_value=True)): + with patch('salt.utils.platform.is_windows', MagicMock(return_value=True)): 
self.assertFalse(salt.utils.url.is_escaped(url)) def test_is_escaped_escaped_path(self): @@ -132,7 +132,7 @@ class UrlTestCase(TestCase): ''' url = 'salt://dir/file.ini' - with patch('salt.utils.is_windows', MagicMock(return_value=True)): + with patch('salt.utils.platform.is_windows', MagicMock(return_value=True)): self.assertEqual(salt.utils.url.escape(url), url) def test_escape_escaped_path(self): @@ -186,7 +186,7 @@ class UrlTestCase(TestCase): ''' url = 'salt://dir/file.ini' - with patch('salt.utils.is_windows', MagicMock(return_value=True)): + with patch('salt.utils.platform.is_windows', MagicMock(return_value=True)): self.assertEqual(salt.utils.url.unescape(url), url) def test_unescape_escaped_path(self): diff --git a/tests/unit/utils/test_utils.py b/tests/unit/utils/test_utils.py index 0e7b62b43e..87fe9b0018 100644 --- a/tests/unit/utils/test_utils.py +++ b/tests/unit/utils/test_utils.py @@ -16,10 +16,12 @@ from tests.support.mock import ( ) # Import Salt libs -from salt.utils import args +import salt.utils +import salt.utils.jid +import salt.utils.yamlencoding +import salt.utils.zeromq from salt.utils.odict import OrderedDict from salt.exceptions import (SaltInvocationError, SaltSystemExit, CommandNotFoundError) -from salt import utils # Import Python libraries import datetime @@ -28,15 +30,14 @@ import zmq from collections import namedtuple # Import 3rd-party libs -import salt.ext.six as six -from salt.ext.six.moves import range +from salt.ext import six try: import timelib # pylint: disable=import-error,unused-import HAS_TIMELIB = True except ImportError: HAS_TIMELIB = False -LORUM_IPSUM = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque eget urna a arcu lacinia sagittis. \n' \ +LOREM_IPSUM = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque eget urna a arcu lacinia sagittis. \n' \ 'Sed scelerisque, lacus eget malesuada vestibulum, justo diam facilisis tortor, in sodales dolor \n' \ 'nibh eu urna. Aliquam iaculis massa risus, sed elementum risus accumsan id. Suspendisse mattis, \n' \ 'metus sed lacinia dictum, leo orci dapibus sapien, at porttitor sapien nulla ac velit. 
\n' \ @@ -55,75 +56,57 @@ class UtilsTestCase(TestCase): ' dolor \n' \ '[...]\n' \ '---' - ret = utils.get_context(LORUM_IPSUM, 1, num_lines=1) + ret = salt.utils.get_context(LOREM_IPSUM, 1, num_lines=1) self.assertEqual(ret, expected_context) def test_jid_to_time(self): test_jid = 20131219110700123489 expected_jid = '2013, Dec 19 11:07:00.123489' - self.assertEqual(utils.jid.jid_to_time(test_jid), expected_jid) + self.assertEqual(salt.utils.jid.jid_to_time(test_jid), expected_jid) # Test incorrect lengths incorrect_jid_length = 2012 - self.assertEqual(utils.jid.jid_to_time(incorrect_jid_length), '') + self.assertEqual(salt.utils.jid.jid_to_time(incorrect_jid_length), '') @skipIf(NO_MOCK, NO_MOCK_REASON) def test_gen_mac(self): with patch('random.randint', return_value=1) as random_mock: self.assertEqual(random_mock.return_value, 1) - ret = utils.gen_mac('00:16:3E') + ret = salt.utils.gen_mac('00:16:3E') expected_mac = '00:16:3E:01:01:01' self.assertEqual(ret, expected_mac) def test_mac_str_to_bytes(self): - self.assertRaises(ValueError, utils.mac_str_to_bytes, '31337') - self.assertRaises(ValueError, utils.mac_str_to_bytes, '0001020304056') - self.assertRaises(ValueError, utils.mac_str_to_bytes, '00:01:02:03:04:056') - self.assertRaises(ValueError, utils.mac_str_to_bytes, 'a0:b0:c0:d0:e0:fg') - self.assertEqual(b'\x10\x08\x06\x04\x02\x00', utils.mac_str_to_bytes('100806040200')) - self.assertEqual(b'\xf8\xe7\xd6\xc5\xb4\xa3', utils.mac_str_to_bytes('f8e7d6c5b4a3')) + self.assertRaises(ValueError, salt.utils.mac_str_to_bytes, '31337') + self.assertRaises(ValueError, salt.utils.mac_str_to_bytes, '0001020304056') + self.assertRaises(ValueError, salt.utils.mac_str_to_bytes, '00:01:02:03:04:056') + self.assertRaises(ValueError, salt.utils.mac_str_to_bytes, 'a0:b0:c0:d0:e0:fg') + self.assertEqual(b'\x10\x08\x06\x04\x02\x00', salt.utils.mac_str_to_bytes('100806040200')) + self.assertEqual(b'\xf8\xe7\xd6\xc5\xb4\xa3', salt.utils.mac_str_to_bytes('f8e7d6c5b4a3')) def test_ip_bracket(self): test_ipv4 = '127.0.0.1' test_ipv6 = '::1' - self.assertEqual(test_ipv4, utils.ip_bracket(test_ipv4)) - self.assertEqual('[{0}]'.format(test_ipv6), utils.ip_bracket(test_ipv6)) + self.assertEqual(test_ipv4, salt.utils.ip_bracket(test_ipv4)) + self.assertEqual('[{0}]'.format(test_ipv6), salt.utils.ip_bracket(test_ipv6)) def test_is_jid(self): - self.assertTrue(utils.jid.is_jid('20131219110700123489')) # Valid JID - self.assertFalse(utils.jid.is_jid(20131219110700123489)) # int - self.assertFalse(utils.jid.is_jid('2013121911070012348911111')) # Wrong length - - @skipIf(NO_MOCK, NO_MOCK_REASON) - def test_path_join(self): - with patch('salt.utils.is_windows', return_value=False) as is_windows_mock: - self.assertFalse(is_windows_mock.return_value) - expected_path = '/a/b/c/d' - ret = utils.path_join('/a/b/c', 'd') - self.assertEqual(ret, expected_path) + self.assertTrue(salt.utils.jid.is_jid('20131219110700123489')) # Valid JID + self.assertFalse(salt.utils.jid.is_jid(20131219110700123489)) # int + self.assertFalse(salt.utils.jid.is_jid('2013121911070012348911111')) # Wrong length def test_build_whitespace_split_regex(self): expected_regex = '(?m)^(?:[\\s]+)?Lorem(?:[\\s]+)?ipsum(?:[\\s]+)?dolor(?:[\\s]+)?sit(?:[\\s]+)?amet\\,' \ '(?:[\\s]+)?$' - ret = utils.build_whitespace_split_regex(' '.join(LORUM_IPSUM.split()[:5])) + ret = salt.utils.build_whitespace_split_regex(' '.join(LOREM_IPSUM.split()[:5])) self.assertEqual(ret, expected_regex) - def test_get_function_argspec(self): - def dummy_func(first, second, third, 
fourth='fifth'): - pass - - expected_argspec = namedtuple('ArgSpec', 'args varargs keywords defaults')( - args=['first', 'second', 'third', 'fourth'], varargs=None, keywords=None, defaults=('fifth',)) - ret = utils.args.get_function_argspec(dummy_func) - - self.assertEqual(ret, expected_argspec) - def test_arg_lookup(self): def dummy_func(first, second, third, fourth='fifth'): pass expected_dict = {'args': ['first', 'second', 'third'], 'kwargs': {'fourth': 'fifth'}} - ret = utils.arg_lookup(dummy_func) + ret = salt.utils.arg_lookup(dummy_func) self.assertEqual(expected_dict, ret) @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -137,20 +120,20 @@ class UtilsTestCase(TestCase): args=['first', 'second', 'third', 'fourth'], varargs=None, keywords=None, defaults=('fifth',)) # Make sure we raise an error if we don't pass in the requisite number of arguments - self.assertRaises(SaltInvocationError, utils.format_call, dummy_func, {'1': 2}) + self.assertRaises(SaltInvocationError, salt.utils.format_call, dummy_func, {'1': 2}) # Make sure we warn on invalid kwargs - ret = utils.format_call(dummy_func, {'first': 2, 'second': 2, 'third': 3}) + ret = salt.utils.format_call(dummy_func, {'first': 2, 'second': 2, 'third': 3}) self.assertGreaterEqual(len(ret['warnings']), 1) - ret = utils.format_call(dummy_func, {'first': 2, 'second': 2, 'third': 3}, + ret = salt.utils.format_call(dummy_func, {'first': 2, 'second': 2, 'third': 3}, expected_extra_kws=('first', 'second', 'third')) self.assertDictEqual(ret, {'args': [], 'kwargs': {}}) def test_isorted(self): test_list = ['foo', 'Foo', 'bar', 'Bar'] expected_list = ['bar', 'Bar', 'foo', 'Foo'] - self.assertEqual(utils.isorted(test_list), expected_list) + self.assertEqual(salt.utils.isorted(test_list), expected_list) def test_mysql_to_dict(self): test_mysql_output = ['+----+------+-----------+------+---------+------+-------+------------------+', @@ -159,28 +142,13 @@ class UtilsTestCase(TestCase): '| 7 | root | localhost | NULL | Query | 0 | init | show processlist |', '+----+------+-----------+------+---------+------+-------+------------------+'] - ret = utils.mysql_to_dict(test_mysql_output, 'Info') + ret = salt.utils.mysql_to_dict(test_mysql_output, 'Info') expected_dict = { 'show processlist': {'Info': 'show processlist', 'db': 'NULL', 'State': 'init', 'Host': 'localhost', 'Command': 'Query', 'User': 'root', 'Time': 0, 'Id': 7}} self.assertDictEqual(ret, expected_dict) - def test_contains_whitespace(self): - does_contain_whitespace = 'A brown fox jumped over the red hen.' - does_not_contain_whitespace = 'Abrownfoxjumpedovertheredhen.' - - self.assertTrue(utils.contains_whitespace(does_contain_whitespace)) - self.assertFalse(utils.contains_whitespace(does_not_contain_whitespace)) - - def test_str_to_num(self): - self.assertEqual(7, utils.str_to_num('7')) - self.assertIsInstance(utils.str_to_num('7'), int) - self.assertEqual(7, utils.str_to_num('7.0')) - self.assertIsInstance(utils.str_to_num('7.0'), float) - self.assertEqual(utils.str_to_num('Seven'), 'Seven') - self.assertIsInstance(utils.str_to_num('Seven'), str) - def test_subdict_match(self): test_two_level_dict = {'foo': {'bar': 'baz'}} test_two_level_comb_dict = {'foo': {'bar': 'baz:woz'}} @@ -190,21 +158,21 @@ class UtilsTestCase(TestCase): test_three_level_dict = {'a': {'b': {'c': 'v'}}} self.assertTrue( - utils.subdict_match( + salt.utils.subdict_match( test_two_level_dict, 'foo:bar:baz' ) ) # In test_two_level_comb_dict, 'foo:bar' corresponds to 'baz:woz', not # 'baz'. This match should return False. 
self.assertFalse( - utils.subdict_match( + salt.utils.subdict_match( test_two_level_comb_dict, 'foo:bar:baz' ) ) # This tests matching with the delimiter in the value part (in other # words, that the path 'foo:bar' corresponds to the string 'baz:woz'). self.assertTrue( - utils.subdict_match( + salt.utils.subdict_match( test_two_level_comb_dict, 'foo:bar:baz:woz' ) ) @@ -212,7 +180,7 @@ class UtilsTestCase(TestCase): # to 'baz:woz:wiz', or if there was more deep nesting. But it does not, # so this should return False. self.assertFalse( - utils.subdict_match( + salt.utils.subdict_match( test_two_level_comb_dict, 'foo:bar:baz:woz:wiz' ) ) @@ -224,7 +192,7 @@ class UtilsTestCase(TestCase): # salt.utils.traverse_list_and_dict() so this particular assertion is a # sanity check. self.assertTrue( - utils.subdict_match( + salt.utils.subdict_match( test_two_level_dict_and_list, 'abc:ghi' ) ) @@ -232,25 +200,25 @@ class UtilsTestCase(TestCase): # list, embedded in a dict. This is a rather absurd case, but it # confirms that match recursion works properly. self.assertTrue( - utils.subdict_match( + salt.utils.subdict_match( test_two_level_dict_and_list, 'abc:lorem:ipsum:dolor:sit' ) ) # Test four level dict match for reference self.assertTrue( - utils.subdict_match( + salt.utils.subdict_match( test_three_level_dict, 'a:b:c:v' ) ) self.assertFalse( # Test regression in 2015.8 where 'a:c:v' would match 'a:b:c:v' - utils.subdict_match( + salt.utils.subdict_match( test_three_level_dict, 'a:c:v' ) ) # Test wildcard match self.assertTrue( - utils.subdict_match( + salt.utils.subdict_match( test_three_level_dict, 'a:*:c:v' ) ) @@ -260,13 +228,13 @@ class UtilsTestCase(TestCase): self.assertDictEqual( {'not_found': 'nope'}, - utils.traverse_dict( + salt.utils.traverse_dict( test_two_level_dict, 'foo:bar:baz', {'not_found': 'nope'} ) ) self.assertEqual( 'baz', - utils.traverse_dict( + salt.utils.traverse_dict( test_two_level_dict, 'foo:bar', {'not_found': 'not_found'} ) ) @@ -282,59 +250,53 @@ class UtilsTestCase(TestCase): # corresponding to the key path foo:bar. 
self.assertDictEqual( {'not_found': 'nope'}, - utils.traverse_dict_and_list( + salt.utils.traverse_dict_and_list( test_two_level_dict, 'foo:bar:baz', {'not_found': 'nope'} ) ) # Now check to ensure that foo:bar corresponds to baz self.assertEqual( 'baz', - utils.traverse_dict_and_list( + salt.utils.traverse_dict_and_list( test_two_level_dict, 'foo:bar', {'not_found': 'not_found'} ) ) # Check traversing too far self.assertDictEqual( {'not_found': 'nope'}, - utils.traverse_dict_and_list( + salt.utils.traverse_dict_and_list( test_two_level_dict_and_list, 'foo:bar', {'not_found': 'nope'} ) ) # Check index 1 (2nd element) of list corresponding to path 'foo' self.assertEqual( 'baz', - utils.traverse_dict_and_list( + salt.utils.traverse_dict_and_list( test_two_level_dict_and_list, 'foo:1', {'not_found': 'not_found'} ) ) # Traverse a couple times into dicts embedded in lists self.assertEqual( 'sit', - utils.traverse_dict_and_list( + salt.utils.traverse_dict_and_list( test_two_level_dict_and_list, 'foo:lorem:ipsum:dolor', {'not_found': 'not_found'} ) ) - def test_clean_kwargs(self): - self.assertDictEqual(utils.clean_kwargs(foo='bar'), {'foo': 'bar'}) - self.assertDictEqual(utils.clean_kwargs(__pub_foo='bar'), {}) - self.assertDictEqual(utils.clean_kwargs(__foo_bar='gwar'), {}) - self.assertDictEqual(utils.clean_kwargs(foo_bar='gwar'), {'foo_bar': 'gwar'}) - def test_sanitize_win_path_string(self): p = '\\windows\\system' - self.assertEqual(utils.sanitize_win_path_string('\\windows\\system'), '\\windows\\system') - self.assertEqual(utils.sanitize_win_path_string('\\bo:g|us\\p?at*h>'), '\\bo_g_us\\p_at_h_') + self.assertEqual(salt.utils.sanitize_win_path_string('\\windows\\system'), '\\windows\\system') + self.assertEqual(salt.utils.sanitize_win_path_string('\\bo:g|us\\p?at*h>'), '\\bo_g_us\\p_at_h_') def test_check_state_result(self): - self.assertFalse(utils.check_state_result(None), "Failed to handle None as an invalid data type.") - self.assertFalse(utils.check_state_result([]), "Failed to handle an invalid data type.") - self.assertFalse(utils.check_state_result({}), "Failed to handle an empty dictionary.") - self.assertFalse(utils.check_state_result({'host1': []}), "Failed to handle an invalid host data structure.") + self.assertFalse(salt.utils.check_state_result(None), "Failed to handle None as an invalid data type.") + self.assertFalse(salt.utils.check_state_result([]), "Failed to handle an invalid data type.") + self.assertFalse(salt.utils.check_state_result({}), "Failed to handle an empty dictionary.") + self.assertFalse(salt.utils.check_state_result({'host1': []}), "Failed to handle an invalid host data structure.") test_valid_state = {'host1': {'test_state': {'result': 'We have liftoff!'}}} - self.assertTrue(utils.check_state_result(test_valid_state)) + self.assertTrue(salt.utils.check_state_result(test_valid_state)) test_valid_false_states = { 'test1': OrderedDict([ ('host1', @@ -383,7 +345,7 @@ class UtilsTestCase(TestCase): } for test, data in six.iteritems(test_valid_false_states): self.assertFalse( - utils.check_state_result(data), + salt.utils.check_state_result(data), msg='{0} failed'.format(test)) test_valid_true_states = { 'test1': OrderedDict([ @@ -434,7 +396,7 @@ class UtilsTestCase(TestCase): } for test, data in six.iteritems(test_valid_true_states): self.assertTrue( - utils.check_state_result(data), + salt.utils.check_state_result(data), msg='{0} failed'.format(test)) test_invalid_true_ht_states = { 'test_onfail_simple2': ( @@ -610,7 +572,7 @@ class UtilsTestCase(TestCase): 
t_ = t_.split('_|-')[1] tdata['__id__'] = t_ self.assertFalse( - utils.check_state_result(data, highstate=ht), + salt.utils.check_state_result(data, highstate=ht), msg='{0} failed'.format(test)) test_valid_true_ht_states = { @@ -744,10 +706,10 @@ class UtilsTestCase(TestCase): t_ = t_.split('_|-')[1] tdata['__id__'] = t_ self.assertTrue( - utils.check_state_result(data, highstate=ht), + salt.utils.check_state_result(data, highstate=ht), msg='{0} failed'.format(test)) test_valid_false_state = {'host1': {'test_state': {'result': False}}} - self.assertFalse(utils.check_state_result(test_valid_false_state)) + self.assertFalse(salt.utils.check_state_result(test_valid_false_state)) @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(not hasattr(zmq, 'IPC_PATH_MAX_LEN'), "ZMQ does not have max length support.") @@ -756,40 +718,33 @@ class UtilsTestCase(TestCase): Ensure we throw an exception if we have a too-long IPC URI ''' with patch('zmq.IPC_PATH_MAX_LEN', 1): - self.assertRaises(SaltSystemExit, utils.zeromq.check_ipc_path_max_len, '1' * 1024) + self.assertRaises(SaltSystemExit, salt.utils.zeromq.check_ipc_path_max_len, '1' * 1024) def test_test_mode(self): - self.assertTrue(utils.test_mode(test=True)) - self.assertTrue(utils.test_mode(Test=True)) - self.assertTrue(utils.test_mode(tEsT=True)) + self.assertTrue(salt.utils.test_mode(test=True)) + self.assertTrue(salt.utils.test_mode(Test=True)) + self.assertTrue(salt.utils.test_mode(tEsT=True)) def test_option(self): test_two_level_dict = {'foo': {'bar': 'baz'}} - self.assertDictEqual({'not_found': 'nope'}, utils.option('foo:bar', {'not_found': 'nope'})) - self.assertEqual('baz', utils.option('foo:bar', {'not_found': 'nope'}, opts=test_two_level_dict)) - self.assertEqual('baz', utils.option('foo:bar', {'not_found': 'nope'}, pillar={'master': test_two_level_dict})) - self.assertEqual('baz', utils.option('foo:bar', {'not_found': 'nope'}, pillar=test_two_level_dict)) + self.assertDictEqual({'not_found': 'nope'}, salt.utils.option('foo:bar', {'not_found': 'nope'})) + self.assertEqual('baz', salt.utils.option('foo:bar', {'not_found': 'nope'}, opts=test_two_level_dict)) + self.assertEqual('baz', salt.utils.option('foo:bar', {'not_found': 'nope'}, pillar={'master': test_two_level_dict})) + self.assertEqual('baz', salt.utils.option('foo:bar', {'not_found': 'nope'}, pillar=test_two_level_dict)) def test_get_hash_exception(self): - self.assertRaises(ValueError, utils.get_hash, '/tmp/foo/', form='INVALID') - - def test_parse_kwarg(self): - ret = args.parse_kwarg('foo=bar') - self.assertEqual(ret, ('foo', 'bar')) - - ret = args.parse_kwarg('foobar') - self.assertEqual(ret, (None, None)) + self.assertRaises(ValueError, salt.utils.get_hash, '/tmp/foo/', form='INVALID') @skipIf(NO_MOCK, NO_MOCK_REASON) def test_date_cast(self): now = datetime.datetime.now() with patch('datetime.datetime'): datetime.datetime.now.return_value = now - self.assertEqual(now, utils.date_cast(None)) - self.assertEqual(now, utils.date_cast(now)) + self.assertEqual(now, salt.utils.date_cast(None)) + self.assertEqual(now, salt.utils.date_cast(now)) try: - ret = utils.date_cast('Mon Dec 23 10:19:15 MST 2013') + ret = salt.utils.date_cast('Mon Dec 23 10:19:15 MST 2013') expected_ret = datetime.datetime(2013, 12, 23, 10, 19, 15) self.assertEqual(ret, expected_ret) except RuntimeError: @@ -804,49 +759,49 @@ class UtilsTestCase(TestCase): expected_ret = '2002-12-25' src = datetime.datetime(2002, 12, 25, 12, 00, 00, 00) - ret = utils.date_format(src) + ret = salt.utils.date_format(src) 
self.assertEqual(ret, expected_ret) src = '2002/12/25' - ret = utils.date_format(src) + ret = salt.utils.date_format(src) self.assertEqual(ret, expected_ret) src = 1040814000 - ret = utils.date_format(src) + ret = salt.utils.date_format(src) self.assertEqual(ret, expected_ret) src = '1040814000' - ret = utils.date_format(src) + ret = salt.utils.date_format(src) self.assertEqual(ret, expected_ret) def test_yaml_dquote(self): for teststr in (r'"\ []{}"',): - self.assertEqual(teststr, yaml.safe_load(utils.yamlencoding.yaml_dquote(teststr))) + self.assertEqual(teststr, yaml.safe_load(salt.utils.yamlencoding.yaml_dquote(teststr))) def test_yaml_dquote_doesNotAddNewLines(self): teststr = '"' * 100 - self.assertNotIn('\n', utils.yamlencoding.yaml_dquote(teststr)) + self.assertNotIn('\n', salt.utils.yamlencoding.yaml_dquote(teststr)) def test_yaml_squote(self): - ret = utils.yamlencoding.yaml_squote(r'"') + ret = salt.utils.yamlencoding.yaml_squote(r'"') self.assertEqual(ret, r"""'"'""") def test_yaml_squote_doesNotAddNewLines(self): teststr = "'" * 100 - self.assertNotIn('\n', utils.yamlencoding.yaml_squote(teststr)) + self.assertNotIn('\n', salt.utils.yamlencoding.yaml_squote(teststr)) def test_yaml_encode(self): for testobj in (None, True, False, '[7, 5]', '"monkey"', 5, 7.5, "2014-06-02 15:30:29.7"): - self.assertEqual(testobj, yaml.safe_load(utils.yamlencoding.yaml_encode(testobj))) + self.assertEqual(testobj, yaml.safe_load(salt.utils.yamlencoding.yaml_encode(testobj))) for testobj in ({}, [], set()): - self.assertRaises(TypeError, utils.yamlencoding.yaml_encode, testobj) + self.assertRaises(TypeError, salt.utils.yamlencoding.yaml_encode, testobj) def test_compare_dicts(self): - ret = utils.compare_dicts(old={'foo': 'bar'}, new={'foo': 'bar'}) + ret = salt.utils.compare_dicts(old={'foo': 'bar'}, new={'foo': 'bar'}) self.assertEqual(ret, {}) - ret = utils.compare_dicts(old={'foo': 'bar'}, new={'foo': 'woz'}) + ret = salt.utils.compare_dicts(old={'foo': 'bar'}, new={'foo': 'woz'}) expected_ret = {'foo': {'new': 'woz', 'old': 'bar'}} self.assertDictEqual(ret, expected_ret) @@ -857,14 +812,14 @@ class UtilsTestCase(TestCase): sys_mock = create_autospec(_test_spec) test_functions = {'test_module.test_spec': sys_mock} - ret = utils.argspec_report(test_functions, 'test_module.test_spec') + ret = salt.utils.argspec_report(test_functions, 'test_module.test_spec') self.assertDictEqual(ret, {'test_module.test_spec': {'kwargs': True, 'args': None, 'defaults': None, 'varargs': True}}) def test_decode_list(self): test_data = [u'unicode_str', [u'unicode_item_in_list', 'second_item_in_list'], {'dict_key': u'dict_val'}] expected_ret = ['unicode_str', ['unicode_item_in_list', 'second_item_in_list'], {'dict_key': 'dict_val'}] - ret = utils.decode_list(test_data) + ret = salt.utils.decode_list(test_data) self.assertEqual(ret, expected_ret) def test_decode_dict(self): @@ -874,7 +829,7 @@ class UtilsTestCase(TestCase): expected_ret = {'test_unicode_key': 'test_unicode_val', 'test_list_key': ['list_1', 'unicode_list_two'], 'test_dict_key': {'test_sub_dict_key': 'test_sub_dict_val'}} - ret = utils.decode_dict(test_data) + ret = salt.utils.decode_dict(test_data) self.assertDictEqual(ret, expected_ret) def test_find_json(self): @@ -910,32 +865,16 @@ class UtilsTestCase(TestCase): 'Abbrev': 'ISO 8879:1986', 'ID': 'SGML'}}, 'title': 'S'}, 'title': 'example glossary'}} # First test the valid JSON - ret = utils.find_json(test_sample_json) + ret = salt.utils.find_json(test_sample_json) self.assertDictEqual(ret, 
expected_ret) # Now pre-pend some garbage and re-test - garbage_prepend_json = '{0}{1}'.format(LORUM_IPSUM, test_sample_json) - ret = utils.find_json(garbage_prepend_json) + garbage_prepend_json = '{0}{1}'.format(LOREM_IPSUM, test_sample_json) + ret = salt.utils.find_json(garbage_prepend_json) self.assertDictEqual(ret, expected_ret) # Test to see if a ValueError is raised if no JSON is passed in - self.assertRaises(ValueError, utils.find_json, LORUM_IPSUM) - - def test_is_bin_str(self): - self.assertFalse(utils.is_bin_str(LORUM_IPSUM)) - - zero_str = '{0}{1}'.format(LORUM_IPSUM, '\0') - self.assertTrue(utils.is_bin_str(zero_str)) - - # To to ensure safe exit if str passed doesn't evaluate to True - self.assertFalse(utils.is_bin_str('')) - - nontext = 3 * (''.join([chr(x) for x in range(1, 32) if x not in (8, 9, 10, 12, 13)])) - almost_bin_str = '{0}{1}'.format(LORUM_IPSUM[:100], nontext[:42]) - self.assertFalse(utils.is_bin_str(almost_bin_str)) - - bin_str = almost_bin_str + '\x01' - self.assertTrue(utils.is_bin_str(bin_str)) + self.assertRaises(ValueError, salt.utils.find_json, LOREM_IPSUM) def test_repack_dict(self): list_of_one_element_dicts = [{'dict_key_1': 'dict_val_1'}, @@ -944,26 +883,26 @@ class UtilsTestCase(TestCase): expected_ret = {'dict_key_1': 'dict_val_1', 'dict_key_2': 'dict_val_2', 'dict_key_3': 'dict_val_3'} - ret = utils.repack_dictlist(list_of_one_element_dicts) + ret = salt.utils.repack_dictlist(list_of_one_element_dicts) self.assertDictEqual(ret, expected_ret) # Try with yaml yaml_key_val_pair = '- key1: val1' - ret = utils.repack_dictlist(yaml_key_val_pair) + ret = salt.utils.repack_dictlist(yaml_key_val_pair) self.assertDictEqual(ret, {'key1': 'val1'}) # Make sure we handle non-yaml junk data - ret = utils.repack_dictlist(LORUM_IPSUM) + ret = salt.utils.repack_dictlist(LOREM_IPSUM) self.assertDictEqual(ret, {}) def test_get_colors(self): - ret = utils.get_colors() + ret = salt.utils.get_colors() self.assertEqual('\x1b[0;37m', str(ret['LIGHT_GRAY'])) - ret = utils.get_colors(use=False) + ret = salt.utils.get_colors(use=False) self.assertDictContainsSubset({'LIGHT_GRAY': ''}, ret) - ret = utils.get_colors(use='LIGHT_GRAY') + ret = salt.utils.get_colors(use='LIGHT_GRAY') # LIGHT_YELLOW now == LIGHT_GRAY self.assertEqual(str(ret['LIGHT_YELLOW']), str(ret['LIGHT_GRAY'])) @@ -971,119 +910,32 @@ class UtilsTestCase(TestCase): def test_daemonize_if(self): # pylint: disable=assignment-from-none with patch('sys.argv', ['salt-call']): - ret = utils.daemonize_if({}) + ret = salt.utils.daemonize_if({}) self.assertEqual(None, ret) - ret = utils.daemonize_if({'multiprocessing': False}) + ret = salt.utils.daemonize_if({'multiprocessing': False}) self.assertEqual(None, ret) with patch('sys.platform', 'win'): - ret = utils.daemonize_if({}) + ret = salt.utils.daemonize_if({}) self.assertEqual(None, ret) with patch('salt.utils.daemonize'): - utils.daemonize_if({}) - self.assertTrue(utils.daemonize.called) + salt.utils.daemonize_if({}) + self.assertTrue(salt.utils.daemonize.called) # pylint: enable=assignment-from-none - @skipIf(NO_MOCK, NO_MOCK_REASON) - def test_which_bin(self): - ret = utils.which_bin('str') - self.assertIs(None, ret) - - test_exes = ['ls', 'echo'] - with patch('salt.utils.which', return_value='/tmp/dummy_path'): - ret = utils.which_bin(test_exes) - self.assertEqual(ret, '/tmp/dummy_path') - - ret = utils.which_bin([]) - self.assertIs(None, ret) - - with patch('salt.utils.which', return_value=''): - ret = utils.which_bin(test_exes) - self.assertIs(None, ret) - 
@skipIf(NO_MOCK, NO_MOCK_REASON) def test_gen_jid(self): now = datetime.datetime(2002, 12, 25, 12, 00, 00, 00) with patch('datetime.datetime'): datetime.datetime.now.return_value = now - ret = utils.jid.gen_jid() + ret = salt.utils.jid.gen_jid() self.assertEqual(ret, '20021225120000000000') @skipIf(NO_MOCK, NO_MOCK_REASON) def test_check_or_die(self): - self.assertRaises(CommandNotFoundError, utils.check_or_die, None) + self.assertRaises(CommandNotFoundError, salt.utils.check_or_die, None) - with patch('salt.utils.which', return_value=False): - self.assertRaises(CommandNotFoundError, utils.check_or_die, 'FAKE COMMAND') - - @skipIf(NO_MOCK, NO_MOCK_REASON) - def test_compare_versions(self): - ret = utils.compare_versions('1.0', '==', '1.0') - self.assertTrue(ret) - - ret = utils.compare_versions('1.0', '!=', '1.0') - self.assertFalse(ret) - - with patch('salt.utils.log') as log_mock: - ret = utils.compare_versions('1.0', 'HAH I AM NOT A COMP OPERATOR! I AM YOUR FATHER!', '1.0') - self.assertTrue(log_mock.error.called) - - def test_kwargs_warn_until(self): - # Test invalid version arg - self.assertRaises(RuntimeError, utils.kwargs_warn_until, {}, []) - - def test_to_str(self): - for x in (123, (1, 2, 3), [1, 2, 3], {1: 23}, None): - self.assertRaises(TypeError, utils.to_str, x) - if six.PY3: - self.assertEqual(utils.to_str('plugh'), 'plugh') - self.assertEqual(utils.to_str('áéíóúý', 'utf-8'), 'áéíóúý') - un = '\u4e2d\u56fd\u8a9e (\u7e41\u4f53)' # pylint: disable=anomalous-unicode-escape-in-string - ut = bytes((0xe4, 0xb8, 0xad, 0xe5, 0x9b, 0xbd, 0xe8, 0xaa, 0x9e, 0x20, 0x28, 0xe7, 0xb9, 0x81, 0xe4, 0xbd, 0x93, 0x29)) - self.assertEqual(utils.to_str(ut, 'utf-8'), un) - self.assertEqual(utils.to_str(bytearray(ut), 'utf-8'), un) - # Test situation when a minion returns incorrect utf-8 string because of... 
million reasons - ut2 = b'\x9c' - self.assertEqual(utils.to_str(ut2, 'utf-8'), u'\ufffd') - self.assertEqual(utils.to_str(bytearray(ut2), 'utf-8'), u'\ufffd') - else: - self.assertEqual(utils.to_str('plugh'), 'plugh') - self.assertEqual(utils.to_str(u'áéíóúý', 'utf-8'), 'áéíóúý') - un = u'\u4e2d\u56fd\u8a9e (\u7e41\u4f53)' - ut = '\xe4\xb8\xad\xe5\x9b\xbd\xe8\xaa\x9e (\xe7\xb9\x81\xe4\xbd\x93)' - self.assertEqual(utils.to_str(un, 'utf-8'), ut) - self.assertEqual(utils.to_str(bytearray(ut), 'utf-8'), ut) - - def test_to_bytes(self): - for x in (123, (1, 2, 3), [1, 2, 3], {1: 23}, None): - self.assertRaises(TypeError, utils.to_bytes, x) - if six.PY3: - self.assertEqual(utils.to_bytes('xyzzy'), b'xyzzy') - ut = bytes((0xe4, 0xb8, 0xad, 0xe5, 0x9b, 0xbd, 0xe8, 0xaa, 0x9e, 0x20, 0x28, 0xe7, 0xb9, 0x81, 0xe4, 0xbd, 0x93, 0x29)) - un = '\u4e2d\u56fd\u8a9e (\u7e41\u4f53)' # pylint: disable=anomalous-unicode-escape-in-string - self.assertEqual(utils.to_bytes(ut), ut) - self.assertEqual(utils.to_bytes(bytearray(ut)), ut) - self.assertEqual(utils.to_bytes(un, 'utf-8'), ut) - else: - self.assertEqual(utils.to_bytes('xyzzy'), 'xyzzy') - ut = ''.join([chr(x) for x in (0xe4, 0xb8, 0xad, 0xe5, 0x9b, 0xbd, 0xe8, 0xaa, 0x9e, 0x20, 0x28, 0xe7, 0xb9, 0x81, 0xe4, 0xbd, 0x93, 0x29)]) - un = u'\u4e2d\u56fd\u8a9e (\u7e41\u4f53)' # pylint: disable=anomalous-unicode-escape-in-string - self.assertEqual(utils.to_bytes(ut), ut) - self.assertEqual(utils.to_bytes(bytearray(ut)), ut) - self.assertEqual(utils.to_bytes(un, 'utf-8'), ut) - - def test_to_unicode(self): - if six.PY3: - self.assertEqual(utils.to_unicode('plugh'), 'plugh') - self.assertEqual(utils.to_unicode('áéíóúý'), 'áéíóúý') - un = '\u4e2d\u56fd\u8a9e (\u7e41\u4f53)' # pylint: disable=anomalous-unicode-escape-in-string - ut = bytes((0xe4, 0xb8, 0xad, 0xe5, 0x9b, 0xbd, 0xe8, 0xaa, 0x9e, 0x20, 0x28, 0xe7, 0xb9, 0x81, 0xe4, 0xbd, 0x93, 0x29)) - self.assertEqual(utils.to_unicode(ut, 'utf-8'), un) - self.assertEqual(utils.to_unicode(bytearray(ut), 'utf-8'), un) - else: - self.assertEqual(utils.to_unicode('xyzzy', 'utf-8'), u'xyzzy') - ut = '\xe4\xb8\xad\xe5\x9b\xbd\xe8\xaa\x9e (\xe7\xb9\x81\xe4\xbd\x93)' - un = u'\u4e2d\u56fd\u8a9e (\u7e41\u4f53)' - self.assertEqual(utils.to_unicode(ut, 'utf-8'), un) + with patch('salt.utils.path.which', return_value=False): + self.assertRaises(CommandNotFoundError, salt.utils.check_or_die, 'FAKE COMMAND') diff --git a/tests/unit/utils/test_versions.py b/tests/unit/utils/test_versions.py index bc6a665ba6..d6b6f0f165 100644 --- a/tests/unit/utils/test_versions.py +++ b/tests/unit/utils/test_versions.py @@ -10,15 +10,20 @@ # Import python libs from __future__ import absolute_import +import sys +import warnings # Import Salt Testing libs -from tests.support.unit import TestCase +from tests.support.unit import TestCase, skipIf +from tests.support.mock import patch, NO_MOCK, NO_MOCK_REASON # Import Salt libs +import salt.version +import salt.utils.versions from salt.utils.versions import LooseVersion, StrictVersion # Import 3rd-party libs -import salt.ext.six as six +from salt.ext import six if six.PY2: cmp_method = '__cmp__' @@ -89,3 +94,162 @@ class VersionTestCase(TestCase): self.assertEqual(res, wanted, 'cmp(%s, %s) should be %s, got %s' % (v1, v2, wanted, res)) + + +class VersionFuncsTestCase(TestCase): + + @skipIf(NO_MOCK, NO_MOCK_REASON) + def test_compare(self): + ret = salt.utils.versions.compare('1.0', '==', '1.0') + self.assertTrue(ret) + + ret = salt.utils.versions.compare('1.0', '!=', '1.0') + self.assertFalse(ret) + + 
with patch.object(salt.utils.versions, 'log') as log_mock: + ret = salt.utils.versions.compare('1.0', 'HAH I AM NOT A COMP OPERATOR! I AM YOUR FATHER!', '1.0') + self.assertTrue(log_mock.error.called) + + def test_kwargs_warn_until(self): + # Test invalid version arg + self.assertRaises(RuntimeError, salt.utils.versions.kwargs_warn_until, {}, []) + + def test_warn_until_warning_raised(self): + # We *always* want *all* warnings thrown on this module + warnings.filterwarnings('always', '', DeprecationWarning, __name__) + + def raise_warning(_version_info_=(0, 16, 0)): + salt.utils.versions.warn_until( + (0, 17), 'Deprecation Message!', + _version_info_=_version_info_ + + ) + + def raise_named_version_warning(_version_info_=(0, 16, 0)): + salt.utils.versions.warn_until( + 'Hydrogen', 'Deprecation Message!', + _version_info_=_version_info_ + ) + + # raise_warning should show warning until version info is >= (0, 17) + with warnings.catch_warnings(record=True) as recorded_warnings: + raise_warning() + self.assertEqual( + 'Deprecation Message!', str(recorded_warnings[0].message) + ) + + # raise_warning should show warning until version info is >= (0, 17) + with warnings.catch_warnings(record=True) as recorded_warnings: + raise_named_version_warning() + self.assertEqual( + 'Deprecation Message!', str(recorded_warnings[0].message) + ) + + # the deprecation warning is not issued because we passed + # _dont_call_warning + with warnings.catch_warnings(record=True) as recorded_warnings: + salt.utils.versions.warn_until( + (0, 17), 'Foo', _dont_call_warnings=True, + _version_info_=(0, 16) + ) + self.assertEqual(0, len(recorded_warnings)) + + # Let's set version info to (0, 17), a RuntimeError should be raised + with self.assertRaisesRegex( + RuntimeError, + r'The warning triggered on filename \'(.*)test_versions.py\', ' + r'line number ([\d]+), is supposed to be shown until version ' + r'0.17.0 is released. Current version is now 0.17.0. ' + r'Please remove the warning.'): + raise_warning(_version_info_=(0, 17, 0)) + + # Let's set version info to (0, 17), a RuntimeError should be raised + with self.assertRaisesRegex( + RuntimeError, + r'The warning triggered on filename \'(.*)test_versions.py\', ' + r'line number ([\d]+), is supposed to be shown until version ' + r'(.*) is released. Current version is now ' + r'([\d.]+). Please remove the warning.'): + raise_named_version_warning(_version_info_=(getattr(sys, 'maxint', None) or getattr(sys, 'maxsize'), 16, 0)) + + # Even though we're calling warn_until, we pass _dont_call_warnings + # because we're only after the RuntimeError + with self.assertRaisesRegex( + RuntimeError, + r'The warning triggered on filename \'(.*)test_versions.py\', ' + r'line number ([\d]+), is supposed to be shown until version ' + r'0.17.0 is released. Current version is now ' + r'(.*). Please remove the warning.'): + salt.utils.versions.warn_until( + (0, 17), 'Foo', _dont_call_warnings=True + ) + + with self.assertRaisesRegex( + RuntimeError, + r'The warning triggered on filename \'(.*)test_versions.py\', ' + r'line number ([\d]+), is supposed to be shown until version ' + r'(.*) is released. Current version is now ' + r'(.*). 
Please remove the warning.'): + salt.utils.versions.warn_until( + 'Hydrogen', 'Foo', _dont_call_warnings=True, + _version_info_=(getattr(sys, 'maxint', None) or getattr(sys, 'maxsize'), 16, 0) + ) + + # version on the deprecation message gets properly formatted + with warnings.catch_warnings(record=True) as recorded_warnings: + vrs = salt.version.SaltStackVersion.from_name('Helium') + salt.utils.versions.warn_until( + 'Helium', 'Deprecation Message until {version}!', + _version_info_=(vrs.major - 1, 0) + ) + self.assertEqual( + 'Deprecation Message until {0}!'.format(vrs.formatted_version), + str(recorded_warnings[0].message) + ) + + def test_kwargs_warn_until_warning_raised(self): + # We *always* want *all* warnings thrown on this module + warnings.filterwarnings('always', '', DeprecationWarning, __name__) + + def raise_warning(**kwargs): + _version_info_ = kwargs.pop('_version_info_', (0, 16, 0)) + salt.utils.versions.kwargs_warn_until( + kwargs, + (0, 17), + _version_info_=_version_info_ + ) + + # raise_warning({...}) should show warning until version info is >= (0, 17) + with warnings.catch_warnings(record=True) as recorded_warnings: + raise_warning(foo=42) # with a kwarg + self.assertEqual( + 'The following parameter(s) have been deprecated and ' + 'will be removed in \'0.17.0\': \'foo\'.', + str(recorded_warnings[0].message) + ) + # With no **kwargs, should not show warning until version info is >= (0, 17) + with warnings.catch_warnings(record=True) as recorded_warnings: + salt.utils.versions.kwargs_warn_until( + {}, # no kwargs + (0, 17), + _version_info_=(0, 16, 0) + ) + self.assertEqual(0, len(recorded_warnings)) + + # Let's set version info to (0, 17), a RuntimeError should be raised + # regardless of whether or not we pass any **kwargs. + with self.assertRaisesRegex( + RuntimeError, + r'The warning triggered on filename \'(.*)test_versions.py\', ' + r'line number ([\d]+), is supposed to be shown until version ' + r'0.17.0 is released. Current version is now 0.17.0. ' + r'Please remove the warning.'): + raise_warning(_version_info_=(0, 17)) # no kwargs + + with self.assertRaisesRegex( + RuntimeError, + r'The warning triggered on filename \'(.*)test_versions.py\', ' + r'line number ([\d]+), is supposed to be shown until version ' + r'0.17.0 is released. Current version is now 0.17.0. 
' + r'Please remove the warning.'): + raise_warning(bar='baz', qux='quux', _version_info_=(0, 17)) # some kwargs diff --git a/tests/unit/utils/test_vt.py b/tests/unit/utils/test_vt.py index a828ae3f15..b65f904c5c 100644 --- a/tests/unit/utils/test_vt.py +++ b/tests/unit/utils/test_vt.py @@ -9,7 +9,7 @@ VirtualTerminal tests ''' -# Import python libs +# Import Python libs from __future__ import absolute_import import os import sys @@ -20,9 +20,9 @@ import time # Import Salt Testing libs from tests.support.unit import TestCase, skipIf -# Import salt libs -import salt.utils +# Import Salt libs import salt.utils.files +import salt.utils.platform import salt.utils.vt # Import 3rd-party libs @@ -72,7 +72,7 @@ class VTTestCase(TestCase): stdout, _ = proc.communicate() return int(stdout.strip()) except (ValueError, OSError, IOError): - if salt.utils.is_darwin(): + if salt.utils.platform.is_darwin(): # We're unable to findout how many PTY's are open self.skipTest( 'Unable to find out how many PTY\'s are open on Darwin - ' diff --git a/tests/unit/utils/test_warnings.py b/tests/unit/utils/test_warnings.py deleted file mode 100644 index e658def504..0000000000 --- a/tests/unit/utils/test_warnings.py +++ /dev/null @@ -1,165 +0,0 @@ -# -*- coding: utf-8 -*- -''' - :codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)` - - - tests.unit.utils.test_warnings - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Test ``salt.utils.warn_until`` and ``salt.utils.kwargs_warn_until`` -''' - -# Import python libs -from __future__ import absolute_import -import sys -import warnings - -# Import Salt Testing libs -from tests.support.unit import TestCase - -# Import salt libs -from salt.utils import warn_until, kwargs_warn_until -from salt.version import SaltStackVersion - - -class WarnUntilTestCase(TestCase): - - def test_warn_until_warning_raised(self): - # We *always* want *all* warnings thrown on this module - warnings.filterwarnings('always', '', DeprecationWarning, __name__) - - def raise_warning(_version_info_=(0, 16, 0)): - warn_until( - (0, 17), 'Deprecation Message!', - _version_info_=_version_info_ - - ) - - def raise_named_version_warning(_version_info_=(0, 16, 0)): - warn_until( - 'Hydrogen', 'Deprecation Message!', - _version_info_=_version_info_ - ) - - # raise_warning should show warning until version info is >= (0, 17) - with warnings.catch_warnings(record=True) as recorded_warnings: - raise_warning() - self.assertEqual( - 'Deprecation Message!', str(recorded_warnings[0].message) - ) - - # raise_warning should show warning until version info is >= (0, 17) - with warnings.catch_warnings(record=True) as recorded_warnings: - raise_named_version_warning() - self.assertEqual( - 'Deprecation Message!', str(recorded_warnings[0].message) - ) - - # the deprecation warning is not issued because we passed - # _dont_call_warning - with warnings.catch_warnings(record=True) as recorded_warnings: - warn_until( - (0, 17), 'Foo', _dont_call_warnings=True, - _version_info_=(0, 16) - ) - self.assertEqual(0, len(recorded_warnings)) - - # Let's set version info to (0, 17), a RuntimeError should be raised - with self.assertRaisesRegex( - RuntimeError, - r'The warning triggered on filename \'(.*)test_warnings.py\', ' - r'line number ([\d]+), is supposed to be shown until version ' - r'0.17.0 is released. Current version is now 0.17.0. 
' - r'Please remove the warning.'): - raise_warning(_version_info_=(0, 17, 0)) - - # Let's set version info to (0, 17), a RuntimeError should be raised - with self.assertRaisesRegex( - RuntimeError, - r'The warning triggered on filename \'(.*)test_warnings.py\', ' - r'line number ([\d]+), is supposed to be shown until version ' - r'(.*) is released. Current version is now ' - r'([\d.]+). Please remove the warning.'): - raise_named_version_warning(_version_info_=(getattr(sys, 'maxint', None) or getattr(sys, 'maxsize'), 16, 0)) - - # Even though we're calling warn_until, we pass _dont_call_warnings - # because we're only after the RuntimeError - with self.assertRaisesRegex( - RuntimeError, - r'The warning triggered on filename \'(.*)test_warnings.py\', ' - r'line number ([\d]+), is supposed to be shown until version ' - r'0.17.0 is released. Current version is now ' - r'(.*). Please remove the warning.'): - warn_until( - (0, 17), 'Foo', _dont_call_warnings=True - ) - - with self.assertRaisesRegex( - RuntimeError, - r'The warning triggered on filename \'(.*)test_warnings.py\', ' - r'line number ([\d]+), is supposed to be shown until version ' - r'(.*) is released. Current version is now ' - r'(.*). Please remove the warning.'): - warn_until( - 'Hydrogen', 'Foo', _dont_call_warnings=True, - _version_info_=(getattr(sys, 'maxint', None) or getattr(sys, 'maxsize'), 16, 0) - ) - - # version on the deprecation message gets properly formatted - with warnings.catch_warnings(record=True) as recorded_warnings: - vrs = SaltStackVersion.from_name('Helium') - warn_until( - 'Helium', 'Deprecation Message until {version}!', - _version_info_=(vrs.major - 1, 0) - ) - self.assertEqual( - 'Deprecation Message until {0}!'.format(vrs.formatted_version), - str(recorded_warnings[0].message) - ) - - def test_kwargs_warn_until_warning_raised(self): - # We *always* want *all* warnings thrown on this module - warnings.filterwarnings('always', '', DeprecationWarning, __name__) - - def raise_warning(**kwargs): - _version_info_ = kwargs.pop('_version_info_', (0, 16, 0)) - kwargs_warn_until( - kwargs, - (0, 17), - _version_info_=_version_info_ - ) - - # raise_warning({...}) should show warning until version info is >= (0, 17) - with warnings.catch_warnings(record=True) as recorded_warnings: - raise_warning(foo=42) # with a kwarg - self.assertEqual( - 'The following parameter(s) have been deprecated and ' - 'will be removed in \'0.17.0\': \'foo\'.', - str(recorded_warnings[0].message) - ) - # With no **kwargs, should not show warning until version info is >= (0, 17) - with warnings.catch_warnings(record=True) as recorded_warnings: - kwargs_warn_until( - {}, # no kwargs - (0, 17), - _version_info_=(0, 16, 0) - ) - self.assertEqual(0, len(recorded_warnings)) - - # Let's set version info to (0, 17), a RuntimeError should be raised - # regardless of whether or not we pass any **kwargs. - with self.assertRaisesRegex( - RuntimeError, - r'The warning triggered on filename \'(.*)test_warnings.py\', ' - r'line number ([\d]+), is supposed to be shown until version ' - r'0.17.0 is released. Current version is now 0.17.0. ' - r'Please remove the warning.'): - raise_warning(_version_info_=(0, 17)) # no kwargs - - with self.assertRaisesRegex( - RuntimeError, - r'The warning triggered on filename \'(.*)test_warnings.py\', ' - r'line number ([\d]+), is supposed to be shown until version ' - r'0.17.0 is released. Current version is now 0.17.0. 
' - r'Please remove the warning.'): - raise_warning(bar='baz', qux='quux', _version_info_=(0, 17)) # some kwargs diff --git a/tests/unit/utils/test_which.py b/tests/unit/utils/test_which.py index 9ab674791d..4dfae90efe 100644 --- a/tests/unit/utils/test_which.py +++ b/tests/unit/utils/test_which.py @@ -9,29 +9,29 @@ from tests.support.unit import skipIf, TestCase from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch # Import salt libs -import salt.utils +import salt.utils.path @skipIf(NO_MOCK, NO_MOCK_REASON) class TestWhich(TestCase): ''' - Tests salt.utils.which function to ensure that it returns True as + Tests salt.utils.path.which function to ensure that it returns True as expected. ''' # The mock patch below will make sure that ALL calls to the which function # returns None def test_missing_binary_in_linux(self): - with patch('salt.utils.which', lambda exe: None): + with patch('salt.utils.path.which', lambda exe: None): self.assertTrue( - salt.utils.which('this-binary-does-not-exist') is None + salt.utils.path.which('this-binary-does-not-exist') is None ) # The mock patch below will make sure that ALL calls to the which function # return whatever is sent to it def test_existing_binary_in_linux(self): - with patch('salt.utils.which', lambda exe: exe): - self.assertTrue(salt.utils.which('this-binary-exists-under-linux')) + with patch('salt.utils.path.which', lambda exe: exe): + self.assertTrue(salt.utils.path.which('this-binary-exists-under-linux')) def test_existing_binary_in_windows(self): with patch('os.access') as osaccess: @@ -50,10 +50,10 @@ class TestWhich(TestCase): # Let's patch os.environ to provide a custom PATH variable with patch.dict(os.environ, {'PATH': '/bin'}): # Let's also patch is_windows to return True - with patch('salt.utils.is_windows', lambda: True): + with patch('salt.utils.platform.is_windows', lambda: True): with patch('os.path.isfile', lambda x: True): self.assertEqual( - salt.utils.which('this-binary-exists-under-windows'), + salt.utils.path.which('this-binary-exists-under-windows'), # The returned path should return the .exe suffix '/bin/this-binary-exists-under-windows.EXE' ) @@ -72,11 +72,11 @@ class TestWhich(TestCase): # Let's patch os.environ to provide a custom PATH variable with patch.dict(os.environ, {'PATH': '/bin'}): # Let's also patch is_widows to return True - with patch('salt.utils.is_windows', lambda: True): + with patch('salt.utils.platform.is_windows', lambda: True): self.assertEqual( # Since we're passing the .exe suffix, the last True above # will not matter. 
The result will be None - salt.utils.which('this-binary-is-missing-in-windows.exe'), + salt.utils.path.which('this-binary-is-missing-in-windows.exe'), None ) @@ -102,10 +102,10 @@ class TestWhich(TestCase): 'PATHEXT': '.COM;.EXE;.BAT;.CMD;.VBS;' '.VBE;.JS;.JSE;.WSF;.WSH;.MSC;.PY'}): # Let's also patch is_windows to return True - with patch('salt.utils.is_windows', lambda: True): + with patch('salt.utils.platform.is_windows', lambda: True): with patch('os.path.isfile', lambda x: True): self.assertEqual( - salt.utils.which('this-binary-exists-under-windows'), + salt.utils.path.which('this-binary-exists-under-windows'), # The returned path should return the .exe suffix '/bin/this-binary-exists-under-windows.CMD' ) diff --git a/tests/unit/utils/test_yamlloader.py b/tests/unit/utils/test_yamlloader.py index da25c0b820..dd7870925c 100644 --- a/tests/unit/utils/test_yamlloader.py +++ b/tests/unit/utils/test_yamlloader.py @@ -5,6 +5,7 @@ # Import python libs from __future__ import absolute_import +import textwrap # Import Salt Libs from yaml.constructor import ConstructorError @@ -36,12 +37,11 @@ class YamlLoaderTestCase(TestCase): ''' Test parsing an ordinary path ''' - self.assertEqual( - self._render_yaml(b''' -p1: - - alpha - - beta'''), + self._render_yaml(textwrap.dedent('''\ + p1: + - alpha + - beta''')), {'p1': ['alpha', 'beta']} ) @@ -49,38 +49,37 @@ p1: ''' Test YAML anchors ''' - # Simple merge test self.assertEqual( - self._render_yaml(b''' -p1: &p1 - v1: alpha -p2: - <<: *p1 - v2: beta'''), + self._render_yaml(textwrap.dedent('''\ + p1: &p1 + v1: alpha + p2: + <<: *p1 + v2: beta''')), {'p1': {'v1': 'alpha'}, 'p2': {'v1': 'alpha', 'v2': 'beta'}} ) # Test that keys/nodes are overwritten self.assertEqual( - self._render_yaml(b''' -p1: &p1 - v1: alpha -p2: - <<: *p1 - v1: new_alpha'''), + self._render_yaml(textwrap.dedent('''\ + p1: &p1 + v1: alpha + p2: + <<: *p1 + v1: new_alpha''')), {'p1': {'v1': 'alpha'}, 'p2': {'v1': 'new_alpha'}} ) # Test merging of lists self.assertEqual( - self._render_yaml(b''' -p1: &p1 - v1: &v1 - - t1 - - t2 -p2: - v2: *v1'''), + self._render_yaml(textwrap.dedent('''\ + p1: &p1 + v1: &v1 + - t1 + - t2 + p2: + v2: *v1''')), {"p2": {"v2": ["t1", "t2"]}, "p1": {"v1": ["t1", "t2"]}} ) @@ -89,15 +88,27 @@ p2: Test that duplicates still throw an error ''' with self.assertRaises(ConstructorError): - self._render_yaml(b''' -p1: alpha -p1: beta''') + self._render_yaml(textwrap.dedent('''\ + p1: alpha + p1: beta''')) with self.assertRaises(ConstructorError): - self._render_yaml(b''' -p1: &p1 - v1: alpha -p2: - <<: *p1 - v2: beta - v2: betabeta''') + self._render_yaml(textwrap.dedent('''\ + p1: &p1 + v1: alpha + p2: + <<: *p1 + v2: beta + v2: betabeta''')) + + def test_yaml_with_unicode_literals(self): + ''' + Test proper loading of unicode literals + ''' + self.assertEqual( + self._render_yaml(textwrap.dedent('''\ + foo: + a: Д + b: {'a': u'\\u0414'}''')), + {'foo': {'a': u'\u0414', 'b': {'a': u'\u0414'}}} + ) diff --git a/tests/unit/utils/vmware_test/__init__.py b/tests/unit/utils/vmware/__init__.py similarity index 100% rename from tests/unit/utils/vmware_test/__init__.py rename to tests/unit/utils/vmware/__init__.py diff --git a/tests/unit/utils/vmware_test/test_cluster.py b/tests/unit/utils/vmware/test_cluster.py similarity index 91% rename from tests/unit/utils/vmware_test/test_cluster.py rename to tests/unit/utils/vmware/test_cluster.py index daf157709c..9a628c9b36 100644 --- a/tests/unit/utils/vmware_test/test_cluster.py +++ 
b/tests/unit/utils/vmware/test_cluster.py @@ -178,6 +178,18 @@ class CreateClusterTestCase(TestCase): self.mock_create_cluster_ex.assert_called_once_with( 'fake_cluster', self.mock_cluster_spec) + def test_create_cluster_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_dc.hostFolder.CreateClusterEx = MagicMock( + side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.create_cluster(self.mock_dc, 'fake_cluster', + self.mock_cluster_spec) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + def test_create_cluster_raise_vim_fault(self): exc = vim.fault.VimFault() exc.msg = 'VimFault msg' @@ -234,6 +246,17 @@ class UpdateClusterTestCase(TestCase): self.mock_reconfigure_compute_resource_task.assert_called_once_with( self.mock_cluster_spec, modify=True) + def test_reconfigure_compute_resource_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_cluster.ReconfigureComputeResource_Task = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vmware.update_cluster(self.mock_cluster, self.mock_cluster_spec) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + def test_reconfigure_compute_resource_raise_vim_fault(self): exc = vim.fault.VimFault() exc.msg = 'VimFault msg' diff --git a/tests/unit/utils/vmware_test/test_common.py b/tests/unit/utils/vmware/test_common.py similarity index 88% rename from tests/unit/utils/vmware_test/test_common.py rename to tests/unit/utils/vmware/test_common.py index 0de56813e5..fd50ed12d0 100644 --- a/tests/unit/utils/vmware_test/test_common.py +++ b/tests/unit/utils/vmware/test_common.py @@ -43,6 +43,19 @@ class WaitForTaskTestCase(TestCase): patcher.start() self.addCleanup(patcher.stop) + def test_first_task_info_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + mock_task = MagicMock() + type(mock_task).info = PropertyMock(side_effect=exc) + with self.assertRaises(excs.VMwareApiError) as excinfo: + salt.utils.vmware.wait_for_task(mock_task, + 'fake_instance_name', + 'task_type') + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + def test_first_task_info_raise_vim_fault(self): exc = vim.fault.VimFault() exc.msg = 'VimFault msg' @@ -65,6 +78,22 @@ class WaitForTaskTestCase(TestCase): 'task_type') self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + def test_inner_loop_task_info_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + mock_task = MagicMock() + mock_info1 = MagicMock() + type(mock_task).info = PropertyMock( + side_effect=[mock_info1, exc]) + type(mock_info1).state = PropertyMock(side_effect=['running', 'bad']) + with self.assertRaises(excs.VMwareApiError) as excinfo: + salt.utils.vmware.wait_for_task(mock_task, + 'fake_instance_name', + 'task_type') + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + def test_inner_loop_task_info_raise_vim_fault(self): exc = vim.fault.VimFault() exc.msg = 'VimFault msg' @@ -161,6 +190,22 @@ class WaitForTaskTestCase(TestCase): 'task_type') self.assertEqual(str(excinfo.exception), 'error exc') + def test_info_error_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + mock_task = MagicMock() + prop_mock_state = PropertyMock(return_value='error') + prop_mock_error = PropertyMock(side_effect=exc) + type(mock_task.info).state = prop_mock_state + type(mock_task.info).error = prop_mock_error + with self.assertRaises(excs.VMwareApiError) as excinfo: + salt.utils.vmware.wait_for_task(mock_task, + 'fake_instance_name', + 'task_type') + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + def test_info_error_vim_fault(self): exc = vim.fault.VimFault() exc.msg = 'VimFault msg' @@ -658,6 +703,19 @@ class GetContentTestCase(TestCase): # check destroy is called self.assertEqual(self.destroy_mock.call_count, 1) + def test_create_container_view_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.si_mock.content.viewManager.CreateContainerView = \ + MagicMock(side_effect=exc) + with patch('salt.utils.vmware.get_root_folder', + self.get_root_folder_mock): + with self.assertRaises(excs.VMwareApiError) as excinfo: + salt.utils.vmware.get_content(self.si_mock, self.obj_type_mock) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + def test_create_container_view_raise_vim_fault(self): exc = vim.fault.VimFault() exc.msg = 'VimFault msg' @@ -680,6 +738,19 @@ class GetContentTestCase(TestCase): salt.utils.vmware.get_content(self.si_mock, self.obj_type_mock) self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + def test_destroy_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.si_mock.content.viewManager.CreateContainerView = MagicMock( + return_value=MagicMock(Destroy=MagicMock(side_effect=exc))) + with patch('salt.utils.vmware.get_root_folder', + self.get_root_folder_mock): + with self.assertRaises(excs.VMwareApiError) as excinfo: + salt.utils.vmware.get_content(self.si_mock, self.obj_type_mock) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + def test_destroy_raise_vim_fault(self): exc = vim.fault.VimFault() exc.msg = 'VimFault msg' @@ -745,6 +816,17 @@ class GetContentTestCase(TestCase): [self.filter_spec_ret_mock]) self.assertEqual(ret, self.result_mock) + def test_retrieve_contents_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.si_mock.content.propertyCollector.RetrieveContents = \ + MagicMock(side_effect=exc) + with self.assertRaises(excs.VMwareApiError) as excinfo: + salt.utils.vmware.get_content(self.si_mock, self.obj_type_mock) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + def test_retrieve_contents_raise_vim_fault(self): exc = vim.fault.VimFault() exc.msg = 'VimFault msg' @@ -789,6 +871,16 @@ class GetRootFolderTestCase(TestCase): self.mock_si = MagicMock( RetrieveContent=MagicMock(return_value=self.mock_content)) + def test_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + type(self.mock_content).rootFolder = PropertyMock(side_effect=exc) + with self.assertRaises(excs.VMwareApiError) as excinfo: + salt.utils.vmware.get_root_folder(self.mock_si) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + def test_raise_vim_fault(self): exc = vim.fault.VimFault() exc.msg = 'VimFault msg' diff --git a/tests/unit/utils/vmware_test/test_connection.py b/tests/unit/utils/vmware/test_connection.py similarity index 93% rename from tests/unit/utils/vmware_test/test_connection.py rename to tests/unit/utils/vmware/test_connection.py index 812cb1231c..4a95e9b67f 100644 --- a/tests/unit/utils/vmware_test/test_connection.py +++ b/tests/unit/utils/vmware/test_connection.py @@ -21,7 +21,7 @@ import salt.exceptions as excs # Import Salt libraries import salt.utils.vmware # Import Third Party Libs -import salt.ext.six as six +from salt.ext import six try: from pyVmomi import vim, vmodl @@ -40,6 +40,11 @@ if sys.version_info[:3] > (2, 7, 8): else: SSL_VALIDATION = False +if hasattr(ssl, '_create_unverified_context'): + ssl_context = 'ssl._create_unverified_context' +else: + ssl_context = 'ssl._create_stdlib_context' + # Get Logging Started log = logging.getLogger(__name__) @@ -337,14 +342,14 @@ class PrivateGetServiceInstanceTestCase(TestCase): @skipIf(not SSL_VALIDATION, 'SSL validation is not enabled') def test_second_attempt_successful_connection(self): with patch('ssl.SSLContext', MagicMock()), \ - patch('ssl._create_unverified_context', MagicMock()): + patch(ssl_context, MagicMock()): exc = vim.fault.HostConnectFault() exc.msg = '[SSL: CERTIFICATE_VERIFY_FAILED]' mock_sc = MagicMock(side_effect=[exc, None]) mock_ssl = MagicMock() with patch('salt.utils.vmware.SmartConnect', mock_sc): - with patch('ssl._create_unverified_context', + with patch(ssl_context, mock_ssl): salt.utils.vmware._get_service_instance( @@ -378,7 +383,7 @@ class PrivateGetServiceInstanceTestCase(TestCase): @skipIf(not SSL_VALIDATION, 'SSL validation is not enabled') def test_third_attempt_successful_connection(self): with patch('ssl.SSLContext', MagicMock()), \ - patch('ssl._create_unverified_context', MagicMock()): + patch(ssl_context, MagicMock()): exc = vim.fault.HostConnectFault() exc.msg = '[SSL: CERTIFICATE_VERIFY_FAILED]' exc2 = Exception('certificate verify failed') @@ -387,9 +392,7 @@ class PrivateGetServiceInstanceTestCase(TestCase): mock_ssl_context = MagicMock() with patch('salt.utils.vmware.SmartConnect', mock_sc): - with patch('ssl._create_unverified_context', - mock_ssl_unverif): - + with patch(ssl_context, mock_ssl_unverif): with patch('ssl.SSLContext', mock_ssl_context): salt.utils.vmware._get_service_instance( @@ -473,7 +476,7 @@ class PrivateGetServiceInstanceTestCase(TestCase): @skipIf(not SSL_VALIDATION, 'SSL validation is not enabled') def test_second_attempt_unsuccsessful_connection_default_error(self): with patch('ssl.SSLContext', MagicMock()), \ - patch('ssl._create_unverified_context', MagicMock()): + patch(ssl_context, MagicMock()): exc = vim.fault.HostConnectFault() exc.msg = '[SSL: CERTIFICATE_VERIFY_FAILED]' exc2 = 
Exception('Exception') @@ -498,7 +501,7 @@ class PrivateGetServiceInstanceTestCase(TestCase): @skipIf(not SSL_VALIDATION, 'SSL validation is not enabled') def test_second_attempt_unsuccsessful_connection_vim_fault(self): with patch('ssl.SSLContext', MagicMock()), \ - patch('ssl._create_unverified_context', MagicMock()): + patch(ssl_context, MagicMock()): exc = vim.fault.HostConnectFault() exc.msg = '[SSL: CERTIFICATE_VERIFY_FAILED]' exc2 = vim.fault.VimFault() @@ -523,7 +526,7 @@ class PrivateGetServiceInstanceTestCase(TestCase): @skipIf(not SSL_VALIDATION, 'SSL validation is not enabled') def test_third_attempt_unsuccessful_connection_detault_error(self): with patch('ssl.SSLContext', MagicMock()), \ - patch('ssl._create_unverified_context', MagicMock()): + patch(ssl_context, MagicMock()): exc = vim.fault.HostConnectFault() exc.msg = '[SSL: CERTIFICATE_VERIFY_FAILED]' exc2 = Exception('certificate verify failed') @@ -548,7 +551,7 @@ class PrivateGetServiceInstanceTestCase(TestCase): @skipIf(not SSL_VALIDATION, 'SSL validation is not enabled') def test_third_attempt_unsuccessful_connection_vim_fault(self): with patch('ssl.SSLContext', MagicMock()), \ - patch('ssl._create_unverified_context', MagicMock()): + patch(ssl_context, MagicMock()): exc = vim.fault.HostConnectFault() exc.msg = '[SSL: CERTIFICATE_VERIFY_FAILED]' exc2 = Exception('certificate verify failed') @@ -597,7 +600,7 @@ class GetServiceInstanceTestCase(TestCase): None) def test_no_cached_service_instance_same_host_on_proxy(self): - with patch('salt.utils.is_proxy', MagicMock(return_value=True)): + with patch('salt.utils.platform.is_proxy', MagicMock(return_value=True)): # Service instance is uncached when using class default mock objs mock_get_si = MagicMock() with patch('salt.utils.vmware._get_service_instance', mock_get_si): @@ -688,6 +691,26 @@ class GetServiceInstanceTestCase(TestCase): self.assertEqual(mock_disconnect.call_count, 1) self.assertEqual(mock_get_si.call_count, 2) + def test_current_time_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + with patch('salt.utils.vmware._get_service_instance', + MagicMock(return_value=MagicMock( + CurrentTime=MagicMock(side_effect=exc)))): + with self.assertRaises(excs.VMwareApiError) as excinfo: + salt.utils.vmware.get_service_instance( + host='fake_host', + username='fake_username', + password='fake_password', + protocol='fake_protocol', + port=1, + mechanism='fake_mechanism', + principal='fake_principal', + domain='fake_domain') + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + def test_current_time_raise_vim_fault(self): exc = vim.fault.VimFault() exc.msg = 'VimFault msg' @@ -741,6 +764,17 @@ class DisconnectTestCase(TestCase): service_instance=self.mock_si) mock_disconnect.assert_called_once_with(self.mock_si) + def test_disconnect_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + with patch('salt.utils.vmware.Disconnect', MagicMock(side_effect=exc)): + with self.assertRaises(excs.VMwareApiError) as excinfo: + salt.utils.vmware.disconnect( + service_instance=self.mock_si) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + def test_disconnect_raise_vim_fault(self): exc = vim.fault.VimFault() exc.msg = 'VimFault msg' @@ -765,6 +799,17 @@ class DisconnectTestCase(TestCase): class IsConnectionToAVCenterTestCase(TestCase): '''Tests for salt.utils.vmware.is_connection_to_a_vcenter''' + def test_api_type_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + mock_si = MagicMock() + type(mock_si.content.about).apiType = PropertyMock(side_effect=exc) + with self.assertRaises(excs.VMwareApiError) as excinfo: + salt.utils.vmware.is_connection_to_a_vcenter(mock_si) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + def test_api_type_raise_vim_fault(self): exc = vim.fault.VimFault() exc.msg = 'VimFault msg' diff --git a/tests/unit/utils/vmware_test/test_datacenter.py b/tests/unit/utils/vmware/test_datacenter.py similarity index 92% rename from tests/unit/utils/vmware_test/test_datacenter.py rename to tests/unit/utils/vmware/test_datacenter.py index 86d1f0995a..c8045fb566 100644 --- a/tests/unit/utils/vmware_test/test_datacenter.py +++ b/tests/unit/utils/vmware/test_datacenter.py @@ -164,6 +164,19 @@ class CreateDatacenterTestCase(TestCase): vmware.create_datacenter(self.mock_si, 'fake_dc') self.mock_create_datacenter.assert_called_once_with('fake_dc') + def test_create_datacenter_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_root_folder = MagicMock( + CreateDatacenter=MagicMock(side_effect=exc)) + with patch('salt.utils.vmware.get_root_folder', + MagicMock(return_value=self.mock_root_folder)): + with self.assertRaises(VMwareApiError) as excinfo: + vmware.create_datacenter(self.mock_si, 'fake_dc') + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + def test_create_datacenter_raise_vim_fault(self): exc = vim.VimFault() exc.msg = 'VimFault msg' diff --git a/tests/unit/utils/vmware_test/test_host.py b/tests/unit/utils/vmware/test_host.py similarity index 100% rename from tests/unit/utils/vmware_test/test_host.py rename to tests/unit/utils/vmware/test_host.py
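
Note on the deprecation helpers exercised above: the warn_until/kwargs_warn_until tests moved into tests/unit/utils/test_versions.py assert two behaviours, a DeprecationWarning while the current version is still below the removal version, and a RuntimeError once that version has been reached so stale warnings get cleaned up. The following is a minimal, self-contained sketch of that behaviour built on the standard warnings module; it is not Salt's salt.utils.versions implementation, and the helper name and the (major, minor) tuple arguments are illustrative only.

import warnings


def warn_until_sketch(removal_version, message, current_version):
    # removal_version / current_version are (major, minor) tuples;
    # once the removal version is reached the warning itself is an error.
    if current_version >= removal_version:
        raise RuntimeError(
            'The warning "{0}" is supposed to be shown until version '
            '{1}.{2} is released. Current version is now {3}.{4}. '
            'Please remove the warning.'.format(
                message, removal_version[0], removal_version[1],
                current_version[0], current_version[1]))
    warnings.warn(message, DeprecationWarning, stacklevel=2)


if __name__ == '__main__':
    warnings.simplefilter('always', DeprecationWarning)
    # Still below the removal version: a DeprecationWarning is emitted.
    warn_until_sketch((0, 17), 'Deprecation Message!', current_version=(0, 16))
    # At the removal version: the RuntimeError the tests match with
    # assertRaisesRegex is raised instead.
    try:
        warn_until_sketch((0, 17), 'Deprecation Message!', current_version=(0, 17))
    except RuntimeError as exc:
        print(exc)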
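
Note on the YAML loader tests: the reworked tests/unit/utils/test_yamlloader.py cases feed their documents through textwrap.dedent so the inline YAML can be indented to match the surrounding Python source and still parse cleanly. A small standalone illustration of that pattern, with yaml.safe_load standing in for the _render_yaml helper the real tests call:

import textwrap

import yaml

# dedent() strips the common leading whitespace added for readability
# before the text reaches the YAML parser.
rendered = yaml.safe_load(textwrap.dedent('''\
    p1:
      - alpha
      - beta'''))
assert rendered == {'p1': ['alpha', 'beta']}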
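
Note on the vim.fault.NoPermission cases added under tests/unit/utils/vmware/: they all follow one pattern, a mocked attribute or call raises the fault via side_effect, and the code under test is expected to surface it as VMwareApiError with a 'Not enough permissions. Required privilege: ...' message. Below is a generic sketch of that mocking pattern using unittest.mock directly; the NoPermission and ApiError classes and the wait_for_task toy function are stand-ins, not the pyVmomi or salt.exceptions types used in the actual tests.

from unittest import TestCase, main
from unittest.mock import MagicMock, PropertyMock


class NoPermission(Exception):
    '''Stand-in for vim.fault.NoPermission.'''
    privilegeId = 'Fake privilege'


class ApiError(Exception):
    '''Stand-in for VMwareApiError.'''


def wait_for_task(task):
    # Toy version of the code under test: translate the low-level fault
    # into a friendlier error carrying the missing privilege.
    try:
        return task.info
    except NoPermission as exc:
        raise ApiError('Not enough permissions. Required privilege: '
                       '{0}'.format(exc.privilegeId))


class NoPermissionPatternTest(TestCase):
    def test_task_info_raises_no_permission(self):
        mock_task = MagicMock()
        # Accessing .info on the mock raises the fault, mirroring the
        # PropertyMock(side_effect=exc) usage in the hunks above.
        type(mock_task).info = PropertyMock(side_effect=NoPermission())
        with self.assertRaises(ApiError) as excinfo:
            wait_for_task(mock_task)
        self.assertIn('Fake privilege', str(excinfo.exception))


if __name__ == '__main__':
    main()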