diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 642eabb9eb..ff255b5aec 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -12,4 +12,10 @@ Remove this section if not relevant Yes/No +### Commits signed with GPG? + +Yes/No + Please review [Salt's Contributing Guide](https://docs.saltstack.com/en/latest/topics/development/contributing.html) for best practices. + +See GitHub's [page on GPG signing](https://help.github.com/articles/signing-commits-using-gpg/) for more information about signing commits with GPG. diff --git a/.github/stale.yml b/.github/stale.yml index 2aa60bdc61..798d9d32b2 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -1,8 +1,8 @@ # Probot Stale configuration file # Number of days of inactivity before an issue becomes stale -# 950 is approximately 2 years and 7 months -daysUntilStale: 950 +# 910 is approximately 2 years and 6 months +daysUntilStale: 910 # Number of days of inactivity before a stale issue is closed daysUntilClose: 7 diff --git a/doc/ref/clouds/all/index.rst b/doc/ref/clouds/all/index.rst index 15fb4b1ae3..3ec40b0ed7 100644 --- a/doc/ref/clouds/all/index.rst +++ b/doc/ref/clouds/all/index.rst @@ -34,6 +34,7 @@ Full list of Salt Cloud modules scaleway softlayer softlayer_hw + vagrant virtualbox vmware vultrpy diff --git a/doc/ref/clouds/all/salt.cloud.clouds.vagrant.rst b/doc/ref/clouds/all/salt.cloud.clouds.vagrant.rst new file mode 100644 index 0000000000..ba3dcbe2d7 --- /dev/null +++ b/doc/ref/clouds/all/salt.cloud.clouds.vagrant.rst @@ -0,0 +1,6 @@ +========================= +salt.cloud.clouds.vagrant +========================= + +.. 
automodule:: salt.cloud.clouds.vagrant + :members: diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst index 21f7c93fa7..713c09ef7d 100644 --- a/doc/ref/modules/all/index.rst +++ b/doc/ref/modules/all/index.rst @@ -299,6 +299,7 @@ execution modules openstack_mng openvswitch opkg + opsgenie oracle osquery out diff --git a/doc/ref/modules/all/salt.modules.opsgenie.rst b/doc/ref/modules/all/salt.modules.opsgenie.rst new file mode 100644 index 0000000000..e16999297e --- /dev/null +++ b/doc/ref/modules/all/salt.modules.opsgenie.rst @@ -0,0 +1,6 @@ +===================== +salt.modules.opsgenie +===================== + +.. automodule:: salt.modules.opsgenie + :members: diff --git a/doc/ref/states/all/index.rst b/doc/ref/states/all/index.rst index 0b681ace7e..b79d6068d7 100644 --- a/doc/ref/states/all/index.rst +++ b/doc/ref/states/all/index.rst @@ -188,6 +188,7 @@ state modules openstack_config openvswitch_bridge openvswitch_port + opsgenie pagerduty pagerduty_escalation_policy pagerduty_schedule @@ -267,6 +268,7 @@ state modules tuned uptime user + vagrant vault vbox_guest victorops diff --git a/doc/ref/states/all/salt.states.opsgenie.rst b/doc/ref/states/all/salt.states.opsgenie.rst new file mode 100644 index 0000000000..f5b0561f3e --- /dev/null +++ b/doc/ref/states/all/salt.states.opsgenie.rst @@ -0,0 +1,6 @@ +===================== +salt.states.opsgenie +===================== + +.. automodule:: salt.states.opsgenie + :members: diff --git a/doc/ref/states/all/salt.states.vagrant.rst b/doc/ref/states/all/salt.states.vagrant.rst new file mode 100644 index 0000000000..5d5b6e9f9c --- /dev/null +++ b/doc/ref/states/all/salt.states.vagrant.rst @@ -0,0 +1,6 @@ +=================== +salt.states.vagrant +=================== + +.. 
automodule:: salt.states.vagrant + :members: \ No newline at end of file diff --git a/doc/ref/states/parallel.rst b/doc/ref/states/parallel.rst index 8a69eba2df..9edf1750e4 100644 --- a/doc/ref/states/parallel.rst +++ b/doc/ref/states/parallel.rst @@ -6,7 +6,7 @@ Introduced in Salt version ``2017.7.0`` it is now possible to run select states in parallel. This is accomplished very easily by adding the ``parallel: True`` option to your state declaration: -.. code_block:: yaml +.. code-block:: yaml nginx: service.running: @@ -24,7 +24,7 @@ state to finish. Given this example: -.. code_block:: yaml +.. code-block:: yaml sleep 10: cmd.run: @@ -74,16 +74,16 @@ also complete. Things to be Careful of ======================= -Parallel States does not prevent you from creating parallel conflicts on your +Parallel States do not prevent you from creating parallel conflicts on your system. This means that if you start multiple package installs using Salt then the package manager will block or fail. If you attempt to manage the same file with multiple states in parallel then the result can produce an unexpected file. Make sure that the states you choose to run in parallel do not conflict, or -else, like in and parallel programming environment, the outcome may not be +else, like in any parallel programming environment, the outcome may not be what you expect. Doing things like just making all states run in parallel -will almost certinly result in unexpected behavior. +will almost certainly result in unexpected behavior. With that said, running states in parallel should be safe the vast majority of the time and the most likely culprit for unexpected behavior is running diff --git a/doc/topics/cloud/config.rst b/doc/topics/cloud/config.rst index 173ea4e692..e934a047d0 100644 --- a/doc/topics/cloud/config.rst +++ b/doc/topics/cloud/config.rst @@ -540,6 +540,17 @@ machines which are already installed, but not Salted. 
For more information about this driver and for configuration examples, please see the :ref:`Getting Started with Saltify ` documentation. +.. _config_vagrant: + +Vagrant +------- + +The Vagrant driver is a new, experimental driver for controlling a VagrantBox +virtual machine, and installing Salt on it. The target host machine must be a +working salt minion, which is controlled via the salt master using salt-api. +For more information, see +:ref:`Getting Started With Vagrant `. + Extending Profiles and Cloud Providers Configuration ==================================================== diff --git a/doc/topics/cloud/features.rst b/doc/topics/cloud/features.rst index b067dc9a30..f270198dee 100644 --- a/doc/topics/cloud/features.rst +++ b/doc/topics/cloud/features.rst @@ -38,26 +38,30 @@ These are features that are available for almost every cloud host. .. container:: scrollable - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - | |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Softlayer|Softlayer|Aliyun| - | |(Legacy)| |Ocean | | | | | | |(Legacy) | | |Hardware | | - +=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=========+=========+======+ - |Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |Full Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |Selective Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes | - 
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |List Sizes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |List Images |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |List Locations |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |create |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |destroy |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+ + | |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Vagrant|Softlayer|Softlayer|Aliyun| + | |(Legacy)| |Ocean | | | | | | |(Legacy) | | | |Hardware | | + +=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=======+=========+=========+======+ + |Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes | + 
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+ + |Full Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+ + |Selective Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+ + |List Sizes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+ + |List Images |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+ + |List Locations |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+ + |create |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |[1] |Yes |Yes |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+ + |destroy |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+ + +[1] Yes, if salt-api is enabled. + +[2] Always returns `{}`. Actions ======= @@ -70,46 +74,46 @@ instance name to be passed in. 
For example: .. container:: scrollable - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |Actions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Softlayer|Softlayer|Aliyun| - | |(Legacy)| |Ocean | | | | | | |(Legacy) | | |Hardware | | - +=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=========+=========+======+ - |attach_volume | | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |create_attach_volumes |Yes | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |del_tags |Yes | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |delvol_on_destroy | | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |detach_volume | | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |disable_term_protect |Yes | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |enable_term_protect |Yes | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |get_tags |Yes | | |Yes| | | | | | | | | | | - 
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |keepvol_on_destroy | | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |list_keypairs | | |Yes | | | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |rename |Yes | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |set_tags |Yes | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |show_delvol_on_destroy | | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |show_instance | | |Yes |Yes| | |Yes | |Yes | | |Yes |Yes |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |show_term_protect | | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |start |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |stop |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |take_action | | | | | |Yes | | | | | | | | | - 
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |Actions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun| + | |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | | + +=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+ + |attach_volume | | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |create_attach_volumes |Yes | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |del_tags |Yes | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |delvol_on_destroy | | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |detach_volume | | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |disable_term_protect |Yes | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |enable_term_protect |Yes | | |Yes| | | | | | | | | | | + 
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |get_tags |Yes | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |keepvol_on_destroy | | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |list_keypairs | | |Yes | | | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |rename |Yes | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |set_tags |Yes | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |show_delvol_on_destroy | | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |show_instance | | |Yes |Yes| | |Yes | |Yes | | |Yes |Yes |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |show_term_protect | | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |start |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |stop |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes | 
+ +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |take_action | | | | | |Yes | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ Functions ========= @@ -122,81 +126,83 @@ require the name of the provider to be passed in. For example: .. container:: scrollable - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |Functions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Softlayer|Softlayer|Aliyun| - | |(Legacy)| |Ocean | | | | | | |(Legacy) | | |Hardware | | - +=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=========+=========+======+ - |block_device_mappings |Yes | | | | | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |create_keypair | | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |create_volume | | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |delete_key | | | | | |Yes | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |delete_keypair | | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |delete_volume | | | |Yes| | | | | | | | | | | - 
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |get_image | | |Yes | | |Yes | | |Yes | | | | |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |get_ip | |Yes | | | | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |get_key | |Yes | | | | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |get_keyid | | |Yes | | | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |get_keypair | |Yes | | | | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |get_networkid | |Yes | | | | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |get_node | | | | | |Yes | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |get_password | |Yes | | | | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |get_size | | |Yes | | |Yes | | | | | | | |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |get_spot_config | | | |Yes| | | | | | | | | | | - 
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |get_subnetid | | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |iam_profile |Yes | | |Yes| | | | | | | | | |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |import_key | | | | | |Yes | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |key_list | | | | | |Yes | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |keyname |Yes | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |list_availability_zones| | | |Yes| | | | | | | | | |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |list_custom_images | | | | | | | | | | | |Yes | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |list_keys | | | | | |Yes | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |list_nodes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |list_nodes_full |Yes |Yes |Yes |Yes|Yes |Yes |Yes 
|Yes |Yes |Yes |Yes |Yes |Yes |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |list_nodes_select |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |list_vlans | | | | | | | | | | | |Yes |Yes | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |rackconnect | | | | | | | |Yes | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |reboot | | | |Yes| |Yes | | | | | | | |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |reformat_node | | | | | |Yes | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |securitygroup |Yes | | |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |securitygroupid | | | |Yes| | | | | | | | | |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |show_image | | | |Yes| | | | |Yes | | | | |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |show_key | | | | | |Yes | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - 
|show_keypair | | |Yes |Yes| | | | | | | | | | | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ - |show_volume | | | |Yes| | | | | | | | | |Yes | - +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+ + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |Functions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun| + | |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | | + +=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+ + |block_device_mappings |Yes | | | | | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |create_keypair | | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |create_volume | | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |delete_key | | | | | |Yes | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |delete_keypair | | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |delete_volume | | | |Yes| | | | | | | | | | | + 
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |get_image | | |Yes | | |Yes | | |Yes | | | | |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |get_ip | |Yes | | | | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |get_key | |Yes | | | | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |get_keyid | | |Yes | | | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |get_keypair | |Yes | | | | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |get_networkid | |Yes | | | | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |get_node | | | | | |Yes | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |get_password | |Yes | | | | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |get_size | | |Yes | | |Yes | | | | | | | |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |get_spot_config | | | |Yes| | | | | | | | | | | + 
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |get_subnetid | | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |iam_profile |Yes | | |Yes| | | | | | | | | |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |import_key | | | | | |Yes | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |key_list | | | | | |Yes | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |keyname |Yes | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |list_availability_zones| | | |Yes| | | | | | | | | |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |list_custom_images | | | | | | | | | | | |Yes | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |list_keys | | | | | |Yes | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |list_nodes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |list_nodes_full |Yes |Yes |Yes |Yes|Yes 
|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |list_nodes_select |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |list_vlans | | | | | | | | | | | |Yes |Yes | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |rackconnect | | | | | | | |Yes | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |reboot | | | |Yes| |Yes | | | | |[1] | | |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |reformat_node | | | | | |Yes | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |securitygroup |Yes | | |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |securitygroupid | | | |Yes| | | | | | | | | |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |show_image | | | |Yes| | | | |Yes | | | | |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |show_key | | | | | |Yes | | | | | | | | | + 
+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |show_keypair | | |Yes |Yes| | | | | | | | | | | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + |show_volume | | | |Yes| | | | | | | | | |Yes | + +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+ + +[1] Yes, if salt-api is enabled. diff --git a/doc/topics/cloud/index.rst b/doc/topics/cloud/index.rst index eff8e2aa8f..d95ca09269 100644 --- a/doc/topics/cloud/index.rst +++ b/doc/topics/cloud/index.rst @@ -129,6 +129,7 @@ Cloud Provider Specifics Getting Started With Scaleway Getting Started With Saltify Getting Started With SoftLayer + Getting Started With Vagrant Getting Started With Vexxhost Getting Started With Virtualbox Getting Started With VMware diff --git a/doc/topics/cloud/misc.rst b/doc/topics/cloud/misc.rst index 2e44aa5612..b485d28909 100644 --- a/doc/topics/cloud/misc.rst +++ b/doc/topics/cloud/misc.rst @@ -1,3 +1,5 @@ +.. _misc-salt-cloud-options: + ================================ Miscellaneous Salt Cloud Options ================================ diff --git a/doc/topics/cloud/saltify.rst b/doc/topics/cloud/saltify.rst index dda9801522..a0f058cd4d 100644 --- a/doc/topics/cloud/saltify.rst +++ b/doc/topics/cloud/saltify.rst @@ -4,7 +4,7 @@ Getting Started With Saltify ============================ -The Saltify driver is a new, experimental driver for installing Salt on existing +The Saltify driver is a driver for installing Salt on existing machines (virtual or bare metal). 
@@ -33,20 +33,29 @@ the salt-master: However, if you wish to use the more advanced capabilities of salt-cloud, such as rebooting, listing, and disconnecting machines, then the salt master must fill -the role usually performed by a vendor's cloud management system. In order to do -that, you must configure your salt master as a salt-api server, and supply credentials -to use it. (See ``salt-api setup`` below.) +the role usually performed by a vendor's cloud management system. The salt master +must be running on the salt-cloud machine, and created nodes must be connected to the +master. +Additional information about which configuration options apply to which actions +can be studied in the +:ref:`Saltify Module documentation ` +and the +:ref:`Miscellaneous Salt Cloud Options ` +document. Profiles ======== -Saltify requires a profile to be configured for each machine that needs Salt -installed. The initial profile can be set up at ``/etc/salt/cloud.profiles`` +Saltify requires a separate profile to be configured for each machine that +needs Salt installed [#]_. The initial profile can be set up at +``/etc/salt/cloud.profiles`` or in the ``/etc/salt/cloud.profiles.d/`` directory. Each profile requires both an ``ssh_host`` and an ``ssh_username`` key parameter as well as either an ``key_filename`` or a ``password``. +.. [#] Unless you are using a map file to provide the unique parameters. + Profile configuration example: .. code-block:: yaml @@ -68,40 +77,78 @@ The machine can now be "Salted" with the following command: This will install salt on the machine specified by the cloud profile, ``salt-this-machine``, and will give the machine the minion id of ``my-machine``. If the command was executed on the salt-master, its Salt -key will automatically be signed on the master. +key will automatically be accepted by the master. Once a salt-minion has been successfully installed on the instance, connectivity to it can be verified with Salt: .. 
code-block:: bash - salt my-machine test.ping - + salt my-machine test.version Destroy Options --------------- +.. versionadded:: Oxygen + For obvious reasons, the ``destroy`` action does not actually vaporize hardware. -If the salt master is connected using salt-api, it can tear down parts of -the client machines. It will remove the client's key from the salt master, -and will attempt the following options: +If the salt master is connected, it can tear down parts of the client machines. +It will remove the client's key from the salt master, +and can execute the following options: .. code-block:: yaml - remove_config_on_destroy: true # default: true # Deactivate salt-minion on reboot and - # delete the minion config and key files from its ``/etc/salt`` directory, - # NOTE: If deactivation is unsuccessful (older Ubuntu machines) then when + # delete the minion config and key files from its "/etc/salt" directory, + # NOTE: If deactivation was unsuccessful (older Ubuntu machines) then when # salt-minion restarts it will automatically create a new, unwanted, set - # of key files. The ``force_minion_config`` option must be used in that case. + # of key files. Use the "force_minion_config" option to replace them. - shutdown_on_destroy: false # default: false - # send a ``shutdown`` command to the client. + # last of all, send a "shutdown" command to the client. + +Wake On LAN +----------- .. versionadded:: Oxygen +In addition to connecting a hardware machine to a Salt master, +you have the option of sending a wake-on-LAN +`magic packet`_ +to start that machine running. + +.. _magic packet: https://en.wikipedia.org/wiki/Wake-on-LAN + +The "magic packet" must be sent by an existing salt minion which is on +the same network segment as the target machine. (Or your router +must be set up especially to route WoL packets.) Your target machine +must be set up to listen for WoL and to respond appropriately. 
+ +You must provide the Salt node id of the machine which will send +the WoL packet \(parameter ``wol_sender_node``\), and +the hardware MAC address of the machine you intend to wake, +\(parameter ``wake_on_lan_mac``\). If both parameters are defined, +the WoL will be sent. The cloud master will then sleep a while +\(parameter ``wol_boot_wait``) to give the target machine time to +boot up before we start probing its SSH port to begin deploying +Salt to it. The default sleep time is 30 seconds. + +.. code-block:: yaml + + # /etc/salt/cloud.profiles.d/saltify.conf + + salt-this-machine: + ssh_host: 12.34.56.78 + ssh_username: root + key_filename: '/etc/salt/mysshkey.pem' + provider: my-saltify-config + wake_on_lan_mac: '00:e0:4c:70:2a:b2' # found with ifconfig + wol_sender_node: bevymaster # its on this network segment + wol_boot_wait: 45 # seconds to sleep + Using Map Files --------------- The settings explained in the section above may also be set in a map file. An @@ -165,67 +212,3 @@ Return values: - ``True``: Credential verification succeeded - ``False``: Credential verification succeeded - ``None``: Credential verification was not attempted. - -Provisioning salt-api -===================== - -In order to query or control minions it created, saltify needs to send commands -to the salt master. It does that using the network interface to salt-api. - -The salt-api is not enabled by default. The following example will provide a -simple installation. - -.. code-block:: yaml - - # file /etc/salt/cloud.profiles.d/my_saltify_profiles.conf - hw_41: # a theoretical example hardware machine - ssh_host: 10.100.9.41 # the hard address of your target - ssh_username: vagrant # a user name which has passwordless sudo - password: vagrant # on your target machine - provider: my_saltify_provider - - -.. 
code-block:: yaml - - # file /etc/salt/cloud.providers.d/saltify_provider.conf - my_saltify_provider: - driver: saltify - eauth: pam - username: vagrant # supply some sudo-group-member's name - password: vagrant # and password on the salt master - minion: - master: 10.100.9.5 # the hard address of the master - - .. code-block:: yaml - - # file /etc/salt/master.d/auth.conf - # using salt-api ... members of the 'sudo' group can do anything ... - external_auth: - pam: - sudo%: - - .* - - '@wheel' - - '@runner' - - '@jobs' - - .. code-block:: yaml - - # file /etc/salt/master.d/api.conf - # see https://docs.saltstack.com/en/latest/ref/netapi/all/salt.netapi.rest_cherrypy.html - rest_cherrypy: - host: localhost - port: 8000 - ssl_crt: /etc/pki/tls/certs/localhost.crt - ssl_key: /etc/pki/tls/certs/localhost.key - thread_pool: 30 - socket_queue_size: 10 - - -Start your target machine as a Salt minion named "node41" by: - -.. code-block:: bash - - $ sudo salt-cloud -p hw_41 node41 - diff --git a/doc/topics/cloud/softlayer.rst b/doc/topics/cloud/softlayer.rst index 468dca283d..d073f39b98 100644 --- a/doc/topics/cloud/softlayer.rst +++ b/doc/topics/cloud/softlayer.rst @@ -94,6 +94,8 @@ Set up an initial profile at ``/etc/salt/cloud.profiles``: private_vlan: 396 private_network: True private_ssh: True + # Use a dedicated host instead of cloud + dedicated_host_id: 1234 # May be used _instead_of_ image global_identifier: 320d8be5-46c0-dead-cafe-13e3c51 @@ -334,9 +336,21 @@ it can be verified with Salt: # salt 'myserver.example.com' test.ping - -Cloud Profiles +Dedicated Host ~~~~~~~~~~~~~~ +SoftLayer allows the creation of new VMs in a dedicated host. This means that +you can order and pay a fixed amount for a bare metal dedicated host and use +it to provision as many VMs as you can fit in there. If you want your VMs to +be launched in a dedicated host, instead of SoftLayer's cloud, set the +``dedicated_host_id`` parameter in your profile. 
+ +dedicated_host_id +----------------- +The id of the dedicated host where the VMs should be created. If not set, VMs +will be created in Softlayer's cloud instead. + +Bare metal Profiles +~~~~~~~~~~~~~~~~~~~ Set up an initial profile at ``/etc/salt/cloud.profiles``: .. code-block:: yaml diff --git a/doc/topics/cloud/vagrant.rst b/doc/topics/cloud/vagrant.rst new file mode 100644 index 0000000000..466544e4b3 --- /dev/null +++ b/doc/topics/cloud/vagrant.rst @@ -0,0 +1,268 @@ +.. _getting-started-with-vagrant: + +============================ +Getting Started With Vagrant +============================ + +The Vagrant driver is a new, experimental driver for spinning up a VagrantBox +virtual machine, and installing Salt on it. + +Dependencies +============ +The Vagrant driver itself has no external dependencies. + +The machine which will host the VagrantBox must be an already existing minion +of the cloud server's Salt master. +It must have Vagrant_ installed, and a Vagrant-compatible virtual machine engine, +such as VirtualBox_. +(Note: The Vagrant driver does not depend on the salt-cloud VirtualBox driver in any way.) + +.. _Vagrant: https://www.vagrantup.com/ +.. _VirtualBox: https://www.virtualbox.org/ + +\[Caution: The version of Vagrant packaged for ``apt install`` in Ubuntu 16.04 will not connect a bridged +network adapter correctly. Use a version downloaded directly from the web site.\] + +Include the Vagrant guest editions plugin: +``vagrant plugin install vagrant-vbguest``. + +Configuration +============= + +Configuration of the client virtual machine (using VirtualBox, VMware, etc) +will be done by Vagrant as specified in the Vagrantfile on the host machine. + +Salt-cloud will push the commands to install and provision a salt minion on +the virtual machine, so you need not (perhaps **should** not) provision salt +in your Vagrantfile, in most cases. 
+ +If, however, your cloud master cannot open an SSH connection to the child VM, +you may **need** to let Vagrant provision the VM with Salt, and use some other +method (such as passing a pillar dictionary to the VM) to pass the master's +IP address to the VM. The VM can then attempt to reach the salt master in the +usual way for non-cloud minions. Specify the profile configuration argument +as ``deploy: False`` to prevent the cloud master from trying. + +.. code-block:: yaml + + # Note: This example is for /etc/salt/cloud.providers file or any file in + # the /etc/salt/cloud.providers.d/ directory. + + my-vagrant-config: + minion: + master: 111.222.333.444 + provider: vagrant + + +Because the Vagrant driver needs a place to store the mapping between the +node name you use for Salt commands and the Vagrantfile which controls the VM, +you must configure your salt minion as a Salt sdb server. +(See `host provisioning example`_ below.) + +Profiles +======== + +Vagrant requires a profile to be configured for each machine that needs Salt +installed. The initial profile can be set up at ``/etc/salt/cloud.profiles`` +or in the ``/etc/salt/cloud.profiles.d/`` directory. + +Each profile requires a ``vagrantfile`` parameter. If the Vagrantfile has +definitions for `multiple machines`_ then you need a ``machine`` parameter. + +.. _`multiple machines`: https://www.vagrantup.com/docs/multi-machine/ + +Salt-cloud uses SSH to provision the minion. There must be a routable path +from the cloud master to the VM. Usually, you will want to use +a bridged network adapter for SSH. The address may not be known until +DHCP assigns it. If ``ssh_host`` is not defined, and ``target_network`` +is defined, the driver will attempt to read the address from the output +of an ``ifconfig`` command. Lacking either setting, +the driver will try to use the value Vagrant returns as its ``ssh_host``, +which will work only if the cloud master is running somewhere on the same host. 
+ +The ``target_network`` setting should be used +to identify the IP network your bridged adapter is expected to appear on. +Use CIDR notation, like ``target_network: '2001:DB8::/32'`` +or ``target_network: '192.0.2.0/24'``. + +Profile configuration example: + +.. code-block:: yaml + + # /etc/salt/cloud.profiles.d/vagrant.conf + + vagrant-machine: + host: my-vhost # the Salt id of the virtual machine's host computer. + provider: my-vagrant-config + cwd: /srv/machines # the path to your Virtualbox file. + vagrant_runas: my-username # the username who defined the Vagrantbox on the host + # vagrant_up_timeout: 300 # (seconds) timeout for cmd.run of the "vagrant up" command + # vagrant_provider: '' # option for "vagrant up" like: "--provider vmware_fusion" + # ssh_host: None # "None" means try to find the routable IP address from "ifconfig" + # target_network: None # Expected CIDR address of your bridged network + # force_minion_config: false # Set "true" to re-purpose an existing VM + +The machine can now be created and configured with the following command: + +.. code-block:: bash + + salt-cloud -p vagrant-machine my-id + +This will create the machine specified by the cloud profile +``vagrant-machine``, and will give the machine the minion id of +``my-id``. If the cloud master is also the salt-master, its Salt +key will automatically be accepted on the master. + +Once a salt-minion has been successfully installed on the instance, connectivity +to it can be verified with Salt: + +.. code-block:: bash + + salt my-id test.ping + +.. _host provisioning example: + +Provisioning a Vagrant cloud host (example) +=========================================== + +In order to query or control minions it created, each host +minion needs to track the Salt node names associated with +any guest virtual machines on it. +It does that using a Salt sdb database. + +The Salt sdb is not configured by default. The following example shows a +simple installation. 
+ +This example assumes: + +- you are on a large network using the 10.x.x.x IP address space +- your Salt master's Salt id is "bevymaster" +- it will also be your salt-cloud controller +- it is at hardware address 10.124.30.7 +- it is running a recent Debian family Linux (raspbian) +- your workstation is a Salt minion of bevymaster +- your workstation's minion id is "my_laptop" +- VirtualBox has been installed on "my_laptop" (apt install is okay) +- Vagrant was installed from vagrantup.com. (not the 16.04 Ubuntu apt) +- "my_laptop" has done "vagrant plugin install vagrant-vbguest" +- the VM you want to start is on "my_laptop" at "/home/my_username/Vagrantfile" + +.. code-block:: yaml + + # file /etc/salt/minion.d/vagrant_sdb.conf on host computer "my_laptop" + # -- this sdb database is required by the Vagrant module -- + vagrant_sdb_data: # The sdb database must have this name. + driver: sqlite3 # Let's use SQLite to store the data ... + database: /var/cache/salt/vagrant.sqlite # ... in this file ... + table: sdb # ... using this table name. + create_table: True # if not present + +Remember to re-start your minion after changing its configuration files... + + ``sudo systemctl restart salt-minion`` + +.. code-block:: ruby + + # -*- mode: ruby -*- + # file /home/my_username/Vagrantfile on host computer "my_laptop" + BEVY = "bevy1" + DOMAIN = BEVY + ".test" # .test is an ICANN reserved non-public TLD + + # must supply a list of names to avoid Vagrant asking for interactive input + def get_good_ifc() # try to find a working Ubuntu network adapter name + addr_infos = Socket.getifaddrs + addr_infos.each do |info| + a = info.addr + if a and a.ip? and not a.ip_address.start_with?("127.") + return info.name + end + end + return "eth0" # fall back to an old reliable name + end + + Vagrant.configure(2) do |config| + config.ssh.forward_agent = true # so you can use git ssh://... + + # add a bridged network interface. 
(try to detect name, then guess MacOS names, too) + interface_guesses = [get_good_ifc(), 'en0: Ethernet', 'en1: Wi-Fi (AirPort)'] + config.vm.network "public_network", bridge: interface_guesses + if ARGV[0] == "up" + puts "Trying bridge network using interfaces: #{interface_guesses}" + end + config.vm.provision "shell", inline: "ip address", run: "always" # make user feel good + + # . . . . . . . . . . . . Define machine QUAIL1 . . . . . . . . . . . . . . + config.vm.define "quail1", primary: true do |quail_config| + quail_config.vm.box = "boxesio/xenial64-standard" # a public VMware & Virtualbox box + quail_config.vm.hostname = "quail1." + DOMAIN # supply a name in our bevy + quail_config.vm.provider "virtualbox" do |v| + v.memory = 1024 # limit memory for the virtual box + v.cpus = 1 + v.linked_clone = true # make a soft copy of the base Vagrant box + v.customize ["modifyvm", :id, "--natnet1", "192.168.128.0/24"] # do not use 10.x network for NAT + end + end + end + +.. code-block:: yaml + + # file /etc/salt/cloud.profiles.d/my_vagrant_profiles.conf on bevymaster + q1: + host: my_laptop # the Salt id of your virtual machine host + machine: quail1 # a machine name in the Vagrantfile (if not primary) + vagrant_runas: my_username # owner of Vagrant box files on "my_laptop" + cwd: '/home/my_username' # the path (on "my_laptop") of the Vagrantfile + provider: my_vagrant_provider # name of entry in provider.conf file + target_network: '10.0.0.0/8' # VM external address will be somewhere here + +.. code-block:: yaml + + # file /etc/salt/cloud.providers.d/vagrant_provider.conf on bevymaster + my_vagrant_provider: + driver: vagrant + minion: + master: 10.124.30.7 # the hard address of the master + + +Create and use your new Salt minion +----------------------------------- + +- Typing on the Salt master computer ``bevymaster``, tell it to create a new minion named ``v1`` using profile ``q1``... + +.. 
code-block:: bash + + sudo salt-cloud -p q1 v1 + sudo salt v1 network.ip_addrs + [ you get a list of IP addresses, including the bridged one ] + +- logged in to your laptop (or some other computer known to GitHub)... + + \[NOTE:\] if you are using MacOS, you need to type ``ssh-add -K`` after each boot, + unless you use one of the methods in `this gist`_. + +.. _this gist: https://github.com/jirsbek/SSH-keys-in-macOS-Sierra-keychain + +.. code-block:: bash + + ssh -A vagrant@< the bridged network address > + # [ or, if you are at /home/my_username/ on my_laptop ] + vagrant ssh quail1 + +- then typing on your new node "v1" (a.k.a. quail1.bevy1.test)... + +.. code-block:: bash + + password: vagrant + # [ stuff types out ... ] + + ls -al /vagrant + # [ should be shared /home/my_username from my_laptop ] + + # you can access other network facilities using the ssh authorization + # as recorded in your ~/.ssh/ directory on my_laptop ... + + sudo apt update + sudo apt install git + git clone ssh://git@github.com/yourID/your_project + # etc... + diff --git a/doc/topics/installation/windows.rst b/doc/topics/installation/windows.rst index 06219e73e6..f2ec33e122 100644 --- a/doc/topics/installation/windows.rst +++ b/doc/topics/installation/windows.rst @@ -45,11 +45,27 @@ but leave any existing config, cache, and PKI information. Salt Minion Installation ======================== +If the system is missing the appropriate version of the Visual C++ +Redistributable (vcredist) the user will be prompted to install it. Click ``OK`` +to install the vcredist. Click ``Cancel`` to abort the installation without +making modifications to the system. + +If Salt is already installed on the system the user will be prompted to remove +the previous installation. Click ``OK`` to uninstall Salt without removing the +configuration, PKI information, or cached files. Click ``Cancel`` to abort the +installation before making any modifications to the system. 
+ After the Welcome and the License Agreement, the installer asks for two bits of information to configure the minion; the master hostname and the minion name. -The installer will update the minion config with these options. If the installer -finds an existing minion config file, these fields will be populated with values -from the existing config. +The installer will update the minion config with these options. + +If the installer finds an existing minion config file, these fields will be +populated with values from the existing config, but they will be grayed out. +There will also be a checkbox to use the existing config. If you continue, the +existing config will be used. If the checkbox is unchecked, default values are +displayed and can be changed. If you continue, the existing config file in +``c:\salt\conf`` will be removed along with the ``c:\salt\conf\minion.d`` +directory. The values entered will be used with the default config. The final page allows you to start the minion service and optionally change its startup type. By default, the minion is set to ``Automatic``. You can change the @@ -71,11 +87,6 @@ be managed there or from the command line like any other Windows service. sc start salt-minion net start salt-minion -.. note:: - If the minion won't start, you may need to install the Microsoft Visual C++ - 2008 x64 SP1 redistributable. Allow all Windows updates to run salt-minion - smoothly. - Installation Prerequisites -------------------------- @@ -96,15 +107,29 @@ Minion silently: ========================= ===================================================== Option Description ========================= ===================================================== -``/minion-name=`` A string value to set the minion name. Default is - 'hostname' ``/master=`` A string value to set the IP address or host name of - the master. Default value is 'salt' + the master. Default value is 'salt'. You can pass a + single master or a comma-separated list of masters. 
+ Setting the master will replace existing config with + the default config. Cannot be used in conjunction + with ``/use-existing-config`` +``/minion-name=`` A string value to set the minion name. Default is + 'hostname'. Setting the minion name will replace + existing config with the default config. Cannot be + used in conjunction with ``/use-existing-config`` ``/start-minion=`` Either a 1 or 0. '1' will start the salt-minion service, '0' will not. Default is to start the - service after installation. + service after installation ``/start-minion-delayed`` Set the minion start type to ``Automatic (Delayed Start)`` +``/use-existing-config`` Either a 1 or 0. '1' will use the existing config if + present. '0' will replace existing config with the + default config. Default is '1'. If this is set to '1' + values passed in ``/master`` and ``/minion-name`` + will be ignored +``/S`` Runs the installation silently. Uses the above + settings or the defaults +``/?`` Displays command line help ========================= ===================================================== .. note:: diff --git a/doc/topics/releases/2017.7.2.rst b/doc/topics/releases/2017.7.2.rst index e311827bcf..65ced85367 100644 --- a/doc/topics/releases/2017.7.2.rst +++ b/doc/topics/releases/2017.7.2.rst @@ -14,23 +14,33 @@ CVE-2017-14695 Directory traversal vulnerability in minion id validation in Salt CVE-2017-14696 Remote Denial of Service with a specially crafted authentication request. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net) -Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs): - Known Issues ============ On 2017.7.2 when using salt-api and cherrypy version 5.6.0, issue `#43581`_ will occur when starting the salt-api service. We have patched the cherry-py packages for python-cherrypy-5.6.0-2 from repo.saltstack.com. 
If you are using python-cherrypy-5.6.0-1 please ensure to run `yum install python-cherrypy` to install the new patched version. -*Generated at: 2017-09-26T21:06:19Z* +Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs): -Statistics: +*Generated at: 2017-10-02T21:10:14Z* -- Total Merges: **326** -- Total Issue references: **133** -- Total PR references: **389** +Statistics +========== -Changes: +- Total Merges: **328** +- Total Issue references: **134** +- Total PR references: **391** +Changes +======= + +- **PR** `#43868`_: (*rallytime*) Back-port `#43847`_ to 2017.7.2 + * Fix to module.run + +- **PR** `#43756`_: (*gtmanfred*) split build and install for pkg osx + @ *2017-09-26T20:51:28Z* + + * 88414d5 Merge pull request `#43756`_ from gtmanfred/2017.7.2 + * f7df41f split build and install for pkg osx - **PR** `#43585`_: (*rallytime*) Back-port `#43330`_ to 2017.7.2 @ *2017-09-19T17:33:34Z* @@ -3110,6 +3120,12 @@ Changes: .. _`#480`: https://github.com/saltstack/salt/issues/480 .. _`#495`: https://github.com/saltstack/salt/issues/495 .. _`#43581`: https://github.com/saltstack/salt/issues/43581 +.. _`#43756`: https://github.com/saltstack/salt/pull/43756 +.. _`#43847`: https://github.com/saltstack/salt/pull/43847 +.. _`#43868`: https://github.com/saltstack/salt/pull/43868 +.. _`#475`: https://github.com/saltstack/salt/issues/475 +.. _`#480`: https://github.com/saltstack/salt/issues/480 +.. _`#495`: https://github.com/saltstack/salt/issues/495 .. _`bp-37424`: https://github.com/saltstack/salt/pull/37424 .. _`bp-39366`: https://github.com/saltstack/salt/pull/39366 .. 
_`bp-41543`: https://github.com/saltstack/salt/pull/41543 diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index ffe489b903..f3f9d53fe0 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -55,6 +55,7 @@ The new grains added are: * ``fc_wwn``: Show all fibre channel world wide port names for a host * ``iscsi_iqn``: Show the iSCSI IQN name for a host +* ``swap_total``: Show the configured swap_total for Linux, *BSD, OS X and Solaris/SunOS Grains Changes -------------- @@ -116,6 +117,31 @@ The ``state_output`` parameter now supports ``full_id``, ``changes_id`` and ``te Just like ``mixed_id``, these use the state ID as name in the highstate output. For more information on these output modes, see the docs for the :mod:`Highstate Outputter `. +Windows Installer: Changes to existing config handling +------------------------------------------------------ +Behavior with existing configuration has changed. With previous installers the +existing config was used and the master and minion id could be modified via the +installer. It was problematic in that it didn't account for configuration that +may be defined in the ``minion.d`` directory. This change gives you the option +via a checkbox to either use the existing config without changes or the default +config using values you pass to the installer. If you choose to use the existing +config then no changes are made. If not, the existing config is deleted, to +include the ``minion.d`` directory, and the default config is used. A +command-line switch (``/use-existing-config``) has also been added to control +this behavior. + +Windows Installer: Multi-master configuration +--------------------------------------------- +The installer now has the ability to apply a multi-master configuration either +from the gui or the command line. The ``master`` field in the gui can accept +either a single master or a comma-separated list of masters. 
The command-line +switch (``/master=``) can accept the same. + +Windows Installer: Command-line help +------------------------------------ +The Windows installer will now display command-line help when a help switch +(``/?``) is passed. + Salt Cloud Features ------------------- @@ -138,6 +164,56 @@ file. For example: These commands will run in sequence **before** the bootstrap script is executed. +New salt-cloud Grains +===================== + +When salt cloud creates a new minion, it will now add grain information +to the minion configuration file, identifying the resources originally used +to create it. + +The generated grain information will appear similar to: + +.. code-block:: yaml + grains: + salt-cloud: + driver: ec2 + provider: my_ec2:ec2 + profile: ec2-web +The generation of salt-cloud grains can be suppressed by the +option ``enable_cloud_grains: 'False'`` in the cloud configuration file. + +Upgraded Saltify Driver +======================= + +The salt-cloud Saltify driver is used to provision machines which +are not controlled by a dedicated cloud supervisor (such as typical hardware +machines) by pushing a salt-bootstrap command to them and accepting them on +the salt master. Creation of a node has been its only function and no other +salt-cloud commands were implemented. + +With this upgrade, it can use the salt-api to provide advanced control, +such as rebooting a machine, querying it along with conventional cloud minions, +and, ultimately, disconnecting it from its master. + +After disconnection from ("destroying" on) one master, a machine can be +re-purposed by connecting to ("creating" on) a subsequent master. + +New Vagrant Driver +================== + +The salt-cloud Vagrant driver brings virtual machines running in a limited +environment, such as a programmer's workstation, under salt-cloud control. +This can be useful for experimentation, instruction, or testing salt configurations. 
+ +Using salt-api on the master, and a salt-minion running on the host computer, +the Vagrant driver can create (``vagrant up``), restart (``vagrant reload``), +and destroy (``vagrant destroy``) VMs, as controlled by salt-cloud profiles +which designate a ``Vagrantfile`` on the host machine. + +The master can be a very limited machine, such as a Raspberry Pi, or a small +VagrantBox VM. + + New pillar/master_tops module called saltclass ---------------------------------------------- @@ -344,7 +420,7 @@ Solaris Logical Domains In Virtual Grain ---------------------------------------- Support has been added to the ``virtual`` grain for detecting Solaris LDOMs -running on T-Series SPARC hardware. The ``virtual_subtype`` grain is +running on T-Series SPARC hardware. The ``virtual_subtype`` grain is populated as a list of domain roles. Lists of comments in state returns @@ -359,7 +435,7 @@ Beacon configuration changes In order to remain consistent and to align with other Salt components such as states, support for configuring beacons using dictionary based configuration has been deprecated -in favor of list based configuration. All beacons have a validation function which will +in favor of list based configuration. All beacons have a validation function which will check the configuration for the correct format and only load if the validation passes. - ``avahi_announce`` beacon @@ -1020,3 +1096,10 @@ The ``version.py`` file had the following changes: Warnings for moving away from the ``env`` option were removed. ``saltenv`` should be used instead. The removal of these warnings does not have a behavior change. Only the warning text was removed. + +Sentry Log Handler +------------------ + +Configuring sentry raven python client via ``project``, ``servers``, ``public_key +and ``secret_key`` is deprecated and won't work with sentry clients > 3.0. +Instead, the ``dsn`` config param must be used. 
diff --git a/doc/topics/tutorials/gitfs.rst b/doc/topics/tutorials/gitfs.rst index cc0b1df9f8..990890ecb3 100644 --- a/doc/topics/tutorials/gitfs.rst +++ b/doc/topics/tutorials/gitfs.rst @@ -27,7 +27,7 @@ Installing Dependencies ======================= Both pygit2_ and GitPython_ are supported Python interfaces to git. If -compatible versions of both are installed, pygit2_ will preferred. In these +compatible versions of both are installed, pygit2_ will be preferred. In these cases, GitPython_ can be forced using the :conf_master:`gitfs_provider` parameter in the master config file. diff --git a/pkg/osx/build.sh b/pkg/osx/build.sh index 7850d48cd8..fcf4b4e061 100755 --- a/pkg/osx/build.sh +++ b/pkg/osx/build.sh @@ -88,7 +88,8 @@ sudo $PKGRESOURCES/build_env.sh $PYVER echo -n -e "\033]0;Build: Install Salt\007" sudo rm -rf $SRCDIR/build sudo rm -rf $SRCDIR/dist -sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s" install +sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s" +sudo $PYTHON $SRCDIR/setup.py install ############################################################################ # Build Package diff --git a/pkg/windows/installer/Salt-Minion-Setup.nsi b/pkg/windows/installer/Salt-Minion-Setup.nsi index a8efca2101..6283d057a7 100644 --- a/pkg/windows/installer/Salt-Minion-Setup.nsi +++ b/pkg/windows/installer/Salt-Minion-Setup.nsi @@ -11,6 +11,7 @@ !define PRODUCT_UNINST_KEY "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME}" !define PRODUCT_UNINST_KEY_OTHER "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME_OTHER}" !define PRODUCT_UNINST_ROOT_KEY "HKLM" +!define OUTFILE "Salt-Minion-${PRODUCT_VERSION}-Py${PYTHON_VERSION}-${CPUARCH}-Setup.exe" # Import Libraries !include "MUI2.nsh" @@ -52,6 +53,15 @@ ${StrStrAdv} Pop "${ResultVar}" !macroend +# Part of the Explode function for Strings +!define Explode "!insertmacro Explode" +!macro Explode Length Separator String + Push `${Separator}` + Push `${String}` + Call Explode + 
Pop `${Length}` +!macroend + ############################################################################### # Configure Pages, Ordering, and Configuration @@ -92,10 +102,17 @@ Var Dialog Var Label Var CheckBox_Minion_Start Var CheckBox_Minion_Start_Delayed +Var ConfigMasterHost Var MasterHost Var MasterHost_State +Var ConfigMinionName Var MinionName Var MinionName_State +Var ExistingConfigFound +Var UseExistingConfig +Var UseExistingConfig_State +Var WarningExistingConfig +Var WarningDefaultConfig Var StartMinion Var StartMinionDelayed Var DeleteInstallDir @@ -115,27 +132,105 @@ Function pageMinionConfig Abort ${EndIf} + # Master IP or Hostname Dialog Control ${NSD_CreateLabel} 0 0 100% 12u "Master IP or Hostname:" Pop $Label ${NSD_CreateText} 0 13u 100% 12u $MasterHost_State Pop $MasterHost + # Minion ID Dialog Control ${NSD_CreateLabel} 0 30u 100% 12u "Minion Name:" Pop $Label ${NSD_CreateText} 0 43u 100% 12u $MinionName_State Pop $MinionName + # Use Existing Config Checkbox + ${NSD_CreateCheckBox} 0 65u 100% 12u "&Use Existing Config" + Pop $UseExistingConfig + ${NSD_OnClick} $UseExistingConfig pageMinionConfig_OnClick + + # Add Existing Config Warning Label + ${NSD_CreateLabel} 0 80u 100% 60u "The values above are taken from an \ + existing configuration found in `c:\salt\conf\minion`. Configuration \ + settings defined in the `minion.d` directories, if they exist, are not \ + shown here.$\r$\n\ + $\r$\n\ + Clicking `Install` will leave the existing config unchanged." + Pop $WarningExistingConfig + CreateFont $0 "Arial" 10 500 /ITALIC + SendMessage $WarningExistingConfig ${WM_SETFONT} $0 1 + SetCtlColors $WarningExistingConfig 0xBB0000 transparent + + # Add Default Config Warning Label + ${NSD_CreateLabel} 0 80u 100% 60u "Clicking `Install` will remove the \ + the existing minion config file and remove the minion.d directories. \ + The values above will be used in the new default config." 
+ Pop $WarningDefaultConfig + CreateFont $0 "Arial" 10 500 /ITALIC + SendMessage $WarningDefaultConfig ${WM_SETFONT} $0 1 + SetCtlColors $WarningDefaultConfig 0xBB0000 transparent + + # If no existing config found, disable the checkbox and stuff + # Set UseExistingConfig_State to 0 + ${If} $ExistingConfigFound == 0 + StrCpy $UseExistingConfig_State 0 + ShowWindow $UseExistingConfig ${SW_HIDE} + ShowWindow $WarningExistingConfig ${SW_HIDE} + ShowWindow $WarningDefaultConfig ${SW_HIDE} + ${Endif} + + ${NSD_SetState} $UseExistingConfig $UseExistingConfig_State + + Call pageMinionConfig_OnClick + nsDialogs::Show FunctionEnd +Function pageMinionConfig_OnClick + + # You have to pop the top handle to keep the stack clean + Pop $R0 + + # Assign the current checkbox state to the variable + ${NSD_GetState} $UseExistingConfig $UseExistingConfig_State + + # Validate the checkboxes + ${If} $UseExistingConfig_State == ${BST_CHECKED} + # Use Existing Config is checked, show warning + ShowWindow $WarningExistingConfig ${SW_SHOW} + EnableWindow $MasterHost 0 + EnableWindow $MinionName 0 + ${NSD_SetText} $MasterHost $ConfigMasterHost + ${NSD_SetText} $MinionName $ConfigMinionName + ${If} $ExistingConfigFound == 1 + ShowWindow $WarningDefaultConfig ${SW_HIDE} + ${Endif} + ${Else} + # Use Existing Config is not checked, hide the warning + ShowWindow $WarningExistingConfig ${SW_HIDE} + EnableWindow $MasterHost 1 + EnableWindow $MinionName 1 + ${NSD_SetText} $MasterHost $MasterHost_State + ${NSD_SetText} $MinionName $MinionName_State + ${If} $ExistingConfigFound == 1 + ShowWindow $WarningDefaultConfig ${SW_SHOW} + ${Endif} + ${EndIf} + +FunctionEnd + + Function pageMinionConfig_Leave ${NSD_GetText} $MasterHost $MasterHost_State ${NSD_GetText} $MinionName $MinionName_State + ${NSD_GetState} $UseExistingConfig $UseExistingConfig_State + + Call RemoveExistingConfig FunctionEnd @@ -194,7 +289,7 @@ FunctionEnd !else Name "${PRODUCT_NAME} ${PRODUCT_VERSION}" !endif -OutFile 
"Salt-Minion-${PRODUCT_VERSION}-Py${PYTHON_VERSION}-${CPUARCH}-Setup.exe" +OutFile "${OutFile}" InstallDir "c:\salt" InstallDirRegKey HKLM "${PRODUCT_DIR_REGKEY}" "" ShowInstDetails show @@ -311,8 +406,6 @@ SectionEnd Function .onInit - Call getMinionConfig - Call parseCommandLineSwitches # Check for existing installation @@ -364,6 +457,23 @@ Function .onInit skipUninstall: + Call getMinionConfig + + IfSilent 0 +2 + Call RemoveExistingConfig + +FunctionEnd + + +Function RemoveExistingConfig + + ${If} $ExistingConfigFound == 1 + ${AndIf} $UseExistingConfig_State == 0 + # Wipe out the Existing Config + Delete "$INSTDIR\conf\minion" + RMDir /r "$INSTDIR\conf\minion.d" + ${EndIf} + FunctionEnd @@ -407,7 +517,9 @@ Section -Post nsExec::Exec "nssm.exe set salt-minion AppStopMethodConsole 24000" nsExec::Exec "nssm.exe set salt-minion AppStopMethodWindow 2000" - Call updateMinionConfig + ${If} $UseExistingConfig_State == 0 + Call updateMinionConfig + ${EndIf} Push "C:\salt" Call AddToPath @@ -534,18 +646,32 @@ FunctionEnd # Helper Functions ############################################################################### Function MsiQueryProductState + # Used for detecting VCRedist Installation + !define INSTALLSTATE_DEFAULT "5" - !define INSTALLSTATE_DEFAULT "5" - - Pop $R0 - StrCpy $NeedVcRedist "False" - System::Call "msi::MsiQueryProductStateA(t '$R0') i.r0" - StrCmp $0 ${INSTALLSTATE_DEFAULT} +2 0 - StrCpy $NeedVcRedist "True" + Pop $R0 + StrCpy $NeedVcRedist "False" + System::Call "msi::MsiQueryProductStateA(t '$R0') i.r0" + StrCmp $0 ${INSTALLSTATE_DEFAULT} +2 0 + StrCpy $NeedVcRedist "True" FunctionEnd +#------------------------------------------------------------------------------ +# Trim Function +# - Trim whitespace from the beginning and end of a string +# - Trims spaces, \r, \n, \t +# +# Usage: +# Push " some string " ; String to Trim +# Call Trim +# Pop $0 ; Trimmed String: "some string" +# +# or +# +# ${Trim} $0 $1 ; Trimmed String, String to Trim 
+#------------------------------------------------------------------------------ Function Trim Exch $R1 # Original string @@ -580,6 +706,95 @@ Function Trim FunctionEnd +#------------------------------------------------------------------------------ +# Explode Function +# - Splits a string based off the passed separator +# - Each item in the string is pushed to the stack +# - The last item pushed to the stack is the length of the array +# +# Usage: +# Push "," ; Separator +# Push "string,to,separate" ; String to explode +# Call Explode +# Pop $0 ; Number of items in the array +# +# or +# +# ${Explode} $0 $1 $2 ; Length, Separator, String +#------------------------------------------------------------------------------ +Function Explode + # Initialize variables + Var /GLOBAL explString + Var /GLOBAL explSeparator + Var /GLOBAL explStrLen + Var /GLOBAL explSepLen + Var /GLOBAL explOffset + Var /GLOBAL explTmp + Var /GLOBAL explTmp2 + Var /GLOBAL explTmp3 + Var /GLOBAL explArrCount + + # Get input from user + Pop $explString + Pop $explSeparator + + # Calculates initial values + StrLen $explStrLen $explString + StrLen $explSepLen $explSeparator + StrCpy $explArrCount 1 + + ${If} $explStrLen <= 1 # If we got a single character + ${OrIf} $explSepLen > $explStrLen # or separator is larger than the string, + Push $explString # then we return initial string with no change + Push 1 # and set array's length to 1 + Return + ${EndIf} + + # Set offset to the last symbol of the string + StrCpy $explOffset $explStrLen + IntOp $explOffset $explOffset - 1 + + # Clear temp string to exclude the possibility of appearance of occasional data + StrCpy $explTmp "" + StrCpy $explTmp2 "" + StrCpy $explTmp3 "" + + # Loop until the offset becomes negative + ${Do} + # If offset becomes negative, it is time to leave the function + ${IfThen} $explOffset == -1 ${|} ${ExitDo} ${|} + + # Remove everything before and after the searched part ("TempStr") + StrCpy $explTmp $explString $explSepLen 
$explOffset + + ${If} $explTmp == $explSeparator + # Calculating offset to start copy from + IntOp $explTmp2 $explOffset + $explSepLen # Offset equals to the current offset plus length of separator + StrCpy $explTmp3 $explString "" $explTmp2 + + Push $explTmp3 # Throwing array item to the stack + IntOp $explArrCount $explArrCount + 1 # Increasing array's counter + + StrCpy $explString $explString $explOffset 0 # Cutting all characters beginning with the separator entry + StrLen $explStrLen $explString + ${EndIf} + + ${If} $explOffset = 0 # If the beginning of the line met and there is no separator, + # copying the rest of the string + ${If} $explSeparator == "" # Fix for the empty separator + IntOp $explArrCount $explArrCount - 1 + ${Else} + Push $explString + ${EndIf} + ${EndIf} + + IntOp $explOffset $explOffset - 1 + ${Loop} + + Push $explArrCount +FunctionEnd + + #------------------------------------------------------------------------------ # StrStr Function # - find substring in a string @@ -816,6 +1031,9 @@ FunctionEnd ############################################################################### Function getMinionConfig + # Set Config Found Default Value + StrCpy $ExistingConfigFound 0 + confFind: IfFileExists "$INSTDIR\conf\minion" confFound confNotFound @@ -828,24 +1046,42 @@ Function getMinionConfig ${EndIf} confFound: + StrCpy $ExistingConfigFound 1 FileOpen $0 "$INSTDIR\conf\minion" r - ClearErrors confLoop: - FileRead $0 $1 - IfErrors EndOfFile - ${StrLoc} $2 $1 "master:" ">" - ${If} $2 == 0 - ${StrStrAdv} $2 $1 "master: " ">" ">" "0" "0" "0" - ${Trim} $2 $2 - StrCpy $MasterHost_State $2 + ClearErrors # Clear Errors + FileRead $0 $1 # Read the next line + IfErrors EndOfFile # Error is probably EOF + ${StrLoc} $2 $1 "master:" ">" # Find `master:` starting at the beginning + ${If} $2 == 0 # If it found it in the first position, then it is defined + ${StrStrAdv} $2 $1 "master: " ">" ">" "0" "0" "0" # Read everything after `master: ` + ${Trim} $2 $2 # 
Trim white space + ${If} $2 == "" # If it's empty, it's probably a list + masterLoop: + ClearErrors # Clear Errors + FileRead $0 $1 # Read the next line + IfErrors EndOfFile # Error is probably EOF + ${StrStrAdv} $2 $1 "- " ">" ">" "0" "0" "0" # Read everything after `- ` + ${Trim} $2 $2 # Trim white space + ${IfNot} $2 == "" # If it's not empty, we found something + ${If} $ConfigMasterHost == "" # Is the default `salt` there + StrCpy $ConfigMasterHost $2 # If so, make the first item the new entry + ${Else} + StrCpy $ConfigMasterHost "$ConfigMasterHost,$2" # Append the new master, comma separated + ${EndIf} + Goto masterLoop # Check the next one + ${EndIf} + ${Else} + StrCpy $ConfigMasterHost $2 # A single master entry ${EndIf} + ${EndIf} ${StrLoc} $2 $1 "id:" ">" ${If} $2 == 0 ${StrStrAdv} $2 $1 "id: " ">" ">" "0" "0" "0" ${Trim} $2 $2 - StrCpy $MinionName_State $2 + StrCpy $ConfigMinionName $2 ${EndIf} Goto confLoop @@ -855,6 +1091,14 @@ Function getMinionConfig confReallyNotFound: + # Set Default Config Values if not found + ${If} $ConfigMasterHost == "" + StrCpy $ConfigMasterHost "salt" + ${EndIf} + ${If} $ConfigMinionName == "" + StrCpy $ConfigMinionName "hostname" + ${EndIf} + FunctionEnd @@ -874,7 +1118,22 @@ Function updateMinionConfig ${StrLoc} $3 $2 "master:" ">" # where is 'master:' in this line ${If} $3 == 0 # is it in the first... 
${OrIf} $3 == 1 # or second position (account for comments) - StrCpy $2 "master: $MasterHost_State$\r$\n" # write the master + + ${Explode} $9 "," $MasterHost_state # Split the hostname on commas, $9 is the number of items found + ${If} $9 == 1 # 1 means only a single master was passed + StrCpy $2 "master: $MasterHost_State$\r$\n" # write the master + ${Else} # Make a multi-master entry + StrCpy $2 "master:" # Make the first line "master:" + + loop_explode: # Start a loop to go through the list in the config + pop $8 # Pop the next item off the stack + ${Trim} $8 $8 # Trim any whitespace + StrCpy $2 "$2$\r$\n - $8" # Add it to the master variable ($2) + IntOp $9 $9 - 1 # Decrement the list count + ${If} $9 >= 1 # If it's not 0 + Goto loop_explode # Do it again + ${EndIf} # close if statement + ${EndIf} # close if statement ${EndIf} # close if statement ${EndIf} # close if statement @@ -905,6 +1164,67 @@ Function parseCommandLineSwitches # Load the parameters ${GetParameters} $R0 + # Display Help + ClearErrors + ${GetOptions} $R0 "/?" $R1 + IfErrors display_help_not_found + + System::Call 'kernel32::GetStdHandle(i -11)i.r0' + System::Call 'kernel32::AttachConsole(i -1)i.r1' + ${If} $0 = 0 + ${OrIf} $1 = 0 + System::Call 'kernel32::AllocConsole()' + System::Call 'kernel32::GetStdHandle(i -11)i.r0' + ${EndIf} + FileWrite $0 "$\n" + FileWrite $0 "$\n" + FileWrite $0 "Help for Salt Minion installation$\n" + FileWrite $0 "===============================================================================$\n" + FileWrite $0 "$\n" + FileWrite $0 "/minion-name=$\t$\tA string value to set the minion name. Default is$\n" + FileWrite $0 "$\t$\t$\t'hostname'. Setting the minion name will replace$\n" + FileWrite $0 "$\t$\t$\texisting config with a default config. 
Cannot be$\n" + FileWrite $0 "$\t$\t$\tused in conjunction with /use-existing-config=1$\n" + FileWrite $0 "$\n" + FileWrite $0 "/master=$\t$\tA string value to set the IP address or hostname of$\n" + FileWrite $0 "$\t$\t$\tthe master. Default value is 'salt'. You may pass a$\n" + FileWrite $0 "$\t$\t$\tsingle master, or a comma separated list of masters.$\n" + FileWrite $0 "$\t$\t$\tSetting the master will replace existing config with$\n" + FileWrite $0 "$\t$\t$\ta default config. Cannot be used in conjunction with$\n" + FileWrite $0 "$\t$\t$\t/use-existing-config=1$\n" + FileWrite $0 "$\n" + FileWrite $0 "/start-minion=$\t$\t1 will start the service, 0 will not. Default is 1$\n" + FileWrite $0 "$\n" + FileWrite $0 "/start-minion-delayed$\tSet the minion start type to 'Automatic (Delayed Start)'$\n" + FileWrite $0 "$\n" + FileWrite $0 "/use-existing-config=$\t1 will use the existing config if present, 0 will$\n" + FileWrite $0 "$\t$\t$\treplace existing config with a default config. Default$\n" + FileWrite $0 "$\t$\t$\tis 1. If this is set to 1, values passed in$\n" + FileWrite $0 "$\t$\t$\t/minion-name and /master will be ignored$\n" + FileWrite $0 "$\n" + FileWrite $0 "/S$\t$\t$\tInstall Salt silently$\n" + FileWrite $0 "$\n" + FileWrite $0 "/?$\t$\t$\tDisplay this help screen$\n" + FileWrite $0 "$\n" + FileWrite $0 "-------------------------------------------------------------------------------$\n" + FileWrite $0 "$\n" + FileWrite $0 "Examples:$\n" + FileWrite $0 "$\n" + FileWrite $0 "${OutFile} /S$\n" + FileWrite $0 "$\n" + FileWrite $0 "${OutFile} /S /minion-name=myminion /master=master.mydomain.com /start-minion-delayed$\n" + FileWrite $0 "$\n" + FileWrite $0 "===============================================================================$\n" + FileWrite $0 "Press Enter to continue..." 
+ System::Free $0 + System::Free $1 + System::Call 'kernel32::FreeConsole()' + Abort + display_help_not_found: + + # Set default value for Use Existing Config + StrCpy $UseExistingConfig_State 1 + # Check for start-minion switches # /start-service is to be deprecated, so we must check for both ${GetOptions} $R0 "/start-service=" $R1 @@ -930,19 +1250,31 @@ Function parseCommandLineSwitches start_minion_delayed_not_found: # Minion Config: Master IP/Name + # If setting master, we don't want to use existing config ${GetOptions} $R0 "/master=" $R1 ${IfNot} $R1 == "" StrCpy $MasterHost_State $R1 + StrCpy $UseExistingConfig_State 0 ${ElseIf} $MasterHost_State == "" StrCpy $MasterHost_State "salt" ${EndIf} # Minion Config: Minion ID + # If setting minion id, we don't want to use existing config ${GetOptions} $R0 "/minion-name=" $R1 ${IfNot} $R1 == "" StrCpy $MinionName_State $R1 + StrCpy $UseExistingConfig_State 0 ${ElseIf} $MinionName_State == "" StrCpy $MinionName_State "hostname" ${EndIf} + # Use Existing Config + # Overrides above settings with user passed settings + ${GetOptions} $R0 "/use-existing-config=" $R1 + ${IfNot} $R1 == "" + # Use Existing Config was passed something, set it + StrCpy $UseExistingConfig_State $R1 + ${EndIf} + FunctionEnd diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py index 6e0da617c2..483a815f5d 100644 --- a/salt/auth/__init__.py +++ b/salt/auth/__init__.py @@ -6,8 +6,6 @@ This system allows for authentication to be managed in a module pluggable way so that any external authentication system can be used inside of Salt ''' -from __future__ import absolute_import - # 1. Create auth loader instance # 2. Accept arguments as a dict # 3. Verify with function introspection @@ -16,7 +14,7 @@ from __future__ import absolute_import # 6. 
Interface to verify tokens # Import python libs -from __future__ import print_function +from __future__ import absolute_import, print_function import collections import time import logging @@ -31,6 +29,7 @@ import salt.transport.client import salt.utils.args import salt.utils.dictupdate import salt.utils.files +import salt.utils.master import salt.utils.minions import salt.utils.user import salt.utils.versions @@ -430,13 +429,26 @@ class LoadAuth(object): auth_list = self.get_auth_list(load) elif auth_type == 'user': - if not self.authenticate_key(load, key): + auth_ret = self.authenticate_key(load, key) + msg = 'Authentication failure of type "user" occurred' + if not auth_ret: # auth_ret can be a boolean or the effective user id if show_username: - msg = 'Authentication failure of type "user" occurred for user {0}.'.format(username) - else: - msg = 'Authentication failure of type "user" occurred' + msg = '{0} for user {1}.'.format(msg, username) ret['error'] = {'name': 'UserAuthenticationError', 'message': msg} return ret + + # Verify that the caller has root on master + if auth_ret is not True: + if AuthUser(load['user']).is_sudo(): + if not self.opts['sudo_acl'] or not self.opts['publisher_acl']: + auth_ret = True + + if auth_ret is not True: + auth_list = salt.utils.master.get_values_of_matching_keys( + self.opts['publisher_acl'], auth_ret) + if not auth_list: + ret['error'] = {'name': 'UserAuthenticationError', 'message': msg} + return ret else: ret['error'] = {'name': 'SaltInvocationError', 'message': 'Authentication type not supported.'} diff --git a/salt/beacons/__init__.py b/salt/beacons/__init__.py index 0830e39b5d..84bda4681d 100644 --- a/salt/beacons/__init__.py +++ b/salt/beacons/__init__.py @@ -199,13 +199,42 @@ class Beacon(object): else: self.opts['beacons'][name].append({'enabled': enabled_value}) - def list_beacons(self): + def _get_beacons(self, + include_opts=True, + include_pillar=True): + ''' + Return the beacons data structure + ''' + 
beacons = {} + if include_pillar: + pillar_beacons = self.opts.get('pillar', {}).get('beacons', {}) + if not isinstance(pillar_beacons, dict): + raise ValueError('Beacons must be of type dict.') + beacons.update(pillar_beacons) + if include_opts: + opts_beacons = self.opts.get('beacons', {}) + if not isinstance(opts_beacons, dict): + raise ValueError('Beacons must be of type dict.') + beacons.update(opts_beacons) + return beacons + + def list_beacons(self, + include_pillar=True, + include_opts=True): ''' List the beacon items + + include_pillar: Whether to include beacons that are + configured in pillar, default is True. + + include_opts: Whether to include beacons that are + configured in opts, default is True. ''' + beacons = self._get_beacons(include_pillar, include_opts) + # Fire the complete event back along with the list of beacons evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': True, 'beacons': self.opts['beacons']}, + evt.fire_event({'complete': True, 'beacons': beacons}, tag='/salt/minion/minion_beacons_list_complete') return True @@ -236,8 +265,8 @@ class Beacon(object): del beacon_data['enabled'] valid, vcomment = self.beacons[validate_str](beacon_data) else: - log.info('Beacon %s does not have a validate' - ' function, skipping validation.', name) + vcomment = 'Beacon {0} does not have a validate' \ + ' function, skipping validation.'.format(name) valid = True # Fire the complete event back along with the list of beacons @@ -257,16 +286,23 @@ class Beacon(object): data = {} data[name] = beacon_data - if name in self.opts['beacons']: - log.info('Updating settings for beacon ' - 'item: %s', name) + if name in self._get_beacons(include_opts=False): + comment = 'Cannot update beacon item {0}, ' \ + 'because it is configured in pillar.'.format(name) + complete = False else: - log.info('Added new beacon item %s', name) - self.opts['beacons'].update(data) + if name in self.opts['beacons']: + comment = 'Updating settings 
for beacon ' \ + 'item: {0}'.format(name) + else: + comment = 'Added new beacon item: {0}'.format(name) + complete = True + self.opts['beacons'].update(data) # Fire the complete event back along with updated list of beacons evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': True, 'beacons': self.opts['beacons']}, + evt.fire_event({'complete': complete, 'comment': comment, + 'beacons': self.opts['beacons']}, tag='/salt/minion/minion_beacon_add_complete') return True @@ -279,13 +315,20 @@ class Beacon(object): data = {} data[name] = beacon_data - log.info('Updating settings for beacon ' - 'item: %s', name) - self.opts['beacons'].update(data) + if name in self._get_beacons(include_opts=False): + comment = 'Cannot modify beacon item {0}, ' \ + 'it is configured in pillar.'.format(name) + complete = False + else: + comment = 'Updating settings for beacon ' \ + 'item: {0}'.format(name) + complete = True + self.opts['beacons'].update(data) # Fire the complete event back along with updated list of beacons evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': True, 'beacons': self.opts['beacons']}, + evt.fire_event({'complete': complete, 'comment': comment, + 'beacons': self.opts['beacons']}, tag='/salt/minion/minion_beacon_modify_complete') return True @@ -295,13 +338,22 @@ class Beacon(object): Delete a beacon item ''' - if name in self.opts['beacons']: - log.info('Deleting beacon item %s', name) - del self.opts['beacons'][name] + if name in self._get_beacons(include_opts=False): + comment = 'Cannot delete beacon item {0}, ' \ + 'it is configured in pillar.'.format(name) + complete = False + else: + if name in self.opts['beacons']: + del self.opts['beacons'][name] + comment = 'Deleting beacon item: {0}'.format(name) + else: + comment = 'Beacon item {0} not found.'.format(name) + complete = True # Fire the complete event back along with updated list of beacons evt = 
salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': True, 'beacons': self.opts['beacons']}, + evt.fire_event({'complete': complete, 'comment': comment, + 'beacons': self.opts['beacons']}, tag='/salt/minion/minion_beacon_delete_complete') return True @@ -339,11 +391,19 @@ class Beacon(object): Enable a beacon ''' - self._update_enabled(name, True) + if name in self._get_beacons(include_opts=False): + comment = 'Cannot enable beacon item {0}, ' \ + 'it is configured in pillar.'.format(name) + complete = False + else: + self._update_enabled(name, True) + comment = 'Enabling beacon item {0}'.format(name) + complete = True # Fire the complete event back along with updated list of beacons evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': True, 'beacons': self.opts['beacons']}, + evt.fire_event({'complete': complete, 'comment': comment, + 'beacons': self.opts['beacons']}, tag='/salt/minion/minion_beacon_enabled_complete') return True @@ -353,11 +413,19 @@ class Beacon(object): Disable a beacon ''' - self._update_enabled(name, False) + if name in self._get_beacons(include_opts=False): + comment = 'Cannot disable beacon item {0}, ' \ + 'it is configured in pillar.'.format(name) + complete = False + else: + self._update_enabled(name, False) + comment = 'Disabling beacon item {0}'.format(name) + complete = True # Fire the complete event back along with updated list of beacons evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': True, 'beacons': self.opts['beacons']}, + evt.fire_event({'complete': complete, 'comment': comment, + 'beacons': self.opts['beacons']}, tag='/salt/minion/minion_beacon_disabled_complete') return True diff --git a/salt/beacons/ps.py b/salt/beacons/ps.py index b1f18431f4..d65be4c12e 100644 --- a/salt/beacons/ps.py +++ b/salt/beacons/ps.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- ''' -Send events covering service status +Send events covering process 
status ''' # Import Python Libs diff --git a/salt/beacons/sensehat.py b/salt/beacons/sensehat.py index 0784bb2bfd..1d881d2256 100644 --- a/salt/beacons/sensehat.py +++ b/salt/beacons/sensehat.py @@ -3,6 +3,8 @@ Beacon to monitor temperature, humidity and pressure using the SenseHat of a Raspberry Pi. +.. versionadded:: 2017.7.0 + :maintainer: Benedikt Werner <1benediktwerner@gmail.com> :maturity: new :depends: sense_hat Python module diff --git a/salt/client/__init__.py b/salt/client/__init__.py index 728e0bd2c3..0b69d36f39 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -1595,7 +1595,10 @@ class LocalClient(object): timeout=timeout, tgt=tgt, tgt_type=tgt_type, - expect_minions=(verbose or show_timeout), + # (gtmanfred) expect_minions is popped here incase it is passed from a client + # call. If this is not popped, then it would be passed twice to + # get_iter_returns. + expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug(u'return event: %s', ret) diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py index 23643f6488..c000490365 100644 --- a/salt/cloud/__init__.py +++ b/salt/cloud/__init__.py @@ -1417,7 +1417,7 @@ class Cloud(object): if name in vms: prov = vms[name]['provider'] driv = vms[name]['driver'] - msg = six.u('{0} already exists under {1}:{2}').format( + msg = u'{0} already exists under {1}:{2}'.format( name, prov, driv ) log.error(msg) diff --git a/salt/cloud/clouds/gce.py b/salt/cloud/clouds/gce.py index e94667d674..453c3dadec 100644 --- a/salt/cloud/clouds/gce.py +++ b/salt/cloud/clouds/gce.py @@ -2080,6 +2080,7 @@ def attach_disk(name=None, kwargs=None, call=None): disk_name = kwargs['disk_name'] mode = kwargs.get('mode', 'READ_WRITE').upper() boot = kwargs.get('boot', False) + auto_delete = kwargs.get('auto_delete', False) if boot and boot.lower() in ['true', 'yes', 'enabled']: boot = True else: @@ -2109,7 +2110,8 @@ def attach_disk(name=None, kwargs=None, call=None): 
transport=__opts__['transport'] ) - result = conn.attach_volume(node, disk, ex_mode=mode, ex_boot=boot) + result = conn.attach_volume(node, disk, ex_mode=mode, ex_boot=boot, + ex_auto_delete=auto_delete) __utils__['cloud.fire_event']( 'event', @@ -2389,6 +2391,8 @@ def create_attach_volumes(name, kwargs, call=None): 'type': The disk type, either pd-standard or pd-ssd. Optional, defaults to pd-standard. 'image': An image to use for this new disk. Optional. 'snapshot': A snapshot to use for this new disk. Optional. + 'auto_delete': An option(bool) to keep or remove the disk upon + instance deletion. Optional, defaults to False. Volumes are attached in the order in which they are given, thus on a new node the first volume will be /dev/sdb, the second /dev/sdc, and so on. @@ -2416,7 +2420,8 @@ def create_attach_volumes(name, kwargs, call=None): 'size': volume['size'], 'type': volume.get('type', 'pd-standard'), 'image': volume.get('image', None), - 'snapshot': volume.get('snapshot', None) + 'snapshot': volume.get('snapshot', None), + 'auto_delete': volume.get('auto_delete', False) } create_disk(volume_dict, 'function') diff --git a/salt/cloud/clouds/libvirt.py b/salt/cloud/clouds/libvirt.py index 6519ba066f..d4931abcc5 100644 --- a/salt/cloud/clouds/libvirt.py +++ b/salt/cloud/clouds/libvirt.py @@ -465,18 +465,54 @@ def create(vm_): return ret except Exception as e: # pylint: disable=broad-except - # Try to clean up in as much cases as possible - log.info('Cleaning up after exception clean up items: {0}'.format(cleanup)) - for leftover in cleanup: - what = leftover['what'] - item = leftover['item'] - if what == 'domain': - destroy_domain(conn, item) - if what == 'volume': - item.delete() + do_cleanup(cleanup) + # throw the root cause after cleanup raise e +def do_cleanup(cleanup): + ''' + Clean up clone domain leftovers as much as possible. + + Extra robust clean up in order to deal with some small changes in libvirt + behavior over time. 
Passed in volumes and domains are deleted, any errors + are ignored. Used when cloning/provisioning a domain fails. + + :param cleanup: list containing dictonaries with two keys: 'what' and 'item'. + If 'what' is domain the 'item' is a libvirt domain object. + If 'what' is volume then the item is a libvirt volume object. + + Returns: + none + + .. versionadded: 2017.7.3 + ''' + log.info('Cleaning up after exception') + for leftover in cleanup: + what = leftover['what'] + item = leftover['item'] + if what == 'domain': + log.info('Cleaning up {0} {1}'.format(what, item.name())) + try: + item.destroy() + log.debug('{0} {1} forced off'.format(what, item.name())) + except libvirtError: + pass + try: + item.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE+ + libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA+ + libvirt.VIR_DOMAIN_UNDEFINE_NVRAM) + log.debug('{0} {1} undefined'.format(what, item.name())) + except libvirtError: + pass + if what == 'volume': + try: + item.delete() + log.debug('{0} {1} cleaned up'.format(what, item.name())) + except libvirtError: + pass + + def destroy(name, call=None): """ This function irreversibly destroys a virtual machine on the cloud provider. 
diff --git a/salt/cloud/clouds/opennebula.py b/salt/cloud/clouds/opennebula.py index 9d03202f17..0fba26ac1d 100644 --- a/salt/cloud/clouds/opennebula.py +++ b/salt/cloud/clouds/opennebula.py @@ -4572,7 +4572,8 @@ def _list_nodes(full=False): pass vms[name]['id'] = vm.find('ID').text - vms[name]['image'] = vm.find('TEMPLATE').find('TEMPLATE_ID').text + if vm.find('TEMPLATE').find('TEMPLATE_ID'): + vms[name]['image'] = vm.find('TEMPLATE').find('TEMPLATE_ID').text vms[name]['name'] = name vms[name]['size'] = {'cpu': cpu_size, 'memory': memory_size} vms[name]['state'] = vm.find('STATE').text diff --git a/salt/cloud/clouds/saltify.py b/salt/cloud/clouds/saltify.py index 1f3d0867bd..b4aa137dc1 100644 --- a/salt/cloud/clouds/saltify.py +++ b/salt/cloud/clouds/saltify.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- ''' +.. _`saltify-module`: + Saltify Module ============== @@ -7,6 +9,9 @@ The Saltify module is designed to install Salt on a remote machine, virtual or bare metal, using SSH. This module is useful for provisioning machines which are already installed, but not Salted. +.. versionchanged:: Oxygen + The wake_on_lan capability, and actions destroy, reboot, and query functions were added. + Use of this module requires some configuration in cloud profile and provider files as described in the :ref:`Gettting Started with Saltify ` documentation. 
@@ -15,11 +20,12 @@ files as described in the # Import python libs from __future__ import absolute_import import logging +import time # Import salt libs import salt.utils.cloud import salt.config as config -import salt.netapi +import salt.client import salt.ext.six as six if six.PY3: import ipaddress @@ -32,6 +38,7 @@ from salt.exceptions import SaltCloudException, SaltCloudSystemExit log = logging.getLogger(__name__) try: + # noinspection PyUnresolvedReferences from impacket.smbconnection import SessionError as smbSessionError from impacket.smb3 import SessionError as smb3SessionError HAS_IMPACKET = True @@ -39,7 +46,9 @@ except ImportError: HAS_IMPACKET = False try: + # noinspection PyUnresolvedReferences from winrm.exceptions import WinRMTransportError + # noinspection PyUnresolvedReferences from requests.exceptions import ( ConnectionError, ConnectTimeout, ReadTimeout, SSLError, ProxyError, RetryError, InvalidSchema) @@ -55,24 +64,6 @@ def __virtual__(): return True -def _get_connection_info(): - ''' - Return connection information for the passed VM data - ''' - vm_ = get_configured_provider() - - try: - ret = {'username': vm_['username'], - 'password': vm_['password'], - 'eauth': vm_['eauth'], - 'vm': vm_, - } - except KeyError: - raise SaltCloudException( - 'Configuration must define salt-api "username", "password" and "eauth"') - return ret - - def avail_locations(call=None): ''' This function returns a list of locations available. @@ -81,7 +72,7 @@ def avail_locations(call=None): salt-cloud --list-locations my-cloud-provider - [ saltify will always returns an empty dictionary ] + [ saltify will always return an empty dictionary ] ''' return {} @@ -127,8 +118,6 @@ def list_nodes(call=None): returns a list of dictionaries of defined standard fields. - salt-api setup required for operation. - ..versionadded:: Oxygen ''' @@ -172,8 +161,8 @@ def list_nodes_full(call=None): salt-cloud -F returns a list of dictionaries. 
+ for 'saltify' minions, returns dict of grains (enhanced). - salt-api setup required for operation. ..versionadded:: Oxygen ''' @@ -200,16 +189,9 @@ def _list_nodes_full(call=None): ''' List the nodes, ask all 'saltify' minions, return dict of grains. ''' - local = salt.netapi.NetapiClient(__opts__) - cmd = {'client': 'local', - 'tgt': 'salt-cloud:driver:saltify', - 'fun': 'grains.items', - 'arg': '', - 'tgt_type': 'grain', - } - cmd.update(_get_connection_info()) - - return local.run(cmd) + local = salt.client.LocalClient() + return local.cmd('salt-cloud:driver:saltify', 'grains.items', '', + tgt_type='grain') def list_nodes_select(call=None): @@ -226,27 +208,69 @@ def show_instance(name, call=None): ''' List the a single node, return dict of grains. ''' - local = salt.netapi.NetapiClient(__opts__) - cmd = {'client': 'local', - 'tgt': 'name', - 'fun': 'grains.items', - 'arg': '', - 'tgt_type': 'glob', - } - cmd.update(_get_connection_info()) - ret = local.run(cmd) + local = salt.client.LocalClient() + ret = local.cmd(name, 'grains.items') ret.update(_build_required_items(ret)) return ret def create(vm_): ''' - Provision a single machine + if configuration parameter ``deploy`` is ``True``, + + Provision a single machine, adding its keys to the salt master + + else, + + Test ssh connections to the machine + + Configuration parameters: + + - deploy: (see above) + - provider: name of entry in ``salt/cloud.providers.d/???`` file + - ssh_host: IP address or DNS name of the new machine + - ssh_username: name used to log in to the new machine + - ssh_password: password to log in (unless key_filename is used) + - key_filename: (optional) SSH private key for passwordless login + - ssh_port: (default=22) TCP port for SSH connection + - wake_on_lan_mac: (optional) hardware (MAC) address for wake on lan + - wol_sender_node: (optional) salt minion to send wake on lan command + - wol_boot_wait: (default=30) seconds to delay while client boots + - force_minion_config: (optional) 
replace the minion configuration files on the new machine + + See also + :ref:`Miscellaneous Salt Cloud Options ` + and + :ref:`Getting Started with Saltify ` + + CLI Example: + + .. code-block:: bash + + salt-cloud -p mymachine my_new_id ''' deploy_config = config.get_cloud_config_value( 'deploy', vm_, __opts__, default=False) if deploy_config: + wol_mac = config.get_cloud_config_value( + 'wake_on_lan_mac', vm_, __opts__, default='') + wol_host = config.get_cloud_config_value( + 'wol_sender_node', vm_, __opts__, default='') + if wol_mac and wol_host: + log.info('sending wake-on-lan to %s using node %s', + wol_mac, wol_host) + local = salt.client.LocalClient() + if isinstance(wol_mac, six.string_types): + wol_mac = [wol_mac] # a smart user may have passed more params + ret = local.cmd(wol_host, 'network.wol', wol_mac) + log.info('network.wol returned value %s', ret) + if ret and ret[wol_host]: + sleep_time = config.get_cloud_config_value( + 'wol_boot_wait', vm_, __opts__, default=30) + if sleep_time > 0.0: + log.info('delaying %d seconds for boot', sleep_time) + time.sleep(sleep_time) log.info('Provisioning existing machine %s', vm_['name']) ret = __utils__['cloud.bootstrap'](vm_, __opts__) else: @@ -365,14 +389,21 @@ def destroy(name, call=None): .. versionadded:: Oxygen + Disconnect a minion from the master, and remove its keys. + + Optionally, (if ``remove_config_on_destroy`` is ``True``), + disables salt-minion from running on the minion, and + erases the Salt configuration files from it. + + Optionally, (if ``shutdown_on_destroy`` is ``True``), + orders the minion to halt. + CLI Example: .. code-block:: bash salt-cloud --destroy mymachine - salt-api setup required for operation. 
- ''' if call == 'function': raise SaltCloudSystemExit( @@ -391,15 +422,9 @@ def destroy(name, call=None): transport=opts['transport'] ) - local = salt.netapi.NetapiClient(opts) - cmd = {'client': 'local', - 'tgt': name, - 'fun': 'grains.get', - 'arg': ['salt-cloud'], - } - cmd.update(_get_connection_info()) - vm_ = cmd['vm'] - my_info = local.run(cmd) + vm_ = get_configured_provider() + local = salt.client.LocalClient() + my_info = local.cmd(name, 'grains.get', ['salt-cloud']) try: vm_.update(my_info[name]) # get profile name to get config value except (IndexError, TypeError): @@ -407,25 +432,22 @@ def destroy(name, call=None): if config.get_cloud_config_value( 'remove_config_on_destroy', vm_, opts, default=True ): - cmd.update({'fun': 'service.disable', 'arg': ['salt-minion']}) - ret = local.run(cmd) # prevent generating new keys on restart + ret = local.cmd(name, # prevent generating new keys on restart + 'service.disable', + ['salt-minion']) if ret and ret[name]: log.info('disabled salt-minion service on %s', name) - cmd.update({'fun': 'config.get', 'arg': ['conf_file']}) - ret = local.run(cmd) + ret = local.cmd(name, 'config.get', ['conf_file']) if ret and ret[name]: confile = ret[name] - cmd.update({'fun': 'file.remove', 'arg': [confile]}) - ret = local.run(cmd) + ret = local.cmd(name, 'file.remove', [confile]) if ret and ret[name]: log.info('removed minion %s configuration file %s', name, confile) - cmd.update({'fun': 'config.get', 'arg': ['pki_dir']}) - ret = local.run(cmd) + ret = local.cmd(name, 'config.get', ['pki_dir']) if ret and ret[name]: pki_dir = ret[name] - cmd.update({'fun': 'file.remove', 'arg': [pki_dir]}) - ret = local.run(cmd) + ret = local.cmd(name, 'file.remove', [pki_dir]) if ret and ret[name]: log.info( 'removed minion %s key files in %s', @@ -435,8 +457,7 @@ def destroy(name, call=None): if config.get_cloud_config_value( 'shutdown_on_destroy', vm_, opts, default=False ): - cmd.update({'fun': 'system.shutdown', 'arg': ''}) - ret = 
local.run(cmd) + ret = local.cmd(name, 'system.shutdown') if ret and ret[name]: log.info('system.shutdown for minion %s successful', name) @@ -456,8 +477,6 @@ def reboot(name, call=None): ''' Reboot a saltify minion. - salt-api setup required for operation. - ..versionadded:: Oxygen name @@ -475,13 +494,5 @@ def reboot(name, call=None): 'The reboot action must be called with -a or --action.' ) - local = salt.netapi.NetapiClient(__opts__) - cmd = {'client': 'local', - 'tgt': name, - 'fun': 'system.reboot', - 'arg': '', - } - cmd.update(_get_connection_info()) - ret = local.run(cmd) - - return ret + local = salt.client.LocalClient() + return local.cmd(name, 'system.reboot') diff --git a/salt/cloud/clouds/softlayer.py b/salt/cloud/clouds/softlayer.py index d24bcab660..95bf6c10c0 100644 --- a/salt/cloud/clouds/softlayer.py +++ b/salt/cloud/clouds/softlayer.py @@ -371,6 +371,12 @@ def create(vm_): if post_uri: kwargs['postInstallScriptUri'] = post_uri + dedicated_host_id = config.get_cloud_config_value( + 'dedicated_host_id', vm_, __opts__, default=None + ) + if dedicated_host_id: + kwargs['dedicatedHost'] = {'id': dedicated_host_id} + __utils__['cloud.fire_event']( 'event', 'requesting instance', diff --git a/salt/cloud/clouds/vagrant.py b/salt/cloud/clouds/vagrant.py new file mode 100644 index 0000000000..830c8b57da --- /dev/null +++ b/salt/cloud/clouds/vagrant.py @@ -0,0 +1,338 @@ +# -*- coding: utf-8 -*- +''' +Vagrant Cloud Driver +==================== + +The Vagrant cloud is designed to "vagrant up" a virtual machine as a +Salt minion. + +Use of this module requires some configuration in cloud profile and provider +files as described in the +:ref:`Getting Started with Vagrant ` documentation. + +.. 
versionadded:: Oxygen + + +''' + +# Import python libs +from __future__ import absolute_import +import logging +import os +import tempfile + +# Import salt libs +import salt.utils +import salt.config as config +import salt.client +import salt.ext.six as six +if six.PY3: + import ipaddress +else: + import salt.ext.ipaddress as ipaddress +from salt.exceptions import SaltCloudException, SaltCloudSystemExit + +# Get logging started +log = logging.getLogger(__name__) + + +def __virtual__(): + ''' + Needs no special configuration + ''' + return True + + +def avail_locations(call=None): + r''' + This function returns a list of locations available. + + CLI Example: + + .. code-block:: bash + + salt-cloud --list-locations my-cloud-provider + + # \[ vagrant will always returns an empty dictionary \] + + ''' + + return {} + + +def avail_images(call=None): + '''This function returns a list of images available for this cloud provider. + vagrant will return a list of profiles. + salt-cloud --list-images my-cloud-provider + ''' + vm_ = get_configured_provider() + return {'Profiles': [profile for profile in vm_['profiles']]} + + +def avail_sizes(call=None): + r''' + This function returns a list of sizes available for this cloud provider. + + CLI Example: + + .. code-block:: bash + + salt-cloud --list-sizes my-cloud-provider + + # \[ vagrant always returns an empty dictionary \] + + ''' + return {} + + +def list_nodes(call=None): + ''' + List the nodes which have salt-cloud:driver:vagrant grains. + + CLI Example: + + .. 
code-block:: bash + + salt-cloud -Q + ''' + nodes = _list_nodes(call) + return _build_required_items(nodes) + + +def _build_required_items(nodes): + ret = {} + for name, grains in nodes.items(): + if grains: + private_ips = [] + public_ips = [] + ips = grains['ipv4'] + grains['ipv6'] + for adrs in ips: + ip_ = ipaddress.ip_address(adrs) + if not ip_.is_loopback: + if ip_.is_private: + private_ips.append(adrs) + else: + public_ips.append(adrs) + + ret[name] = { + 'id': grains['id'], + 'image': grains['salt-cloud']['profile'], + 'private_ips': private_ips, + 'public_ips': public_ips, + 'size': '', + 'state': 'running' + } + + return ret + + +def list_nodes_full(call=None): + ''' + List the nodes, ask all 'vagrant' minions, return dict of grains (enhanced). + + CLI Example: + + .. code-block:: bash + + salt-call -F + ''' + ret = _list_nodes(call) + + for key, grains in ret.items(): # clean up some hyperverbose grains -- everything is too much + try: + del grains['cpu_flags'], grains['disks'], grains['pythonpath'], grains['dns'], grains['gpus'] + except KeyError: + pass # ignore absence of things we are eliminating + except TypeError: + del ret[key] # eliminate all reference to unexpected (None) values. + + reqs = _build_required_items(ret) + for name in ret: + ret[name].update(reqs[name]) + return ret + + +def _list_nodes(call=None): + ''' + List the nodes, ask all 'vagrant' minions, return dict of grains. + ''' + local = salt.client.LocalClient() + ret = local.cmd('salt-cloud:driver:vagrant', 'grains.items', '', tgt_type='grain') + return ret + + +def list_nodes_select(call=None): + ''' + Return a list of the minions that have salt-cloud grains, with + select fields. + ''' + return salt.utils.cloud.list_nodes_select( + list_nodes_full('function'), __opts__['query.selection'], call, + ) + + +def show_instance(name, call=None): + ''' + List the a single node, return dict of grains. 
+ ''' + local = salt.client.LocalClient() + ret = local.cmd(name, 'grains.items', '') + reqs = _build_required_items(ret) + ret[name].update(reqs[name]) + return ret + + +def _get_my_info(name): + local = salt.client.LocalClient() + return local.cmd(name, 'grains.get', ['salt-cloud']) + + +def create(vm_): + ''' + Provision a single machine + + CLI Example: + + .. code-block:: bash + salt-cloud -p my_profile new_node_1 + + ''' + name = vm_['name'] + machine = config.get_cloud_config_value( + 'machine', vm_, __opts__, default='') + vm_['machine'] = machine + host = config.get_cloud_config_value( + 'host', vm_, __opts__, default=NotImplemented) + vm_['cwd'] = config.get_cloud_config_value( + 'cwd', vm_, __opts__, default='/') + vm_['runas'] = config.get_cloud_config_value( + 'vagrant_runas', vm_, __opts__, default=os.getenv('SUDO_USER')) + vm_['timeout'] = config.get_cloud_config_value( + 'vagrant_up_timeout', vm_, __opts__, default=300) + vm_['vagrant_provider'] = config.get_cloud_config_value( + 'vagrant_provider', vm_, __opts__, default='') + vm_['grains'] = {'salt-cloud:vagrant': {'host': host, 'machine': machine}} + + log.info('sending \'vagrant.init %s machine=%s\' command to %s', name, machine, host) + + local = salt.client.LocalClient() + ret = local.cmd(host, 'vagrant.init', [name], kwarg={'vm': vm_, 'start': True}) + log.info('response ==> %s', ret[host]) + + network_mask = config.get_cloud_config_value( + 'network_mask', vm_, __opts__, default='') + if 'ssh_host' not in vm_: + ret = local.cmd(host, + 'vagrant.get_ssh_config', + [name], + kwarg={'network_mask': network_mask, + 'get_private_key': True})[host] + with tempfile.NamedTemporaryFile() as pks: + if 'private_key' not in vm_ and ret.get('private_key', False): + pks.write(ret['private_key']) + pks.flush() + log.debug('wrote private key to %s', pks.name) + vm_['key_filename'] = pks.name + if 'ssh_host' not in vm_: + vm_.setdefault('ssh_username', ret['ssh_username']) + if ret.get('ip_address'): + 
vm_['ssh_host'] = ret['ip_address'] + else: # if probe failed or not used, use Vagrant's reported ssh info + vm_['ssh_host'] = ret['ssh_host'] + vm_.setdefault('ssh_port', ret['ssh_port']) + + log.info('Provisioning machine %s as node %s using ssh %s', + machine, name, vm_['ssh_host']) + ret = __utils__['cloud.bootstrap'](vm_, __opts__) + return ret + + +def get_configured_provider(): + ''' + Return the first configured instance. + ''' + ret = config.is_provider_configured( + __opts__, + __active_provider_name__ or 'vagrant', + '' + ) + return ret + + +# noinspection PyTypeChecker +def destroy(name, call=None): + ''' + Destroy a node. + + CLI Example: + + .. code-block:: bash + + salt-cloud --destroy mymachine + ''' + if call == 'function': + raise SaltCloudSystemExit( + 'The destroy action must be called with -d, --destroy, ' + '-a, or --action.' + ) + + opts = __opts__ + + __utils__['cloud.fire_event']( + 'event', + 'destroying instance', + 'salt/cloud/{0}/destroying'.format(name), + args={'name': name}, + sock_dir=opts['sock_dir'], + transport=opts['transport'] + ) + my_info = _get_my_info(name) + profile_name = my_info[name]['profile'] + profile = opts['profiles'][profile_name] + host = profile['host'] + local = salt.client.LocalClient() + ret = local.cmd(host, 'vagrant.destroy', [name]) + + if ret[host]: + __utils__['cloud.fire_event']( + 'event', + 'destroyed instance', + 'salt/cloud/{0}/destroyed'.format(name), + args={'name': name}, + sock_dir=opts['sock_dir'], + transport=opts['transport'] + ) + + if opts.get('update_cachedir', False) is True: + __utils__['cloud.delete_minion_cachedir']( + name, __active_provider_name__.split(':')[0], opts) + + return {'Destroyed': '{0} was destroyed.'.format(name)} + else: + return {'Error': 'Error destroying {}'.format(name)} + + +# noinspection PyTypeChecker +def reboot(name, call=None): + ''' + Reboot a vagrant minion. + + name + The name of the VM to reboot. + + CLI Example: + + .. 
code-block:: bash + + salt-cloud -a reboot vm_name + ''' + if call != 'action': + raise SaltCloudException( + 'The reboot action must be called with -a or --action.' + ) + my_info = _get_my_info(name) + profile_name = my_info[name]['profile'] + profile = __opts__['profiles'][profile_name] + host = profile['host'] + local = salt.client.LocalClient() + return local.cmd(host, 'vagrant.reboot', [name]) diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py index 226119940b..202d70626e 100644 --- a/salt/cloud/clouds/vmware.py +++ b/salt/cloud/clouds/vmware.py @@ -704,7 +704,7 @@ def _manage_devices(devices, vm=None, container_ref=None, new_vm_name=None): network_name = devices['network'][device.deviceInfo.label]['name'] adapter_type = devices['network'][device.deviceInfo.label]['adapter_type'] if 'adapter_type' in devices['network'][device.deviceInfo.label] else '' switch_type = devices['network'][device.deviceInfo.label]['switch_type'] if 'switch_type' in devices['network'][device.deviceInfo.label] else '' - network_spec = _edit_existing_network_adapter(device, network_name, adapter_type, switch_type) + network_spec = _edit_existing_network_adapter(device, network_name, adapter_type, switch_type, container_ref) adapter_mapping = _set_network_adapter_mapping(devices['network'][device.deviceInfo.label]) device_specs.append(network_spec) nics_map.append(adapter_mapping) @@ -2578,7 +2578,7 @@ def create(vm_): config_spec.memoryMB = memory_mb if devices: - specs = _manage_devices(devices, vm=object_ref, new_vm_name=vm_name) + specs = _manage_devices(devices, vm=object_ref, container_ref=container_ref, new_vm_name=vm_name) config_spec.deviceChange = specs['device_specs'] if extra_config: diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index 4bd3f08a14..75562a102f 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -14,6 +14,7 @@ import time import stat # Import salt libs +import salt.acl import salt.crypt import 
salt.cache import salt.client @@ -69,12 +70,11 @@ def init_git_pillar(opts): for opts_dict in [x for x in opts.get('ext_pillar', [])]: if 'git' in opts_dict: try: - pillar = salt.utils.gitfs.GitPillar(opts) - pillar.init_remotes( + pillar = salt.utils.gitfs.GitPillar( + opts, opts_dict['git'], - git_pillar.PER_REMOTE_OVERRIDES, - git_pillar.PER_REMOTE_ONLY - ) + per_remote_overrides=git_pillar.PER_REMOTE_OVERRIDES, + per_remote_only=git_pillar.PER_REMOTE_ONLY) ret.append(pillar) except FileserverConfigError: if opts.get('git_pillar_verify_config', True): @@ -1176,88 +1176,50 @@ class LocalFuncs(object): ) minions = _res['minions'] - # Check for external auth calls - if extra.get('token', False): - # Authenticate - token = self.loadauth.authenticate_token(extra) - if not token: - return '' - - # Get acl from eauth module. - auth_list = self.loadauth.get_auth_list(extra, token) - - # Authorize the request - if not self.ckminions.auth_check( - auth_list, - load['fun'], - load['arg'], - load['tgt'], - load.get('tgt_type', 'glob'), - minions=minions, - # always accept find_job - whitelist=['saltutil.find_job'], - ): - log.warning('Authentication failure of type "token" occurred.') - return '' - load['user'] = token['name'] - log.debug('Minion tokenized user = "{0}"'.format(load['user'])) - elif 'eauth' in extra: - # Authenticate. - if not self.loadauth.authenticate_eauth(extra): - return '' - - # Get acl from eauth module. 
- auth_list = self.loadauth.get_auth_list(extra) - - # Authorize the request - if not self.ckminions.auth_check( - auth_list, - load['fun'], - load['arg'], - load['tgt'], - load.get('tgt_type', 'glob'), - minions=minions, - # always accept find_job - whitelist=['saltutil.find_job'], - ): - log.warning('Authentication failure of type "eauth" occurred.') - return '' - load['user'] = self.loadauth.load_name(extra) # The username we are attempting to auth with - # Verify that the caller has root on master + # Check for external auth calls and authenticate + auth_type, err_name, key = self._prep_auth_info(extra) + if auth_type == 'user': + auth_check = self.loadauth.check_authentication(load, auth_type, key=key) else: - auth_ret = self.loadauth.authenticate_key(load, self.key) - if auth_ret is False: + auth_check = self.loadauth.check_authentication(extra, auth_type) + + # Setup authorization list variable and error information + auth_list = auth_check.get('auth_list', []) + error = auth_check.get('error') + err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type) + + if error: + # Authentication error occurred: do not continue. + log.warning(err_msg) + return '' + + # All Token, Eauth, and non-root users must pass the authorization check + if auth_type != 'user' or (auth_type == 'user' and auth_list): + # Authorize the request + authorized = self.ckminions.auth_check( + auth_list, + load['fun'], + load['arg'], + load['tgt'], + load.get('tgt_type', 'glob'), + minions=minions, + # always accept find_job + whitelist=['saltutil.find_job'], + ) + + if not authorized: + # Authorization error occurred. Log warning and do not continue. 
+ log.warning(err_msg) return '' - if auth_ret is not True: - if salt.auth.AuthUser(load['user']).is_sudo(): - if not self.opts['sudo_acl'] or not self.opts['publisher_acl']: - auth_ret = True - - if auth_ret is not True: - # Avoid circular import - import salt.utils.master - auth_list = salt.utils.master.get_values_of_matching_keys( - self.opts['publisher_acl'], - auth_ret) - if not auth_list: - log.warning( - 'Authentication failure of type "user" occurred.' - ) - return '' - - if not self.ckminions.auth_check( - auth_list, - load['fun'], - load['arg'], - load['tgt'], - load.get('tgt_type', 'glob'), - minions=minions, - # always accept find_job - whitelist=['saltutil.find_job'], - ): - log.warning('Authentication failure of type "user" occurred.') - return '' + # Perform some specific auth_type tasks after the authorization check + if auth_type == 'token': + username = auth_check.get('username') + load['user'] = username + log.debug('Minion tokenized user = "{0}"'.format(username)) + elif auth_type == 'eauth': + # The username we are attempting to auth with + load['user'] = self.loadauth.load_name(extra) # If we order masters (via a syndic), don't short circuit if no minions # are found diff --git a/salt/fileserver/gitfs.py b/salt/fileserver/gitfs.py index 1182ec69be..477e6c40b2 100644 --- a/salt/fileserver/gitfs.py +++ b/salt/fileserver/gitfs.py @@ -71,6 +71,15 @@ log = logging.getLogger(__name__) __virtualname__ = 'git' +def _gitfs(init_remotes=True): + return salt.utils.gitfs.GitFS( + __opts__, + __opts__['gitfs_remotes'], + per_remote_overrides=PER_REMOTE_OVERRIDES, + per_remote_only=PER_REMOTE_ONLY, + init_remotes=init_remotes) + + def __virtual__(): ''' Only load if the desired provider module is present and gitfs is enabled @@ -79,7 +88,7 @@ def __virtual__(): if __virtualname__ not in __opts__['fileserver_backend']: return False try: - salt.utils.gitfs.GitFS(__opts__) + _gitfs(init_remotes=False) # Initialization of the GitFS object did not fail, so we 
know we have # valid configuration syntax and that a valid provider was detected. return __virtualname__ @@ -92,18 +101,14 @@ def clear_cache(): ''' Completely clear gitfs cache ''' - gitfs = salt.utils.gitfs.GitFS(__opts__) - return gitfs.clear_cache() + return _gitfs(init_remotes=False).clear_cache() def clear_lock(remote=None, lock_type='update'): ''' Clear update.lk ''' - gitfs = salt.utils.gitfs.GitFS(__opts__) - gitfs.init_remotes(__opts__['gitfs_remotes'], - PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) - return gitfs.clear_lock(remote=remote, lock_type=lock_type) + return _gitfs().clear_lock(remote=remote, lock_type=lock_type) def lock(remote=None): @@ -114,30 +119,21 @@ def lock(remote=None): information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' - gitfs = salt.utils.gitfs.GitFS(__opts__) - gitfs.init_remotes(__opts__['gitfs_remotes'], - PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) - return gitfs.lock(remote=remote) + return _gitfs().lock(remote=remote) def update(): ''' Execute a git fetch on all of the repos ''' - gitfs = salt.utils.gitfs.GitFS(__opts__) - gitfs.init_remotes(__opts__['gitfs_remotes'], - PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) - gitfs.update() + _gitfs().update() def envs(ignore_cache=False): ''' Return a list of refs that can be used as environments ''' - gitfs = salt.utils.gitfs.GitFS(__opts__) - gitfs.init_remotes(__opts__['gitfs_remotes'], - PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) - return gitfs.envs(ignore_cache=ignore_cache) + return _gitfs().envs(ignore_cache=ignore_cache) def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613 @@ -145,10 +141,7 @@ def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613 Find the first file to match the path and ref, read the file out of git and send the path to the newly cached file ''' - gitfs = salt.utils.gitfs.GitFS(__opts__) - gitfs.init_remotes(__opts__['gitfs_remotes'], - PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) - return 
gitfs.find_file(path, tgt_env=tgt_env, **kwargs) + return _gitfs().find_file(path, tgt_env=tgt_env, **kwargs) def init(): @@ -156,29 +149,21 @@ def init(): Initialize remotes. This is only used by the master's pre-flight checks, and is not invoked by GitFS. ''' - gitfs = salt.utils.gitfs.GitFS(__opts__) - gitfs.init_remotes(__opts__['gitfs_remotes'], - PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) + _gitfs() def serve_file(load, fnd): ''' Return a chunk from a file based on the data received ''' - gitfs = salt.utils.gitfs.GitFS(__opts__) - gitfs.init_remotes(__opts__['gitfs_remotes'], - PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) - return gitfs.serve_file(load, fnd) + return _gitfs().serve_file(load, fnd) def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' - gitfs = salt.utils.gitfs.GitFS(__opts__) - gitfs.init_remotes(__opts__['gitfs_remotes'], - PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) - return gitfs.file_hash(load, fnd) + return _gitfs().file_hash(load, fnd) def file_list(load): @@ -186,10 +171,7 @@ def file_list(load): Return a list of all files on the file server in a specified environment (specified as a key within the load dict). 
''' - gitfs = salt.utils.gitfs.GitFS(__opts__) - gitfs.init_remotes(__opts__['gitfs_remotes'], - PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) - return gitfs.file_list(load) + return _gitfs().file_list(load) def file_list_emptydirs(load): # pylint: disable=W0613 @@ -204,17 +186,11 @@ def dir_list(load): ''' Return a list of all directories on the master ''' - gitfs = salt.utils.gitfs.GitFS(__opts__) - gitfs.init_remotes(__opts__['gitfs_remotes'], - PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) - return gitfs.dir_list(load) + return _gitfs().dir_list(load) def symlink_list(load): ''' Return a dict of all symlinks based on a given path in the repo ''' - gitfs = salt.utils.gitfs.GitFS(__opts__) - gitfs.init_remotes(__opts__['gitfs_remotes'], - PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) - return gitfs.symlink_list(load) + return _gitfs().symlink_list(load) diff --git a/salt/grains/core.py b/salt/grains/core.py index f54e786fc4..9504d8596d 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -395,46 +395,116 @@ def _sunos_cpudata(): return grains +def _linux_memdata(): + ''' + Return the memory information for Linux-like systems + ''' + grains = {'mem_total': 0, 'swap_total': 0} + + meminfo = '/proc/meminfo' + if os.path.isfile(meminfo): + with salt.utils.files.fopen(meminfo, 'r') as ifile: + for line in ifile: + comps = line.rstrip('\n').split(':') + if not len(comps) > 1: + continue + if comps[0].strip() == 'MemTotal': + # Use floor division to force output to be an integer + grains['mem_total'] = int(comps[1].split()[0]) // 1024 + if comps[0].strip() == 'SwapTotal': + # Use floor division to force output to be an integer + grains['swap_total'] = int(comps[1].split()[0]) // 1024 + return grains + + +def _osx_memdata(): + ''' + Return the memory information for BSD-like systems + ''' + grains = {'mem_total': 0, 'swap_total': 0} + + sysctl = salt.utils.path.which('sysctl') + if sysctl: + mem = __salt__['cmd.run']('{0} -n hw.memsize'.format(sysctl)) + swap_total = 
__salt__['cmd.run']('{0} -n vm.swapusage').split()[2] + if swap_total.endswith('K'): + _power = 2**10 + elif swap_total.endswith('M'): + _power = 2**20 + elif swap_total.endswith('G'): + _power = 2**30 + swap_total = swap_total[:-1] * _power + + grains['mem_total'] = int(mem) // 1024 // 1024 + grains['swap_total'] = int(swap_total) // 1024 // 1024 + return grains + + +def _bsd_memdata(osdata): + ''' + Return the memory information for BSD-like systems + ''' + grains = {'mem_total': 0, 'swap_total': 0} + + sysctl = salt.utils.path.which('sysctl') + if sysctl: + mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl)) + swap_total = __salt__['cmd.run']('{0} -n vm.swap_total'.format(sysctl)) + if osdata['kernel'] == 'NetBSD' and mem.startswith('-'): + mem = __salt__['cmd.run']('{0} -n hw.physmem64'.format(sysctl)) + grains['mem_total'] = int(mem) // 1024 // 1024 + grains['swap_total'] = int(swap_total) // 1024 // 1024 + return grains + + +def _sunos_memdata(): + ''' + Return the memory information for SunOS-like systems + ''' + grains = {'mem_total': 0, 'swap_total': 0} + + prtconf = '/usr/sbin/prtconf 2>/dev/null' + for line in __salt__['cmd.run'](prtconf, python_shell=True).splitlines(): + comps = line.split(' ') + if comps[0].strip() == 'Memory' and comps[1].strip() == 'size:': + grains['mem_total'] = int(comps[2].strip()) + + swap_cmd = salt.utils.path.which('swap') + swap_total = __salt__['cmd.run']('{0} -s'.format(swap_cmd)).split()[1] + grains['swap_total'] = int(swap_total) // 1024 + return grains + + +def _windows_memdata(): + ''' + Return the memory information for Windows systems + ''' + grains = {'mem_total': 0} + # get the Total Physical memory as reported by msinfo32 + tot_bytes = win32api.GlobalMemoryStatusEx()['TotalPhys'] + # return memory info in gigabytes + grains['mem_total'] = int(tot_bytes / (1024 ** 2)) + return grains + + def _memdata(osdata): ''' Gather information about the system memory ''' # Provides: # mem_total + # swap_total, for 
supported systems. grains = {'mem_total': 0} if osdata['kernel'] == 'Linux': - meminfo = '/proc/meminfo' - - if os.path.isfile(meminfo): - with salt.utils.files.fopen(meminfo, 'r') as ifile: - for line in ifile: - comps = line.rstrip('\n').split(':') - if not len(comps) > 1: - continue - if comps[0].strip() == 'MemTotal': - # Use floor division to force output to be an integer - grains['mem_total'] = int(comps[1].split()[0]) // 1024 - elif osdata['kernel'] in ('FreeBSD', 'OpenBSD', 'NetBSD', 'Darwin'): - sysctl = salt.utils.path.which('sysctl') - if sysctl: - if osdata['kernel'] == 'Darwin': - mem = __salt__['cmd.run']('{0} -n hw.memsize'.format(sysctl)) - else: - mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl)) - if osdata['kernel'] == 'NetBSD' and mem.startswith('-'): - mem = __salt__['cmd.run']('{0} -n hw.physmem64'.format(sysctl)) - grains['mem_total'] = int(mem) // 1024 // 1024 + grains.update(_linux_memdata()) + elif osdata['kernel'] in ('FreeBSD', 'OpenBSD', 'NetBSD'): + grains.update(_bsd_memdata(osdata)) + elif osdata['kernel'] == 'Darwin': + grains.update(_osx_memdata()) elif osdata['kernel'] == 'SunOS': - prtconf = '/usr/sbin/prtconf 2>/dev/null' - for line in __salt__['cmd.run'](prtconf, python_shell=True).splitlines(): - comps = line.split(' ') - if comps[0].strip() == 'Memory' and comps[1].strip() == 'size:': - grains['mem_total'] = int(comps[2].strip()) + grains.update(_sunos_memdata()) elif osdata['kernel'] == 'Windows' and HAS_WMI: - # get the Total Physical memory as reported by msinfo32 - tot_bytes = win32api.GlobalMemoryStatusEx()['TotalPhys'] - # return memory info in gigabytes - grains['mem_total'] = int(tot_bytes / (1024 ** 2)) + grains.update(_windows_memdata()) return grains @@ -1410,7 +1480,10 @@ def os_data(): .format(' '.join(init_cmdline)) ) - # Add lsb grains on any distro with lsb-release + # Add lsb grains on any distro with lsb-release. 
Note that this import + # can fail on systems with lsb-release installed if the system package + # does not install the python package for the python interpreter used by + # Salt (i.e. python2 or python3) try: import lsb_release # pylint: disable=import-error release = lsb_release.get_distro_information() @@ -1459,7 +1532,13 @@ def os_data(): if 'VERSION_ID' in os_release: grains['lsb_distrib_release'] = os_release['VERSION_ID'] if 'PRETTY_NAME' in os_release: - grains['lsb_distrib_codename'] = os_release['PRETTY_NAME'] + codename = os_release['PRETTY_NAME'] + # https://github.com/saltstack/salt/issues/44108 + if os_release['ID'] == 'debian': + codename_match = re.search(r'\((\w+)\)$', codename) + if codename_match: + codename = codename_match.group(1) + grains['lsb_distrib_codename'] = codename if 'CPE_NAME' in os_release: if ":suse:" in os_release['CPE_NAME'] or ":opensuse:" in os_release['CPE_NAME']: grains['os'] = "SUSE" diff --git a/salt/log/handlers/sentry_mod.py b/salt/log/handlers/sentry_mod.py index 229ee02bb1..81625b158d 100644 --- a/salt/log/handlers/sentry_mod.py +++ b/salt/log/handlers/sentry_mod.py @@ -123,36 +123,25 @@ def setup_handlers(): url = urlparse(dsn) if not transport_registry.supported_scheme(url.scheme): raise ValueError('Unsupported Sentry DSN scheme: {0}'.format(url.scheme)) - dsn_config = {} - if (hasattr(transport_registry, 'compute_scope') and - callable(transport_registry.compute_scope)): - conf_extras = transport_registry.compute_scope(url, dsn_config) - dsn_config.update(conf_extras) - options.update({ - 'project': dsn_config['SENTRY_PROJECT'], - 'servers': dsn_config['SENTRY_SERVERS'], - 'public_key': dsn_config['SENTRY_PUBLIC_KEY'], - 'secret_key': dsn_config['SENTRY_SECRET_KEY'] - }) except ValueError as exc: log.info( 'Raven failed to parse the configuration provided ' 'DSN: {0}'.format(exc) ) - # Allow options to be overridden if previously parsed, or define them - for key in ('project', 'servers', 'public_key', 'secret_key'): 
- config_value = get_config_value(key) - if config_value is None and key not in options: - log.debug( - 'The required \'sentry_handler\' configuration key, ' - '\'{0}\', is not properly configured. Not configuring ' - 'the sentry logging handler.'.format(key) - ) - return - elif config_value is None: - continue - options[key] = config_value + if not dsn: + for key in ('project', 'servers', 'public_key', 'secret_key'): + config_value = get_config_value(key) + if config_value is None and key not in options: + log.debug( + 'The required \'sentry_handler\' configuration key, ' + '\'{0}\', is not properly configured. Not configuring ' + 'the sentry logging handler.'.format(key) + ) + return + elif config_value is None: + continue + options[key] = config_value # site: An optional, arbitrary string to identify this client installation. options.update({ diff --git a/salt/master.py b/salt/master.py index c79dfccca0..6fc2b3c6a3 100644 --- a/salt/master.py +++ b/salt/master.py @@ -486,11 +486,11 @@ class Master(SMaster): for repo in git_pillars: new_opts[u'ext_pillar'] = [repo] try: - git_pillar = salt.utils.gitfs.GitPillar(new_opts) - git_pillar.init_remotes( + git_pillar = salt.utils.gitfs.GitPillar( + new_opts, repo[u'git'], - salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, - salt.pillar.git_pillar.PER_REMOTE_ONLY) + per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, + per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY) except FileserverConfigError as exc: critical_errors.append(exc.strerror) finally: @@ -1840,89 +1840,52 @@ class ClearFuncs(object): clear_load.get(u'tgt_type', u'glob'), delimiter ) - minions = _res.get('minions', list()) - missing = _res.get('missing', list()) + minions = _res.get(u'minions', list()) + missing = _res.get(u'missing', list()) - # Check for external auth calls - if extra.get(u'token', False): - # Authenticate. 
- token = self.loadauth.authenticate_token(extra) - if not token: - return u'' - - # Get acl - auth_list = self.loadauth.get_auth_list(extra, token) - - # Authorize the request - if not self.ckminions.auth_check( - auth_list, - clear_load[u'fun'], - clear_load[u'arg'], - clear_load[u'tgt'], - clear_load.get(u'tgt_type', u'glob'), - minions=minions, - # always accept find_job - whitelist=[u'saltutil.find_job'], - ): - log.warning(u'Authentication failure of type "token" occurred.') - return u'' - clear_load[u'user'] = token[u'name'] - log.debug(u'Minion tokenized user = "%s"', clear_load[u'user']) - elif u'eauth' in extra: - # Authenticate. - if not self.loadauth.authenticate_eauth(extra): - return u'' - - # Get acl from eauth module. - auth_list = self.loadauth.get_auth_list(extra) - - # Authorize the request - if not self.ckminions.auth_check( - auth_list, - clear_load[u'fun'], - clear_load[u'arg'], - clear_load[u'tgt'], - clear_load.get(u'tgt_type', u'glob'), - minions=minions, - # always accept find_job - whitelist=[u'saltutil.find_job'], - ): - log.warning(u'Authentication failure of type "eauth" occurred.') - return u'' - clear_load[u'user'] = self.loadauth.load_name(extra) # The username we are attempting to auth with - # Verify that the caller has root on master + # Check for external auth calls and authenticate + auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra) + if auth_type == 'user': + auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key) else: - auth_ret = self.loadauth.authenticate_key(clear_load, self.key) - if auth_ret is False: + auth_check = self.loadauth.check_authentication(extra, auth_type) + + # Setup authorization list variable and error information + auth_list = auth_check.get(u'auth_list', []) + err_msg = u'Authentication failure of type "{0}" occurred.'.format(auth_type) + + if auth_check.get(u'error'): + # Authentication error occurred: do not continue. 
+ log.warning(err_msg) + return u'' + + # All Token, Eauth, and non-root users must pass the authorization check + if auth_type != u'user' or (auth_type == u'user' and auth_list): + # Authorize the request + authorized = self.ckminions.auth_check( + auth_list, + clear_load[u'fun'], + clear_load[u'arg'], + clear_load[u'tgt'], + clear_load.get(u'tgt_type', u'glob'), + minions=minions, + # always accept find_job + whitelist=[u'saltutil.find_job'], + ) + + if not authorized: + # Authorization error occurred. Do not continue. + log.warning(err_msg) return u'' - if auth_ret is not True: - if salt.auth.AuthUser(clear_load[u'user']).is_sudo(): - if not self.opts[u'sudo_acl'] or not self.opts[u'publisher_acl']: - auth_ret = True - - if auth_ret is not True: - auth_list = salt.utils.master.get_values_of_matching_keys( - self.opts[u'publisher_acl'], - auth_ret) - if not auth_list: - log.warning( - u'Authentication failure of type "user" occurred.' - ) - return u'' - - if not self.ckminions.auth_check( - auth_list, - clear_load[u'fun'], - clear_load[u'arg'], - clear_load[u'tgt'], - clear_load.get(u'tgt_type', u'glob'), - minions=minions, - # always accept find_job - whitelist=[u'saltutil.find_job'], - ): - log.warning(u'Authentication failure of type "user" occurred.') - return u'' + # Perform some specific auth_type tasks after the authorization check + if auth_type == u'token': + username = auth_check.get(u'username') + clear_load[u'user'] = username + log.debug(u'Minion tokenized user = "%s"', username) + elif auth_type == u'eauth': + # The username we are attempting to auth with + clear_load[u'user'] = self.loadauth.load_name(extra) # If we order masters (via a syndic), don't short circuit if no minions # are found diff --git a/salt/minion.py b/salt/minion.py index 9ea6c170db..dca4e1ff7c 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -2063,6 +2063,8 @@ class Minion(MinionBase): func = data.get(u'func', None) name = data.get(u'name', None) beacon_data = 
data.get(u'beacon_data', None) + include_pillar = data.get(u'include_pillar', None) + include_opts = data.get(u'include_opts', None) if func == u'add': self.beacons.add_beacon(name, beacon_data) @@ -2079,7 +2081,7 @@ class Minion(MinionBase): elif func == u'disable_beacon': self.beacons.disable_beacon(name) elif func == u'list': - self.beacons.list_beacons() + self.beacons.list_beacons(include_opts, include_pillar) elif func == u'list_available': self.beacons.list_available_beacons() elif func == u'validate_beacon': diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py index d38bde1ac1..b79e6fa229 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py @@ -29,7 +29,6 @@ import json import yaml # pylint: disable=no-name-in-module,import-error,redefined-builtin from salt.ext import six -from salt.ext.six.moves import range from salt.ext.six.moves.urllib.error import HTTPError from salt.ext.six.moves.urllib.request import Request as _Request, urlopen as _urlopen # pylint: enable=no-name-in-module,import-error,redefined-builtin @@ -1610,7 +1609,7 @@ def _consolidate_repo_sources(sources): combined_comps = set(repo.comps).union(set(combined.comps)) consolidated[key].comps = list(combined_comps) else: - consolidated[key] = sourceslist.SourceEntry(_strip_uri(repo.line)) + consolidated[key] = sourceslist.SourceEntry(salt.utils.pkg.deb.strip_uri(repo.line)) if repo.file != base_file: delete_files.add(repo.file) @@ -1718,7 +1717,7 @@ def list_repos(): repo['dist'] = source.dist repo['type'] = source.type repo['uri'] = source.uri.rstrip('/') - repo['line'] = _strip_uri(source.line.strip()) + repo['line'] = salt.utils.pkg.deb.strip_uri(source.line.strip()) repo['architectures'] = getattr(source, 'architectures', []) repos.setdefault(source.uri, []).append(repo) return repos @@ -2477,18 +2476,6 @@ def file_dict(*packages): return __salt__['lowpkg.file_dict'](*packages) -def _strip_uri(repo): - ''' - Remove the trailing slash from the URI in a repo definition - 
''' - splits = repo.split() - for idx in range(len(splits)): - if any(splits[idx].startswith(x) - for x in ('http://', 'https://', 'ftp://')): - splits[idx] = splits[idx].rstrip('/') - return ' '.join(splits) - - def expand_repo_def(**kwargs): ''' Take a repository definition and expand it to the full pkg repository dict @@ -2504,7 +2491,7 @@ def expand_repo_def(**kwargs): _check_apt() sanitized = {} - repo = _strip_uri(kwargs['repo']) + repo = salt.utils.pkg.deb.strip_uri(kwargs['repo']) if repo.startswith('ppa:') and __grains__['os'] in ('Ubuntu', 'Mint', 'neon'): dist = __grains__['lsb_distrib_codename'] owner_name, ppa_name = repo[4:].split('/', 1) diff --git a/salt/modules/beacons.py b/salt/modules/beacons.py index 7e095ed656..efd1f5c939 100644 --- a/salt/modules/beacons.py +++ b/salt/modules/beacons.py @@ -28,12 +28,22 @@ __func_alias__ = { } -def list_(return_yaml=True): +def list_(return_yaml=True, + include_pillar=True, + include_opts=True): ''' List the beacons currently configured on the minion - :param return_yaml: Whether to return YAML formatted output, default True - :return: List of currently configured Beacons. + :param return_yaml: Whether to return YAML formatted output, + default True + + :param include_pillar: Whether to include beacons that are + configured in pillar, default is True. + + :param include_opts: Whether to include beacons that are + configured in opts, default is True. + + :return: List of currently configured Beacons. 
CLI Example: @@ -46,7 +56,10 @@ def list_(return_yaml=True): try: eventer = salt.utils.event.get_event('minion', opts=__opts__) - res = __salt__['event.fire']({'func': 'list'}, 'manage_beacons') + res = __salt__['event.fire']({'func': 'list', + 'include_pillar': include_pillar, + 'include_opts': include_opts}, + 'manage_beacons') if res: event_ret = eventer.get_event(tag='/salt/minion/minion_beacons_list_complete', wait=30) log.debug('event_ret {0}'.format(event_ret)) @@ -133,6 +146,10 @@ def add(name, beacon_data, **kwargs): ret['comment'] = 'Beacon {0} is already configured.'.format(name) return ret + if name not in list_available(return_yaml=False): + ret['comment'] = 'Beacon "{0}" is not available.'.format(name) + return ret + if 'test' in kwargs and kwargs['test']: ret['result'] = True ret['comment'] = 'Beacon: {0} would be added.'.format(name) @@ -170,7 +187,10 @@ def add(name, beacon_data, **kwargs): if name in beacons and beacons[name] == beacon_data: ret['result'] = True ret['comment'] = 'Added beacon: {0}.'.format(name) - return ret + else: + ret['result'] = False + ret['comment'] = event_ret['comment'] + return ret except KeyError: # Effectively a no-op, since we can't really return without an event system ret['comment'] = 'Event module not available. Beacon add failed.' @@ -262,7 +282,10 @@ def modify(name, beacon_data, **kwargs): if name in beacons and beacons[name] == beacon_data: ret['result'] = True ret['comment'] = 'Modified beacon: {0}.'.format(name) - return ret + else: + ret['result'] = False + ret['comment'] = event_ret['comment'] + return ret except KeyError: # Effectively a no-op, since we can't really return without an event system ret['comment'] = 'Event module not available. Beacon add failed.' 
@@ -299,12 +322,14 @@ def delete(name, **kwargs): if res: event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_delete_complete', wait=30) if event_ret and event_ret['complete']: - log.debug('== event_ret {} =='.format(event_ret)) beacons = event_ret['beacons'] if name not in beacons: ret['result'] = True ret['comment'] = 'Deleted beacon: {0}.'.format(name) return ret + else: + ret['result'] = False + ret['comment'] = event_ret['comment'] except KeyError: # Effectively a no-op, since we can't really return without an event system ret['comment'] = 'Event module not available. Beacon add failed.' @@ -327,7 +352,7 @@ def save(): ret = {'comment': [], 'result': True} - beacons = list_(return_yaml=False) + beacons = list_(return_yaml=False, include_pillar=False) # move this file into an configurable opt sfn = '{0}/{1}/beacons.conf'.format(__opts__['config_dir'], @@ -380,7 +405,7 @@ def enable(**kwargs): else: ret['result'] = False ret['comment'] = 'Failed to enable beacons on minion.' - return ret + return ret except KeyError: # Effectively a no-op, since we can't really return without an event system ret['comment'] = 'Event module not available. Beacons enable job failed.' @@ -420,7 +445,7 @@ def disable(**kwargs): else: ret['result'] = False ret['comment'] = 'Failed to disable beacons on minion.' - return ret + return ret except KeyError: # Effectively a no-op, since we can't really return without an event system ret['comment'] = 'Event module not available. Beacons enable job failed.' @@ -483,7 +508,10 @@ def enable_beacon(name, **kwargs): else: ret['result'] = False ret['comment'] = 'Failed to enable beacon {0} on minion.'.format(name) - return ret + else: + ret['result'] = False + ret['comment'] = event_ret['comment'] + return ret except KeyError: # Effectively a no-op, since we can't really return without an event system ret['comment'] = 'Event module not available. Beacon enable job failed.' 
@@ -536,7 +564,10 @@ def disable_beacon(name, **kwargs): else: ret['result'] = False ret['comment'] = 'Failed to disable beacon on minion.' - return ret + else: + ret['result'] = False + ret['comment'] = event_ret['comment'] + return ret except KeyError: # Effectively a no-op, since we can't really return without an event system ret['comment'] = 'Event module not available. Beacon disable job failed.' diff --git a/salt/modules/boto_asg.py b/salt/modules/boto_asg.py index 294c00b8f9..2a6d8f4122 100644 --- a/salt/modules/boto_asg.py +++ b/salt/modules/boto_asg.py @@ -51,6 +51,7 @@ import datetime import logging import json import sys +import time import email.mime.multipart log = logging.getLogger(__name__) @@ -677,11 +678,23 @@ def get_scaling_policy_arn(as_group, scaling_policy_name, region=None, salt '*' boto_asg.get_scaling_policy_arn mygroup mypolicy ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) - policies = conn.get_all_policies(as_group=as_group) - for policy in policies: - if policy.name == scaling_policy_name: - return policy.policy_arn - log.error('Could not convert: {0}'.format(as_group)) + retries = 30 + while retries > 0: + retries -= 1 + try: + policies = conn.get_all_policies(as_group=as_group) + for policy in policies: + if policy.name == scaling_policy_name: + return policy.policy_arn + log.error('Could not convert: {0}'.format(as_group)) + return None + except boto.exception.BotoServerError as e: + if e.error_code != 'Throttling': + raise + log.debug('Throttled by API, will retry in 5 seconds') + time.sleep(5) + + log.error('Maximum number of retries exceeded') return None @@ -763,11 +776,18 @@ def get_instances(name, lifecycle_state="InService", health_status="Healthy", # get full instance info, so that we can return the attribute instances = ec2_conn.get_only_instances(instance_ids=instance_ids) if attributes: - return [[getattr(instance, attr).encode("ascii") for attr in attributes] for instance in instances] + return 
[[_convert_attribute(instance, attr) for attr in attributes] for instance in instances] else: # properly handle case when not all instances have the requested attribute - return [getattr(instance, attribute).encode("ascii") for instance in instances if getattr(instance, attribute)] - return [getattr(instance, attribute).encode("ascii") for instance in instances] + return [_convert_attribute(instance, attribute) for instance in instances if getattr(instance, attribute)] + + +def _convert_attribute(instance, attribute): + if attribute == "tags": + tags = dict(getattr(instance, attribute)) + return {key.encode("utf-8"): value.encode("utf-8") for key, value in six.iteritems(tags)} + + return getattr(instance, attribute).encode("ascii") def enter_standby(name, instance_ids, should_decrement_desired_capacity=False, diff --git a/salt/modules/boto_ec2.py b/salt/modules/boto_ec2.py index 376ec7fc0d..619c219e0d 100644 --- a/salt/modules/boto_ec2.py +++ b/salt/modules/boto_ec2.py @@ -154,7 +154,7 @@ def get_unassociated_eip_address(domain='standard', region=None, key=None, Return the first unassociated EIP domain - Indicates whether the address is a EC2 address or a VPC address + Indicates whether the address is an EC2 address or a VPC address (standard|vpc). CLI Example: @@ -771,9 +771,9 @@ def get_tags(instance_id=None, keyid=None, key=None, profile=None, def exists(instance_id=None, name=None, tags=None, region=None, key=None, keyid=None, profile=None, in_states=None, filters=None): ''' - Given a instance id, check to see if the given instance id exists. + Given an instance id, check to see if the given instance id exists. - Returns True if the given an instance with the given id, name, or tags + Returns True if the given instance with the given id, name, or tags exists; otherwise, False is returned. 
    CLI Example:
diff --git a/salt/modules/boto_elasticache.py b/salt/modules/boto_elasticache.py
index 21de556b4c..d977e8b739 100644
--- a/salt/modules/boto_elasticache.py
+++ b/salt/modules/boto_elasticache.py
@@ -75,7 +75,7 @@ def __virtual__():
     Only load if boto libraries exist.
     '''
     if not HAS_BOTO:
-        return (False, 'The modle boto_elasticache could not be loaded: boto libraries not found')
+        return (False, 'The module boto_elasticache could not be loaded: boto libraries not found')
     __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__)
     return True
diff --git a/salt/modules/boto_elb.py b/salt/modules/boto_elb.py
index 6f6bb4c6e9..4df635ab2d 100644
--- a/salt/modules/boto_elb.py
+++ b/salt/modules/boto_elb.py
@@ -661,22 +661,29 @@ def get_health_check(name, region=None, key=None, keyid=None, profile=None):
         salt myminion boto_elb.get_health_check myelb
     '''
     conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
+    retries = 30

-    try:
-        lb = conn.get_all_load_balancers(load_balancer_names=[name])
-        lb = lb[0]
-        ret = odict.OrderedDict()
-        hc = lb.health_check
-        ret['interval'] = hc.interval
-        ret['target'] = hc.target
-        ret['healthy_threshold'] = hc.healthy_threshold
-        ret['timeout'] = hc.timeout
-        ret['unhealthy_threshold'] = hc.unhealthy_threshold
-        return ret
-    except boto.exception.BotoServerError as error:
-        log.debug(error)
-        log.error('ELB {0} does not exist: {1}'.format(name, error))
-        return {}
+    while True:
+        try:
+            lb = conn.get_all_load_balancers(load_balancer_names=[name])
+            lb = lb[0]
+            ret = odict.OrderedDict()
+            hc = lb.health_check
+            ret['interval'] = hc.interval
+            ret['target'] = hc.target
+            ret['healthy_threshold'] = hc.healthy_threshold
+            ret['timeout'] = hc.timeout
+            ret['unhealthy_threshold'] = hc.unhealthy_threshold
+            return ret
+        except boto.exception.BotoServerError as e:
+            if retries and e.code == 'Throttling':
+                log.debug('Throttled by AWS API, will retry in 5 seconds.')
+                time.sleep(5)
+                retries -= 1
+                continue
+            log.error(e)
+            log.error('ELB {0} not found.'.format(name))
+            return {}


 def set_health_check(name, health_check, region=None, key=None,
@@ -691,16 +698,23 @@ def set_health_check(name, health_check, region=None, key=None,
         salt myminion boto_elb.set_health_check myelb '{"target": "HTTP:80/"}'
     '''
     conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
+    retries = 30
     hc = HealthCheck(**health_check)
-    try:
-        conn.configure_health_check(name, hc)
-        log.info('Configured health check on ELB {0}'.format(name))
-    except boto.exception.BotoServerError as error:
-        log.debug(error)
-        log.info('Failed to configure health check on ELB {0}: {1}'.format(name, error))
-        return False
-    return True
+    while True:
+        try:
+            conn.configure_health_check(name, hc)
+            log.info('Configured health check on ELB {0}'.format(name))
+            return True
+        except boto.exception.BotoServerError as error:
+            if retries and error.code == 'Throttling':
+                log.debug('Throttled by AWS API, will retry in 5 seconds.')
+                time.sleep(5)
+                retries -= 1
+                continue
+            log.error(error)
+            log.error('Failed to configure health check on ELB {0}'.format(name))
+            return False


 def register_instances(name, instances, region=None, key=None, keyid=None,
diff --git a/salt/modules/boto_vpc.py b/salt/modules/boto_vpc.py
index 8c907d9479..babc6829a9 100644
--- a/salt/modules/boto_vpc.py
+++ b/salt/modules/boto_vpc.py
@@ -763,7 +763,7 @@ def describe_vpcs(vpc_id=None, name=None, cidr=None, tags=None,
     '''
     Describe all VPCs, matching the filter criteria if provided.

-    Returns a a list of dictionaries with interesting properties.
+    Returns a list of dictionaries with interesting properties.

 .. 
versionadded:: 2015.8.0 diff --git a/salt/modules/cassandra_cql.py b/salt/modules/cassandra_cql.py index b377a49556..afa2030d98 100644 --- a/salt/modules/cassandra_cql.py +++ b/salt/modules/cassandra_cql.py @@ -219,7 +219,7 @@ def _connect(contact_points=None, port=None, cql_user=None, cql_pass=None, # TODO: Call cluster.shutdown() when the module is unloaded on # master/minion shutdown. Currently, Master.shutdown() and Minion.shutdown() # do nothing to allow loaded modules to gracefully handle resources stored - # in __context__ (i.e. connection pools). This means that the the connection + # in __context__ (i.e. connection pools). This means that the connection # pool is orphaned and Salt relies on Cassandra to reclaim connections. # Perhaps if Master/Minion daemons could be enhanced to call an "__unload__" # function, or something similar for each loaded module, connection pools @@ -430,7 +430,7 @@ def cql_query_with_prepare(query, statement_name, statement_arguments, async=Fal values[key] = value ret.append(values) - # If this was a synchronous call, then we either have a empty list + # If this was a synchronous call, then we either have an empty list # because there was no return, or we have a return # If this was an async call we only return the empty list return ret diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index fbc8ad1fbf..3ae58905c3 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -269,6 +269,7 @@ def _run(cmd, python_shell=False, env=None, clean_env=False, + prepend_path=None, rstrip=True, template=None, umask=None, @@ -492,6 +493,9 @@ def _run(cmd, run_env = os.environ.copy() run_env.update(env) + if prepend_path: + run_env['PATH'] = ':'.join((prepend_path, run_env['PATH'])) + if python_shell is None: python_shell = False @@ -782,6 +786,7 @@ def run(cmd, password=None, encoded_cmd=False, raise_err=False, + prepend_path=None, **kwargs): r''' Execute the passed command and return the output as a string @@ -864,6 
+869,11 @@ def run(cmd, variables and set only those provided in the 'env' argument to this function. + :param str prepend_path: $PATH segment to prepend (trailing ':' not necessary) + to $PATH + + .. versionadded:: Oxygen + :param str template: If this setting is applied then the named templating engine will be used to render the downloaded file. Currently jinja, mako, and wempy are supported @@ -949,6 +959,7 @@ def run(cmd, stderr=subprocess.STDOUT, env=env, clean_env=clean_env, + prepend_path=prepend_path, template=template, rstrip=rstrip, umask=umask, @@ -1004,6 +1015,7 @@ def shell(cmd, use_vt=False, bg=False, password=None, + prepend_path=None, **kwargs): ''' Execute the passed command and return the output as a string. @@ -1079,6 +1091,11 @@ def shell(cmd, variables and set only those provided in the 'env' argument to this function. + :param str prepend_path: $PATH segment to prepend (trailing ':' not necessary) + to $PATH + + .. versionadded:: Oxygen + :param str template: If this setting is applied then the named templating engine will be used to render the downloaded file. Currently jinja, mako, and wempy are supported @@ -1157,6 +1174,7 @@ def shell(cmd, shell=shell, env=env, clean_env=clean_env, + prepend_path=prepend_path, template=template, rstrip=rstrip, umask=umask, @@ -1193,6 +1211,7 @@ def run_stdout(cmd, saltenv='base', use_vt=False, password=None, + prepend_path=None, **kwargs): ''' Execute a command, and only return the standard out @@ -1265,6 +1284,11 @@ def run_stdout(cmd, variables and set only those provided in the 'env' argument to this function. + :param str prepend_path: $PATH segment to prepend (trailing ':' not necessary) + to $PATH + + .. versionadded:: Oxygen + :param str template: If this setting is applied then the named templating engine will be used to render the downloaded file. 
Currently jinja, mako, and wempy are supported @@ -1319,6 +1343,7 @@ def run_stdout(cmd, python_shell=python_shell, env=env, clean_env=clean_env, + prepend_path=prepend_path, template=template, rstrip=rstrip, umask=umask, @@ -1374,6 +1399,7 @@ def run_stderr(cmd, saltenv='base', use_vt=False, password=None, + prepend_path=None, **kwargs): ''' Execute a command and only return the standard error @@ -1447,6 +1473,11 @@ def run_stderr(cmd, variables and set only those provided in the 'env' argument to this function. + :param str prepend_path: $PATH segment to prepend (trailing ':' not necessary) + to $PATH + + .. versionadded:: Oxygen + :param str template: If this setting is applied then the named templating engine will be used to render the downloaded file. Currently jinja, mako, and wempy are supported @@ -1501,6 +1532,7 @@ def run_stderr(cmd, python_shell=python_shell, env=env, clean_env=clean_env, + prepend_path=prepend_path, template=template, rstrip=rstrip, umask=umask, @@ -1558,6 +1590,7 @@ def run_all(cmd, redirect_stderr=False, password=None, encoded_cmd=False, + prepend_path=None, **kwargs): ''' Execute the passed command and return a dict of return data @@ -1631,6 +1664,11 @@ def run_all(cmd, variables and set only those provided in the 'env' argument to this function. + :param str prepend_path: $PATH segment to prepend (trailing ':' not necessary) + to $PATH + + .. versionadded:: Oxygen + :param str template: If this setting is applied then the named templating engine will be used to render the downloaded file. 
Currently jinja, mako, and wempy are supported @@ -1709,6 +1747,7 @@ def run_all(cmd, python_shell=python_shell, env=env, clean_env=clean_env, + prepend_path=prepend_path, template=template, rstrip=rstrip, umask=umask, @@ -2573,7 +2612,7 @@ def run_chroot(root, - env: - PATH: {{ [current_path, '/my/special/bin']|join(':') }} - clean_env: + clean_env: Attempt to clean out all other shell environment variables and set only those provided in the 'env' argument to this function. @@ -2773,8 +2812,8 @@ def shell_info(shell, list_modules=False): ''' regex_shells = { 'bash': [r'version (\d\S*)', 'bash', '--version'], - 'bash-test-error': [r'versioZ ([-\w.]+)', 'bash', '--version'], # used to test a error result - 'bash-test-env': [r'(HOME=.*)', 'bash', '-c', 'declare'], # used to test a error result + 'bash-test-error': [r'versioZ ([-\w.]+)', 'bash', '--version'], # used to test an error result + 'bash-test-env': [r'(HOME=.*)', 'bash', '-c', 'declare'], # used to test an error result 'zsh': [r'^zsh (\d\S*)', 'zsh', '--version'], 'tcsh': [r'^tcsh (\d\S*)', 'tcsh', '--version'], 'cmd': [r'Version ([\d.]+)', 'cmd.exe', '/C', 'ver'], @@ -3467,6 +3506,7 @@ def run_bg(cmd, ignore_retcode=False, saltenv='base', password=None, + prepend_path=None, **kwargs): r''' .. versionadded: 2016.3.0 @@ -3545,6 +3585,11 @@ def run_bg(cmd, variables and set only those provided in the 'env' argument to this function. + :param str prepend_path: $PATH segment to prepend (trailing ':' not necessary) + to $PATH + + .. versionadded:: Oxygen + :param str template: If this setting is applied then the named templating engine will be used to render the downloaded file. 
Currently jinja, mako, and wempy are supported @@ -3613,6 +3658,7 @@ def run_bg(cmd, cwd=cwd, env=env, clean_env=clean_env, + prepend_path=prepend_path, template=template, umask=umask, log_callback=log_callback, diff --git a/salt/modules/consul.py b/salt/modules/consul.py index 68a4bd3288..a6592cf1c8 100644 --- a/salt/modules/consul.py +++ b/salt/modules/consul.py @@ -1953,7 +1953,7 @@ def status_peers(consul_url): :param consul_url: The Consul server URL. :return: Retrieves the Raft peers for the - datacenter in which the the agent is running. + datacenter in which the agent is running. CLI Example: diff --git a/salt/modules/debbuild.py b/salt/modules/debbuild.py index 37c7da1123..84069d2912 100644 --- a/salt/modules/debbuild.py +++ b/salt/modules/debbuild.py @@ -48,7 +48,7 @@ __virtualname__ = 'pkgbuild' def __virtual__(): ''' - Confirm this module is on a Debian based system, and has required utilities + Confirm this module is on a Debian-based system, and has required utilities ''' if __grains__.get('os_family', False) in ('Kali', 'Debian'): missing_util = False @@ -726,7 +726,7 @@ def make_repo(repodir, if times_looped > number_retries: raise SaltInvocationError( - 'Attemping to sign file {0} failed, timed out after {1} seconds' + 'Attempting to sign file {0} failed, timed out after {1} seconds' .format(abs_file, int(times_looped * interval)) ) time.sleep(interval) @@ -770,7 +770,7 @@ def make_repo(repodir, if times_looped > number_retries: raise SaltInvocationError( - 'Attemping to reprepro includedsc for file {0} failed, timed out after {1} loops'.format(abs_file, times_looped) + 'Attempting to reprepro includedsc for file {0} failed, timed out after {1} loops'.format(abs_file, times_looped) ) time.sleep(interval) diff --git a/salt/modules/debian_ip.py b/salt/modules/debian_ip.py index 6b5c760c92..2a2c6fbfc4 100644 --- a/salt/modules/debian_ip.py +++ b/salt/modules/debian_ip.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- ''' -The networking module for Debian 
based distros +The networking module for Debian-based distros References: @@ -46,7 +46,7 @@ __virtualname__ = 'ip' def __virtual__(): ''' - Confine this module to Debian based distros + Confine this module to Debian-based distros ''' if __grains__['os_family'] == 'Debian': return __virtualname__ @@ -1389,7 +1389,7 @@ def _parse_settings_eth(opts, iface_type, enabled, iface): for opt in ['up_cmds', 'pre_up_cmds', 'post_up_cmds', 'down_cmds', 'pre_down_cmds', 'post_down_cmds']: if opt in opts: - iface_data['inet'][opt] = opts[opt] + iface_data[def_addrfam][opt] = opts[opt] for addrfam in ['inet', 'inet6']: if 'addrfam' in iface_data[addrfam] and iface_data[addrfam]['addrfam'] == addrfam: @@ -1562,7 +1562,7 @@ def _read_temp_ifaces(iface, data): return '' ifcfg = template.render({'name': iface, 'data': data}) - # Return as a array so the difflib works + # Return as an array so the difflib works return [item + '\n' for item in ifcfg.split('\n')] @@ -1616,7 +1616,7 @@ def _write_file_ifaces(iface, data, **settings): else: fout.write(ifcfg) - # Return as a array so the difflib works + # Return as an array so the difflib works return saved_ifcfg.split('\n') @@ -1646,7 +1646,7 @@ def _write_file_ppp_ifaces(iface, data): with salt.utils.files.fopen(filename, 'w') as fout: fout.write(ifcfg) - # Return as a array so the difflib works + # Return as an array so the difflib works return filename diff --git a/salt/modules/dockercompose.py b/salt/modules/dockercompose.py index a73321e9cf..345f572359 100644 --- a/salt/modules/dockercompose.py +++ b/salt/modules/dockercompose.py @@ -686,7 +686,7 @@ def ps(path): def up(path, service_names=None): ''' - Create and start containers defined in the the docker-compose.yml file + Create and start containers defined in the docker-compose.yml file located in path, service_names is a python list, if omitted create and start all containers diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py index 6249988e27..dadb3c8c17 100644 
--- a/salt/modules/dockermod.py +++ b/salt/modules/dockermod.py @@ -915,8 +915,8 @@ def compare_container(first, second, ignore=None): ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2} else: if item == 'Links': - val1 = _scrub_links(val1, first) - val2 = _scrub_links(val2, second) + val1 = sorted(_scrub_links(val1, first)) + val2 = sorted(_scrub_links(val2, second)) if val1 != val2: ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} # Check for optionally-present items that were in the second container @@ -938,8 +938,8 @@ def compare_container(first, second, ignore=None): ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2} else: if item == 'Links': - val1 = _scrub_links(val1, first) - val2 = _scrub_links(val2, second) + val1 = sorted(_scrub_links(val1, first)) + val2 = sorted(_scrub_links(val2, second)) if val1 != val2: ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} return ret diff --git a/salt/modules/drac.py b/salt/modules/drac.py index 51d44cab75..24919c75c1 100644 --- a/salt/modules/drac.py +++ b/salt/modules/drac.py @@ -465,5 +465,5 @@ def server_pxe(): log.warning('failed to set boot order') return False - log.warning('failed to to configure PXE boot') + log.warning('failed to configure PXE boot') return False diff --git a/salt/modules/dracr.py b/salt/modules/dracr.py index d42ae4b83d..38f722a423 100644 --- a/salt/modules/dracr.py +++ b/salt/modules/dracr.py @@ -923,7 +923,7 @@ def server_pxe(host=None, log.warning('failed to set boot order') return False - log.warning('failed to to configure PXE boot') + log.warning('failed to configure PXE boot') return False diff --git a/salt/modules/environ.py b/salt/modules/environ.py index 26972f4ca9..e06a8aecaa 100644 --- a/salt/modules/environ.py +++ b/salt/modules/environ.py @@ -45,7 +45,7 @@ def setval(key, val, false_unsets=False, permanent=False): permanent On Windows minions this will set the environment variable in the - registry so that it is 
always added as a environment variable when + registry so that it is always added as an environment variable when applications open. If you want to set the variable to HKLM instead of HKCU just pass in "HKLM" for this parameter. On all other minion types this will be ignored. Note: This will only take affect on applications @@ -144,7 +144,7 @@ def setenv(environ, false_unsets=False, clear_all=False, update_minion=False, pe permanent On Windows minions this will set the environment variable in the - registry so that it is always added as a environment variable when + registry so that it is always added as an environment variable when applications open. If you want to set the variable to HKLM instead of HKCU just pass in "HKLM" for this parameter. On all other minion types this will be ignored. Note: This will only take affect on applications diff --git a/salt/modules/file.py b/salt/modules/file.py index eb8327a187..17d0e253d8 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -4700,6 +4700,7 @@ def check_file_meta( contents File contents ''' + lsattr_cmd = salt.utils.path.which('lsattr') changes = {} if not source_sum: source_sum = {} @@ -4764,13 +4765,14 @@ def check_file_meta( if mode is not None and mode != smode: changes['mode'] = mode - diff_attrs = _cmp_attrs(name, attrs) - if ( - attrs is not None and - diff_attrs[0] is not None or - diff_attrs[1] is not None - ): - changes['attrs'] = attrs + if lsattr_cmd: + diff_attrs = _cmp_attrs(name, attrs) + if ( + attrs is not None and + diff_attrs[0] is not None or + diff_attrs[1] is not None + ): + changes['attrs'] = attrs return changes diff --git a/salt/modules/freebsdports.py b/salt/modules/freebsdports.py index daa3b6fd9a..5779a8a8dc 100644 --- a/salt/modules/freebsdports.py +++ b/salt/modules/freebsdports.py @@ -6,7 +6,7 @@ Install software from the FreeBSD ``ports(7)`` system This module allows you to install ports using ``BATCH=yes`` to bypass configuration prompts. 
It is recommended to use the :mod:`ports state -` to install ports, but it it also possible to use +` to install ports, but it is also possible to use this module exclusively from the command line. .. code-block:: bash diff --git a/salt/modules/gcp_addon.py b/salt/modules/gcp_addon.py new file mode 100644 index 0000000000..adba567f65 --- /dev/null +++ b/salt/modules/gcp_addon.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +''' +A route is a rule that specifies how certain packets should be handled by the +virtual network. Routes are associated with virtual machine instances by tag, +and the set of routes for a particular VM is called its routing table. +For each packet leaving a virtual machine, the system searches that machine's +routing table for a single best matching route. + +This module will create a route to send traffic destined to the Internet +through your gateway instance. + +:codeauthor: :email:`Pratik Bandarkar ` +:maturity: new +:depends: google-api-python-client +:platform: Linux +''' +from __future__ import absolute_import +import logging +log = logging.getLogger(__name__) + +try: + import googleapiclient.discovery + import oauth2client.service_account + HAS_LIB = True +except ImportError: + HAS_LIB = False + +__virtualname__ = 'gcp' + + +def __virtual__(): + ''' + Check for googleapiclient api + ''' + if HAS_LIB is False: + log.info("Required google API's(googleapiclient, oauth2client) not found") + return (HAS_LIB, "Required google API's(googleapiclient, oauth2client) not found") + return __virtualname__ + + +def _get_network(project_id, network_name, service): + ''' + Fetch network selfLink from network name. 
+ ''' + return service.networks().get(project=project_id, + network=network_name).execute() + + +def _get_instance(project_id, instance_zone, name, service): + ''' + Get instance details + ''' + return service.instances().get(project=project_id, + zone=instance_zone, + instance=name).execute() + + +def route_create(credential_file=None, + project_id=None, + name=None, + dest_range=None, + next_hop_instance=None, + instance_zone=None, + tags=None, + network=None, + priority=None + ): + ''' + Create a route to send traffic destined to the Internet through your + gateway instance + + credential_file : string + File location of application default credential. For more information, + refer: https://developers.google.com/identity/protocols/application-default-credentials + project_id : string + Project ID where instance and network resides. + name : string + name of the route to create + next_hop_instance : string + the name of an instance that should handle traffic matching this route. + instance_zone : string + zone where instance("next_hop_instance") resides + network : string + Specifies the network to which the route will be applied. + dest_range : string + The destination range of outgoing packets that the route will apply to. + tags : list + (optional) Identifies the set of instances that this route will apply to. + priority : int + (optional) Specifies the priority of this route relative to other routes. 
+ default=1000 + + CLI Example: + + salt 'salt-master.novalocal' gcp.route_create + credential_file=/root/secret_key.json + project_id=cp100-170315 + name=derby-db-route1 + next_hop_instance=instance-1 + instance_zone=us-central1-a + network=default + dest_range=0.0.0.0/0 + tags=['no-ip'] + priority=700 + + In above example, the instances which are having tag "no-ip" will route the + packet to instance "instance-1"(if packet is intended to other network) + ''' + + credentials = oauth2client.service_account.ServiceAccountCredentials.\ + from_json_keyfile_name(credential_file) + service = googleapiclient.discovery.build('compute', 'v1', + credentials=credentials) + routes = service.routes() + + routes_config = { + 'name': str(name), + 'network': _get_network(project_id, str(network), + service=service)['selfLink'], + 'destRange': str(dest_range), + 'nextHopInstance': _get_instance(project_id, instance_zone, + next_hop_instance, + service=service)['selfLink'], + 'tags': tags, + 'priority': priority + } + route_create_request = routes.insert(project=project_id, + body=routes_config) + return route_create_request.execute() diff --git a/salt/modules/genesis.py b/salt/modules/genesis.py index ab0eed5138..9af3e2cbc5 100644 --- a/salt/modules/genesis.py +++ b/salt/modules/genesis.py @@ -306,7 +306,7 @@ def _bootstrap_yum( root The root of the image to install to. Will be created as a directory if - if does not exist. (e.x.: /root/arch) + it does not exist. (e.x.: /root/arch) pkg_confs The location of the conf files to copy into the image, to point yum @@ -374,7 +374,7 @@ def _bootstrap_deb( root The root of the image to install to. Will be created as a directory if - if does not exist. (e.x.: /root/wheezy) + it does not exist. (e.x.: /root/wheezy) arch Architecture of the target image. (e.x.: amd64) @@ -472,7 +472,7 @@ def _bootstrap_pacman( root The root of the image to install to. Will be created as a directory if - if does not exist. 
(e.x.: /root/arch) + it does not exist. (e.x.: /root/arch) pkg_confs The location of the conf files to copy into the image, to point pacman @@ -480,7 +480,7 @@ def _bootstrap_pacman( img_format The image format to be used. The ``dir`` type needs no special - treatment, but others need special treatement. + treatment, but others need special treatment. pkgs A list of packages to be installed on this image. For Arch Linux, this diff --git a/salt/modules/keystone.py b/salt/modules/keystone.py index ecd061deaf..a82a5b46b7 100644 --- a/salt/modules/keystone.py +++ b/salt/modules/keystone.py @@ -65,6 +65,9 @@ try: import keystoneclient.exceptions HAS_KEYSTONE = True from keystoneclient.v3 import client as client3 + from keystoneclient import discover + from keystoneauth1 import session + from keystoneauth1.identity import generic # pylint: enable=import-error except ImportError: pass @@ -111,7 +114,8 @@ def _get_kwargs(profile=None, **connection_args): insecure = get('insecure', False) token = get('token') endpoint = get('endpoint', 'http://127.0.0.1:35357/v2.0') - + user_domain_name = get('user_domain_name', 'Default') + project_domain_name = get('project_domain_name', 'Default') if token: kwargs = {'token': token, 'endpoint': endpoint} @@ -120,7 +124,9 @@ def _get_kwargs(profile=None, **connection_args): 'password': password, 'tenant_name': tenant, 'tenant_id': tenant_id, - 'auth_url': auth_url} + 'auth_url': auth_url, + 'user_domain_name': user_domain_name, + 'project_domain_name': project_domain_name} # 'insecure' keyword not supported by all v2.0 keystone clients # this ensures it's only passed in when defined if insecure: @@ -159,14 +165,23 @@ def auth(profile=None, **connection_args): ''' kwargs = _get_kwargs(profile=profile, **connection_args) - if float(api_version(profile=profile, **connection_args).strip('v')) >= 3: + disc = discover.Discover(auth_url=kwargs['auth_url']) + v2_auth_url = disc.url_for('v2.0') + v3_auth_url = disc.url_for('v3.0') + if 
v3_auth_url: global _OS_IDENTITY_API_VERSION global _TENANTS _OS_IDENTITY_API_VERSION = 3 _TENANTS = 'projects' - return client3.Client(**kwargs) + kwargs['auth_url'] = v3_auth_url else: - return client.Client(**kwargs) + kwargs['auth_url'] = v2_auth_url + kwargs.pop('user_domain_name') + kwargs.pop('project_domain_name') + auth = generic.Password(**kwargs) + sess = session.Session(auth=auth) + ks_cl = disc.create_client(session=sess) + return ks_cl def ec2_credentials_create(user_id=None, name=None, diff --git a/salt/modules/napalm_network.py b/salt/modules/napalm_network.py index 4a31724663..0428177951 100644 --- a/salt/modules/napalm_network.py +++ b/salt/modules/napalm_network.py @@ -155,6 +155,7 @@ def _config_logic(napalm_device, loaded_result['diff'] = None loaded_result['result'] = False loaded_result['comment'] = _compare.get('comment') + __context__['retcode'] = 1 return loaded_result _loaded_res = loaded_result.get('result', False) @@ -174,12 +175,15 @@ def _config_logic(napalm_device, # make sure it notifies # that something went wrong _explicit_close(napalm_device) + __context__['retcode'] = 1 return loaded_result loaded_result['comment'] += 'Configuration discarded.' # loaded_result['result'] = False not necessary # as the result can be true when test=True _explicit_close(napalm_device) + if not loaded_result['result']: + __context__['retcode'] = 1 return loaded_result if not test and commit_config: @@ -210,10 +214,13 @@ def _config_logic(napalm_device, loaded_result['result'] = False # notify if anything goes wrong _explicit_close(napalm_device) + __context__['retcode'] = 1 return loaded_result loaded_result['already_configured'] = True loaded_result['comment'] = 'Already configured.' 
_explicit_close(napalm_device) + if not loaded_result['result']: + __context__['retcode'] = 1 return loaded_result diff --git a/salt/modules/opkg.py b/salt/modules/opkg.py index 04fa9abfa7..069ec49606 100644 --- a/salt/modules/opkg.py +++ b/salt/modules/opkg.py @@ -132,7 +132,7 @@ def version(*names, **kwargs): return __salt__['pkg_resource.version'](*names, **kwargs) -def refresh_db(failhard=False): +def refresh_db(failhard=False, **kwargs): # pylint: disable=unused-argument ''' Updates the opkg database to latest packages based upon repositories @@ -514,7 +514,7 @@ def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument return remove(name=name, pkgs=pkgs) -def upgrade(refresh=True): +def upgrade(refresh=True, **kwargs): # pylint: disable=unused-argument ''' Upgrades all packages via ``opkg upgrade`` @@ -803,7 +803,7 @@ def list_pkgs(versions_as_list=False, **kwargs): return ret -def list_upgrades(refresh=True): +def list_upgrades(refresh=True, **kwargs): # pylint: disable=unused-argument ''' List all available package upgrades. @@ -976,7 +976,7 @@ def info_installed(*names, **kwargs): return ret -def upgrade_available(name): +def upgrade_available(name, **kwargs): # pylint: disable=unused-argument ''' Check whether or not an upgrade is available for a given package @@ -989,7 +989,7 @@ def upgrade_available(name): return latest_version(name) != '' -def version_cmp(pkg1, pkg2, ignore_epoch=False): +def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs): # pylint: disable=unused-argument ''' Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if pkg1 == pkg2, and 1 if pkg1 > pkg2. 
Return None if there was a problem @@ -1038,7 +1038,7 @@ def version_cmp(pkg1, pkg2, ignore_epoch=False): return None -def list_repos(): +def list_repos(**kwargs): # pylint: disable=unused-argument ''' Lists all repos on /etc/opkg/*.conf @@ -1075,7 +1075,7 @@ def list_repos(): return repos -def get_repo(alias): +def get_repo(alias, **kwargs): # pylint: disable=unused-argument ''' Display a repo from the /etc/opkg/*.conf @@ -1146,7 +1146,7 @@ def _mod_repo_in_file(alias, repostr, filepath): fhandle.writelines(output) -def del_repo(alias): +def del_repo(alias, **kwargs): # pylint: disable=unused-argument ''' Delete a repo from /etc/opkg/*.conf @@ -1260,7 +1260,7 @@ def mod_repo(alias, **kwargs): refresh_db() -def file_list(*packages): +def file_list(*packages, **kwargs): # pylint: disable=unused-argument ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's package database (not @@ -1281,7 +1281,7 @@ def file_list(*packages): return {'errors': output['errors'], 'files': files} -def file_dict(*packages): +def file_dict(*packages, **kwargs): # pylint: disable=unused-argument ''' List the files that belong to a package, grouped by package. Not specifying any packages will return a list of _every_ file on the system's @@ -1323,7 +1323,7 @@ def file_dict(*packages): return {'errors': errors, 'packages': ret} -def owner(*paths): +def owner(*paths, **kwargs): # pylint: disable=unused-argument ''' Return the name of the package that owns the file. Multiple file paths can be passed. Like :mod:`pkg.version {0}'.format(filter)} + 'cmd': '{0}'.format(mask)} else: query = {'type': 'op', 'cmd': ''} @@ -1097,6 +1218,7 @@ def get_system_state(filter=None): def get_uncommitted_changes(): ''' Retrieve a list of all uncommitted changes on the device. + Requires PANOS version 8.0.0 or greater. 
CLI Example: @@ -1105,6 +1227,10 @@ def get_uncommitted_changes(): salt '*' panos.get_uncommitted_changes ''' + _required_version = '8.0.0' + if not __proxy__['panos.is_required_version'](_required_version): + return False, 'The panos device requires version {0} or greater for this command.'.format(_required_version) + query = {'type': 'op', 'cmd': ''} @@ -1145,6 +1271,72 @@ def get_vlans(): return __proxy__['panos.call'](query) +def get_xpath(xpath=''): + ''' + Retrieve a specified xpath from the candidate configuration. + + xpath(str): The specified xpath in the candidate configuration. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_xpath /config/shared/service + + ''' + query = {'type': 'config', + 'action': 'get', + 'xpath': xpath} + + return __proxy__['panos.call'](query) + + +def get_zone(zone='', vsys='1'): + ''' + Get the candidate configuration for the specified zone. + + zone(str): The name of the zone. + + vsys(str): The string representation of the VSYS ID. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_zone trust + salt '*' panos.get_zone trust 2 + + ''' + query = {'type': 'config', + 'action': 'get', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/' + 'zone/entry[@name=\'{1}\']'.format(vsys, zone)} + + return __proxy__['panos.call'](query) + + +def get_zones(vsys='1'): + ''' + Get all the zones in the candidate configuration. + + vsys(str): The string representation of the VSYS ID. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_zones + salt '*' panos.get_zones 2 + + ''' + query = {'type': 'config', + 'action': 'get', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/' + 'zone'.format(vsys)} + + return __proxy__['panos.call'](query) + + def install_antivirus(version=None, latest=False, synch=False, skip_commit=False,): ''' Install anti-virus packages. 
diff --git a/salt/modules/parted.py b/salt/modules/parted.py index 72a6d3ce65..d2351e6683 100644 --- a/salt/modules/parted.py +++ b/salt/modules/parted.py @@ -93,7 +93,7 @@ def _validate_partition_boundary(boundary): ''' try: for unit in VALID_UNITS: - if boundary.endswith(unit): + if str(boundary).endswith(unit): return int(boundary) except Exception: diff --git a/salt/modules/redismod.py b/salt/modules/redismod.py index a95e1b9f3f..40ebbdc3a1 100644 --- a/salt/modules/redismod.py +++ b/salt/modules/redismod.py @@ -9,7 +9,7 @@ Module to provide redis functionality to Salt .. code-block:: yaml - redis.host: 'localhost' + redis.host: 'salt' redis.port: 6379 redis.db: 0 redis.password: None diff --git a/salt/modules/sensehat.py b/salt/modules/sensehat.py index 2849f5374f..1f09acb2e9 100644 --- a/salt/modules/sensehat.py +++ b/salt/modules/sensehat.py @@ -2,6 +2,8 @@ ''' Module for controlling the LED matrix or reading environment data on the SenseHat of a Raspberry Pi. +.. versionadded:: 2017.7.0 + :maintainer: Benedikt Werner <1benediktwerner@gmail.com>, Joachim Werner :maturity: new :depends: sense_hat Python module diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py index a17fac5217..80b4917ba5 100644 --- a/salt/modules/ssh.py +++ b/salt/modules/ssh.py @@ -24,6 +24,7 @@ import salt.utils.decorators.path import salt.utils.files import salt.utils.path import salt.utils.platform +import salt.utils.versions from salt.exceptions import ( SaltInvocationError, CommandExecutionError, @@ -794,6 +795,22 @@ def set_auth_key( return 'new' +def _get_matched_host_line_numbers(lines, enc): + ''' + Helper function which parses ssh-keygen -F function output and yield line + number of known_hosts entries with encryption key type matching enc, + one by one. 
+ ''' + enc = enc if enc else "rsa" + for i, line in enumerate(lines): + if i % 2 == 0: + line_no = int(line.strip().split()[-1]) + line_enc = lines[i + 1].strip().split()[-2] + if line_enc != enc: + continue + yield line_no + + def _parse_openssh_output(lines, fingerprint_hash_type=None): ''' Helper function which parses ssh-keygen -F and ssh-keyscan function output @@ -830,12 +847,42 @@ def get_known_host(user, Return information about known host from the configfile, if any. If there is no such key, return None. + .. deprecated:: Oxygen + CLI Example: .. code-block:: bash salt '*' ssh.get_known_host ''' + salt.utils.versions.warn_until( + 'Neon', + '\'get_known_host\' has been deprecated in favour of ' + '\'get_known_host_entries\'. \'get_known_host\' will be ' + 'removed in Salt Neon.' + ) + known_hosts = get_known_host_entries(user, hostname, config, port, fingerprint_hash_type) + return known_hosts[0] if known_hosts else None + + +@salt.utils.decorators.path.which('ssh-keygen') +def get_known_host_entries(user, + hostname, + config=None, + port=None, + fingerprint_hash_type=None): + ''' + .. versionadded:: Oxygen + + Return information about known host entries from the configfile, if any. + If there are no entries for a matching hostname, return None. + + CLI Example: + + .. code-block:: bash + + salt '*' ssh.get_known_host_entries + ''' full = _get_known_hosts_file(config=config, user=user) if isinstance(full, dict): @@ -846,11 +893,11 @@ def get_known_host(user, lines = __salt__['cmd.run'](cmd, ignore_retcode=True, python_shell=False).splitlines() - known_hosts = list( + known_host_entries = list( _parse_openssh_output(lines, fingerprint_hash_type=fingerprint_hash_type) ) - return known_hosts[0] if known_hosts else None + return known_host_entries if known_host_entries else None @salt.utils.decorators.path.which('ssh-keyscan') @@ -863,6 +910,8 @@ def recv_known_host(hostname, ''' Retrieve information about host public key from remote server + .. 
deprecated:: Oxygen + hostname The name of the remote host (e.g. "github.com") @@ -871,9 +920,8 @@ def recv_known_host(hostname, or ssh-dss port - optional parameter, denoting the port of the remote host, which will be - used in case, if the public key will be requested from it. By default - the port 22 is used. + Optional parameter, denoting the port of the remote host on which an + SSH daemon is running. By default the port 22 is used. hash_known_hosts : True Hash all hostnames and addresses in the known hosts file. @@ -887,8 +935,8 @@ def recv_known_host(hostname, .. versionadded:: 2016.3.0 fingerprint_hash_type - The public key fingerprint hash type that the public key fingerprint - was originally hashed with. This defaults to ``sha256`` if not specified. + The fingerprint hash type that the public key fingerprints were + originally hashed with. This defaults to ``sha256`` if not specified. .. versionadded:: 2016.11.4 .. versionchanged:: 2017.7.0: default changed from ``md5`` to ``sha256`` @@ -899,6 +947,61 @@ def recv_known_host(hostname, salt '*' ssh.recv_known_host enc= port= ''' + salt.utils.versions.warn_until( + 'Neon', + '\'recv_known_host\' has been deprecated in favour of ' + '\'recv_known_host_entries\'. \'recv_known_host\' will be ' + 'removed in Salt Neon.' + ) + known_hosts = recv_known_host_entries(hostname, enc, port, hash_known_hosts, timeout, fingerprint_hash_type) + return known_hosts[0] if known_hosts else None + + +@salt.utils.decorators.path.which('ssh-keyscan') +def recv_known_host_entries(hostname, + enc=None, + port=None, + hash_known_hosts=True, + timeout=5, + fingerprint_hash_type=None): + ''' + .. versionadded:: Oxygen + + Retrieve information about host public keys from remote server + + hostname + The name of the remote host (e.g. 
"github.com") + + enc + Defines what type of key is being used, can be ed25519, ecdsa ssh-rsa + or ssh-dss + + port + Optional parameter, denoting the port of the remote host on which an + SSH daemon is running. By default the port 22 is used. + + hash_known_hosts : True + Hash all hostnames and addresses in the known hosts file. + + timeout : int + Set the timeout for connection attempts. If ``timeout`` seconds have + elapsed since a connection was initiated to a host or since the last + time anything was read from that host, then the connection is closed + and the host in question considered unavailable. Default is 5 seconds. + + fingerprint_hash_type + The fingerprint hash type that the public key fingerprints were + originally hashed with. This defaults to ``sha256`` if not specified. + + .. versionadded:: 2016.11.4 + .. versionchanged:: 2017.7.0: default changed from ``md5`` to ``sha256`` + + CLI Example: + + .. code-block:: bash + + salt '*' ssh.recv_known_host_entries enc= port= + ''' # The following list of OSes have an old version of openssh-clients # and thus require the '-t' option for ssh-keyscan need_dash_t = ('CentOS-5',) @@ -919,9 +1022,9 @@ def recv_known_host(hostname, while not lines and attempts > 0: attempts = attempts - 1 lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines() - known_hosts = list(_parse_openssh_output(lines, + known_host_entries = list(_parse_openssh_output(lines, fingerprint_hash_type=fingerprint_hash_type)) - return known_hosts[0] if known_hosts else None + return known_host_entries if known_host_entries else None def check_known_host(user=None, hostname=None, key=None, fingerprint=None, @@ -952,18 +1055,20 @@ def check_known_host(user=None, hostname=None, key=None, fingerprint=None, else: config = config or '.ssh/known_hosts' - known_host = get_known_host(user, + known_host_entries = get_known_host_entries(user, hostname, config=config, port=port, fingerprint_hash_type=fingerprint_hash_type) + known_keys = 
[h['key'] for h in known_host_entries] if known_host_entries else [] + known_fingerprints = [h['fingerprint'] for h in known_host_entries] if known_host_entries else [] - if not known_host or 'fingerprint' not in known_host: + if not known_host_entries: return 'add' if key: - return 'exists' if key == known_host['key'] else 'update' + return 'exists' if key in known_keys else 'update' elif fingerprint: - return ('exists' if fingerprint == known_host['fingerprint'] + return ('exists' if fingerprint in known_fingerprints else 'update') else: return 'exists' @@ -1083,70 +1188,99 @@ def set_known_host(user=None, update_required = False check_required = False - stored_host = get_known_host(user, + stored_host_entries = get_known_host_entries(user, hostname, config=config, port=port, fingerprint_hash_type=fingerprint_hash_type) + stored_keys = [h['key'] for h in stored_host_entries] if stored_host_entries else [] + stored_fingerprints = [h['fingerprint'] for h in stored_host_entries] if stored_host_entries else [] - if not stored_host: + if not stored_host_entries: update_required = True - elif fingerprint and fingerprint != stored_host['fingerprint']: + elif fingerprint and fingerprint not in stored_fingerprints: update_required = True - elif key and key != stored_host['key']: + elif key and key not in stored_keys: update_required = True - elif key != stored_host['key']: + elif key is None and fingerprint is None: check_required = True if not update_required and not check_required: - return {'status': 'exists', 'key': stored_host['key']} + return {'status': 'exists', 'keys': stored_keys} if not key: - remote_host = recv_known_host(hostname, + remote_host_entries = recv_known_host_entries(hostname, enc=enc, port=port, hash_known_hosts=hash_known_hosts, timeout=timeout, fingerprint_hash_type=fingerprint_hash_type) - if not remote_host: + known_keys = [h['key'] for h in remote_host_entries] if remote_host_entries else [] + known_fingerprints = [h['fingerprint'] for h in 
remote_host_entries] if remote_host_entries else [] + if not remote_host_entries: return {'status': 'error', - 'error': 'Unable to receive remote host key'} + 'error': 'Unable to receive remote host keys'} - if fingerprint and fingerprint != remote_host['fingerprint']: + if fingerprint and fingerprint not in known_fingerprints: return {'status': 'error', - 'error': ('Remote host public key found but its fingerprint ' - 'does not match one you have provided')} + 'error': ('Remote host public keys found but none of their ' + 'fingerprints match the one you have provided')} if check_required: - if remote_host['key'] == stored_host['key']: - return {'status': 'exists', 'key': stored_host['key']} + for key in known_keys: + if key in stored_keys: + return {'status': 'exists', 'keys': stored_keys} full = _get_known_hosts_file(config=config, user=user) if isinstance(full, dict): return full - # Get information about the known_hosts file before rm_known_host() - # because it will create a new file with mode 0600 - orig_known_hosts_st = None - try: - orig_known_hosts_st = os.stat(full) - except OSError as exc: - if exc.args[1] == 'No such file or directory': - log.debug('{0} doesnt exist. 
Nothing to preserve.'.format(full)) + if os.path.isfile(full): + origmode = os.stat(full).st_mode - # remove everything we had in the config so far - rm_known_host(user, hostname, config=config) + # remove existing known_host entry with matching hostname and encryption key type + # use ssh-keygen -F to find the specific line(s) for this host + enc combo + ssh_hostname = _hostname_and_port_to_ssh_hostname(hostname, port) + cmd = ['ssh-keygen', '-F', ssh_hostname, '-f', full] + lines = __salt__['cmd.run'](cmd, + ignore_retcode=True, + python_shell=False).splitlines() + remove_lines = list( + _get_matched_host_line_numbers(lines, enc) + ) + + if remove_lines: + try: + with salt.utils.files.fopen(full, 'r+') as ofile: + known_hosts_lines = list(ofile) + # Delete from last line to first to avoid invalidating earlier indexes + for line_no in sorted(remove_lines, reverse=True): + del known_hosts_lines[line_no - 1] + # Write out changed known_hosts file + ofile.seek(0) + ofile.truncate() + for line in known_hosts_lines: + ofile.write(line) + except (IOError, OSError) as exception: + raise CommandExecutionError( + "Couldn't remove old entry(ies) from known hosts file: '{0}'".format(exception) + ) + else: + origmode = None # set up new value if key: - remote_host = {'hostname': hostname, 'enc': enc, 'key': key} + remote_host_entries = [{'hostname': hostname, 'enc': enc, 'key': key}] - if hash_known_hosts or port in [DEFAULT_SSH_PORT, None] or ':' in remote_host['hostname']: - line = '{hostname} {enc} {key}\n'.format(**remote_host) - else: - remote_host['port'] = port - line = '[{hostname}]:{port} {enc} {key}\n'.format(**remote_host) + lines = [] + for entry in remote_host_entries: + if hash_known_hosts or port in [DEFAULT_SSH_PORT, None] or ':' in entry['hostname']: + line = '{hostname} {enc} {key}\n'.format(**entry) + else: + entry['port'] = port + line = '[{hostname}]:{port} {enc} {key}\n'.format(**entry) + lines.append(line) # ensure ~/.ssh exists ssh_dir = 
os.path.dirname(full) @@ -1172,27 +1306,25 @@ def set_known_host(user=None, # write line to known_hosts file try: with salt.utils.files.fopen(full, 'a') as ofile: - ofile.write(line) + for line in lines: + ofile.write(line) except (IOError, OSError) as exception: raise CommandExecutionError( "Couldn't append to known hosts file: '{0}'".format(exception) ) - if os.geteuid() == 0: - if user: - os.chown(full, uinfo['uid'], uinfo['gid']) - elif orig_known_hosts_st: - os.chown(full, orig_known_hosts_st.st_uid, orig_known_hosts_st.st_gid) - - if orig_known_hosts_st: - os.chmod(full, orig_known_hosts_st.st_mode) + if os.geteuid() == 0 and user: + os.chown(full, uinfo['uid'], uinfo['gid']) + if origmode: + os.chmod(full, origmode) else: os.chmod(full, 0o600) if key and hash_known_hosts: cmd_result = __salt__['ssh.hash_known_hosts'](user=user, config=full) - return {'status': 'updated', 'old': stored_host, 'new': remote_host} + rval = {'status': 'updated', 'old': stored_host_entries, 'new': remote_host_entries} + return rval def user_keys(user=None, pubfile=None, prvfile=None): diff --git a/salt/modules/state.py b/salt/modules/state.py index c054585d71..af7ce0fe38 100644 --- a/salt/modules/state.py +++ b/salt/modules/state.py @@ -894,8 +894,8 @@ def highstate(test=None, queue=False, **kwargs): finally: st_.pop_active() - if __salt__['config.option']('state_data', '') == 'terse' or \ - kwargs.get('terse'): + if isinstance(ret, dict) and (__salt__['config.option']('state_data', '') == 'terse' or + kwargs.get('terse')): ret = _filter_running(ret) serial = salt.payload.Serial(__opts__) @@ -923,8 +923,9 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs): salt '*' state.apply test pillar='{"foo": "bar"}' .. note:: - Values passed this way will override Pillar values set via - ``pillar_roots`` or an external Pillar source. + Values passed this way will override existing Pillar values set via + ``pillar_roots`` or an external Pillar source. 
Pillar values that + are not included in the kwarg will not be overwritten. .. versionchanged:: 2016.3.0 GPG-encrypted CLI Pillar data is now supported via the GPG @@ -1379,6 +1380,20 @@ def sls_id(id_, mods, test=None, queue=False, **kwargs): :conf_minion:`pillarenv` minion config option nor this CLI argument is used, all Pillar environments will be merged together. + pillar + Custom Pillar values, passed as a dictionary of key-value pairs + + .. code-block:: bash + + salt '*' state.sls_id my_state my_module pillar='{"foo": "bar"}' + + .. note:: + Values passed this way will override existing Pillar values set via + ``pillar_roots`` or an external Pillar source. Pillar values that + are not included in the kwarg will not be overwritten. + + .. versionadded:: Oxygen + CLI Example: .. code-block:: bash @@ -1399,12 +1414,26 @@ def sls_id(id_, mods, test=None, queue=False, **kwargs): if opts['environment'] is None: opts['environment'] = 'base' + pillar_override = kwargs.get('pillar') + pillar_enc = kwargs.get('pillar_enc') + if pillar_enc is None \ + and pillar_override is not None \ + and not isinstance(pillar_override, dict): + raise SaltInvocationError( + 'Pillar data must be formatted as a dictionary, unless pillar_enc ' + 'is specified.' + ) + try: st_ = salt.state.HighState(opts, + pillar_override, + pillar_enc=pillar_enc, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.HighState(opts, + pillar_override, + pillar_enc=pillar_enc, initial_pillar=_get_initial_pillar(opts)) if not _check_pillar(kwargs, st_.opts['pillar']): diff --git a/salt/modules/system.py b/salt/modules/system.py index 87673a372e..650bbeba51 100644 --- a/salt/modules/system.py +++ b/salt/modules/system.py @@ -596,7 +596,7 @@ def set_computer_name(hostname): .. 
code-block:: bash - salt '*' system.set_conputer_name master.saltstack.com + salt '*' system.set_computer_name master.saltstack.com ''' return __salt__['network.mod_hostname'](hostname) diff --git a/salt/modules/vagrant.py b/salt/modules/vagrant.py index 91f6afcf26..a0b0da2b52 100644 --- a/salt/modules/vagrant.py +++ b/salt/modules/vagrant.py @@ -55,7 +55,6 @@ def __virtual__(): ''' run Vagrant commands if possible ''' - # noinspection PyUnresolvedReferences if salt.utils.path.which('vagrant') is None: return False, 'The vagrant module could not be loaded: vagrant command not found' return __virtualname__ @@ -298,6 +297,11 @@ def vm_state(name='', cwd=None): 'provider': _, # the Vagrant VM provider 'name': _} # salt_id name + Known bug: if there are multiple machines in your Vagrantfile, and you request + the status of the ``primary`` machine, which you defined by leaving the ``machine`` + parameter blank, then you may receive the status of all of them. + Please specify the actual machine name for each VM if there are more than one. + ''' if name: @@ -321,7 +325,7 @@ def vm_state(name='', cwd=None): datum = {'machine': tokens[0], 'state': ' '.join(tokens[1:-1]), 'provider': tokens[-1].lstrip('(').rstrip(')'), - 'name': name or get_machine_id(tokens[0], cwd) + 'name': get_machine_id(tokens[0], cwd) } info.append(datum) except IndexError: @@ -365,7 +369,7 @@ def init(name, # Salt_id for created VM # passed-in keyword arguments overwrite vm dictionary values vm_['cwd'] = cwd or vm_.get('cwd') if not vm_['cwd']: - raise SaltInvocationError('Path to Vagrantfile must be defined by \'cwd\' argument') + raise SaltInvocationError('Path to Vagrantfile must be defined by "cwd" argument') vm_['machine'] = machine or vm_.get('machine', machine) vm_['runas'] = runas or vm_.get('runas', runas) vm_['vagrant_provider'] = vagrant_provider or vm_.get('vagrant_provider', '') @@ -423,7 +427,7 @@ def shutdown(name): ''' Send a soft shutdown (vagrant halt) signal to the named vm. 
- This does the same thing as vagrant.stop. Other VM control + This does the same thing as vagrant.stop. Other-VM control modules use "stop" and "shutdown" to differentiate between hard and soft shutdowns. @@ -476,7 +480,7 @@ def pause(name): return ret == 0 -def reboot(name): +def reboot(name, provision=False): ''' Reboot a VM. (vagrant reload) @@ -484,12 +488,16 @@ def reboot(name): .. code-block:: bash - salt vagrant.reboot + salt vagrant.reboot provision=True + + :param name: The salt_id name you will use to control this VM + :param provision: (False) also re-run the Vagrant provisioning scripts. ''' vm_ = get_vm_info(name) machine = vm_['machine'] + prov = '--provision' if provision else '' - cmd = 'vagrant reload {}'.format(machine) + cmd = 'vagrant reload {} {}'.format(machine, prov) ret = __salt__['cmd.retcode'](cmd, runas=vm_.get('runas'), cwd=vm_.get('cwd')) diff --git a/salt/modules/vault.py b/salt/modules/vault.py index ab5e5ba684..6717e77f22 100644 --- a/salt/modules/vault.py +++ b/salt/modules/vault.py @@ -21,6 +21,7 @@ Functions to interact with Hashicorp Vault. vault: url: https://vault.service.domain:8200 + verify: /etc/ssl/certs/ca-certificates.crt auth: method: token token: 11111111-2222-3333-4444-555555555555 @@ -32,6 +33,12 @@ Functions to interact with Hashicorp Vault. url Url to your Vault installation. Required. + verify + For details please see + http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification + + .. versionadded:: Oxygen + auth Currently only token auth is supported. The token must be able to create tokens with the policies that should be assigned to minions. Required. 
diff --git a/salt/modules/virt.py b/salt/modules/virt.py index 2d36b5b2d2..65a89a68e2 100644 --- a/salt/modules/virt.py +++ b/salt/modules/virt.py @@ -656,7 +656,7 @@ def _nic_profile(profile_name, hypervisor, **kwargs): if key not in attributes or not attributes[key]: attributes[key] = value - def _assign_mac(attributes): + def _assign_mac(attributes, hypervisor): dmac = kwargs.get('dmac', None) if dmac is not None: log.debug('DMAC address is {0}'.format(dmac)) @@ -666,11 +666,15 @@ def _nic_profile(profile_name, hypervisor, **kwargs): msg = 'Malformed MAC address: {0}'.format(dmac) raise CommandExecutionError(msg) else: - attributes['mac'] = salt.utils.network.gen_mac() + if hypervisor in ['qemu', 'kvm']: + attributes['mac'] = salt.utils.network.gen_mac( + prefix='52:54:00') + else: + attributes['mac'] = salt.utils.network.gen_mac() for interface in interfaces: _normalize_net_types(interface) - _assign_mac(interface) + _assign_mac(interface, hypervisor) if hypervisor in overlays: _apply_default_overlay(interface) diff --git a/salt/modules/win_groupadd.py b/salt/modules/win_groupadd.py index 954f153d2b..1293373867 100644 --- a/salt/modules/win_groupadd.py +++ b/salt/modules/win_groupadd.py @@ -36,6 +36,60 @@ def __virtual__(): return (False, "Module win_groupadd: module only works on Windows systems") +def _get_computer_object(): + ''' + A helper function to get the object for the local machine + + Returns: + object: Returns the computer object for the local machine + ''' + pythoncom.CoInitialize() + nt = win32com.client.Dispatch('AdsNameSpaces') + return nt.GetObject('', 'WinNT://.,computer') + + +def _get_group_object(name): + ''' + A helper function to get a specified group object + + Args: + + name (str): The name of the object + + Returns: + object: The specified group object + ''' + pythoncom.CoInitialize() + nt = win32com.client.Dispatch('AdsNameSpaces') + return nt.GetObject('', 'WinNT://./' + name + ',group') + + +def _get_all_groups(): + ''' + A helper 
function that gets a list of group objects for all groups on the + machine + + Returns: + iter: A list of objects for all groups on the machine + ''' + pythoncom.CoInitialize() + nt = win32com.client.Dispatch('AdsNameSpaces') + results = nt.GetObject('', 'WinNT://.') + results.Filter = ['group'] + return results + + +def _get_username(member): + ''' + Resolve the username from the member object returned from a group query + + Returns: + str: The username converted to domain\\username format + ''' + return member.ADSPath.replace('WinNT://', '').replace( + '/', '\\').encode('ascii', 'backslashreplace') + + def add(name, **kwargs): ''' Add the specified group @@ -60,10 +114,8 @@ def add(name, **kwargs): 'comment': ''} if not info(name): - pythoncom.CoInitialize() - nt = win32com.client.Dispatch('AdsNameSpaces') + compObj = _get_computer_object() try: - compObj = nt.GetObject('', 'WinNT://.,computer') newGroup = compObj.Create('group', name) newGroup.SetInfo() ret['changes'].append('Successfully created group {0}'.format(name)) @@ -104,10 +156,8 @@ def delete(name, **kwargs): 'comment': ''} if info(name): - pythoncom.CoInitialize() - nt = win32com.client.Dispatch('AdsNameSpaces') + compObj = _get_computer_object() try: - compObj = nt.GetObject('', 'WinNT://.,computer') compObj.Delete('group', name) ret['changes'].append(('Successfully removed group {0}').format(name)) except pywintypes.com_error as com_err: @@ -144,17 +194,10 @@ def info(name): salt '*' group.info foo ''' - pythoncom.CoInitialize() - nt = win32com.client.Dispatch('AdsNameSpaces') - try: - groupObj = nt.GetObject('', 'WinNT://./' + name + ',group') + groupObj = _get_group_object(name) gr_name = groupObj.Name - gr_mem = [] - for member in groupObj.members(): - gr_mem.append( - member.ADSPath.replace('WinNT://', '').replace( - '/', '\\').encode('ascii', 'backslashreplace')) + gr_mem = [_get_username(x) for x in groupObj.members()] except pywintypes.com_error: return False @@ -193,20 +236,12 @@ def 
getent(refresh=False): ret = [] - pythoncom.CoInitialize() - nt = win32com.client.Dispatch('AdsNameSpaces') + results = _get_all_groups() - results = nt.GetObject('', 'WinNT://.') - results.Filter = ['group'] for result in results: - member_list = [] - for member in result.members(): - member_list.append( - member.AdsPath.replace('WinNT://', '').replace( - '/', '\\').encode('ascii', 'backslashreplace')) - group = {'gid': __salt__['file.group_to_gid'](result.name), - 'members': member_list, - 'name': result.name, + group = {'gid': __salt__['file.group_to_gid'](result.Name), + 'members': [_get_username(x) for x in result.members()], + 'name': result.Name, 'passwd': 'x'} ret.append(group) __context__['group.getent'] = ret @@ -240,17 +275,21 @@ def adduser(name, username, **kwargs): 'changes': {'Users Added': []}, 'comment': ''} - pythoncom.CoInitialize() - nt = win32com.client.Dispatch('AdsNameSpaces') - groupObj = nt.GetObject('', 'WinNT://./' + name + ',group') - existingMembers = [] - for member in groupObj.members(): - existingMembers.append( - member.ADSPath.replace('WinNT://', '').replace( - '/', '\\').encode('ascii', 'backslashreplace').lower()) + try: + groupObj = _get_group_object(name) + except pywintypes.com_error as com_err: + if len(com_err.excepinfo) >= 2: + friendly_error = com_err.excepinfo[2].rstrip('\r\n') + ret['result'] = False + ret['comment'] = 'Failure accessing group {0}. 
{1}' \ + ''.format(name, friendly_error) + return ret + + existingMembers = [_get_username(x) for x in groupObj.members()] + username = salt.utils.win_functions.get_sam_name(username) try: - if salt.utils.win_functions.get_sam_name(username) not in existingMembers: + if username not in existingMembers: if not __opts__['test']: groupObj.Add('WinNT://' + username.replace('\\', '/')) @@ -299,14 +338,17 @@ def deluser(name, username, **kwargs): 'changes': {'Users Removed': []}, 'comment': ''} - pythoncom.CoInitialize() - nt = win32com.client.Dispatch('AdsNameSpaces') - groupObj = nt.GetObject('', 'WinNT://./' + name + ',group') - existingMembers = [] - for member in groupObj.members(): - existingMembers.append( - member.ADSPath.replace('WinNT://', '').replace( - '/', '\\').encode('ascii', 'backslashreplace').lower()) + try: + groupObj = _get_group_object(name) + except pywintypes.com_error as com_err: + if len(com_err.excepinfo) >= 2: + friendly_error = com_err.excepinfo[2].rstrip('\r\n') + ret['result'] = False + ret['comment'] = 'Failure accessing group {0}. {1}' \ + ''.format(name, friendly_error) + return ret + + existingMembers = [_get_username(x) for x in groupObj.members()] try: if salt.utils.win_functions.get_sam_name(username) in existingMembers: @@ -365,10 +407,8 @@ def members(name, members_list, **kwargs): ret['comment'].append('Members is not a list object') return ret - pythoncom.CoInitialize() - nt = win32com.client.Dispatch('AdsNameSpaces') try: - groupObj = nt.GetObject('', 'WinNT://./' + name + ',group') + groupObj = _get_group_object(name) except pywintypes.com_error as com_err: if len(com_err.excepinfo) >= 2: friendly_error = com_err.excepinfo[2].rstrip('\r\n') @@ -377,12 +417,7 @@ def members(name, members_list, **kwargs): 'Failure accessing group {0}. 
{1}' ).format(name, friendly_error)) return ret - existingMembers = [] - for member in groupObj.members(): - existingMembers.append( - member.ADSPath.replace('WinNT://', '').replace( - '/', '\\').encode('ascii', 'backslashreplace').lower()) - + existingMembers = [_get_username(x) for x in groupObj.members()] existingMembers.sort() members_list.sort() @@ -448,18 +483,14 @@ def list_groups(refresh=False): salt '*' group.list_groups ''' if 'group.list_groups' in __context__ and not refresh: - return __context__['group.getent'] + return __context__['group.list_groups'] + + results = _get_all_groups() ret = [] - pythoncom.CoInitialize() - nt = win32com.client.Dispatch('AdsNameSpaces') - - results = nt.GetObject('', 'WinNT://.') - results.Filter = ['group'] - for result in results: - ret.append(result.name) + ret.append(result.Name) __context__['group.list_groups'] = ret diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index 9c970d19d3..8c31b10aeb 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -34,15 +34,18 @@ Current known limitations - pywin32 Python module - lxml - uuid - - codecs - struct - salt.modules.reg ''' # Import Python libs from __future__ import absolute_import +import io import os import logging import re +import locale +import ctypes +import time # Import Salt libs import salt.utils.files @@ -89,7 +92,6 @@ try: import win32net import win32security import uuid - import codecs import lxml import struct from lxml import etree @@ -116,6 +118,16 @@ try: ADMX_DISPLAYNAME_SEARCH_XPATH = etree.XPath('//*[local-name() = "policy" and @*[local-name() = "displayName"] = $display_name and (@*[local-name() = "class"] = "Both" or @*[local-name() = "class"] = $registry_class) ]') PRESENTATION_ANCESTOR_XPATH = etree.XPath('ancestor::*[local-name() = "presentation"]') TEXT_ELEMENT_XPATH = etree.XPath('.//*[local-name() = "text"]') + # Get the System Install Language + # https://msdn.microsoft.com/en-us/library/dd318123(VS.85).aspx + 
# local.windows_locale is a dict + # GetSystemDefaultUILanguage() returns a 4 digit language code that + # corresponds to an entry in the dict + # Not available in win32api, so we have to use ctypes + # Default to `en-US` (1033) + windll = ctypes.windll.kernel32 + INSTALL_LANGUAGE = locale.windows_locale.get( + windll.GetSystemDefaultUILanguage(), 1033).replace('_', '-') except ImportError: HAS_WINDOWS_MODULES = False @@ -2708,7 +2720,8 @@ def _processPolicyDefinitions(policy_def_path='c:\\Windows\\PolicyDefinitions', helper function to process all ADMX files in the specified policy_def_path and build a single XML doc that we can search/use for ADMX policy processing ''' - display_language_fallback = 'en-US' + # Fallback to the System Install Language + display_language_fallback = INSTALL_LANGUAGE t_policy_definitions = lxml.etree.Element('policyDefinitions') t_policy_definitions.append(lxml.etree.Element('categories')) t_policy_definitions.append(lxml.etree.Element('policies')) @@ -2772,22 +2785,44 @@ def _processPolicyDefinitions(policy_def_path='c:\\Windows\\PolicyDefinitions', temp_ns = policy_ns temp_ns = _updateNamespace(temp_ns, this_namespace) policydefs_policyns_xpath(t_policy_definitions)[0].append(temp_ns) - adml_file = os.path.join(root, display_language, os.path.splitext(t_admfile)[0] + '.adml') + + # We need to make sure the adml file exists. First we'll check + # the passed display_language (eg: en-US). Then we'll try the + # abbreviated version (en) to account for alternate locations. + # We'll do the same for the display_language_fallback (en_US). 
+ adml_file = os.path.join(root, display_language, + os.path.splitext(t_admfile)[0] + '.adml') if not __salt__['file.file_exists'](adml_file): msg = ('An ADML file in the specified ADML language "{0}" ' - 'does not exist for the ADMX "{1}", the fallback ' - 'language will be tried.') + 'does not exist for the ADMX "{1}", the abbreviated ' + 'language code will be tried.') log.info(msg.format(display_language, t_admfile)) - adml_file = os.path.join(root, - display_language_fallback, - os.path.splitext(t_admfile)[0] + '.adml') + + adml_file = os.path.join(root, display_language.split('-')[0], + os.path.splitext(t_admfile)[0] + '.adml') if not __salt__['file.file_exists'](adml_file): - msg = ('An ADML file in the specified ADML language ' - '"{0}" and the fallback language "{1}" do not ' - 'exist for the ADMX "{2}".') - raise SaltInvocationError(msg.format(display_language, - display_language_fallback, - t_admfile)) + msg = ('An ADML file in the specified ADML language code "{0}" ' + 'does not exist for the ADMX "{1}", the fallback ' + 'language will be tried.') + log.info(msg.format(display_language[:2], t_admfile)) + + adml_file = os.path.join(root, display_language_fallback, + os.path.splitext(t_admfile)[0] + '.adml') + if not __salt__['file.file_exists'](adml_file): + msg = ('An ADML file in the specified ADML fallback language "{0}" ' + 'does not exist for the ADMX "{1}", the abbreviated' + 'fallback language code will be tried.') + log.info(msg.format(display_language_fallback, t_admfile)) + + adml_file = os.path.join(root, display_language_fallback.split('-')[0], + os.path.splitext(t_admfile)[0] + '.adml') + if not __salt__['file.file_exists'](adml_file): + msg = ('An ADML file in the specified ADML language ' + '"{0}" and the fallback language "{1}" do not ' + 'exist for the ADMX "{2}".') + raise SaltInvocationError(msg.format(display_language, + display_language_fallback, + t_admfile)) try: xmltree = lxml.etree.parse(adml_file) except 
lxml.etree.XMLSyntaxError: @@ -2795,8 +2830,8 @@ def _processPolicyDefinitions(policy_def_path='c:\\Windows\\PolicyDefinitions', try: xmltree = _remove_unicode_encoding(adml_file) except Exception: - msg = ('An error was found while processing adml file {0}, all policy' - ' languange data from this file will be unavailable via this module') + msg = ('An error was found while processing adml file {0}, all policy ' + 'language data from this file will be unavailable via this module') log.error(msg.format(adml_file)) continue if None in namespaces: @@ -2827,15 +2862,23 @@ def _findOptionValueInSeceditFile(option): ''' try: _d = uuid.uuid4().hex - _tfile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'), + _tfile = '{0}\\{1}'.format(__opts__['cachedir'], 'salt-secedit-dump-{0}.txt'.format(_d)) _ret = __salt__['cmd.run']('secedit /export /cfg {0}'.format(_tfile)) if _ret: - _reader = codecs.open(_tfile, 'r', encoding='utf-16') - _secdata = _reader.readlines() - _reader.close() + with io.open(_tfile, encoding='utf-16') as _reader: + _secdata = _reader.readlines() if __salt__['file.file_exists'](_tfile): - _ret = __salt__['file.remove'](_tfile) + for _ in range(5): + try: + __salt__['file.remove'](_tfile) + except CommandExecutionError: + time.sleep(.1) + continue + else: + break + else: + log.error('error occurred removing {0}'.format(_tfile)) for _line in _secdata: if _line.startswith(option): return True, _line.split('=')[1].strip() @@ -2851,9 +2894,9 @@ def _importSeceditConfig(infdata): ''' try: _d = uuid.uuid4().hex - _tSdbfile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'), + _tSdbfile = '{0}\\{1}'.format(__opts__['cachedir'], 'salt-secedit-import-{0}.sdb'.format(_d)) - _tInfFile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'), + _tInfFile = '{0}\\{1}'.format(__opts__['cachedir'], 'salt-secedit-config-{0}.inf'.format(_d)) # make sure our temp files don't already exist if __salt__['file.file_exists'](_tSdbfile): diff --git 
a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index 2abe1ee5f3..d7d28276e6 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -1531,24 +1531,26 @@ def install(name=None, to_install.append((pkgname, pkgstr)) break else: - if re.match('kernel(-.+)?', name): - # kernel and its subpackages support multiple - # installs as their paths do not conflict. - # Performing a yum/dnf downgrade will be a no-op - # so just do an install instead. It will fail if - # there are other interdependencies that have - # conflicts, and that's OK. We don't want to force - # anything, we just want to properly handle it if - # someone tries to install a kernel/kernel-devel of - # a lower version than the currently-installed one. - # TODO: find a better way to determine if a package - # supports multiple installs. - to_install.append((pkgname, pkgstr)) - else: - # None of the currently-installed versions are - # greater than the specified version, so this is a - # downgrade. - to_downgrade.append((pkgname, pkgstr)) + if pkgname is not None: + if re.match('kernel(-.+)?', pkgname): + # kernel and its subpackages support multiple + # installs as their paths do not conflict. + # Performing a yum/dnf downgrade will be a + # no-op so just do an install instead. It will + # fail if there are other interdependencies + # that have conflicts, and that's OK. We don't + # want to force anything, we just want to + # properly handle it if someone tries to + # install a kernel/kernel-devel of a lower + # version than the currently-installed one. + # TODO: find a better way to determine if a + # package supports multiple installs. + to_install.append((pkgname, pkgstr)) + else: + # None of the currently-installed versions are + # greater than the specified version, so this + # is a downgrade. 
+ to_downgrade.append((pkgname, pkgstr)) def _add_common_args(cmd): ''' diff --git a/salt/netapi/rest_tornado/saltnado.py b/salt/netapi/rest_tornado/saltnado.py index 7b93b32de3..4021632d79 100644 --- a/salt/netapi/rest_tornado/saltnado.py +++ b/salt/netapi/rest_tornado/saltnado.py @@ -909,10 +909,7 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): # pylint: disable=W f_call = self._format_call_run_job_async(chunk) # fire a job off - try: - pub_data = yield self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {})) - except EauthAuthenticationError: - raise tornado.gen.Return('Not authorized to run this job') + pub_data = yield self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {})) # if the job didn't publish, lets not wait around for nothing # TODO: set header?? diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index 0848d76309..37cb1bd98b 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -891,11 +891,11 @@ class Pillar(object): # Avoid circular import import salt.utils.gitfs import salt.pillar.git_pillar - git_pillar = salt.utils.gitfs.GitPillar(self.opts) - git_pillar.init_remotes( + git_pillar = salt.utils.gitfs.GitPillar( + self.opts, self.ext['git'], - salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, - salt.pillar.git_pillar.PER_REMOTE_ONLY) + per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, + per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY) git_pillar.fetch_remotes() except TypeError: # Handle malformed ext_pillar diff --git a/salt/pillar/git_pillar.py b/salt/pillar/git_pillar.py index fec485263d..732183a089 100644 --- a/salt/pillar/git_pillar.py +++ b/salt/pillar/git_pillar.py @@ -348,12 +348,6 @@ from salt.ext import six PER_REMOTE_OVERRIDES = ('env', 'root', 'ssl_verify', 'refspecs') PER_REMOTE_ONLY = ('name', 'mountpoint') -# Fall back to default per-remote-only. 
This isn't technically needed since -# salt.utils.gitfs.GitBase.init_remotes() will default to -# salt.utils.gitfs.PER_REMOTE_ONLY for this value, so this is mainly for -# runners and other modules that import salt.pillar.git_pillar. -PER_REMOTE_ONLY = salt.utils.gitfs.PER_REMOTE_ONLY - # Set up logging log = logging.getLogger(__name__) @@ -371,7 +365,7 @@ def __virtual__(): return False try: - salt.utils.gitfs.GitPillar(__opts__) + salt.utils.gitfs.GitPillar(__opts__, init_remotes=False) # Initialization of the GitPillar object did not fail, so we # know we have valid configuration syntax and that a valid # provider was detected. @@ -387,8 +381,11 @@ def ext_pillar(minion_id, pillar, *repos): # pylint: disable=unused-argument opts = copy.deepcopy(__opts__) opts['pillar_roots'] = {} opts['__git_pillar'] = True - git_pillar = salt.utils.gitfs.GitPillar(opts) - git_pillar.init_remotes(repos, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) + git_pillar = salt.utils.gitfs.GitPillar( + opts, + repos, + per_remote_overrides=PER_REMOTE_OVERRIDES, + per_remote_only=PER_REMOTE_ONLY) if __opts__.get('__role') == 'minion': # If masterless, fetch the remotes. We'll need to remove this once # we make the minion daemon able to run standalone. diff --git a/salt/proxy/panos.py b/salt/proxy/panos.py index f7fb8f574c..86ef275a4f 100644 --- a/salt/proxy/panos.py +++ b/salt/proxy/panos.py @@ -191,7 +191,9 @@ from __future__ import absolute_import import logging # Import Salt Libs +from salt._compat import ElementTree as ET import salt.exceptions +import salt.utils.xmlutil as xml # This must be present or the Salt loader won't load this module. __proxyenabled__ = ['panos'] @@ -214,6 +216,22 @@ def __virtual__(): return __virtualname__ +def _strip_dirty(xmltree): + ''' + Removes dirtyID tags from the candidate config result. Palo Alto devices will make the candidate configuration with + a dirty ID after a change. This can cause unexpected results when parsing. 
+ ''' + dirty = xmltree.attrib.pop('dirtyId', None) + if dirty: + xmltree.attrib.pop('admin', None) + xmltree.attrib.pop('time', None) + + for child in xmltree: + child = _strip_dirty(child) + + return xmltree + + def init(opts): ''' This function gets called when the proxy starts up. For @@ -271,7 +289,7 @@ def call(payload=None): ''' This function captures the query string and sends it to the Palo Alto device. ''' - ret = {} + r = None try: if DETAILS['method'] == 'dev_key': # Pass the api key without the target declaration @@ -280,11 +298,10 @@ def call(payload=None): r = __utils__['http.query'](DETAILS['url'], data=payload, method='POST', - decode_type='xml', + decode_type='plain', decode=True, verify_ssl=False, raise_error=True) - ret = r['dict'][0] elif DETAILS['method'] == 'dev_pass': # Pass credentials without the target declaration r = __utils__['http.query'](DETAILS['url'], @@ -292,11 +309,10 @@ def call(payload=None): password=DETAILS['password'], data=payload, method='POST', - decode_type='xml', + decode_type='plain', decode=True, verify_ssl=False, raise_error=True) - ret = r['dict'][0] elif DETAILS['method'] == 'pan_key': # Pass the api key with the target declaration conditional_payload = {'key': DETAILS['apikey'], @@ -305,11 +321,10 @@ def call(payload=None): r = __utils__['http.query'](DETAILS['url'], data=payload, method='POST', - decode_type='xml', + decode_type='plain', decode=True, verify_ssl=False, raise_error=True) - ret = r['dict'][0] elif DETAILS['method'] == 'pan_pass': # Pass credentials with the target declaration conditional_payload = {'target': DETAILS['serial']} @@ -319,14 +334,23 @@ def call(payload=None): password=DETAILS['password'], data=payload, method='POST', - decode_type='xml', + decode_type='plain', decode=True, verify_ssl=False, raise_error=True) - ret = r['dict'][0] except KeyError as err: raise salt.exceptions.CommandExecutionError("Did not receive a valid response from host.") - return ret + + if not r: + raise 
salt.exceptions.CommandExecutionError("Did not receive a valid response from host.") + + xmldata = ET.fromstring(r['text']) + + # If we are pulling the candidate configuration, we need to strip the dirtyId + if payload['type'] == 'config' and payload['action'] == 'get': + xmldata = (_strip_dirty(xmldata)) + + return xml.to_dict(xmldata, True) def is_required_version(required_version='0.0.0'): @@ -382,7 +406,7 @@ def grains(): DETAILS['grains_cache'] = GRAINS_CACHE try: query = {'type': 'op', 'cmd': ''} - DETAILS['grains_cache'] = call(query)['system'] + DETAILS['grains_cache'] = call(query)['result']['system'] except Exception as err: pass return DETAILS['grains_cache'] @@ -402,7 +426,7 @@ def ping(): ''' try: query = {'type': 'op', 'cmd': ''} - if 'system' in call(query): + if 'result' in call(query): return True else: return False diff --git a/salt/returners/local.py b/salt/returners/local.py index da970b1e69..d1c9236a2b 100644 --- a/salt/returners/local.py +++ b/salt/returners/local.py @@ -3,7 +3,9 @@ The local returner is used to test the returner interface, it just prints the return data to the console to verify that it is being passed properly - To use the local returner, append '--return local' to the salt command. ex: +To use the local returner, append '--return local' to the salt command. ex: + +.. 
code-block:: bash salt '*' test.ping --return local ''' diff --git a/salt/runners/cache.py b/salt/runners/cache.py index 459ff325ea..cbd7475853 100644 --- a/salt/runners/cache.py +++ b/salt/runners/cache.py @@ -328,11 +328,14 @@ def clear_git_lock(role, remote=None, **kwargs): salt.utils.args.invalid_kwargs(kwargs) if role == 'gitfs': - git_objects = [salt.utils.gitfs.GitFS(__opts__)] - git_objects[0].init_remotes( - __opts__['gitfs_remotes'], - salt.fileserver.gitfs.PER_REMOTE_OVERRIDES, - salt.fileserver.gitfs.PER_REMOTE_ONLY) + git_objects = [ + salt.utils.gitfs.GitFS( + __opts__, + __opts__['gitfs_remotes'], + per_remote_overrides=salt.fileserver.gitfs.PER_REMOTE_OVERRIDES, + per_remote_only=salt.fileserver.gitfs.PER_REMOTE_ONLY + ) + ] elif role == 'git_pillar': git_objects = [] for ext_pillar in __opts__['ext_pillar']: @@ -340,11 +343,11 @@ def clear_git_lock(role, remote=None, **kwargs): if key == 'git': if not isinstance(ext_pillar['git'], list): continue - obj = salt.utils.gitfs.GitPillar(__opts__) - obj.init_remotes( + obj = salt.utils.gitfs.GitPillar( + __opts__, ext_pillar['git'], - salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, - salt.pillar.git_pillar.PER_REMOTE_ONLY) + per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, + per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY) git_objects.append(obj) elif role == 'winrepo': winrepo_dir = __opts__['winrepo_dir'] @@ -355,11 +358,12 @@ def clear_git_lock(role, remote=None, **kwargs): (winrepo_remotes, winrepo_dir), (__opts__['winrepo_remotes_ng'], __opts__['winrepo_dir_ng']) ): - obj = salt.utils.gitfs.WinRepo(__opts__, base_dir) - obj.init_remotes( + obj = salt.utils.gitfs.WinRepo( + __opts__, remotes, - salt.runners.winrepo.PER_REMOTE_OVERRIDES, - salt.runners.winrepo.PER_REMOTE_ONLY) + per_remote_overrides=salt.runners.winrepo.PER_REMOTE_OVERRIDES, + per_remote_only=salt.runners.winrepo.PER_REMOTE_ONLY, + cache_root=base_dir) git_objects.append(obj) else: raise 
SaltInvocationError('Invalid role \'{0}\''.format(role)) diff --git a/salt/runners/git_pillar.py b/salt/runners/git_pillar.py index 6826268076..984c7da8cc 100644 --- a/salt/runners/git_pillar.py +++ b/salt/runners/git_pillar.py @@ -66,10 +66,11 @@ def update(branch=None, repo=None): if pillar_type != 'git': continue pillar_conf = ext_pillar[pillar_type] - pillar = salt.utils.gitfs.GitPillar(__opts__) - pillar.init_remotes(pillar_conf, - salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, - salt.pillar.git_pillar.PER_REMOTE_ONLY) + pillar = salt.utils.gitfs.GitPillar( + __opts__, + pillar_conf, + per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, + per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY) for remote in pillar.remotes: # Skip this remote if it doesn't match the search criteria if branch is not None: diff --git a/salt/runners/vault.py b/salt/runners/vault.py index 0c8d69345a..d494e8e827 100644 --- a/salt/runners/vault.py +++ b/salt/runners/vault.py @@ -56,14 +56,20 @@ def generate_token(minion_id, signature, impersonated_by_master=False): 'metadata': audit_data } + verify = config.get('verify', None) + log.trace('Sending token creation request to Vault') - response = requests.post(url, headers=headers, json=payload) + response = requests.post(url, headers=headers, json=payload, verify=verify) if response.status_code != 200: return {'error': response.reason} authData = response.json()['auth'] - return {'token': authData['client_token'], 'url': config['url']} + return { + 'token': authData['client_token'], + 'url': config['url'], + 'verify': verify, + } except Exception as e: return {'error': str(e)} diff --git a/salt/runners/winrepo.py b/salt/runners/winrepo.py index 1e73974c4e..4aa20d2b35 100644 --- a/salt/runners/winrepo.py +++ b/salt/runners/winrepo.py @@ -32,7 +32,7 @@ log = logging.getLogger(__name__) PER_REMOTE_OVERRIDES = ('ssl_verify', 'refspecs') # Fall back to default per-remote-only. 
This isn't technically needed since -# salt.utils.gitfs.GitBase.init_remotes() will default to +# salt.utils.gitfs.GitBase.__init__ will default to # salt.utils.gitfs.PER_REMOTE_ONLY for this value, so this is mainly for # runners and other modules that import salt.runners.winrepo. PER_REMOTE_ONLY = salt.utils.gitfs.PER_REMOTE_ONLY @@ -216,9 +216,12 @@ def update_git_repos(opts=None, clean=False, masterless=False): else: # New winrepo code utilizing salt.utils.gitfs try: - winrepo = salt.utils.gitfs.WinRepo(opts, base_dir) - winrepo.init_remotes( - remotes, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) + winrepo = salt.utils.gitfs.WinRepo( + opts, + remotes, + per_remote_overrides=PER_REMOTE_OVERRIDES, + per_remote_only=PER_REMOTE_ONLY, + cache_root=base_dir) winrepo.fetch_remotes() # Since we're not running update(), we need to manually call # clear_old_remotes() to remove directories from remotes that diff --git a/salt/states/blockdev.py b/salt/states/blockdev.py index 6866c0c26b..3e00fb509b 100644 --- a/salt/states/blockdev.py +++ b/salt/states/blockdev.py @@ -159,7 +159,7 @@ def formatted(name, fs_type='ext4', force=False, **kwargs): ret['result'] = None return ret - __salt__['disk.format_'](name, fs_type, force=force, **kwargs) + __salt__['disk.format'](name, fs_type, force=force, **kwargs) # Repeat fstype check up to 10 times with 3s sleeping between each # to avoid detection failing although mkfs has succeeded diff --git a/salt/states/boto_secgroup.py b/salt/states/boto_secgroup.py index aa2acdf52b..8ab4abc390 100644 --- a/salt/states/boto_secgroup.py +++ b/salt/states/boto_secgroup.py @@ -126,6 +126,8 @@ def present( vpc_name=None, rules=None, rules_egress=None, + delete_ingress_rules=True, + delete_egress_rules=True, region=None, key=None, keyid=None, @@ -160,6 +162,16 @@ def present( the egress rules will be unmanaged. If set to an empty list, ``[]``, then all egress rules will be removed. 
+ delete_ingress_rules + Some tools (EMR comes to mind) insist on adding rules on-the-fly, which + salt will happily remove on the next run. Set this param to False to + avoid deleting rules which were added outside of salt. + + delete_egress_rules + Some tools (EMR comes to mind) insist on adding rules on-the-fly, which + salt will happily remove on the next run. Set this param to False to + avoid deleting rules which were added outside of salt. + region Region to connect to. @@ -191,17 +203,18 @@ def present( elif ret['result'] is None: return ret if rules is not None: - _ret = _rules_present(name, rules, vpc_id=vpc_id, vpc_name=vpc_name, - region=region, key=key, keyid=keyid, - profile=profile) + _ret = _rules_present(name, rules, delete_ingress_rules, vpc_id=vpc_id, + vpc_name=vpc_name, region=region, key=key, + keyid=keyid, profile=profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if rules_egress is not None: - _ret = _rules_egress_present(name, rules_egress, vpc_id=vpc_id, - vpc_name=vpc_name, region=region, key=key, - keyid=keyid, profile=profile) + _ret = _rules_egress_present(name, rules_egress, delete_egress_rules, + vpc_id=vpc_id, vpc_name=vpc_name, + region=region, key=key, keyid=keyid, + profile=profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: @@ -389,13 +402,14 @@ def _get_rule_changes(rules, _rules): return (to_delete, to_create) -def _rules_present(name, rules, vpc_id=None, vpc_name=None, - region=None, key=None, keyid=None, profile=None): +def _rules_present(name, rules, delete_ingress_rules=True, vpc_id=None, + vpc_name=None, region=None, key=None, keyid=None, profile=None): ''' given a group name or group name and vpc_id (or vpc name): 1. 
get lists of desired rule changes (using _get_rule_changes) - 2. delete/revoke or authorize/create rules - 3. return 'old' and 'new' group rules + 2. authorize/create rules missing rules + 3. if delete_ingress_rules is True, delete/revoke non-requested rules + 4. return 'old' and 'new' group rules ''' ret = {'result': True, 'comment': '', 'changes': {}} sg = __salt__['boto_secgroup.get_config'](name=name, group_id=None, region=region, key=key, @@ -424,11 +438,13 @@ def _rules_present(name, rules, vpc_id=None, vpc_name=None, # rules = rules that exist in salt state # sg['rules'] = that exist in present group to_delete, to_create = _get_rule_changes(rules, sg['rules']) + to_delete = to_delete if delete_ingress_rules else [] if to_create or to_delete: if __opts__['test']: msg = """Security group {0} set to have rules modified. To be created: {1} - To be deleted: {2}""".format(name, pprint.pformat(to_create), pprint.pformat(to_delete)) + To be deleted: {2}""".format(name, pprint.pformat(to_create), + pprint.pformat(to_delete)) ret['comment'] = msg ret['result'] = None return ret @@ -470,13 +486,14 @@ def _rules_present(name, rules, vpc_id=None, vpc_name=None, return ret -def _rules_egress_present(name, rules_egress, vpc_id=None, vpc_name=None, - region=None, key=None, keyid=None, profile=None): +def _rules_egress_present(name, rules_egress, delete_egress_rules=True, vpc_id=None, + vpc_name=None, region=None, key=None, keyid=None, profile=None): ''' given a group name or group name and vpc_id (or vpc name): 1. get lists of desired rule changes (using _get_rule_changes) - 2. delete/revoke or authorize/create rules - 3. return 'old' and 'new' group rules + 2. authorize/create missing rules + 3. if delete_egress_rules is True, delete/revoke non-requested rules + 4. 
return 'old' and 'new' group rules ''' ret = {'result': True, 'comment': '', 'changes': {}} sg = __salt__['boto_secgroup.get_config'](name=name, group_id=None, region=region, key=key, @@ -504,20 +521,20 @@ def _rules_egress_present(name, rules_egress, vpc_id=None, vpc_name=None, rule['source_group_group_id'] = _group_id # rules_egress = rules that exist in salt state # sg['rules_egress'] = that exist in present group - to_delete_egress, to_create_egress = _get_rule_changes( - rules_egress, sg['rules_egress'] - ) - if to_create_egress or to_delete_egress: + to_delete, to_create = _get_rule_changes(rules_egress, sg['rules_egress']) + to_delete = to_delete if delete_egress_rules else [] + if to_create or to_delete: if __opts__['test']: msg = """Security group {0} set to have rules modified. To be created: {1} - To be deleted: {2}""".format(name, pprint.pformat(to_create_egress), pprint.pformat(to_delete_egress)) + To be deleted: {2}""".format(name, pprint.pformat(to_create), + pprint.pformat(to_delete)) ret['comment'] = msg ret['result'] = None return ret - if to_delete_egress: + if to_delete: deleted = True - for rule in to_delete_egress: + for rule in to_delete: _deleted = __salt__['boto_secgroup.revoke']( name, vpc_id=vpc_id, vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile, egress=True, **rule) @@ -530,9 +547,9 @@ def _rules_egress_present(name, rules_egress, vpc_id=None, vpc_name=None, msg = 'Failed to remove egress rule on {0} security group.' 
ret['comment'] = ' '.join([ret['comment'], msg.format(name)]) ret['result'] = False - if to_create_egress: + if to_create: created = True - for rule in to_create_egress: + for rule in to_create: _created = __salt__['boto_secgroup.authorize']( name, vpc_id=vpc_id, vpc_name=vpc_name, region=region, key=key, keyid=keyid, profile=profile, egress=True, **rule) diff --git a/salt/states/cmd.py b/salt/states/cmd.py index 3d5a13b959..f2f2c60a82 100644 --- a/salt/states/cmd.py +++ b/salt/states/cmd.py @@ -638,6 +638,7 @@ def run(name, runas=None, shell=None, env=None, + prepend_path=None, stateful=False, umask=None, output_loglevel='debug', @@ -712,6 +713,12 @@ def run(name, - env: - PATH: {{ [current_path, '/my/special/bin']|join(':') }} + prepend_path + $PATH segment to prepend (trailing ':' not necessary) to $PATH. This is + an easier alternative to the Jinja workaround. + + .. versionadded:: Oxygen + stateful The command being executed is expected to return data about executing a state. For more information, see the :ref:`stateful-argument` section. @@ -807,6 +814,7 @@ def run(name, 'use_vt': use_vt, 'shell': shell or __grains__['shell'], 'env': env, + 'prepend_path': prepend_path, 'umask': umask, 'output_loglevel': output_loglevel, 'quiet': quiet}) diff --git a/salt/states/docker_container.py b/salt/states/docker_container.py index ca7f2492cc..cbcdb2aeb6 100644 --- a/salt/states/docker_container.py +++ b/salt/states/docker_container.py @@ -1853,7 +1853,7 @@ def stopped(name=None, .. code-block:: yaml stopped_containers: - docker.stopped: + docker_container.stopped: - names: - foo - bar @@ -1862,7 +1862,7 @@ def stopped(name=None, .. code-block:: yaml stopped_containers: - docker.stopped: + docker_container.stopped: - containers: - foo - bar @@ -1998,10 +1998,10 @@ def absent(name, force=False): .. 
code-block:: yaml mycontainer: - docker.absent + docker_container.absent multiple_containers: - docker.absent: + docker_container.absent: - names: - foo - bar diff --git a/salt/states/grafana4_dashboard.py b/salt/states/grafana4_dashboard.py index 984b96273e..548e3e17af 100644 --- a/salt/states/grafana4_dashboard.py +++ b/salt/states/grafana4_dashboard.py @@ -108,7 +108,8 @@ def present(name, # Build out all dashboard fields new_dashboard = _inherited_dashboard( dashboard, base_dashboards_from_pillar, ret) - new_dashboard['title'] = name + if 'title' not in new_dashboard: + new_dashboard['title'] = name rows = new_dashboard.get('rows', []) for i, row in enumerate(rows): rows[i] = _inherited_row(row, base_rows_from_pillar, ret) diff --git a/salt/states/grafana4_datasource.py b/salt/states/grafana4_datasource.py index 7ae3ef3e95..d4b698daf9 100644 --- a/salt/states/grafana4_datasource.py +++ b/salt/states/grafana4_datasource.py @@ -151,6 +151,12 @@ def present(name, ret['changes'] = data return ret + # At this stage, the datasource exists; however, the object provided by + # Grafana may lack some null keys compared to our "data" dict: + for key in data: + if key not in datasource: + datasource[key] = None + if data == datasource: ret['changes'] = None ret['comment'] = 'Data source {0} already up-to-date'.format(name) diff --git a/salt/states/group.py b/salt/states/group.py index 8153e2da7f..602e43778b 100644 --- a/salt/states/group.py +++ b/salt/states/group.py @@ -65,11 +65,11 @@ def _changes(name, if lgrp['members']: lgrp['members'] = [user.lower() for user in lgrp['members']] if members: - members = [salt.utils.win_functions.get_sam_name(user) for user in members] + members = [salt.utils.win_functions.get_sam_name(user).lower() for user in members] if addusers: - addusers = [salt.utils.win_functions.get_sam_name(user) for user in addusers] + addusers = [salt.utils.win_functions.get_sam_name(user).lower() for user in addusers] if delusers: - delusers = 
[salt.utils.win_functions.get_sam_name(user) for user in delusers] + delusers = [salt.utils.win_functions.get_sam_name(user).lower() for user in delusers] change = {} if gid: @@ -244,9 +244,7 @@ def present(name, return ret # Group is not present, make it. - if __salt__['group.add'](name, - gid, - system=system): + if __salt__['group.add'](name, gid=gid, system=system): # if members to be added grp_members = None if members: @@ -269,7 +267,7 @@ def present(name, ret['result'] = False ret['comment'] = ( 'Group {0} has been created but, some changes could not' - ' be applied') + ' be applied'.format(name)) ret['changes'] = {'Failed': changes} else: ret['result'] = False diff --git a/salt/states/host.py b/salt/states/host.py index 8345168cd5..a07cf18ffb 100644 --- a/salt/states/host.py +++ b/salt/states/host.py @@ -131,7 +131,7 @@ def absent(name, ip): # pylint: disable=C0103 comments.append('Host {0} ({1}) already absent'.format(name, _ip)) else: if __opts__['test']: - comments.append('Host {0} ({1} needs to be removed'.format(name, _ip)) + comments.append('Host {0} ({1}) needs to be removed'.format(name, _ip)) else: if __salt__['hosts.rm_host'](_ip, name): ret['changes'] = {'host': name} diff --git a/salt/states/opsgenie.py b/salt/states/opsgenie.py new file mode 100644 index 0000000000..cf129b1335 --- /dev/null +++ b/salt/states/opsgenie.py @@ -0,0 +1,157 @@ +# -*- coding: utf-8 -*- +''' +Create/Close an alert in OpsGenie +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. versionadded:: Oxygen + +This state is useful for creating or closing alerts in OpsGenie +during state runs. + +.. code-block:: yaml + + used_space: + disk.status: + - name: / + - maximum: 79% + - minimum: 20% + + opsgenie_create_action_sender: + opsgenie.create_alert: + - api_key: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + - reason: 'Disk capacity is out of designated range.' 
+ - name: disk.status + - onfail: + - disk: used_space + + opsgenie_close_action_sender: + opsgenie.close_alert: + - api_key: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + - name: disk.status + - require: + - disk: used_space + +''' +# Import Python libs +from __future__ import absolute_import +import logging +import inspect + +# Import Salt libs +import salt.exceptions + +log = logging.getLogger(__name__) + + +def create_alert(name=None, api_key=None, reason=None, action_type="Create"): + ''' + Create an alert in OpsGenie. Example usage with Salt's requisites and other + global state arguments could be found above. + + Required Parameters: + + api_key + It's the API Key you've copied while adding integration in OpsGenie. + + reason + It will be used as alert's default message in OpsGenie. + + Optional Parameters: + + name + It will be used as alert's alias. If you want to use the close + functionality you must provide name field for both states like + in above case. + + action_type + OpsGenie supports the default values Create/Close for action_type. + You can customize this field with OpsGenie's custom actions for + other purposes like adding notes or acknowledging alerts. + ''' + + _, _, _, values = inspect.getargvalues(inspect.currentframe()) + log.info("Arguments values:" + str(values)) + + ret = { + 'result': '', + 'name': '', + 'changes': '', + 'comment': '' + } + + if api_key is None or reason is None: + raise salt.exceptions.SaltInvocationError( + 'API Key or Reason cannot be None.') + + if __opts__['test'] is True: + ret[ + 'comment'] = 'Test: {0} alert request will be processed ' \ + 'using the API Key="{1}".'.format( + action_type, + api_key) + + # Return ``None`` when running with ``test=true``. 
+ ret['result'] = None + + return ret + + response_status_code, response_text = __salt__['opsgenie.post_data']( + api_key=api_key, + name=name, + reason=reason, + action_type=action_type + ) + + if 200 <= response_status_code < 300: + log.info( + "POST Request has succeeded with message:" + + response_text + " status code:" + str( + response_status_code)) + ret[ + 'comment'] = 'Test: {0} alert request will be processed' \ + ' using the API Key="{1}".'.format( + action_type, + api_key) + ret['result'] = True + else: + log.error( + "POST Request has failed with error:" + + response_text + " status code:" + str( + response_status_code)) + ret['result'] = False + + return ret + + +def close_alert(name=None, api_key=None, reason="Conditions are met.", + action_type="Close"): + ''' + Close an alert in OpsGenie. It's a wrapper function for create_alert. + Example usage with Salt's requisites and other global state arguments + could be found above. + + Required Parameters: + + name + It will be used as alert's alias. If you want to use the close + functionality you must provide name field for both states like + in above case. + + Optional Parameters: + + api_key + It's the API Key you've copied while adding integration in OpsGenie. + + reason + It will be used as alert's default message in OpsGenie. + + action_type + OpsGenie supports the default values Create/Close for action_type. + You can customize this field with OpsGenie's custom actions for + other purposes like adding notes or acknowledging alerts. + ''' + if name is None: + raise salt.exceptions.SaltInvocationError( + 'Name cannot be None.') + + return create_alert(name, api_key, reason, action_type) diff --git a/salt/states/panos.py b/salt/states/panos.py index f941ff157a..c10cb5fbc9 100644 --- a/salt/states/panos.py +++ b/salt/states/panos.py @@ -87,6 +87,10 @@ greater than the passed version. 
For example, proxy['panos.is_required_version'] from __future__ import absolute_import import logging +# Import salt libs +import salt.utils.xmlutil as xml +from salt._compat import ElementTree as ET + log = logging.getLogger(__name__) @@ -135,7 +139,7 @@ def _edit_config(xpath, element): query = {'type': 'config', 'action': 'edit', 'xpath': xpath, - 'element': element} + 'element': element} response = __proxy__['panos.call'](query) @@ -239,21 +243,26 @@ def _validate_response(response): ''' if not response: - return False, "Error during move configuration. Verify connectivity to device." + return False, 'Unable to validate response from device.' elif 'msg' in response: - if response['msg'] == 'command succeeded': - return True, response['msg'] + if 'line' in response['msg']: + if response['msg']['line'] == 'already at the top': + return True, response + elif response['msg']['line'] == 'already at the bottom': + return True, response + else: + return False, response + elif response['msg'] == 'command succeeded': + return True, response else: - return False, response['msg'] - elif 'line' in response: - if response['line'] == 'already at the top': - return True, response['line'] - elif response['line'] == 'already at the bottom': - return True, response['line'] + return False, response + elif 'status' in response: + if response['status'] == "success": + return True, response else: - return False, response['line'] + return False, response else: - return False, "Error during move configuration. Verify connectivity to device." + return False, response def add_config_lock(name): @@ -280,6 +289,247 @@ def add_config_lock(name): return ret +def address_exists(name, + addressname=None, + vsys=1, + ipnetmask=None, + iprange=None, + fqdn=None, + description=None, + commit=False): + ''' + Ensures that an address object exists in the configured state. If it does not exist or is not configured with the + specified attributes, it will be adjusted to match the specified values. 
+ + This module will only process a single address type (ip-netmask, ip-range, or fqdn). It will process the specified + value if the following order: ip-netmask, ip-range, fqdn. For proper execution, only specify a single address + type. + + name: The name of the module function to execute. + + addressname(str): The name of the address object. The name is case-sensitive and can have up to 31 characters, + which an be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on + Panorama, unique within its device group and any ancestor or descendant device groups. + + vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1. + + ipnetmask(str): The IPv4 or IPv6 address or IP address range using the format ip_address/mask or ip_address where + the mask is the number of significant binary digits used for the network portion of the address. Ideally, for IPv6, + you specify only the network portion, not the host portion. + + iprange(str): A range of addresses using the format ip_address–ip_address where both addresses can be IPv4 or both + can be IPv6. + + fqdn(str): A fully qualified domain name format. The FQDN initially resolves at commit time. Entries are + subsequently refreshed when the firewall performs a check every 30 minutes; all changes in the IP address for the + entries are picked up at the refresh cycle. + + description(str): A description for the policy (up to 255 characters). + + commit(bool): If true the firewall will commit the changes, if false do not commit changes. + + SLS Example: + + .. 
code-block:: yaml + + panos/address/h-10.10.10.10: + panos.address_exists: + - addressname: h-10.10.10.10 + - vsys: 1 + - ipnetmask: 10.10.10.10 + - commit: False + + panos/address/10.0.0.1-10.0.0.50: + panos.address_exists: + - addressname: r-10.0.0.1-10.0.0.50 + - vsys: 1 + - iprange: 10.0.0.1-10.0.0.50 + - commit: False + + panos/address/foo.bar.com: + panos.address_exists: + - addressname: foo.bar.com + - vsys: 1 + - fqdn: foo.bar.com + - description: My fqdn object + - commit: False + + ''' + ret = _default_ret(name) + + if not addressname: + ret.update({'comment': "The service name field must be provided."}) + return ret + + # Check if address object currently exists + address = __salt__['panos.get_address'](addressname, vsys)['result'] + + if address and 'entry' in address: + address = address['entry'] + else: + address = {} + + element = "" + + # Verify the arguments + if ipnetmask: + element = "{0}".format(ipnetmask) + elif iprange: + element = "{0}".format(iprange) + elif fqdn: + element = "{0}".format(fqdn) + else: + ret.update({'comment': "A valid address type must be specified."}) + return ret + + if description: + element += "{0}".format(description) + + full_element = "{1}".format(addressname, element) + + new_address = xml.to_dict(ET.fromstring(full_element), True) + + if address == new_address: + ret.update({ + 'comment': 'Address object already exists. 
No changes required.', + 'result': True + }) + return ret + else: + xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/address/" \ + "entry[@name=\'{1}\']".format(vsys, addressname) + + result, msg = _edit_config(xpath, full_element) + + if not result: + ret.update({ + 'comment': msg + }) + return ret + + if commit is True: + ret.update({ + 'changes': {'before': address, 'after': new_address}, + 'commit': __salt__['panos.commit'](), + 'comment': 'Address object successfully configured.', + 'result': True + }) + else: + ret.update({ + 'changes': {'before': address, 'after': new_address}, + 'comment': 'Service object successfully configured.', + 'result': True + }) + + return ret + + +def address_group_exists(name, + groupname=None, + vsys=1, + members=None, + description=None, + commit=False): + ''' + Ensures that an address group object exists in the configured state. If it does not exist or is not configured with + the specified attributes, it will be adjusted to match the specified values. + + This module will enforce group membership. If a group exists and contains members this state does not include, + those members will be removed and replaced with the specified members in the state. + + name: The name of the module function to execute. + + groupname(str): The name of the address group object. The name is case-sensitive and can have up to 31 characters, + which an be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on + Panorama, unique within its device group and any ancestor or descendant device groups. + + vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1. + + members(str, list): The members of the address group. These must be valid address objects or address groups on the + system that already exist prior to the execution of this state. + + description(str): A description for the policy (up to 255 characters). 
+ + commit(bool): If true the firewall will commit the changes, if false do not commit changes. + + SLS Example: + + .. code-block:: yaml + + panos/address-group/my-group: + panos.address_group_exists: + - groupname: my-group + - vsys: 1 + - members: + - my-address-object + - my-other-address-group + - description: A group that needs to exist + - commit: False + + ''' + ret = _default_ret(name) + + if not groupname: + ret.update({'comment': "The group name field must be provided."}) + return ret + + # Check if address group object currently exists + group = __salt__['panos.get_address_group'](groupname, vsys)['result'] + + if group and 'entry' in group: + group = group['entry'] + else: + group = {} + + # Verify the arguments + if members: + element = "{0}".format(_build_members(members, True)) + else: + ret.update({'comment': "The group members must be provided."}) + return ret + + if description: + element += "{0}".format(description) + + full_element = "{1}".format(groupname, element) + + new_group = xml.to_dict(ET.fromstring(full_element), True) + + if group == new_group: + ret.update({ + 'comment': 'Address group object already exists. 
No changes required.', + 'result': True + }) + return ret + else: + xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/address-group/" \ + "entry[@name=\'{1}\']".format(vsys, groupname) + + result, msg = _edit_config(xpath, full_element) + + if not result: + ret.update({ + 'comment': msg + }) + return ret + + if commit is True: + ret.update({ + 'changes': {'before': group, 'after': new_group}, + 'commit': __salt__['panos.commit'](), + 'comment': 'Address group object successfully configured.', + 'result': True + }) + else: + ret.update({ + 'changes': {'before': group, 'after': new_group}, + 'comment': 'Address group object successfully configured.', + 'result': True + }) + + return ret + + def clone_config(name, xpath=None, newname=None, commit=False): ''' Clone a specific XPATH and set it to a new name. @@ -317,13 +567,16 @@ def clone_config(name, xpath=None, newname=None, commit=False): 'xpath': xpath, 'newname': newname} - response = __proxy__['panos.call'](query) + result, response = _validate_response(__proxy__['panos.call'](query)) ret.update({ 'changes': response, - 'result': True + 'result': result }) + if not result: + return ret + if commit is True: ret.update({ 'commit': __salt__['panos.commit'](), @@ -386,15 +639,18 @@ def delete_config(name, xpath=None, commit=False): query = {'type': 'config', 'action': 'delete', - 'xpath': xpath} + 'xpath': xpath} - response = __proxy__['panos.call'](query) + result, response = _validate_response(__proxy__['panos.call'](query)) ret.update({ 'changes': response, - 'result': True + 'result': result }) + if not result: + return ret + if commit is True: ret.update({ 'commit': __salt__['panos.commit'](), @@ -434,7 +690,7 @@ def download_software(name, version=None, synch=False, check=False): if check is True: __salt__['panos.check_software']() - versions = __salt__['panos.get_software_info']() + versions = __salt__['panos.get_software_info']()['result'] if 'sw-updates' not in 
versions \ or 'versions' not in versions['sw-updates'] \ @@ -457,7 +713,7 @@ def download_software(name, version=None, synch=False, check=False): 'changes': __salt__['panos.download_software_version'](version=version, synch=synch) }) - versions = __salt__['panos.get_software_info']() + versions = __salt__['panos.get_software_info']()['result'] if 'sw-updates' not in versions \ or 'versions' not in versions['sw-updates'] \ @@ -508,6 +764,32 @@ def edit_config(name, xpath=None, value=None, commit=False): ''' ret = _default_ret(name) + # Verify if the current XPATH is equal to the specified value. + # If we are equal, no changes required. + xpath_split = xpath.split("/") + + # Retrieve the head of the xpath for validation. + if len(xpath_split) > 0: + head = xpath_split[-1] + if "[" in head: + head = head.split("[")[0] + + current_element = __salt__['panos.get_xpath'](xpath)['result'] + + if head and current_element and head in current_element: + current_element = current_element[head] + else: + current_element = {} + + new_element = xml.to_dict(ET.fromstring(value), True) + + if current_element == new_element: + ret.update({ + 'comment': 'XPATH is already equal to the specified value.', + 'result': True + }) + return ret + result, msg = _edit_config(xpath, value) ret.update({ @@ -515,15 +797,20 @@ def edit_config(name, xpath=None, value=None, commit=False): 'result': result }) - # Ensure we do not commit after a failed action if not result: return ret if commit is True: ret.update({ + 'changes': {'before': current_element, 'after': new_element}, 'commit': __salt__['panos.commit'](), 'result': True }) + else: + ret.update({ + 'changes': {'before': current_element, 'after': new_element}, + 'result': True + }) return ret @@ -585,7 +872,8 @@ def move_config(name, xpath=None, where=None, dst=None, commit=False): result, msg = _move_bottom(xpath) ret.update({ - 'result': result + 'result': result, + 'comment': msg }) if not result: @@ -660,13 +948,16 @@ def 
rename_config(name, xpath=None, newname=None, commit=False): 'xpath': xpath, 'newname': newname} - response = __proxy__['panos.call'](query) + result, response = _validate_response(__proxy__['panos.call'](query)) ret.update({ 'changes': response, - 'result': True + 'result': result }) + if not result: + return ret + if commit is True: ret.update({ 'commit': __salt__['panos.commit'](), @@ -854,7 +1145,12 @@ def security_rule_exists(name, return ret # Check if rule currently exists - rule = __salt__['panos.get_security_rule'](rulename, vsys) + rule = __salt__['panos.get_security_rule'](rulename, vsys)['result'] + + if rule and 'entry' in rule: + rule = rule['entry'] + else: + rule = {} # Build the rule element element = "" @@ -964,29 +1260,32 @@ def security_rule_exists(name, full_element = "{1}".format(rulename, element) - create_rule = False + new_rule = xml.to_dict(ET.fromstring(full_element), True) - if 'result' in rule: - if rule['result'] == "None": - create_rule = True + config_change = False - if create_rule: - xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/rulebase/" \ - "security/rules".format(vsys) - - result, msg = _set_config(xpath, full_element) - if not result: - ret['changes']['set'] = msg - return ret + if rule == new_rule: + ret.update({ + 'comment': 'Security rule already exists. No changes required.' + }) else: + config_change = True xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/rulebase/" \ "security/rules/entry[@name=\'{1}\']".format(vsys, rulename) result, msg = _edit_config(xpath, full_element) + if not result: - ret['changes']['edit'] = msg + ret.update({ + 'comment': msg + }) return ret + ret.update({ + 'changes': {'before': rule, 'after': new_rule}, + 'comment': 'Security rule verified successfully.' 
+ }) + if move: movepath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/rulebase/" \ "security/rules/entry[@name=\'{1}\']".format(vsys, rulename) @@ -1001,19 +1300,244 @@ def security_rule_exists(name, elif move == "bottom": move_result, move_msg = _move_bottom(movepath) + if config_change: + ret.update({ + 'changes': {'before': rule, 'after': new_rule, 'move': move_msg} + }) + else: + ret.update({ + 'changes': {'move': move_msg} + }) + if not move_result: - ret['changes']['move'] = move_msg + ret.update({ + 'comment': move_msg + }) return ret if commit is True: ret.update({ 'commit': __salt__['panos.commit'](), - 'comment': 'Security rule verified successfully.', 'result': True }) else: ret.update({ - 'comment': 'Security rule verified successfully.', + 'result': True + }) + + return ret + + +def service_exists(name, servicename=None, vsys=1, protocol=None, port=None, description=None, commit=False): + ''' + Ensures that a service object exists in the configured state. If it does not exist or is not configured with the + specified attributes, it will be adjusted to match the specified values. + + name: The name of the module function to execute. + + servicename(str): The name of the security object. The name is case-sensitive and can have up to 31 characters, + which an be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on + Panorama, unique within its device group and any ancestor or descendant device groups. + + vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1. + + protocol(str): The protocol that is used by the service object. The only valid options are tcp and udp. + + port(str): The port number that is used by the service object. This can be specified as a single integer or a + valid range of ports. + + description(str): A description for the policy (up to 255 characters). 
+ + commit(bool): If true the firewall will commit the changes, if false do not commit changes. + + SLS Example: + + .. code-block:: yaml + + panos/service/tcp-80: + panos.service_exists: + - servicename: tcp-80 + - vsys: 1 + - protocol: tcp + - port: 80 + - description: Hypertext Transfer Protocol + - commit: False + + panos/service/udp-500-550: + panos.service_exists: + - servicename: udp-500-550 + - vsys: 3 + - protocol: udp + - port: 500-550 + - commit: False + + ''' + ret = _default_ret(name) + + if not servicename: + ret.update({'comment': "The service name field must be provided."}) + return ret + + # Check if service object currently exists + service = __salt__['panos.get_service'](servicename, vsys)['result'] + + if service and 'entry' in service: + service = service['entry'] + else: + service = {} + + # Verify the arguments + if not protocol and protocol not in ['tcp', 'udp']: + ret.update({'comment': "The protocol must be provided and must be tcp or udp."}) + return ret + if not port: + ret.update({'comment': "The port field must be provided."}) + return ret + + element = "<{0}>{1}".format(protocol, port) + + if description: + element += "{0}".format(description) + + full_element = "{1}".format(servicename, element) + + new_service = xml.to_dict(ET.fromstring(full_element), True) + + if service == new_service: + ret.update({ + 'comment': 'Service object already exists. 
No changes required.', + 'result': True + }) + return ret + else: + xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/service/" \ + "entry[@name=\'{1}\']".format(vsys, servicename) + + result, msg = _edit_config(xpath, full_element) + + if not result: + ret.update({ + 'comment': msg + }) + return ret + + if commit is True: + ret.update({ + 'changes': {'before': service, 'after': new_service}, + 'commit': __salt__['panos.commit'](), + 'comment': 'Service object successfully configured.', + 'result': True + }) + else: + ret.update({ + 'changes': {'before': service, 'after': new_service}, + 'comment': 'Service object successfully configured.', + 'result': True + }) + + return ret + + +def service_group_exists(name, + groupname=None, + vsys=1, + members=None, + description=None, + commit=False): + ''' + Ensures that a service group object exists in the configured state. If it does not exist or is not configured with + the specified attributes, it will be adjusted to match the specified values. + + This module will enforce group membership. If a group exists and contains members this state does not include, + those members will be removed and replaced with the specified members in the state. + + name: The name of the module function to execute. + + groupname(str): The name of the service group object. The name is case-sensitive and can have up to 31 characters, + which an be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on + Panorama, unique within its device group and any ancestor or descendant device groups. + + vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1. + + members(str, list): The members of the service group. These must be valid service objects or service groups on the + system that already exist prior to the execution of this state. + + description(str): A description for the policy (up to 255 characters). 
+ + commit(bool): If true the firewall will commit the changes, if false do not commit changes. + + SLS Example: + + .. code-block:: yaml + + panos/service-group/my-group: + panos.service_group_exists: + - groupname: my-group + - vsys: 1 + - members: + - tcp-80 + - custom-port-group + - description: A group that needs to exist + - commit: False + + ''' + ret = _default_ret(name) + + if not groupname: + ret.update({'comment': "The group name field must be provided."}) + return ret + + # Check if service group object currently exists + group = __salt__['panos.get_service_group'](groupname, vsys)['result'] + + if group and 'entry' in group: + group = group['entry'] + else: + group = {} + + # Verify the arguments + if members: + element = "{0}".format(_build_members(members, True)) + else: + ret.update({'comment': "The group members must be provided."}) + return ret + + if description: + element += "{0}".format(description) + + full_element = "{1}".format(groupname, element) + + new_group = xml.to_dict(ET.fromstring(full_element), True) + + if group == new_group: + ret.update({ + 'comment': 'Service group object already exists. 
No changes required.', + 'result': True + }) + return ret + else: + xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/service-group/" \ + "entry[@name=\'{1}\']".format(vsys, groupname) + + result, msg = _edit_config(xpath, full_element) + + if not result: + ret.update({ + 'comment': msg + }) + return ret + + if commit is True: + ret.update({ + 'changes': {'before': group, 'after': new_group}, + 'commit': __salt__['panos.commit'](), + 'comment': 'Service group object successfully configured.', + 'result': True + }) + else: + ret.update({ + 'changes': {'before': group, 'after': new_group}, + 'comment': 'Service group object successfully configured.', 'result': True }) @@ -1056,7 +1580,6 @@ def set_config(name, xpath=None, value=None, commit=False): 'result': result }) - # Ensure we do not commit after a failed action if not result: return ret diff --git a/salt/states/pkgrepo.py b/salt/states/pkgrepo.py index e7efca0dc1..8abf73a46e 100644 --- a/salt/states/pkgrepo.py +++ b/salt/states/pkgrepo.py @@ -91,7 +91,6 @@ import sys # Import salt libs from salt.exceptions import CommandExecutionError, SaltInvocationError -from salt.modules.aptpkg import _strip_uri from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS import salt.utils.data import salt.utils.files @@ -406,7 +405,7 @@ def managed(name, ppa=None, **kwargs): sanitizedkwargs = kwargs if os_family == 'debian': - repo = _strip_uri(repo) + repo = salt.utils.pkg.deb.strip_uri(repo) if pre: for kwarg in sanitizedkwargs: diff --git a/salt/states/ssh_known_hosts.py b/salt/states/ssh_known_hosts.py index 93325aa18a..c19afdc14a 100644 --- a/salt/states/ssh_known_hosts.py +++ b/salt/states/ssh_known_hosts.py @@ -178,13 +178,13 @@ def present( return dict(ret, result=False, comment=result['error']) else: # 'updated' if key: - new_key = result['new']['key'] + new_key = result['new'][0]['key'] return dict(ret, changes={'old': result['old'], 'new': result['new']}, 
comment='{0}\'s key saved to {1} (key: {2})'.format( name, config, new_key)) else: - fingerprint = result['new']['fingerprint'] + fingerprint = result['new'][0]['fingerprint'] return dict(ret, changes={'old': result['old'], 'new': result['new']}, comment='{0}\'s key saved to {1} (fingerprint: {2})'.format( @@ -225,7 +225,7 @@ def absent(name, user=None, config=None): ret['result'] = False return dict(ret, comment=comment) - known_host = __salt__['ssh.get_known_host'](user=user, hostname=name, config=config) + known_host = __salt__['ssh.get_known_host_entries'](user=user, hostname=name, config=config) if not known_host: return dict(ret, comment='Host is already absent') diff --git a/salt/states/vagrant.py b/salt/states/vagrant.py new file mode 100644 index 0000000000..edeec1f0db --- /dev/null +++ b/salt/states/vagrant.py @@ -0,0 +1,369 @@ +# -*- coding: utf-8 -*- +r''' +.. index:: Vagrant state function + +Manage Vagrant VMs +================== + +Manage execution of Vagrant virtual machines on Salt minions. + +Vagrant_ is a tool for building and managing virtual machine environments. +It can use various providers, such as VirtualBox_, Docker_, or VMware_, to run its VMs. +Vagrant provides some of the functionality of a light-weight hypervisor. +The combination of Salt modules, Vagrant running on the host, and a +virtual machine provider, gives hypervisor-like functionality for +developers who use Vagrant to quickly define their virtual environments. + +.. _Vagrant: http://www.vagrantup.com/ +.. _VirtualBox: https://www.virtualbox.org/ +.. _Docker: https://www.docker.io/ +.. _VMWare: https://www.vmware.com/ + + .. versionadded:: Oxygen + +The configuration of each virtual machine is defined in a file named +``Vagrantfile`` which must exist on the VM host machine. 
+The essential parameters which must be defined to start a Vagrant VM +are the directory where the ``Vagrantfile`` is located \(argument ``cwd:``\), +and the username which will own the ``Vagrant box`` created for the VM \( +argument ``vagrant_runas:``\). + +A single ``Vagrantfile`` may define one or more virtual machines. +Use the ``machine`` argument to choose among them. The default (blank) +value will select the ``primary`` (or only) machine in the Vagrantfile. + +\[NOTE:\] Each virtual machine host must have the following: + +- a working salt-minion +- a Salt sdb database configured for ``vagrant_sdb_data``. +- Vagrant installed and the ``vagrant`` command working +- a suitable VM provider + +.. code-block:: yaml + + # EXAMPLE: + # file /etc/salt/minion.d/vagrant_sdb.conf on the host computer + # -- this sdb database is required by the Vagrant module -- + vagrant_sdb_data: # The sdb database must have this name. + driver: sqlite3 # Let's use SQLite to store the data ... + database: /var/cache/salt/vagrant.sqlite # ... in this file ... + table: sdb # ... using this table name. + create_table: True # if not present + +''' +from __future__ import absolute_import + +# Import Python libs +import fnmatch + +# Import Salt libs +import salt.utils.args +from salt.exceptions import CommandExecutionError, SaltInvocationError +import salt.ext.six as six + +__virtualname__ = 'vagrant' + + +def __virtual__(): + ''' + Only if vagrant module is available. + + :return: + ''' + + if 'vagrant.version' in __salt__: + return __virtualname__ + return False + + +def _vagrant_call(node, function, section, comment, status_when_done=None, **kwargs): + ''' + Helper to call the vagrant functions. Wildcards supported. + + :param node: The Salt-id or wildcard + :param function: the vagrant submodule to call + :param section: the name for the state call. 
+ :param comment: what the state reply should say + :param status_when_done: the Vagrant status expected for this state + :return: the dictionary for the state reply + ''' + ret = {'name': node, 'changes': {}, 'result': True, 'comment': ''} + + targeted_nodes = [] + if isinstance(node, six.string_types): + try: # use shortcut if a single node name + if __salt__['vagrant.get_vm_info'](node): + targeted_nodes = [node] + except SaltInvocationError: + pass + + if not targeted_nodes: # the shortcut failed, do this the hard way + all_domains = __salt__['vagrant.list_domains']() + targeted_nodes = fnmatch.filter(all_domains, node) + changed_nodes = [] + ignored_nodes = [] + for node in targeted_nodes: + if status_when_done: + try: + present_state = __salt__['vagrant.vm_state'](node)[0] + if present_state['state'] == status_when_done: + continue # no change is needed + except (IndexError, SaltInvocationError, CommandExecutionError): + pass + try: + response = __salt__['vagrant.{0}'.format(function)](node, **kwargs) + if isinstance(response, dict): + response = response['name'] + changed_nodes.append({'node': node, function: response}) + except (SaltInvocationError, CommandExecutionError) as err: + ignored_nodes.append({'node': node, 'issue': str(err)}) + if not changed_nodes: + ret['result'] = True + ret['comment'] = 'No changes seen' + if ignored_nodes: + ret['changes'] = {'ignored': ignored_nodes} + else: + ret['changes'] = {section: changed_nodes} + ret['comment'] = comment + + return ret + + +def running(name, **kwargs): + r''' + Defines and starts a new VM with specified arguments, or restart a + VM (or group of VMs). (Runs ``vagrant up``.) + + :param name: the Salt_id node name you wish your VM to have. + + If ``name`` contains a "?" or "*" then it will re-start a group of VMs + which have been paused or stopped. + + Each machine must be initially started individually using this function + or the vagrant.init execution module call. 
+ + \[NOTE:\] Keyword arguments are silently ignored when re-starting an existing VM. + + Possible keyword arguments: + + - cwd: The directory (path) containing the Vagrantfile + - machine: ('') the name of the machine (in the Vagrantfile) if not default + - vagrant_runas: ('root') the username who owns the vagrantbox file + - vagrant_provider: the provider to run the VM (usually 'virtualbox') + - vm: ({}) a dictionary containing these or other keyword arguments + + .. code-block:: yaml + + node_name: + vagrant.running + + .. code-block:: yaml + + node_name: + vagrant.running: + - cwd: /projects/my_project + - vagrant_runas: my_username + - machine: machine1 + + ''' + if '*' in name or '?' in name: + + return _vagrant_call(name, 'start', 'restarted', + "Machine has been restarted", "running") + + else: + + ret = {'name': name, + 'changes': {}, + 'result': True, + 'comment': '{0} is already running'.format(name) + } + + try: + info = __salt__['vagrant.vm_state'](name) + if info[0]['state'] != 'running': + __salt__['vagrant.start'](name) + ret['changes'][name] = 'Machine started' + ret['comment'] = 'Node {0} started'.format(name) + except (SaltInvocationError, CommandExecutionError): + # there was no viable existing machine to start + ret, kwargs = _find_init_change(name, ret, **kwargs) + kwargs['start'] = True + __salt__['vagrant.init'](name, **kwargs) + ret['changes'][name] = 'Node defined and started' + ret['comment'] = 'Node {0} defined and started'.format(name) + + return ret + + +def _find_init_change(name, ret, **kwargs): + ''' + look for changes from any previous init of machine. 
+ + :return: modified ret and kwargs + ''' + kwargs = salt.utils.args.clean_kwargs(**kwargs) + if 'vm' in kwargs: + kwargs.update(kwargs.pop('vm')) + # the state processing eats 'runas' so we rename + kwargs['runas'] = kwargs.pop('vagrant_runas', '') + try: + vm_ = __salt__['vagrant.get_vm_info'](name) + except SaltInvocationError: + vm_ = {} + for key, value in kwargs.items(): + ret['changes'][key] = {'old': None, 'new': value} + if vm_: # test for changed values + for key in vm_: + value = vm_[key] or '' # supply a blank if value is None + if key != 'name': # will be missing in kwargs + new = kwargs.get(key, '') + if new != value: + if key == 'machine' and new == '': + continue # we don't know the default machine name + ret['changes'][key] = {'old': value, 'new': new} + return ret, kwargs + + +def initialized(name, **kwargs): + r''' + Defines a new VM with specified arguments, but does not start it. + + :param name: the Salt_id node name you wish your VM to have. + + Each machine must be initialized individually using this function + or the "vagrant.running" function, or the vagrant.init execution module call. + + This command will not change the state of a running or paused machine. + + Possible keyword arguments: + + - cwd: The directory (path) containing the Vagrantfile + - machine: ('') the name of the machine (in the Vagrantfile) if not default + - vagrant_runas: ('root') the username who owns the vagrantbox file + - vagrant_provider: the provider to run the VM (usually 'virtualbox') + - vm: ({}) a dictionary containing these or other keyword arguments + + .. code-block:: yaml + + node_name1: + vagrant.initialized + - cwd: /projects/my_project + - vagrant_runas: my_username + - machine: machine1 + + node_name2: + vagrant.initialized + - cwd: /projects/my_project + - vagrant_runas: my_username + - machine: machine2 + + start_nodes: + vagrant.start: + - name: node_name? 
+ ''' + ret = {'name': name, + 'changes': {}, + 'result': True, + 'comment': 'The VM is already correctly defined' + } + + # define a machine to start later + ret, kwargs = _find_init_change(name, ret, **kwargs) + + if ret['changes'] == {}: + return ret + + kwargs['start'] = False + __salt__['vagrant.init'](name, **kwargs) + ret['changes'][name] = 'Node initialized' + ret['comment'] = 'Node {0} defined but not started.'.format(name) + + return ret + + +def stopped(name): + ''' + Stops a VM (or VMs) by shutting it (them) down nicely. (Runs ``vagrant halt``) + + :param name: May be a Salt_id node, or a POSIX-style wildcard string. + + .. code-block:: yaml + + node_name: + vagrant.stopped + ''' + + return _vagrant_call(name, 'shutdown', 'stopped', + 'Machine has been shut down', 'poweroff') + + +def powered_off(name): + ''' + Stops a VM (or VMs) by power off. (Runs ``vagrant halt``.) + + This method is provided for compatibility with other VM-control + state modules. For Vagrant, the action is identical with ``stopped``. + + :param name: May be a Salt_id node or a POSIX-style wildcard string. + + .. code-block:: yaml + + node_name: + vagrant.unpowered + ''' + + return _vagrant_call(name, 'stop', 'unpowered', + 'Machine has been powered off', 'poweroff') + + +def destroyed(name): + ''' + Stops a VM (or VMs) and removes all references to it (them). (Runs ``vagrant destroy``.) + + Subsequent re-use of the same machine will require another operation of ``vagrant.running`` + or a call to the ``vagrant.init`` execution module. + + :param name: May be a Salt_id node or a POSIX-style wildcard string. + + .. code-block:: yaml + + node_name: + vagrant.destroyed + ''' + + return _vagrant_call(name, 'destroy', 'destroyed', + 'Machine has been removed') + + +def paused(name): + ''' + Stores the state of a VM (or VMs) for fast restart. (Runs ``vagrant suspend``.) + + :param name: May be a Salt_id node or a POSIX-style wildcard string. + + .. 
code-block:: yaml + + node_name: + vagrant.paused + ''' + + return _vagrant_call(name, 'pause', 'paused', + 'Machine has been suspended', 'saved') + + +def rebooted(name): + ''' + Reboots a running, paused, or stopped VM (or VMs). (Runs ``vagrant reload``.) + + This will re-run the provisioning. + + :param name: May be a Salt_id node or a POSIX-style wildcard string. + + .. code-block:: yaml + + node_name: + vagrant.reloaded + ''' + + return _vagrant_call(name, 'reboot', 'rebooted', 'Machine has been reloaded') diff --git a/salt/states/virtualenv_mod.py b/salt/states/virtualenv_mod.py index c3a9a452a8..fe8626464f 100644 --- a/salt/states/virtualenv_mod.py +++ b/salt/states/virtualenv_mod.py @@ -67,6 +67,10 @@ def managed(name, name Path to the virtualenv. + venv_bin: virtualenv + The name (and optionally path) of the virtualenv command. This can also + be set globally in the minion config file as ``virtualenv.venv_bin``. + requirements: None Path to a pip requirements file. If the path begins with ``salt://`` the file will be transferred from the master file server. 
diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py index aab4fe4ac1..e1a5936991 100644 --- a/salt/utils/cloud.py +++ b/salt/utils/cloud.py @@ -398,13 +398,14 @@ def bootstrap(vm_, opts=None): # NOTE: deploy_kwargs is also used to pass inline_script variable content # to run_inline_script function + host = salt.config.get_cloud_config_value('ssh_host', vm_, opts) deploy_kwargs = { 'opts': opts, - 'host': vm_['ssh_host'], + 'host': host, 'port': salt.config.get_cloud_config_value( 'ssh_port', vm_, opts, default=22 ), - 'salt_host': vm_.get('salt_host', vm_['ssh_host']), + 'salt_host': vm_.get('salt_host', host), 'username': ssh_username, 'script': deploy_script_code, 'inline_script': inline_script_config, diff --git a/salt/utils/files.py b/salt/utils/files.py index 64c7a55878..d6db7cfbc1 100644 --- a/salt/utils/files.py +++ b/salt/utils/files.py @@ -59,7 +59,9 @@ def guess_archive_type(name): Guess an archive type (tar, zip, or rar) by its file extension ''' name = name.lower() - for ending in ('tar', 'tar.gz', 'tar.bz2', 'tar.xz', 'tgz', 'tbz2', 'txz', + for ending in ('tar', 'tar.gz', 'tgz', + 'tar.bz2', 'tbz2', 'tbz', + 'tar.xz', 'txz', 'tar.lzma', 'tlz'): if name.endswith('.' + ending): return 'tar' diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py index fe20fce89a..34c5e7be57 100644 --- a/salt/utils/gitfs.py +++ b/salt/utils/gitfs.py @@ -20,6 +20,8 @@ import shutil import stat import subprocess import time +import tornado.ioloop +import weakref from datetime import datetime # Import salt libs @@ -1925,12 +1927,47 @@ class GitBase(object): ''' Base class for gitfs/git_pillar ''' - def __init__(self, opts, git_providers=None, cache_root=None): + def __init__(self, opts, remotes=None, per_remote_overrides=(), + per_remote_only=PER_REMOTE_ONLY, git_providers=None, + cache_root=None, init_remotes=True): ''' IMPORTANT: If specifying a cache_root, understand that this is also where the remotes will be cloned. 
A non-default cache_root is only really designed right now for winrepo, as its repos need to be checked out into the winrepo locations and not within the cachedir. + + As of the Oxygen release cycle, the classes used to interface with + Pygit2 and GitPython can be overridden by passing the git_providers + argument when spawning a class instance. This allows for one to write + classes which inherit from salt.utils.gitfs.Pygit2 or + salt.utils.gitfs.GitPython, and then direct one of the GitBase + subclasses (GitFS, GitPillar, WinRepo) to use the custom class. For + example: + + .. code-block:: Python + + import salt.utils.gitfs + from salt.fileserver.gitfs import PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY + + class CustomPygit2(salt.utils.gitfs.Pygit2): + def fetch_remotes(self): + ... + Alternate fetch behavior here + ... + + git_providers = { + 'pygit2': CustomPygit2, + 'gitpython': salt.utils.gitfs.GitPython, + } + + gitfs = salt.utils.gitfs.GitFS( + __opts__, + __opts__['gitfs_remotes'], + per_remote_overrides=PER_REMOTE_OVERRIDES, + per_remote_only=PER_REMOTE_ONLY, + git_providers=git_providers) + + gitfs.fetch_remotes() ''' self.opts = opts self.git_providers = git_providers if git_providers is not None \ @@ -1946,8 +1983,13 @@ class GitBase(object): self.hash_cachedir = salt.utils.path.join(self.cache_root, 'hash') self.file_list_cachedir = salt.utils.path.join( self.opts['cachedir'], 'file_lists', self.role) + if init_remotes: + self.init_remotes( + remotes if remotes is not None else [], + per_remote_overrides, + per_remote_only) - def init_remotes(self, remotes, per_remote_overrides, + def init_remotes(self, remotes, per_remote_overrides=(), per_remote_only=PER_REMOTE_ONLY): ''' Initialize remotes @@ -2471,9 +2513,51 @@ class GitFS(GitBase): ''' Functionality specific to the git fileserver backend ''' - def __init__(self, opts): - self.role = 'gitfs' - super(GitFS, self).__init__(opts) + role = 'gitfs' + instance_map = weakref.WeakKeyDictionary() + + def 
__new__(cls, opts, remotes=None, per_remote_overrides=(), + per_remote_only=PER_REMOTE_ONLY, git_providers=None, + cache_root=None, init_remotes=True): + ''' + If we are not initializing remotes (such as in cases where we just want + to load the config so that we can run clear_cache), then just return a + new __init__'ed object. Otherwise, check the instance map and re-use an + instance if one exists for the current process. Weak references are + used to ensure that we garbage collect instances for threads which have + exited. + ''' + # No need to get the ioloop reference if we're not initializing remotes + io_loop = tornado.ioloop.IOLoop.current() if init_remotes else None + if not init_remotes or io_loop not in cls.instance_map: + # We only evaluate the second condition in this if statement if + # we're initializing remotes, so we won't get here unless io_loop + # is something other than None. + obj = object.__new__(cls) + super(GitFS, obj).__init__( + opts, + remotes if remotes is not None else [], + per_remote_overrides=per_remote_overrides, + per_remote_only=per_remote_only, + git_providers=git_providers if git_providers is not None + else GIT_PROVIDERS, + cache_root=cache_root, + init_remotes=init_remotes) + if not init_remotes: + log.debug('Created gitfs object with uninitialized remotes') + else: + log.debug('Created gitfs object for process %s', os.getpid()) + # Add to the instance map so we can re-use later + cls.instance_map[io_loop] = obj + return obj + log.debug('Re-using gitfs object for process %s', os.getpid()) + return cls.instance_map[io_loop] + + def __init__(self, opts, remotes, per_remote_overrides=(), # pylint: disable=super-init-not-called + per_remote_only=PER_REMOTE_ONLY, git_providers=None, + cache_root=None, init_remotes=True): + # Initialization happens above in __new__(), so don't do anything here + pass def dir_list(self, load): ''' @@ -2755,9 +2839,7 @@ class GitPillar(GitBase): ''' Functionality specific to the git external pillar 
''' - def __init__(self, opts): - self.role = 'git_pillar' - super(GitPillar, self).__init__(opts) + role = 'git_pillar' def checkout(self): ''' @@ -2845,9 +2927,7 @@ class WinRepo(GitBase): ''' Functionality specific to the winrepo runner ''' - def __init__(self, opts, winrepo_dir): - self.role = 'winrepo' - super(WinRepo, self).__init__(opts, cache_root=winrepo_dir) + role = 'winrepo' def checkout(self): ''' diff --git a/salt/utils/job.py b/salt/utils/job.py index d0f61e06f7..d9432259e4 100644 --- a/salt/utils/job.py +++ b/salt/utils/job.py @@ -102,10 +102,10 @@ def store_job(opts, load, event=None, mminion=None): log.error(emsg) raise KeyError(emsg) - if 'jid' in load \ - and 'get_load' in mminion.returners \ - and not mminion.returners[getfstr](load.get('jid', '')): + try: mminion.returners[savefstr](load['jid'], load) + except KeyError as e: + log.error("Load does not contain 'jid': %s", e) mminion.returners[fstr](load) if (opts.get('job_cache_store_endtime') diff --git a/salt/utils/minions.py b/salt/utils/minions.py index 81060325d2..2049f5549a 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -599,10 +599,9 @@ class CkMinions(object): if search is None: return minions addrs = salt.utils.network.local_port_tcp(int(self.opts['publish_port'])) - if '127.0.0.1' in addrs or '0.0.0.0' in addrs: - # Add in possible ip addresses of a locally connected minion + if '127.0.0.1' in addrs: + # Add in the address of a possible locally-connected minion. 
addrs.discard('127.0.0.1') - addrs.discard('0.0.0.0') addrs.update(set(salt.utils.network.ip_addrs(include_loopback=include_localhost))) if subset: search = subset diff --git a/salt/utils/pkg/deb.py b/salt/utils/pkg/deb.py index 7cd170a4a3..512d371bec 100644 --- a/salt/utils/pkg/deb.py +++ b/salt/utils/pkg/deb.py @@ -26,3 +26,15 @@ def combine_comments(comments): else: comments = [comments] return ' '.join(comments).strip() + + +def strip_uri(repo): + ''' + Remove the trailing slash from the URI in a repo definition + ''' + splits = repo.split() + for idx in range(len(splits)): + if any(splits[idx].startswith(x) + for x in ('http://', 'https://', 'ftp://')): + splits[idx] = splits[idx].rstrip('/') + return ' '.join(splits) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index 055c7fc0db..8e9ecec0a2 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -1151,7 +1151,8 @@ class Schedule(object): # Sort the list of "whens" from earlier to later schedules _when.sort() - for i in _when: + # Copy the list so we can loop through it + for i in copy.deepcopy(_when): if i < now and len(_when) > 1: # Remove all missed schedules except the latest one. # We need it to detect if it was triggered previously. 
diff --git a/salt/utils/templates.py b/salt/utils/templates.py index b5379e5555..b94f67a762 100644 --- a/salt/utils/templates.py +++ b/salt/utils/templates.py @@ -206,8 +206,13 @@ def wrap_tmpl_func(render_str): if six.PY2: output = output.encode(SLS_ENCODING) if salt.utils.platform.is_windows(): + newline = False + if output.endswith(('\n', os.linesep)): + newline = True # Write out with Windows newlines output = os.linesep.join(output.splitlines()) + if newline: + output += os.linesep except SaltRenderError as exc: log.error("Rendering exception occurred: {0}".format(exc)) @@ -331,7 +336,7 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None): # http://jinja.pocoo.org/docs/api/#unicode tmplstr = tmplstr.decode(SLS_ENCODING) - if tmplstr.endswith('\n'): + if tmplstr.endswith(os.linesep): newline = True if not saltenv: @@ -441,7 +446,7 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None): # Workaround a bug in Jinja that removes the final newline # (https://github.com/mitsuhiko/jinja2/issues/75) if newline: - output += '\n' + output += os.linesep return output diff --git a/salt/utils/vault.py b/salt/utils/vault.py index e34a41bb85..b1f7eed8d7 100644 --- a/salt/utils/vault.py +++ b/salt/utils/vault.py @@ -90,7 +90,8 @@ def _get_token_and_url_from_master(): raise salt.exceptions.CommandExecutionError(result) return { 'url': result['url'], - 'token': result['token'] + 'token': result['token'], + 'verify': result['verify'], } @@ -104,7 +105,8 @@ def _get_vault_connection(): try: return { 'url': __opts__['vault']['url'], - 'token': __opts__['vault']['auth']['token'] + 'token': __opts__['vault']['auth']['token'], + 'verify': __opts__['vault'].get('verify', None) } except KeyError as err: errmsg = 'Minion has "vault" config section, but could not find key "{0}" within'.format(err.message) @@ -124,6 +126,8 @@ def make_request(method, resource, profile=None, **args): connection = _get_vault_connection() token, vault_url = connection['token'], connection['url'] + if 
'verify' not in args: + args['verify'] = connection['verify'] url = "{0}/{1}".format(vault_url, resource) headers = {'X-Vault-Token': token, 'Content-Type': 'application/json'} diff --git a/salt/utils/virtualbox.py b/salt/utils/virtualbox.py index c055a6ff8a..6aa108f57d 100644 --- a/salt/utils/virtualbox.py +++ b/salt/utils/virtualbox.py @@ -281,7 +281,10 @@ def vb_get_network_addresses(machine_name=None, machine=None): # We can't trust virtualbox to give us up to date guest properties if the machine isn't running # For some reason it may give us outdated (cached?) values if machine.state == _virtualboxManager.constants.MachineState_Running: - total_slots = int(machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/Count')) + try: + total_slots = int(machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/Count')) + except ValueError: + total_slots = 0 for i in range(total_slots): try: address = machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/{0}/V4/IP'.format(i)) diff --git a/salt/utils/win_functions.py b/salt/utils/win_functions.py index 35d0fabddc..424d7f9206 100644 --- a/salt/utils/win_functions.py +++ b/salt/utils/win_functions.py @@ -111,7 +111,7 @@ def get_sid_from_name(name): sid = win32security.LookupAccountName(None, name)[0] except pywintypes.error as exc: raise CommandExecutionError( - 'User {0} found: {1}'.format(name, exc.strerror)) + 'User {0} not found: {1}'.format(name, exc.strerror)) return win32security.ConvertSidToStringSid(sid) @@ -144,19 +144,21 @@ def get_current_user(): def get_sam_name(username): - ''' + r''' Gets the SAM name for a user. It basically prefixes a username without a - backslash with the computer name. If the username contains a backslash, it - is returned as is. + backslash with the computer name. If the user does not exist, a SAM + compatible name will be returned using the local hostname as the domain. - Everything is returned lower case + i.e. 
salt.utils.get_same_name('Administrator') would return 'DOMAIN.COM\Administrator' - i.e. salt.utils.fix_local_user('Administrator') would return 'computername\administrator' + .. note:: Long computer names are truncated to 15 characters ''' - if '\\' not in username: - username = '{0}\\{1}'.format(platform.node(), username) - - return username.lower() + try: + sid_obj = win32security.LookupAccountName(None, username)[0] + except pywintypes.error: + return '\\'.join([platform.node()[:15].upper(), username]) + username, domain, _ = win32security.LookupAccountSid(None, sid_obj) + return '\\'.join([domain, username]) def enable_ctrl_logoff_handler(): diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py index bb07921aaa..2f32985e2e 100644 --- a/salt/utils/xmlutil.py +++ b/salt/utils/xmlutil.py @@ -7,34 +7,37 @@ Various XML utilities from __future__ import absolute_import -def to_dict(xmltree): +def _conv_name(x): ''' - Convert an XML tree into a dict. The tree that is passed in must be an - ElementTree object. + If this XML tree has an xmlns attribute, then etree will add it + to the beginning of the tag, like: "{http://path}tag". + ''' + if '}' in x: + comps = x.split('}') + name = comps[1] + return name + return x + + +def _to_dict(xmltree): + ''' + Converts an XML ElementTree to a dictionary that only contains items. + This is the default behavior in version 2017.7. This will default to prevent + unexpected parsing issues on modules dependant on this. ''' # If this object has no children, the for..loop below will return nothing # for it, so just return a single dict representing it. 
- if len(xmltree.getchildren()) < 1 and len(xmltree.attrib.items()) < 1: - name = xmltree.tag - if '}' in name: - comps = name.split('}') - name = comps[1] + if len(xmltree.getchildren()) < 1: + name = _conv_name(xmltree.tag) return {name: xmltree.text} xmldict = {} for item in xmltree: - name = item.tag - if '}' in name: - # If this XML tree has an xmlns attribute, then etree will add it - # to the beginning of the tag, like: "{http://path}tag". This - # aggression will not stand, man. - comps = name.split('}') - name = comps[1] + name = _conv_name(item.tag) + if name not in xmldict: if len(item.getchildren()) > 0: - xmldict[name] = to_dict(item) - elif len(item.attrib.items()) > 0: - xmldict[name] = to_dict(item) + xmldict[name] = _to_dict(item) else: xmldict[name] = item.text else: @@ -43,13 +46,56 @@ def to_dict(xmltree): # to happen, and behave accordingly. if not isinstance(xmldict[name], list): xmldict[name] = [xmldict[name]] - xmldict[name].append(to_dict(item)) + xmldict[name].append(_to_dict(item)) + return xmldict + + +def _to_full_dict(xmltree): + ''' + Returns the full XML dictionary including attributes. + ''' + xmldict = {} for attrName, attrValue in xmltree.attrib.items(): - if attrName not in xmldict: - xmldict[attrName] = attrValue + xmldict[attrName] = attrValue + + if len(xmltree.getchildren()) < 1: + if len(xmldict) == 0: + # If we don't have attributes, we should return the value as a string + # ex: test + return xmltree.text + elif xmltree.text: + # XML allows for empty sets with attributes, so we need to make sure that capture this. + # ex: + xmldict[_conv_name(xmltree.tag)] = xmltree.text + + for item in xmltree: + name = _conv_name(item.tag) + + if name not in xmldict: + xmldict[name] = _to_full_dict(item) else: - # Attempt to ensure that items are not overwritten by attributes. - xmldict["attr{0}".format(attrName)] = attrValue + # If a tag appears more than once in the same place, convert it to + # a list. 
This may require that the caller watch for such a thing + # to happen, and behave accordingly. + if not isinstance(xmldict[name], list): + xmldict[name] = [xmldict[name]] + + xmldict[name].append(_to_full_dict(item)) return xmldict + + +def to_dict(xmltree, attr=False): + ''' + Convert an XML tree into a dict. The tree that is passed in must be an + ElementTree object. + Args: + xmltree: An ElementTree object. + attr: If true, attributes will be parsed. If false, they will be ignored. + + ''' + if attr: + return _to_full_dict(xmltree) + else: + return _to_dict(xmltree) diff --git a/tests/integration/modules/test_groupadd.py b/tests/integration/modules/test_groupadd.py index 9963793ca1..9936fc7411 100644 --- a/tests/integration/modules/test_groupadd.py +++ b/tests/integration/modules/test_groupadd.py @@ -2,18 +2,18 @@ # Import python libs from __future__ import absolute_import -import string +import grp +import os import random +import string # Import Salt Testing libs from tests.support.case import ModuleCase from tests.support.helpers import destructiveTest, skip_if_not_root -# Import 3rd-party libs +# Import Salt libs from salt.ext.six.moves import range -import os -import grp -from salt import utils +import salt.utils.files @skip_if_not_root @@ -66,7 +66,7 @@ class GroupModuleTest(ModuleCase): ''' defs_file = '/etc/login.defs' if os.path.exists(defs_file): - with utils.fopen(defs_file) as defs_fd: + with salt.utils.files.fopen(defs_file) as defs_fd: login_defs = dict([x.split() for x in defs_fd.readlines() if x.strip() @@ -102,12 +102,12 @@ class GroupModuleTest(ModuleCase): ''' Test the add group function ''' - #add a new group + # add a new group self.assertTrue(self.run_function('group.add', [self._group, self._gid])) group_info = self.run_function('group.info', [self._group]) self.assertEqual(group_info['name'], self._group) self.assertEqual(group_info['gid'], self._gid) - #try adding the group again + # try adding the group again 
self.assertFalse(self.run_function('group.add', [self._group, self._gid])) @destructiveTest @@ -124,7 +124,7 @@ class GroupModuleTest(ModuleCase): group_info = self.run_function('group.info', [self._group]) self.assertEqual(group_info['name'], self._group) self.assertTrue(gid_min <= group_info['gid'] <= gid_max) - #try adding the group again + # try adding the group again self.assertFalse(self.run_function('group.add', [self._group])) @@ -142,7 +142,7 @@ class GroupModuleTest(ModuleCase): group_info = self.run_function('group.info', [self._group]) self.assertEqual(group_info['name'], self._group) self.assertEqual(group_info['gid'], gid) - #try adding the group again + # try adding the group again self.assertFalse(self.run_function('group.add', [self._group, gid])) @@ -153,10 +153,10 @@ class GroupModuleTest(ModuleCase): ''' self.assertTrue(self.run_function('group.add', [self._group])) - #correct functionality + # correct functionality self.assertTrue(self.run_function('group.delete', [self._group])) - #group does not exist + # group does not exist self.assertFalse(self.run_function('group.delete', [self._no_group])) @destructiveTest @@ -193,11 +193,11 @@ class GroupModuleTest(ModuleCase): self.assertTrue(self.run_function('group.adduser', [self._group, self._user])) group_info = self.run_function('group.info', [self._group]) self.assertIn(self._user, group_info['members']) - #try add a non existing user + # try to add a non existing user self.assertFalse(self.run_function('group.adduser', [self._group, self._no_user])) - #try add a user to non existing group + # try to add a user to non existing group self.assertFalse(self.run_function('group.adduser', [self._no_group, self._user])) - #try add a non existing user to a non existing group + # try to add a non existing user to a non existing group self.assertFalse(self.run_function('group.adduser', [self._no_group, self._no_user])) @destructiveTest diff --git a/tests/integration/modules/test_ssh.py 
b/tests/integration/modules/test_ssh.py index b565129ef0..6971e9921c 100644 --- a/tests/integration/modules/test_ssh.py +++ b/tests/integration/modules/test_ssh.py @@ -104,7 +104,7 @@ class SSHModuleTest(ModuleCase): # user will get an indicator of what went wrong. self.assertEqual(len(list(ret.items())), 0) # Zero keys found - def test_get_known_host(self): + def test_get_known_host_entries(self): ''' Check that known host information is returned from ~/.ssh/config ''' @@ -113,7 +113,7 @@ class SSHModuleTest(ModuleCase): KNOWN_HOSTS) arg = ['root', 'github.com'] kwargs = {'config': KNOWN_HOSTS} - ret = self.run_function('ssh.get_known_host', arg, **kwargs) + ret = self.run_function('ssh.get_known_host_entries', arg, **kwargs)[0] try: self.assertEqual(ret['enc'], 'ssh-rsa') self.assertEqual(ret['key'], self.key) @@ -125,16 +125,16 @@ class SSHModuleTest(ModuleCase): ) ) - def test_recv_known_host(self): + def test_recv_known_host_entries(self): ''' Check that known host information is returned from remote host ''' - ret = self.run_function('ssh.recv_known_host', ['github.com']) + ret = self.run_function('ssh.recv_known_host_entries', ['github.com']) try: self.assertNotEqual(ret, None) - self.assertEqual(ret['enc'], 'ssh-rsa') - self.assertEqual(ret['key'], self.key) - self.assertEqual(ret['fingerprint'], GITHUB_FINGERPRINT) + self.assertEqual(ret[0]['enc'], 'ssh-rsa') + self.assertEqual(ret[0]['key'], self.key) + self.assertEqual(ret[0]['fingerprint'], GITHUB_FINGERPRINT) except AssertionError as exc: raise AssertionError( 'AssertionError: {0}. Function returned: {1}'.format( @@ -215,7 +215,7 @@ class SSHModuleTest(ModuleCase): try: self.assertEqual(ret['status'], 'updated') self.assertEqual(ret['old'], None) - self.assertEqual(ret['new']['fingerprint'], GITHUB_FINGERPRINT) + self.assertEqual(ret['new'][0]['fingerprint'], GITHUB_FINGERPRINT) except AssertionError as exc: raise AssertionError( 'AssertionError: {0}. 
Function returned: {1}'.format( @@ -223,8 +223,8 @@ class SSHModuleTest(ModuleCase): ) ) # check that item does exist - ret = self.run_function('ssh.get_known_host', ['root', 'github.com'], - config=KNOWN_HOSTS) + ret = self.run_function('ssh.get_known_host_entries', ['root', 'github.com'], + config=KNOWN_HOSTS)[0] try: self.assertEqual(ret['fingerprint'], GITHUB_FINGERPRINT) except AssertionError as exc: diff --git a/tests/integration/proxy/test_simple.py b/tests/integration/proxy/test_simple.py index dbca73bafa..e1af027cf2 100644 --- a/tests/integration/proxy/test_simple.py +++ b/tests/integration/proxy/test_simple.py @@ -67,3 +67,23 @@ class ProxyMinionSimpleTestCase(ModuleCase): ret = self.run_function('service.start', ['samba'], minion_tgt='proxytest') ret = self.run_function('service.status', ['samba'], minion_tgt='proxytest') self.assertTrue(ret) + + def test_service_get_all(self): + ret = self.run_function('service.get_all', minion_tgt='proxytest') + self.assertTrue(ret) + self.assertIn('samba', ' '.join(ret)) + + def test_grains_items(self): + ret = self.run_function('grains.items', minion_tgt='proxytest') + self.assertEqual(ret['kernel'], 'proxy') + self.assertEqual(ret['kernelrelease'], 'proxy') + + def test_state_apply(self): + ret = self.run_function('state.apply', ['core'], minion_tgt='proxytest') + for key, value in ret.items(): + self.assertTrue(value['result']) + + def test_state_highstate(self): + ret = self.run_function('state.highstate', minion_tgt='proxytest') + for key, value in ret.items(): + self.assertTrue(value['result']) diff --git a/tests/integration/spm/__init__.py b/tests/integration/spm/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/tests/integration/spm/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/tests/integration/spm/test_build.py b/tests/integration/spm/test_build.py new file mode 100644 index 0000000000..18eb931ad3 --- /dev/null +++ b/tests/integration/spm/test_build.py @@ 
-0,0 +1,65 @@ +# -*- coding: utf-8 -*- +''' +Tests for the spm build utility +''' +# Import python libs +from __future__ import absolute_import +import os +import shutil +import textwrap + +# Import Salt Testing libs +from tests.support.case import SPMCase +from tests.support.helpers import destructiveTest + +# Import Salt Libraries +import salt.utils.files + + +@destructiveTest +class SPMBuildTest(SPMCase): + ''' + Validate the spm build command + ''' + def setUp(self): + self.config = self._spm_config() + self.formula_dir = os.path.join(' '.join(self.config['file_roots']['base']), 'formulas') + self.formula_sls_dir = os.path.join(self.formula_dir, 'apache') + self.formula_sls = os.path.join(self.formula_sls_dir, 'apache.sls') + self.formula_file = os.path.join(self.formula_dir, 'FORMULA') + + dirs = [self.formula_dir, self.formula_sls_dir] + for formula_dir in dirs: + os.makedirs(formula_dir) + + with salt.utils.files.fopen(self.formula_sls, 'w') as fp: + fp.write(textwrap.dedent('''\ + install-apache: + pkg.installed: + - name: apache2 + ''')) + + with salt.utils.files.fopen(self.formula_file, 'w') as fp: + fp.write(textwrap.dedent('''\ + name: apache + os: RedHat, Debian, Ubuntu, Suse, FreeBSD + os_family: RedHat, Debian, Suse, FreeBSD + version: 201506 + release: 2 + summary: Formula for installing Apache + description: Formula for installing Apache + ''')) + + def test_spm_build(self): + ''' + test spm build + ''' + build_spm = self.run_spm('build', self.config, self.formula_dir) + spm_file = os.path.join(self.config['spm_build_dir'], 'apache-201506-2.spm') + # Make sure .spm file gets created + self.assertTrue(os.path.exists(spm_file)) + # Make sure formula path dir is created + self.assertTrue(os.path.isdir(self.config['formula_path'])) + + def tearDown(self): + shutil.rmtree(self._tmp_spm) diff --git a/tests/integration/spm/test_files.py b/tests/integration/spm/test_files.py new file mode 100644 index 0000000000..7b8ae7f5db --- /dev/null +++ 
b/tests/integration/spm/test_files.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +''' +Tests for the spm files utility +''' +# Import python libs +from __future__ import absolute_import +import os +import shutil + +# Import Salt Testing libs +from tests.support.case import SPMCase +from tests.support.helpers import destructiveTest + + +@destructiveTest +class SPMFilesTest(SPMCase): + ''' + Validate the spm files command + ''' + def setUp(self): + self.config = self._spm_config() + self._spm_build_files(self.config) + + def test_spm_files(self): + ''' + test spm files + ''' + self._spm_create_update_repo(self.config) + install = self.run_spm('install', self.config, 'apache') + get_files = self.run_spm('files', self.config, 'apache') + + os.path.exists(os.path.join(self.config['formula_path'], 'apache', + 'apache.sls')) + + def tearDown(self): + shutil.rmtree(self._tmp_spm) diff --git a/tests/integration/spm/test_info.py b/tests/integration/spm/test_info.py new file mode 100644 index 0000000000..d6ab4dcf3b --- /dev/null +++ b/tests/integration/spm/test_info.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +''' +Tests for the spm info utility +''' +# Import python libs +from __future__ import absolute_import +import shutil + +# Import Salt Testing libs +from tests.support.case import SPMCase +from tests.support.helpers import destructiveTest + + +@destructiveTest +class SPMInfoTest(SPMCase): + ''' + Validate the spm info command + ''' + def setUp(self): + self.config = self._spm_config() + self._spm_build_files(self.config) + + def test_spm_info(self): + ''' + test spm build + ''' + self._spm_create_update_repo(self.config) + install = self.run_spm('install', self.config, 'apache') + get_info = self.run_spm('info', self.config, 'apache') + + check_info = ['Supported OSes', 'Supported OS', 'installing Apache'] + for info in check_info: + self.assertIn(info, ''.join(get_info)) + + def tearDown(self): + shutil.rmtree(self._tmp_spm) diff --git 
a/tests/integration/spm/test_install.py b/tests/integration/spm/test_install.py new file mode 100644 index 0000000000..30bca50b96 --- /dev/null +++ b/tests/integration/spm/test_install.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +''' +Tests for the spm install utility +''' +# Import python libs +from __future__ import absolute_import +import os +import shutil + +# Import Salt Testing libs +from tests.support.case import SPMCase +from tests.support.helpers import destructiveTest + + +@destructiveTest +class SPMInstallTest(SPMCase): + ''' + Validate the spm install command + ''' + def setUp(self): + self.config = self._spm_config() + self._spm_build_files(self.config) + + def test_spm_install_local_dir(self): + ''' + test spm install from local directory + ''' + build_spm = self.run_spm('build', self.config, self.formula_dir) + spm_file = os.path.join(self.config['spm_build_dir'], + 'apache-201506-2.spm') + + install = self.run_spm('install', self.config, spm_file) + + sls = os.path.join(self.config['formula_path'], 'apache', 'apache.sls') + + self.assertTrue(os.path.exists(sls)) + + def test_spm_install_from_repo(self): + ''' + test spm install from repo + ''' + self._spm_create_update_repo(self.config) + install = self.run_spm('install', self.config, 'apache') + + sls = os.path.join(self.config['formula_path'], 'apache', 'apache.sls') + + self.assertTrue(os.path.exists(sls)) + + def tearDown(self): + shutil.rmtree(self._tmp_spm) diff --git a/tests/integration/spm/test_repo.py b/tests/integration/spm/test_repo.py new file mode 100644 index 0000000000..a4daf10a63 --- /dev/null +++ b/tests/integration/spm/test_repo.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +''' +Tests for the spm repo +''' +# Import python libs +from __future__ import absolute_import +import os +import shutil + +# Import Salt Testing libs +from tests.support.case import SPMCase +from tests.support.helpers import destructiveTest + + +@destructiveTest +class SPMRepoTest(SPMCase): + ''' + Validate 
commands related to spm repo + ''' + def setUp(self): + self.config = self._spm_config() + self._spm_build_files(self.config) + + def test_spm_create_update_repo(self): + ''' + test spm create_repo + ''' + self._spm_create_update_repo(self.config) + + self.assertTrue(os.path.exists(self.config['spm_db'])) + + l_repo_file = os.path.join(self.config['spm_cache_dir'], 'local_repo.p') + self.assertTrue(os.path.exists(l_repo_file)) + + def tearDown(self): + shutil.rmtree(self._tmp_spm) diff --git a/tests/integration/states/test_ssh.py b/tests/integration/states/test_ssh.py index e09c6ad664..9f9372afcc 100644 --- a/tests/integration/states/test_ssh.py +++ b/tests/integration/states/test_ssh.py @@ -66,7 +66,7 @@ class SSHKnownHostsStateTest(ModuleCase, SaltReturnAssertsMixin): raise err self.assertSaltStateChangesEqual( - ret, GITHUB_FINGERPRINT, keys=('new', 'fingerprint') + ret, GITHUB_FINGERPRINT, keys=('new', 0, 'fingerprint') ) # save twice, no changes @@ -81,7 +81,7 @@ class SSHKnownHostsStateTest(ModuleCase, SaltReturnAssertsMixin): **dict(kwargs, name=GITHUB_IP)) try: self.assertSaltStateChangesEqual( - ret, GITHUB_FINGERPRINT, keys=('new', 'fingerprint') + ret, GITHUB_FINGERPRINT, keys=('new', 0, 'fingerprint') ) except AssertionError as err: try: @@ -94,8 +94,8 @@ class SSHKnownHostsStateTest(ModuleCase, SaltReturnAssertsMixin): # record for every host must be available ret = self.run_function( - 'ssh.get_known_host', ['root', 'github.com'], config=KNOWN_HOSTS - ) + 'ssh.get_known_host_entries', ['root', 'github.com'], config=KNOWN_HOSTS + )[0] try: self.assertNotIn(ret, ('', None)) except AssertionError: @@ -103,8 +103,8 @@ class SSHKnownHostsStateTest(ModuleCase, SaltReturnAssertsMixin): 'Salt return \'{0}\' is in (\'\', None).'.format(ret) ) ret = self.run_function( - 'ssh.get_known_host', ['root', GITHUB_IP], config=KNOWN_HOSTS - ) + 'ssh.get_known_host_entries', ['root', GITHUB_IP], config=KNOWN_HOSTS + )[0] try: self.assertNotIn(ret, ('', None, {})) except 
AssertionError: @@ -144,7 +144,7 @@ class SSHKnownHostsStateTest(ModuleCase, SaltReturnAssertsMixin): # remove once, the key is gone ret = self.run_state('ssh_known_hosts.absent', **kwargs) self.assertSaltStateChangesEqual( - ret, GITHUB_FINGERPRINT, keys=('old', 'fingerprint') + ret, GITHUB_FINGERPRINT, keys=('old', 0, 'fingerprint') ) # remove twice, nothing has changed diff --git a/tests/runtests.py b/tests/runtests.py index b698adc077..6ed70cd64c 100755 --- a/tests/runtests.py +++ b/tests/runtests.py @@ -130,6 +130,9 @@ TEST_SUITES = { 'returners': {'display_name': 'Returners', 'path': 'integration/returners'}, + 'spm': + {'display_name': 'SPM', + 'path': 'integration/spm'}, 'loader': {'display_name': 'Loader', 'path': 'integration/loader'}, @@ -338,6 +341,13 @@ class SaltTestsuiteParser(SaltCoverageTestingParser): action='store_true', help='Run salt/returners/*.py tests' ) + self.test_selection_group.add_option( + '--spm', + dest='spm', + default=False, + action='store_true', + help='Run spm integration tests' + ) self.test_selection_group.add_option( '-l', '--loader', diff --git a/tests/support/case.py b/tests/support/case.py index 77a1928b22..62f7e078d7 100644 --- a/tests/support/case.py +++ b/tests/support/case.py @@ -22,6 +22,7 @@ import time import stat import errno import signal +import textwrap import logging import tempfile import subprocess @@ -564,6 +565,120 @@ class ShellCase(ShellTestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixi timeout=timeout) +class SPMTestUserInterface(object): + ''' + Test user interface to SPMClient + ''' + def __init__(self): + self._status = [] + self._confirm = [] + self._error = [] + + def status(self, msg): + self._status.append(msg) + + def confirm(self, action): + self._confirm.append(action) + + def error(self, msg): + self._error.append(msg) + + +class SPMCase(TestCase, AdaptedConfigurationTestCaseMixin): + ''' + Class for handling spm commands + ''' + + def _spm_build_files(self, config): + 
self.formula_dir = os.path.join(' '.join(config['file_roots']['base']), 'formulas') + self.formula_sls_dir = os.path.join(self.formula_dir, 'apache') + self.formula_sls = os.path.join(self.formula_sls_dir, 'apache.sls') + self.formula_file = os.path.join(self.formula_dir, 'FORMULA') + + dirs = [self.formula_dir, self.formula_sls_dir] + for f_dir in dirs: + os.makedirs(f_dir) + + # Late import + import salt.utils.files + + with salt.utils.files.fopen(self.formula_sls, 'w') as fp: + fp.write(textwrap.dedent('''\ + install-apache: + pkg.installed: + - name: apache2 + ''')) + + with salt.utils.files.fopen(self.formula_file, 'w') as fp: + fp.write(textwrap.dedent('''\ + name: apache + os: RedHat, Debian, Ubuntu, Suse, FreeBSD + os_family: RedHat, Debian, Suse, FreeBSD + version: 201506 + release: 2 + summary: Formula for installing Apache + description: Formula for installing Apache + ''')) + + def _spm_config(self): + self._tmp_spm = tempfile.mkdtemp() + config = self.get_temp_config('minion', **{ + 'spm_logfile': os.path.join(self._tmp_spm, 'log'), + 'spm_repos_config': os.path.join(self._tmp_spm, 'etc', 'spm.repos'), + 'spm_cache_dir': os.path.join(self._tmp_spm, 'cache'), + 'spm_build_dir': os.path.join(self._tmp_spm, 'build'), + 'spm_build_exclude': ['.git'], + 'spm_db_provider': 'sqlite3', + 'spm_files_provider': 'local', + 'spm_db': os.path.join(self._tmp_spm, 'packages.db'), + 'extension_modules': os.path.join(self._tmp_spm, 'modules'), + 'file_roots': {'base': [self._tmp_spm, ]}, + 'formula_path': os.path.join(self._tmp_spm, 'spm'), + 'pillar_path': os.path.join(self._tmp_spm, 'pillar'), + 'reactor_path': os.path.join(self._tmp_spm, 'reactor'), + 'assume_yes': True, + 'force': False, + 'verbose': False, + 'cache': 'localfs', + 'cachedir': os.path.join(self._tmp_spm, 'cache'), + 'spm_repo_dups': 'ignore', + 'spm_share_dir': os.path.join(self._tmp_spm, 'share'), + }) + return config + + def _spm_create_update_repo(self, config): + + build_spm = 
self.run_spm('build', self.config, self.formula_dir) + + c_repo = self.run_spm('create_repo', self.config, + self.config['spm_build_dir']) + + repo_conf_dir = self.config['spm_repos_config'] + '.d' + os.makedirs(repo_conf_dir) + + # Late import + import salt.utils.files + + with salt.utils.files.fopen(os.path.join(repo_conf_dir, 'spm.repo'), 'w') as fp: + fp.write(textwrap.dedent('''\ + local_repo: + url: file://{0} + '''.format(self.config['spm_build_dir']))) + + u_repo = self.run_spm('update_repo', self.config) + + def _spm_client(self, config): + import salt.spm + self.ui = SPMTestUserInterface() + client = salt.spm.SPMClient(self.ui, config) + return client + + def run_spm(self, cmd, config, arg=None): + client = self._spm_client(config) + spm_cmd = client.run([cmd, arg]) + return self.ui._status + + class ModuleCase(TestCase, SaltClientTestCaseMixin): ''' Execute a module function @@ -582,7 +697,7 @@ class ModuleCase(TestCase, SaltClientTestCaseMixin): behavior of the raw function call ''' know_to_return_none = ( - 'file.chown', 'file.chgrp', 'ssh.recv_known_host' + 'file.chown', 'file.chgrp', 'ssh.recv_known_host_entries' ) if 'f_arg' in kwargs: kwargs['arg'] = kwargs.pop('f_arg') diff --git a/tests/unit/beacons/test_btmp_beacon.py b/tests/unit/beacons/test_btmp_beacon.py index 708dae9454..d4bf94abf4 100644 --- a/tests/unit/beacons/test_btmp_beacon.py +++ b/tests/unit/beacons/test_btmp_beacon.py @@ -59,8 +59,10 @@ class BTMPBeaconTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(ret, (True, 'Valid beacon configuration')) - ret = btmp.beacon(config) - self.assertEqual(ret, []) + with patch('salt.utils.files.fopen', mock_open()) as m_open: + ret = btmp.beacon(config) + m_open.assert_called_with(btmp.BTMP, 'rb') + self.assertEqual(ret, []) def test_match(self): with patch('salt.utils.files.fopen', diff --git a/tests/unit/beacons/test_wtmp_beacon.py b/tests/unit/beacons/test_wtmp_beacon.py index b1edd97096..c28f3554b6 100644 --- 
a/tests/unit/beacons/test_wtmp_beacon.py +++ b/tests/unit/beacons/test_wtmp_beacon.py @@ -60,8 +60,10 @@ class WTMPBeaconTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(ret, (True, 'Valid beacon configuration')) - ret = wtmp.beacon(config) - self.assertEqual(ret, []) + with patch('salt.utils.files.fopen', mock_open()) as m_open: + ret = wtmp.beacon(config) + m_open.assert_called_with(wtmp.WTMP, 'rb') + self.assertEqual(ret, []) def test_match(self): with patch('salt.utils.files.fopen', diff --git a/tests/unit/cloud/clouds/test_saltify.py b/tests/unit/cloud/clouds/test_saltify.py index 414a294f3b..4ca83e346c 100644 --- a/tests/unit/cloud/clouds/test_saltify.py +++ b/tests/unit/cloud/clouds/test_saltify.py @@ -8,32 +8,59 @@ from __future__ import absolute_import # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin -from tests.support.unit import TestCase -from tests.support.mock import ( - MagicMock, - patch -) +from tests.support.unit import skipIf, TestCase +from tests.support.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch, ANY + # Import Salt Libs +import salt.client from salt.cloud.clouds import saltify +TEST_PROFILES = { + 'testprofile1': NotImplemented, + 'testprofile2': { # this profile is used in test_saltify_destroy() + 'ssh_username': 'fred', + 'remove_config_on_destroy': False, # expected for test + 'shutdown_on_destroy': True # expected value for test + }, + 'testprofile3': { # this profile is used in test_create_wake_on_lan() + 'wake_on_lan_mac': 'aa-bb-cc-dd-ee-ff', + 'wol_sender_node': 'friend1', + 'wol_boot_wait': 0.01 # we want the wait to be very short + } + } +TEST_PROFILE_NAMES = ['testprofile1', 'testprofile2', 'testprofile3'] + +@skipIf(NO_MOCK, NO_MOCK_REASON) class SaltifyTestCase(TestCase, LoaderModuleMockMixin): ''' Test cases for salt.cloud.clouds.saltify ''' - # 'create' function tests: 1 + LOCAL_OPTS = { + 'providers': { + 'sfy1': { + 'saltify': { + 'driver': 'saltify', + 'profiles': 
TEST_PROFILES + } + }, + }, + 'profiles': TEST_PROFILES, + 'sock_dir': '/var/sockxxx', + 'transport': 'tcp', + } def setup_loader_modules(self): - return { - saltify: { + saltify_globals = { '__active_provider_name__': '', '__utils__': { - 'cloud.bootstrap': MagicMock() - }, - '__opts__': {'providers': {}} - } - } + 'cloud.bootstrap': MagicMock(), + 'cloud.fire_event': MagicMock(), + }, + '__opts__': self.LOCAL_OPTS, + } + return {saltify: saltify_globals} def test_create_no_deploy(self): ''' @@ -43,5 +70,142 @@ class SaltifyTestCase(TestCase, LoaderModuleMockMixin): vm = {'deploy': False, 'driver': 'saltify', 'name': 'dummy' - } + } self.assertTrue(saltify.create(vm)) + + def test_create_and_deploy(self): + ''' + Test if deployment can be done. + ''' + mock_cmd = MagicMock(return_value=True) + with patch.dict( + 'salt.cloud.clouds.saltify.__utils__', + {'cloud.bootstrap': mock_cmd}): + vm_ = {'deploy': True, + 'driver': 'saltify', + 'name': 'new2', + 'profile': 'testprofile2', + } + result = saltify.create(vm_) + mock_cmd.assert_called_once_with(vm_, ANY) + self.assertTrue(result) + + def test_create_wake_on_lan(self): + ''' + Test if wake on lan works + ''' + mock_sleep = MagicMock() + mock_cmd = MagicMock(return_value=True) + mm_cmd = MagicMock(return_value={'friend1': True}) + lcl = salt.client.LocalClient() + lcl.cmd = mm_cmd + with patch('time.sleep', mock_sleep): + with patch('salt.client.LocalClient', return_value=lcl): + with patch.dict( + 'salt.cloud.clouds.saltify.__utils__', + {'cloud.bootstrap': mock_cmd}): + vm_ = {'deploy': True, + 'driver': 'saltify', + 'name': 'new1', + 'profile': 'testprofile3', + } + result = saltify.create(vm_) + mock_cmd.assert_called_once_with(vm_, ANY) + mm_cmd.assert_called_with('friend1', 'network.wol', ['aa-bb-cc-dd-ee-ff']) + mock_sleep.assert_called_with(0.01) + self.assertTrue(result) + + def test_avail_locations(self): + ''' + Test the avail_locations will always return {} + ''' + 
self.assertEqual(saltify.avail_locations(), {}) + + def test_avail_sizes(self): + ''' + Test the avail_sizes will always return {} + ''' + self.assertEqual(saltify.avail_sizes(), {}) + + def test_avail_images(self): + ''' + Test the avail_images will return profiles + ''' + testlist = list(TEST_PROFILE_NAMES) # copy + self.assertEqual( + saltify.avail_images()['Profiles'].sort(), + testlist.sort()) + + def test_list_nodes(self): + ''' + Test list_nodes will return required fields only + ''' + testgrains = { + 'nodeX1': { + 'id': 'nodeX1', + 'ipv4': [ + '127.0.0.1', '192.1.2.22', '172.16.17.18'], + 'ipv6': [ + '::1', 'fdef:bad:add::f00', '3001:DB8::F00D'], + 'salt-cloud': { + 'driver': 'saltify', + 'provider': 'saltyfy', + 'profile': 'testprofile2' + }, + 'extra_stuff': 'does not belong' + } + } + expected_result = { + 'nodeX1': { + 'id': 'nodeX1', + 'image': 'testprofile2', + 'private_ips': [ + '172.16.17.18', 'fdef:bad:add::f00'], + 'public_ips': [ + '192.1.2.22', '3001:DB8::F00D'], + 'size': '', + 'state': 'running' + } + } + mm_cmd = MagicMock(return_value=testgrains) + lcl = salt.client.LocalClient() + lcl.cmd = mm_cmd + with patch('salt.client.LocalClient', return_value=lcl): + self.assertEqual( + saltify.list_nodes(), + expected_result) + + def test_saltify_reboot(self): + mm_cmd = MagicMock(return_value=True) + lcl = salt.client.LocalClient() + lcl.cmd = mm_cmd + with patch('salt.client.LocalClient', return_value=lcl): + result = saltify.reboot('nodeS1', 'action') + mm_cmd.assert_called_with('nodeS1', 'system.reboot') + self.assertTrue(result) + + def test_saltify_destroy(self): + # destroy calls local.cmd several times and expects + # different results, so we will provide a list of + # results. Each call will get the next value. + # NOTE: this assumes that the call order never changes, + # so to keep things simple, we will not use remove_config... 
+ result_list = [ + {'nodeS1': { # first call is grains.get + 'driver': 'saltify', + 'provider': 'saltify', + 'profile': 'testprofile2'} + }, + # Note: + # testprofile2 has remove_config_on_destroy: False + # and shutdown_on_destroy: True + {'nodeS1': # last call shuts down the minion + 'a system.shutdown worked message'}, + ] + mm_cmd = MagicMock(side_effect=result_list) + lcl = salt.client.LocalClient() + lcl.cmd = mm_cmd + with patch('salt.client.LocalClient', return_value=lcl): + result = saltify.destroy('nodeS1', 'action') + mm_cmd.assert_called_with('nodeS1', 'system.shutdown') + self.assertTrue(result) diff --git a/tests/unit/daemons/test_masterapi.py b/tests/unit/daemons/test_masterapi.py index d2f5931227..b584d7a730 100644 --- a/tests/unit/daemons/test_masterapi.py +++ b/tests/unit/daemons/test_masterapi.py @@ -8,13 +8,16 @@ import salt.config import salt.daemons.masterapi as masterapi # Import Salt Testing Libs -from tests.support.unit import TestCase +from tests.support.unit import TestCase, skipIf from tests.support.mock import ( patch, MagicMock, + NO_MOCK, + NO_MOCK_REASON ) +@skipIf(NO_MOCK, NO_MOCK_REASON) class LocalFuncsTestCase(TestCase): ''' TestCase for salt.daemons.masterapi.LocalFuncs class @@ -24,6 +27,8 @@ class LocalFuncsTestCase(TestCase): opts = salt.config.master_config(None) self.local_funcs = masterapi.LocalFuncs(opts, 'test-key') + # runner tests + def test_runner_token_not_authenticated(self): ''' Asserts that a TokenAuthenticationError is returned when the token can't authenticate. @@ -107,6 +112,8 @@ class LocalFuncsTestCase(TestCase): self.assertDictEqual(mock_ret, ret) + # wheel tests + def test_wheel_token_not_authenticated(self): ''' Asserts that a TokenAuthenticationError is returned when the token can't authenticate. 
@@ -199,3 +206,105 @@ class LocalFuncsTestCase(TestCase): u'user UNKNOWN.'}} ret = self.local_funcs.wheel({}) self.assertDictEqual(mock_ret, ret) + + # publish tests + + def test_publish_user_is_blacklisted(self): + ''' + Asserts that an empty string is returned when the user has been blacklisted. + ''' + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=True)): + self.assertEqual(u'', self.local_funcs.publish({u'user': u'foo', u'fun': u'test.arg'})) + + def test_publish_cmd_blacklisted(self): + ''' + Asserts that an empty string returned when the command has been blacklisted. + ''' + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=True)): + self.assertEqual(u'', self.local_funcs.publish({u'user': u'foo', u'fun': u'test.arg'})) + + def test_publish_token_not_authenticated(self): + ''' + Asserts that an empty string is returned when the token can't authenticate. + ''' + load = {u'user': u'foo', u'fun': u'test.arg', u'tgt': u'test_minion', + u'kwargs': {u'token': u'asdfasdfasdfasdf'}} + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)): + self.assertEqual(u'', self.local_funcs.publish(load)) + + def test_publish_token_authorization_error(self): + ''' + Asserts that an empty string is returned when the token authenticates, but is not + authorized. 
+ ''' + token = u'asdfasdfasdfasdf' + load = {u'user': u'foo', u'fun': u'test.arg', u'tgt': u'test_minion', + u'arg': u'bar', u'kwargs': {u'token': token}} + mock_token = {u'token': token, u'eauth': u'foo', u'name': u'test'} + + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + self.assertEqual(u'', self.local_funcs.publish(load)) + + def test_publish_eauth_not_authenticated(self): + ''' + Asserts that an empty string is returned when the user can't authenticate. + ''' + load = {u'user': u'test', u'fun': u'test.arg', u'tgt': u'test_minion', + u'kwargs': {u'eauth': u'foo'}} + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)): + self.assertEqual(u'', self.local_funcs.publish(load)) + + def test_publish_eauth_authorization_error(self): + ''' + Asserts that an empty string is returned when the user authenticates, but is not + authorized. + ''' + load = {u'user': u'test', u'fun': u'test.arg', u'tgt': u'test_minion', + u'kwargs': {u'eauth': u'foo'}, u'arg': u'bar'} + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + self.assertEqual(u'', self.local_funcs.publish(load)) + + def test_publish_user_not_authenticated(self): + ''' + Asserts that an empty string is returned when the user can't authenticate. 
+ ''' + load = {u'user': u'test', u'fun': u'test.arg', u'tgt': u'test_minion'} + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)): + self.assertEqual(u'', self.local_funcs.publish(load)) + + def test_publish_user_authenticated_missing_auth_list(self): + ''' + Asserts that an empty string is returned when the user has an effective user id and is + authenticated, but the auth_list is empty. + ''' + load = {u'user': u'test', u'fun': u'test.arg', u'tgt': u'test_minion', + u'kwargs': {u'user': u'test'}, u'arg': u'foo'} + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.auth.LoadAuth.authenticate_key', MagicMock(return_value='fake-user-key')), \ + patch('salt.utils.master.get_values_of_matching_keys', MagicMock(return_value=[])): + self.assertEqual(u'', self.local_funcs.publish(load)) + + def test_publish_user_authorization_error(self): + ''' + Asserts that an empty string is returned when the user authenticates, but is not + authorized. 
+ ''' + load = {u'user': u'test', u'fun': u'test.arg', u'tgt': u'test_minion', + u'kwargs': {u'user': u'test'}, u'arg': u'foo'} + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.auth.LoadAuth.authenticate_key', MagicMock(return_value='fake-user-key')), \ + patch('salt.utils.master.get_values_of_matching_keys', MagicMock(return_value=['test'])), \ + patch('salt.utils.minions.CkMinions.auth_check', MagicMock(return_value=False)): + self.assertEqual(u'', self.local_funcs.publish(load)) diff --git a/tests/unit/fileserver/test_gitfs.py b/tests/unit/fileserver/test_gitfs.py index 64d8ca5284..bda0182eec 100644 --- a/tests/unit/fileserver/test_gitfs.py +++ b/tests/unit/fileserver/test_gitfs.py @@ -5,10 +5,12 @@ # Import Python libs from __future__ import absolute_import +import errno import os import shutil import tempfile import textwrap +import tornado.ioloop import logging import stat try: @@ -40,18 +42,26 @@ import salt.utils.win_functions log = logging.getLogger(__name__) +TMP_SOCK_DIR = tempfile.mkdtemp(dir=TMP) +TMP_REPO_DIR = os.path.join(TMP, 'gitfs_root') +INTEGRATION_BASE_FILES = os.path.join(FILES, 'file', 'base') + + +def _rmtree_error(func, path, excinfo): + os.chmod(path, stat.S_IWRITE) + func(path) + @skipIf(not HAS_GITPYTHON, 'GitPython is not installed') class GitfsConfigTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): self.tmp_cachedir = tempfile.mkdtemp(dir=TMP) - self.tmp_sock_dir = tempfile.mkdtemp(dir=TMP) return { gitfs: { '__opts__': { 'cachedir': self.tmp_cachedir, - 'sock_dir': self.tmp_sock_dir, + 'sock_dir': TMP_SOCK_DIR, 'gitfs_root': 'salt', 'fileserver_backend': ['git'], 'gitfs_base': 'master', @@ -81,9 +91,17 @@ class GitfsConfigTestCase(TestCase, LoaderModuleMockMixin): } } + @classmethod + def setUpClass(cls): + # Clear the instance map so that we make sure to create a new 
instance + # for this test class. + try: + del salt.utils.gitfs.GitFS.instance_map[tornado.ioloop.IOLoop.current()] + except KeyError: + pass + def tearDown(self): shutil.rmtree(self.tmp_cachedir) - shutil.rmtree(self.tmp_sock_dir) def test_per_saltenv_config(self): opts_override = textwrap.dedent(''' @@ -109,10 +127,11 @@ class GitfsConfigTestCase(TestCase, LoaderModuleMockMixin): - mountpoint: abc ''') with patch.dict(gitfs.__opts__, yaml.safe_load(opts_override)): - git_fs = salt.utils.gitfs.GitFS(gitfs.__opts__) - git_fs.init_remotes( + git_fs = salt.utils.gitfs.GitFS( + gitfs.__opts__, gitfs.__opts__['gitfs_remotes'], - gitfs.PER_REMOTE_OVERRIDES, gitfs.PER_REMOTE_ONLY) + per_remote_overrides=gitfs.PER_REMOTE_OVERRIDES, + per_remote_only=gitfs.PER_REMOTE_ONLY) # repo1 (branch: foo) # The mountpoint should take the default (from gitfs_mountpoint), while @@ -169,14 +188,12 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): self.tmp_cachedir = tempfile.mkdtemp(dir=TMP) - self.tmp_sock_dir = tempfile.mkdtemp(dir=TMP) - self.tmp_repo_dir = os.path.join(TMP, 'gitfs_root') return { gitfs: { '__opts__': { 'cachedir': self.tmp_cachedir, - 'sock_dir': self.tmp_sock_dir, - 'gitfs_remotes': ['file://' + self.tmp_repo_dir], + 'sock_dir': TMP_SOCK_DIR, + 'gitfs_remotes': ['file://' + TMP_REPO_DIR], 'gitfs_root': '', 'fileserver_backend': ['git'], 'gitfs_base': 'master', @@ -206,26 +223,26 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): } } - def setUp(self): - ''' - We don't want to check in another .git dir into GH because that just gets messy. - Instead, we'll create a temporary repo on the fly for the tests to examine. - ''' - if not gitfs.__virtual__(): - self.skipTest("GitFS could not be loaded. Skipping GitFS tests!") - self.integration_base_files = os.path.join(FILES, 'file', 'base') + @classmethod + def setUpClass(cls): + # Clear the instance map so that we make sure to create a new instance + # for this test class. 
+ try: + del salt.utils.gitfs.GitFS.instance_map[tornado.ioloop.IOLoop.current()] + except KeyError: + pass # Create the dir if it doesn't already exist try: - shutil.copytree(self.integration_base_files, self.tmp_repo_dir + '/') + shutil.copytree(INTEGRATION_BASE_FILES, TMP_REPO_DIR + '/') except OSError: # We probably caught an error because files already exist. Ignore pass try: - repo = git.Repo(self.tmp_repo_dir) + repo = git.Repo(TMP_REPO_DIR) except git.exc.InvalidGitRepositoryError: - repo = git.Repo.init(self.tmp_repo_dir) + repo = git.Repo.init(TMP_REPO_DIR) if 'USERNAME' not in os.environ: try: @@ -238,9 +255,19 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): '\'root\'.') os.environ['USERNAME'] = 'root' - repo.index.add([x for x in os.listdir(self.tmp_repo_dir) + repo.index.add([x for x in os.listdir(TMP_REPO_DIR) if x != '.git']) repo.index.commit('Test') + + def setUp(self): + ''' + We don't want to check in another .git dir into GH because that just + gets messy. Instead, we'll create a temporary repo on the fly for the + tests to examine. + ''' + if not gitfs.__virtual__(): + self.skipTest("GitFS could not be loaded. Skipping GitFS tests!") + self.tmp_cachedir = tempfile.mkdtemp(dir=TMP) gitfs.update() def tearDown(self): @@ -248,17 +275,11 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): Remove the temporary git repository and gitfs cache directory to ensure a clean environment for each test. 
''' - shutil.rmtree(self.tmp_repo_dir, onerror=self._rmtree_error) - shutil.rmtree(self.tmp_cachedir, onerror=self._rmtree_error) - shutil.rmtree(self.tmp_sock_dir, onerror=self._rmtree_error) - del self.tmp_repo_dir - del self.tmp_cachedir - del self.tmp_sock_dir - del self.integration_base_files - - def _rmtree_error(self, func, path, excinfo): - os.chmod(path, stat.S_IWRITE) - func(path) + try: + shutil.rmtree(self.tmp_cachedir, onerror=_rmtree_error) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise def test_file_list(self): ret = gitfs.file_list(LOAD) diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py index 3655953e36..ba9cdb3ba9 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py @@ -463,3 +463,162 @@ PATCHLEVEL = 3 self.assertEqual(os_grains.get('osrelease'), os_release_map['osrelease']) self.assertListEqual(list(os_grains.get('osrelease_info')), os_release_map['osrelease_info']) self.assertEqual(os_grains.get('osmajorrelease'), os_release_map['osmajorrelease']) + + @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') + def test_linux_memdata(self): + ''' + Test memdata on Linux systems + ''' + _path_exists_map = { + '/proc/1/cmdline': False, + '/proc/meminfo': True + } + _path_isfile_map = { + '/proc/meminfo': True + } + _cmd_run_map = { + 'dpkg --print-architecture': 'amd64' + } + + path_exists_mock = MagicMock(side_effect=lambda x: _path_exists_map[x]) + path_isfile_mock = MagicMock( + side_effect=lambda x: _path_isfile_map.get(x, False) + ) + cmd_run_mock = MagicMock( + side_effect=lambda x: _cmd_run_map[x] + ) + empty_mock = MagicMock(return_value={}) + + _proc_meminfo_file = '''MemTotal: 16277028 kB +SwapTotal: 4789244 kB''' + + orig_import = __import__ + if six.PY2: + built_in = '__builtin__' + else: + built_in = 'builtins' + + def _import_mock(name, *args): + if name == 'lsb_release': + raise ImportError('No module named lsb_release') + return orig_import(name, *args) 
+ + # Skip the first if statement + with patch.object(salt.utils.platform, 'is_proxy', + MagicMock(return_value=False)): + # Skip the selinux/systemd stuff (not pertinent) + with patch.object(core, '_linux_bin_exists', + MagicMock(return_value=False)): + # Skip the init grain compilation (not pertinent) + with patch.object(os.path, 'exists', path_exists_mock): + # Ensure that lsb_release fails to import + with patch('{0}.__import__'.format(built_in), + side_effect=_import_mock): + # Skip all the /etc/*-release stuff (not pertinent) + with patch.object(os.path, 'isfile', path_isfile_mock): + # Make a bunch of functions return empty dicts, + # we don't care about these grains for the + # purposes of this test. + with patch.object( + core, + '_linux_cpudata', + empty_mock): + with patch.object( + core, + '_linux_gpu_data', + empty_mock): + with patch('salt.utils.files.fopen', mock_open()) as _proc_meminfo: + _proc_meminfo.return_value.__iter__.return_value = _proc_meminfo_file.splitlines() + with patch.object( + core, + '_hw_data', + empty_mock): + with patch.object( + core, + '_virtual', + empty_mock): + with patch.object( + core, + '_ps', + empty_mock): + # Mock the osarch + with patch.dict( + core.__salt__, + {'cmd.run': cmd_run_mock}): + os_grains = core.os_data() + + self.assertEqual(os_grains.get('mem_total'), 15895) + self.assertEqual(os_grains.get('swap_total'), 4676) + + def test_bsd_memdata(self): + ''' + Test to memdata on *BSD systems + ''' + _path_exists_map = {} + _path_isfile_map = {} + _cmd_run_map = { + 'freebsd-version -u': '10.3-RELEASE', + '/sbin/sysctl -n hw.physmem': '2121781248', + '/sbin/sysctl -n vm.swap_total': '419430400' + } + + path_exists_mock = MagicMock(side_effect=lambda x: _path_exists_map[x]) + path_isfile_mock = MagicMock( + side_effect=lambda x: _path_isfile_map.get(x, False) + ) + cmd_run_mock = MagicMock( + side_effect=lambda x: _cmd_run_map[x] + ) + empty_mock = MagicMock(return_value={}) + + mock_freebsd_uname = ('FreeBSD', + 
'freebsd10.3-hostname-8148', + '10.3-RELEASE', + 'FreeBSD 10.3-RELEASE #0 r297264: Fri Mar 25 02:10:02 UTC 2016 root@releng1.nyi.freebsd.org:/usr/obj/usr/src/sys/GENERIC', + 'amd64', + 'amd64') + + with patch('platform.uname', + MagicMock(return_value=mock_freebsd_uname)): + with patch.object(salt.utils.platform, 'is_linux', + MagicMock(return_value=False)): + with patch.object(salt.utils.platform, 'is_freebsd', + MagicMock(return_value=True)): + # Skip the first if statement + with patch.object(salt.utils.platform, 'is_proxy', + MagicMock(return_value=False)): + # Skip the init grain compilation (not pertinent) + with patch.object(os.path, 'exists', path_exists_mock): + with patch('salt.utils.path.which') as mock: + mock.return_value = '/sbin/sysctl' + # Make a bunch of functions return empty dicts, + # we don't care about these grains for the + # purposes of this test. + with patch.object( + core, + '_bsd_cpudata', + empty_mock): + with patch.object( + core, + '_hw_data', + empty_mock): + with patch.object( + core, + '_zpool_data', + empty_mock): + with patch.object( + core, + '_virtual', + empty_mock): + with patch.object( + core, + '_ps', + empty_mock): + # Mock the osarch + with patch.dict( + core.__salt__, + {'cmd.run': cmd_run_mock}): + os_grains = core.os_data() + + self.assertEqual(os_grains.get('mem_total'), 2023) + self.assertEqual(os_grains.get('swap_total'), 400) diff --git a/tests/unit/modules/test_beacons.py b/tests/unit/modules/test_beacons.py index 6706866bb9..73f012a75d 100644 --- a/tests/unit/modules/test_beacons.py +++ b/tests/unit/modules/test_beacons.py @@ -59,6 +59,9 @@ class BeaconsTestCase(TestCase, LoaderModuleMockMixin): event_returns = [{'complete': True, 'tag': '/salt/minion/minion_beacons_list_complete', 'beacons': {}}, + {'complete': True, + 'tag': '/salt/minion/minion_beacons_list_available_complete', + 'beacons': ['ps']}, {'complete': True, 'valid': True, 'vcomment': '', diff --git a/tests/unit/modules/test_file.py 
b/tests/unit/modules/test_file.py index 540c805ff5..4547a17dca 100644 --- a/tests/unit/modules/test_file.py +++ b/tests/unit/modules/test_file.py @@ -504,6 +504,26 @@ class FileModuleTestCase(TestCase, LoaderModuleMockMixin): } } + def test_check_file_meta_no_lsattr(self): + ''' + Ensure that we skip attribute comparison if lsattr(1) is not found + ''' + source = "salt:///README.md" + name = "/home/git/proj/a/README.md" + source_sum = {} + stats_result = {'size': 22, 'group': 'wheel', 'uid': 0, 'type': 'file', + 'mode': '0600', 'gid': 0, 'target': name, 'user': + 'root', 'mtime': 1508356390, 'atime': 1508356390, + 'inode': 447, 'ctime': 1508356390} + with patch('salt.modules.file.stats') as m_stats: + m_stats.return_value = stats_result + with patch('salt.utils.path.which') as m_which: + m_which.return_value = None + result = filemod.check_file_meta(name, name, source, source_sum, + 'root', 'root', '755', None, + 'base') + self.assertTrue(result, None) + @skipIf(salt.utils.platform.is_windows(), 'SED is not available on Windows') def test_sed_limit_escaped(self): with tempfile.NamedTemporaryFile(mode='w+') as tfile: diff --git a/tests/unit/modules/test_shadow.py b/tests/unit/modules/test_shadow.py index e152d59b9a..62781153c2 100644 --- a/tests/unit/modules/test_shadow.py +++ b/tests/unit/modules/test_shadow.py @@ -10,6 +10,7 @@ from __future__ import absolute_import import salt.utils.platform from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf +from tests.support.helpers import skip_if_not_root # Import salt libs try: @@ -42,6 +43,7 @@ _HASHES = dict( @skipIf(not salt.utils.platform.is_linux(), 'minion is not Linux') +@skipIf(not HAS_SHADOW, 'shadow module is not available') class LinuxShadowTest(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): @@ -62,8 +64,7 @@ class LinuxShadowTest(TestCase, LoaderModuleMockMixin): hash_info['pw_hash'] ) - # 'list_users' function tests: 1 - + @skip_if_not_root 
def test_list_users(self): ''' Test if it returns a list of all users diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py index d083b7b1e5..029bff098e 100644 --- a/tests/unit/modules/test_virt.py +++ b/tests/unit/modules/test_virt.py @@ -428,6 +428,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): controllers = root.findall('.//devices/controller') # There should be no controller self.assertTrue(len(controllers) == 0) + # kvm mac address shoud start with 52:54:00 + self.assertTrue("mac address='52:54:00" in xml_data) def test_mixed_dict_and_list_as_profile_objects(self): diff --git a/tests/unit/modules/test_win_groupadd.py b/tests/unit/modules/test_win_groupadd.py index 9ddd7430ee..7404f76e24 100644 --- a/tests/unit/modules/test_win_groupadd.py +++ b/tests/unit/modules/test_win_groupadd.py @@ -10,6 +10,8 @@ from __future__ import absolute_import from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf from tests.support.mock import ( + MagicMock, + Mock, patch, NO_MOCK, NO_MOCK_REASON @@ -17,6 +19,7 @@ from tests.support.mock import ( # Import Salt Libs import salt.modules.win_groupadd as win_groupadd +import salt.utils.win_functions # Import Other Libs # pylint: disable=unused-import @@ -24,12 +27,46 @@ try: import win32com import pythoncom import pywintypes + PYWINTYPES_ERROR = pywintypes.com_error( + -1234, 'Exception occurred.', (0, None, 'C', None, 0, -4321), None) HAS_WIN_LIBS = True except ImportError: HAS_WIN_LIBS = False # pylint: enable=unused-import +class MockMember(object): + def __init__(self, name): + self.ADSPath = name + + +class MockGroupObj(object): + def __init__(self, ads_name, ads_users): + self._members = [MockMember(x) for x in ads_users] + self.Name = ads_name + + def members(self): + return self._members + + def Add(self, name): + ''' + This should be a no-op unless we want to test raising an error, in + which case this should be overridden in a subclass. 
+ ''' + pass + + def Remove(self, name): + ''' + This should be a no-op unless we want to test raising an error, in + which case this should be overridden in a subclass. + ''' + pass + + +if not NO_MOCK: + sam_mock = MagicMock(side_effect=lambda x: 'HOST\\' + x) + + @skipIf(not HAS_WIN_LIBS, 'win_groupadd unit tests can only be run if win32com, pythoncom, and pywintypes are installed') @skipIf(NO_MOCK, NO_MOCK_REASON) class WinGroupTestCase(TestCase, LoaderModuleMockMixin): @@ -41,106 +78,352 @@ class WinGroupTestCase(TestCase, LoaderModuleMockMixin): win_groupadd: {'__opts__': {'test': False}} } - # 'add' function tests: 1 - def test_add(self): ''' - Test if it add the specified group + Test adding a new group ''' - self.assertDictEqual(win_groupadd.add('foo'), - {'changes': [], 'name': 'foo', 'result': None, - 'comment': 'The group foo already exists.'}) + info = MagicMock(return_value=False) + with patch.object(win_groupadd, 'info', info),\ + patch.object(win_groupadd, '_get_computer_object', Mock()): + self.assertDictEqual(win_groupadd.add('foo'), + {'changes': ['Successfully created group foo'], + 'name': 'foo', + 'result': True, + 'comment': ''}) - # 'delete' function tests: 1 + def test_add_group_exists(self): + ''' + Test adding a new group if the group already exists + ''' + info = MagicMock(return_value={'name': 'foo', + 'passwd': None, + 'gid': None, + 'members': ['HOST\\spongebob']}) + with patch.object(win_groupadd, 'info', info),\ + patch.object(win_groupadd, '_get_computer_object', Mock()): + self.assertDictEqual(win_groupadd.add('foo'), + {'changes': [], 'name': 'foo', 'result': None, + 'comment': 'The group foo already exists.'}) + + def test_add_error(self): + ''' + Test adding a group and encountering an error + ''' + class CompObj(object): + def Create(self, type, name): + raise PYWINTYPES_ERROR + + compobj_mock = MagicMock(return_value=CompObj()) + + info = MagicMock(return_value=False) + with patch.object(win_groupadd, 'info', info),\ + 
patch.object(win_groupadd, '_get_computer_object', compobj_mock): + self.assertDictEqual(win_groupadd.add('foo'), + {'changes': [], + 'name': 'foo', + 'result': False, + 'comment': 'Failed to create group foo. C'}) def test_delete(self): ''' - Test if it remove the specified group + Test removing a group ''' - self.assertDictEqual(win_groupadd.delete('foo'), - {'changes': [], 'name': 'foo', 'result': None, - 'comment': 'The group foo does not exists.'}) + info = MagicMock(return_value={'name': 'foo', + 'passwd': None, + 'gid': None, + 'members': ['HOST\\spongebob']}) + with patch.object(win_groupadd, 'info', info), \ + patch.object(win_groupadd, '_get_computer_object', Mock()): + self.assertDictEqual( + win_groupadd.delete('foo'), + {'changes': ['Successfully removed group foo'], + 'name': 'foo', + 'result': True, + 'comment': ''}) - # 'info' function tests: 1 + def test_delete_no_group(self): + ''' + Test removing a group that doesn't exists + ''' + info = MagicMock(return_value=False) + with patch.object(win_groupadd, 'info', info), \ + patch.object(win_groupadd, '_get_computer_object', Mock()): + self.assertDictEqual(win_groupadd.delete('foo'), + {'changes': [], 'name': 'foo', 'result': None, + 'comment': 'The group foo does not exists.'}) + + def test_delete_error(self): + ''' + Test removing a group and encountering an error + ''' + class CompObj(object): + def Delete(self, type, name): + raise PYWINTYPES_ERROR + + compobj_mock = MagicMock(return_value=CompObj()) + + info = MagicMock(return_value={'name': 'foo', + 'passwd': None, + 'gid': None, + 'members': ['HOST\\spongebob']}) + with patch.object(win_groupadd, 'info', info),\ + patch.object(win_groupadd, '_get_computer_object', compobj_mock): + self.assertDictEqual( + win_groupadd.delete('foo'), + {'changes': [], + 'name': 'foo', + 'result': False, + 'comment': 'Failed to remove group foo. C'}) def test_info(self): ''' Test if it return information about a group. 
''' - with patch(win_groupadd.win32.client, 'flag', None): - self.assertDictEqual(win_groupadd.info('dc=salt'), + groupobj_mock = MagicMock(return_value=MockGroupObj('salt', ['WinNT://HOST/steve'])) + with patch.object(win_groupadd, '_get_group_object', groupobj_mock): + self.assertDictEqual(win_groupadd.info('salt'), {'gid': None, - 'members': ['dc=\\user1'], + 'members': ['HOST\\steve'], 'passwd': None, - 'name': 'WinNT://./dc=salt,group'}) - - with patch(win_groupadd.win32.client, 'flag', 1): - self.assertFalse(win_groupadd.info('dc=salt')) - - with patch(win_groupadd.win32.client, 'flag', 2): - self.assertFalse(win_groupadd.info('dc=salt')) - - # 'getent' function tests: 1 + 'name': 'salt'}) def test_getent(self): + groupobj_mock = MagicMock( + return_value=[ + MockGroupObj('salt', ['WinNT://HOST/steve']), + MockGroupObj('salty', ['WinNT://HOST/spongebob'])]) + mock_g_to_g = MagicMock(side_effect=[1, 2]) + with patch.object(win_groupadd, '_get_all_groups', groupobj_mock),\ + patch.dict(win_groupadd.__salt__, {'file.group_to_gid': mock_g_to_g}): + self.assertListEqual( + win_groupadd.getent(), + [ + {'gid': 1, 'members': ['HOST\\steve'], 'name': 'salt', 'passwd': 'x'}, + {'gid': 2, 'members': ['HOST\\spongebob'], 'name': 'salty', 'passwd': 'x'} + ]) + + def test_getent_context(self): ''' - Test if it return info on all groups + Test group.getent is using the values in __context__ ''' with patch.dict(win_groupadd.__context__, {'group.getent': True}): self.assertTrue(win_groupadd.getent()) - # 'adduser' function tests: 1 - def test_adduser(self): ''' - Test if it add a user to a group + Test adding a user to a group ''' - with patch(win_groupadd.win32.client, 'flag', None): - self.assertDictEqual(win_groupadd.adduser('dc=foo', 'dc=\\username'), - {'changes': {'Users Added': ['dc=\\username']}, - 'comment': '', 'name': 'dc=foo', 'result': True}) + groupobj_mock = MagicMock(return_value=MockGroupObj('foo', ['WinNT://HOST/steve'])) + with patch.object(win_groupadd, 
'_get_group_object', groupobj_mock), \ + patch.object(salt.utils.win_functions, 'get_sam_name', sam_mock): + self.assertDictEqual( + win_groupadd.adduser('foo', 'spongebob'), + {'changes': {'Users Added': ['HOST\\spongebob']}, + 'comment': '', + 'name': 'foo', + 'result': True}) - with patch(win_groupadd.win32.client, 'flag', 1): - comt = ('Failed to add dc=\\username to group dc=foo. C') - self.assertDictEqual(win_groupadd.adduser('dc=foo', 'dc=\\username'), - {'changes': {'Users Added': []}, 'name': 'dc=foo', - 'comment': comt, 'result': False}) + def test_adduser_already_exists(self): + ''' + Test adding a user that already exists + ''' + groupobj_mock = MagicMock(return_value=MockGroupObj('foo', ['WinNT://HOST/steve'])) + with patch.object(win_groupadd, '_get_group_object', groupobj_mock), \ + patch.object(salt.utils.win_functions, 'get_sam_name', sam_mock): + self.assertDictEqual( + win_groupadd.adduser('foo', 'steve'), + {'changes': {'Users Added': []}, + 'comment': 'User HOST\\steve is already a member of foo', + 'name': 'foo', + 'result': None}) - # 'deluser' function tests: 1 + def test_adduser_error(self): + ''' + Test adding a user and encountering an error + ''' + # Create mock group object with mocked Add function which raises the + # exception we need in order to test the error case. + class GroupObj(MockGroupObj): + def Add(self, name): + raise PYWINTYPES_ERROR + + groupobj_mock = MagicMock(return_value=GroupObj('foo', ['WinNT://HOST/steve'])) + with patch.object(win_groupadd, '_get_group_object', groupobj_mock), \ + patch.object(salt.utils.win_functions, 'get_sam_name', sam_mock): + self.assertDictEqual( + win_groupadd.adduser('foo', 'username'), + {'changes': {'Users Added': []}, + 'name': 'foo', + 'comment': 'Failed to add HOST\\username to group foo. 
C', + 'result': False}) + + def test_adduser_group_does_not_exist(self): + groupobj_mock = MagicMock(side_effect=PYWINTYPES_ERROR) + with patch.object(win_groupadd, '_get_group_object', groupobj_mock), \ + patch.object(salt.utils.win_functions, 'get_sam_name', sam_mock): + self.assertDictEqual( + win_groupadd.adduser('foo', 'spongebob'), + {'changes': {'Users Added': []}, + 'name': 'foo', + 'comment': 'Failure accessing group foo. C', + 'result': False}) def test_deluser(self): ''' - Test if it remove a user to a group + Test removing a user from a group ''' - ret = {'changes': {'Users Removed': []}, - 'comment': 'User dc=\\username is not a member of dc=foo', - 'name': 'dc=foo', 'result': None} + # Test removing a user + groupobj_mock = MagicMock(return_value=MockGroupObj('foo', ['WinNT://HOST/spongebob'])) + with patch.object(win_groupadd, '_get_group_object', groupobj_mock), \ + patch.object(salt.utils.win_functions, 'get_sam_name', sam_mock): + ret = {'changes': {'Users Removed': ['spongebob']}, + 'comment': '', + 'name': 'foo', + 'result': True} + self.assertDictEqual(win_groupadd.deluser('foo', 'spongebob'), ret) - self.assertDictEqual(win_groupadd.deluser('dc=foo', 'dc=\\username'), - ret) + def test_deluser_no_user(self): + ''' + Test removing a user from a group and that user is not a member of the + group + ''' + groupobj_mock = MagicMock(return_value=MockGroupObj('foo', ['WinNT://HOST/steve'])) + with patch.object(win_groupadd, '_get_group_object', groupobj_mock), \ + patch.object(salt.utils.win_functions, 'get_sam_name', sam_mock): + ret = {'changes': {'Users Removed': []}, + 'comment': 'User spongebob is not a member of foo', + 'name': 'foo', + 'result': None} + self.assertDictEqual(win_groupadd.deluser('foo', 'spongebob'), ret) - # 'members' function tests: 1 + def test_deluser_error(self): + ''' + Test removing a user and encountering an error + ''' + class GroupObj(MockGroupObj): + def Remove(self, name): + raise PYWINTYPES_ERROR + + groupobj_mock = 
MagicMock(return_value=GroupObj('foo', ['WinNT://HOST/spongebob'])) + with patch.object(win_groupadd, '_get_group_object', groupobj_mock), \ + patch.object(salt.utils.win_functions, 'get_sam_name', sam_mock): + self.assertDictEqual( + win_groupadd.deluser('foo', 'spongebob'), + {'changes': {'Users Removed': []}, + 'name': 'foo', + 'comment': 'Failed to remove spongebob from group foo. C', + 'result': False}) + + def test_deluser_group_does_not_exist(self): + groupobj_mock = MagicMock(side_effect=PYWINTYPES_ERROR) + with patch.object(win_groupadd, '_get_group_object', groupobj_mock), \ + patch.object(salt.utils.win_functions, 'get_sam_name', sam_mock): + self.assertDictEqual( + win_groupadd.deluser('foo', 'spongebob'), + {'changes': {'Users Removed': []}, + 'name': 'foo', + 'comment': 'Failure accessing group foo. C', + 'result': False}) def test_members(self): ''' - Test if it remove a user to a group + Test adding a list of members to a group, all existing users removed ''' - comment = ['Failure accessing group dc=foo. 
C'] - ret = {'name': 'dc=foo', 'result': False, 'comment': comment, - 'changes': {'Users Added': [], 'Users Removed': []}} + groupobj_mock = MagicMock(return_value=MockGroupObj('foo', ['WinNT://HOST/steve'])) + with patch.object(win_groupadd, '_get_group_object', groupobj_mock), \ + patch.object(salt.utils.win_functions, 'get_sam_name', sam_mock): + self.assertDictEqual( + win_groupadd.members('foo', 'spongebob,patrick,squidward'), + {'changes': { + 'Users Added': ['HOST\\patrick', 'HOST\\spongebob', 'HOST\\squidward'], + 'Users Removed': ['HOST\\steve'] + }, + 'comment': [], + 'name': 'foo', + 'result': True}) - with patch(win_groupadd.win32.client, 'flag', 2): - self.assertDictEqual(win_groupadd.members - ('dc=foo', 'dc=\\user1,dc=\\user2,dc=\\user3'), - ret) + def test_members_correct_membership(self): + ''' + Test adding a list of users where the list of users already exists + ''' + members_list = ['WinNT://HOST/spongebob', + 'WinNT://HOST/squidward', + 'WinNT://HOST/patrick'] + groupobj_mock = MagicMock(return_value=MockGroupObj('foo', members_list)) + with patch.object(win_groupadd, '_get_group_object', groupobj_mock), \ + patch.object(salt.utils.win_functions, 'get_sam_name', sam_mock): + self.assertDictEqual( + win_groupadd.members('foo', 'spongebob,patrick,squidward'), + {'changes': {'Users Added': [], 'Users Removed': []}, + 'comment': ['foo membership is correct'], + 'name': 'foo', + 'result': None}) - with patch(win_groupadd.win32.client, 'flag', 1): - comment = ['Failed to add dc=\\user2 to dc=foo. C', - 'Failed to remove dc=\\user1 from dc=foo. 
C'] - ret.update({'comment': comment, 'result': False}) - self.assertDictEqual(win_groupadd.members('dc=foo', 'dc=\\user2'), ret) + def test_members_group_does_not_exist(self): + ''' + Test adding a list of users where the group does not exist + ''' + groupobj_mock = MagicMock(side_effect=PYWINTYPES_ERROR) + with patch.object(win_groupadd, '_get_group_object', groupobj_mock), \ + patch.object(salt.utils.win_functions, 'get_sam_name', sam_mock): + self.assertDictEqual( + win_groupadd.members('foo', 'spongebob'), + {'changes': {'Users Added': [], 'Users Removed': []}, + 'comment': ['Failure accessing group foo. C'], + 'name': 'foo', + 'result': False}) - with patch(win_groupadd.win32.client, 'flag', None): - comment = ['dc=foo membership is correct'] - ret.update({'comment': comment, 'result': None}) - self.assertDictEqual(win_groupadd.members('dc=foo', 'dc=\\user1'), ret) + def test_members_fail_to_remove(self): + ''' + Test adding a list of members and fail to remove members not in the list + ''' + class GroupObj(MockGroupObj): + def Remove(self, name): + raise PYWINTYPES_ERROR + + groupobj_mock = MagicMock(return_value=GroupObj('foo', ['WinNT://HOST/spongebob'])) + with patch.object(win_groupadd, '_get_group_object', groupobj_mock), \ + patch.object(salt.utils.win_functions, 'get_sam_name', sam_mock): + self.assertDictEqual( + win_groupadd.members('foo', 'patrick'), + {'changes': {'Users Added': ['HOST\\patrick'], 'Users Removed': []}, + 'comment': ['Failed to remove HOST\\spongebob from foo. 
C'], + 'name': 'foo', + 'result': False}) + + def test_members_fail_to_add(self): + ''' + Test adding a list of members and failing to add + ''' + class GroupObj(MockGroupObj): + def Add(self, name): + raise PYWINTYPES_ERROR + + groupobj_mock = MagicMock(return_value=GroupObj('foo', ['WinNT://HOST/spongebob'])) + with patch.object(win_groupadd, '_get_group_object', groupobj_mock), \ + patch.object(salt.utils.win_functions, 'get_sam_name', sam_mock): + self.assertDictEqual( + win_groupadd.members('foo', 'patrick'), + {'changes': {'Users Added': [], 'Users Removed': ['HOST\\spongebob']}, + 'comment': ['Failed to add HOST\\patrick to foo. C'], + 'name': 'foo', + 'result': False}) + + def test_list_groups(self): + ''' + Test that list groups returns a list of groups by name + ''' + groupobj_mock = MagicMock( + return_value=[ + MockGroupObj('salt', ['WinNT://HOST/steve']), + MockGroupObj('salty', ['WinNT://HOST/Administrator'])]) + with patch.object(win_groupadd, '_get_all_groups', groupobj_mock): + self.assertListEqual(win_groupadd.list_groups(), + ['salt', 'salty']) + + def test_list_groups_context(self): + ''' + Test group.list_groups is using the values in __context__ + ''' + with patch.dict(win_groupadd.__context__, {'group.list_groups': True}): + self.assertTrue(win_groupadd.list_groups()) diff --git a/tests/unit/netapi/rest_tornado/test_handlers.py b/tests/unit/netapi/rest_tornado/test_handlers.py index 23bf5188ca..c7e88ccbdb 100644 --- a/tests/unit/netapi/rest_tornado/test_handlers.py +++ b/tests/unit/netapi/rest_tornado/test_handlers.py @@ -13,6 +13,7 @@ from tests.support.unit import TestCase, skipIf # Import Salt libs import salt.auth +from salt.ext.six.moves import map # pylint: disable=import-error try: import salt.netapi.rest_tornado as rest_tornado from salt.netapi.rest_tornado import saltnado @@ -619,6 +620,34 @@ class TestSaltAuthHandler(SaltnadoTestCase): self.assertEqual(response.code, 400) +class TestSaltRunHandler(SaltnadoTestCase): + + def 
get_app(self): + urls = [('/run', saltnado.RunSaltAPIHandler)] + return self.build_tornado_app(urls) + + def test_authentication_exception_consistency(self): + ''' + Test consistency of authentication exception of each clients. + ''' + valid_response = {'return': ['Failed to authenticate']} + + clients = ['local', 'local_async', 'runner', 'runner_async'] + request_lowstates = map(lambda client: {"client": client, + "tgt": "*", + "fun": "test.fib", + "arg": ["10"]}, + clients) + + for request_lowstate in request_lowstates: + response = self.fetch('/run', + method='POST', + body=json.dumps(request_lowstate), + headers={'Content-Type': self.content_type_map['json']}) + + self.assertEqual(valid_response, json.loads(response.body)) + + @skipIf(HAS_TORNADO is False, 'The tornado package needs to be installed') # pylint: disable=W0223 class TestWebsocketSaltAPIHandler(SaltnadoTestCase): diff --git a/tests/unit/states/test_blockdev.py b/tests/unit/states/test_blockdev.py index 9cf8b1db27..4a797fbe4f 100644 --- a/tests/unit/states/test_blockdev.py +++ b/tests/unit/states/test_blockdev.py @@ -100,7 +100,7 @@ class BlockdevTestCase(TestCase, LoaderModuleMockMixin): # Test state return when block device format fails with patch.dict(blockdev.__salt__, {'cmd.run': MagicMock(return_value=mock_ext4), - 'disk.format_': MagicMock(return_value=True)}): + 'disk.format': MagicMock(return_value=True)}): comt = ('Failed to format {0}'.format(name)) ret.update({'comment': comt, 'result': False}) with patch.object(salt.utils.path, 'which', diff --git a/tests/unit/states/test_mount.py b/tests/unit/states/test_mount.py index 1e1886001f..5a91e41da0 100644 --- a/tests/unit/states/test_mount.py +++ b/tests/unit/states/test_mount.py @@ -33,11 +33,11 @@ class MountTestCase(TestCase, LoaderModuleMockMixin): ''' Test to verify that a device is mounted. 
''' - name = '/mnt/sdb' - device = '/dev/sdb5' + name = os.path.realpath('/mnt/sdb') + device = os.path.realpath('/dev/sdb5') fstype = 'xfs' - name2 = '/mnt/cifs' + name2 = os.path.realpath('/mnt/cifs') device2 = '//SERVER/SHARE/' fstype2 = 'cifs' opts2 = ['noowners'] @@ -64,12 +64,11 @@ class MountTestCase(TestCase, LoaderModuleMockMixin): mock_group = MagicMock(return_value={'gid': 100}) mock_read_cache = MagicMock(return_value={}) mock_write_cache = MagicMock(return_value=True) - umount1 = ("Forced unmount because devices don't match. " - "Wanted: /dev/sdb6, current: /dev/sdb5, /dev/sdb5") with patch.dict(mount.__grains__, {'os': 'Darwin'}): with patch.dict(mount.__salt__, {'mount.active': mock_mnt, 'cmd.run_all': mock_ret, - 'mount.umount': mock_f}): + 'mount.umount': mock_f}), \ + patch('os.path.exists', MagicMock(return_value=True)): comt = ('Unable to find device with label /dev/sdb5.') ret.update({'comment': comt}) self.assertDictEqual(mount.mounted(name, 'LABEL=/dev/sdb5', @@ -83,7 +82,7 @@ class MountTestCase(TestCase, LoaderModuleMockMixin): ret) with patch.dict(mount.__opts__, {'test': False}): - comt = ('Unable to unmount /mnt/sdb: False.') + comt = ('Unable to unmount {0}: False.'.format(name)) umount = ('Forced unmount and mount because' ' options (noowners) changed') ret.update({'comment': comt, 'result': False, @@ -91,16 +90,19 @@ class MountTestCase(TestCase, LoaderModuleMockMixin): self.assertDictEqual(mount.mounted(name, device, 'nfs'), ret) + umount1 = ("Forced unmount because devices don't match. 
" + "Wanted: {0}, current: {1}, {1}".format(os.path.realpath('/dev/sdb6'), device)) comt = ('Unable to unmount') ret.update({'comment': comt, 'result': None, 'changes': {'umount': umount1}}) - self.assertDictEqual(mount.mounted(name, '/dev/sdb6', + self.assertDictEqual(mount.mounted(name, os.path.realpath('/dev/sdb6'), fstype, opts=[]), ret) with patch.dict(mount.__salt__, {'mount.active': mock_emt, 'mount.mount': mock_str, 'mount.set_automaster': mock}): - with patch.dict(mount.__opts__, {'test': True}): + with patch.dict(mount.__opts__, {'test': True}), \ + patch('os.path.exists', MagicMock(return_value=False)): comt = ('{0} does not exist and would not be created'.format(name)) ret.update({'comment': comt, 'changes': {}}) self.assertDictEqual(mount.mounted(name, device, @@ -119,14 +121,16 @@ class MountTestCase(TestCase, LoaderModuleMockMixin): self.assertDictEqual(mount.mounted(name, device, fstype), ret) - with patch.dict(mount.__opts__, {'test': True}): + with patch.dict(mount.__opts__, {'test': True}), \ + patch('os.path.exists', MagicMock(return_value=False)): comt = ('{0} does not exist and would neither be created nor mounted. ' '{0} needs to be written to the fstab in order to be made persistent.'.format(name)) ret.update({'comment': comt, 'result': None}) self.assertDictEqual(mount.mounted(name, device, fstype, mount=False), ret) - with patch.dict(mount.__opts__, {'test': False}): + with patch.dict(mount.__opts__, {'test': False}), \ + patch('os.path.exists', MagicMock(return_value=False)): comt = ('{0} not present and not mounted. 
' 'Entry already exists in the fstab.'.format(name)) ret.update({'comment': comt, 'result': True}) diff --git a/tests/unit/states/test_ssh_known_hosts.py b/tests/unit/states/test_ssh_known_hosts.py index 88bee8963c..e74a7aebdb 100644 --- a/tests/unit/states/test_ssh_known_hosts.py +++ b/tests/unit/states/test_ssh_known_hosts.py @@ -96,7 +96,7 @@ class SshKnownHostsTestCase(TestCase, LoaderModuleMockMixin): self.assertDictEqual(ssh_known_hosts.present(name, user), ret) result = {'status': 'updated', 'error': '', - 'new': {'fingerprint': fingerprint, 'key': key}, + 'new': [{'fingerprint': fingerprint, 'key': key}], 'old': ''} mock = MagicMock(return_value=result) with patch.dict(ssh_known_hosts.__salt__, @@ -104,8 +104,8 @@ class SshKnownHostsTestCase(TestCase, LoaderModuleMockMixin): comt = ("{0}'s key saved to .ssh/known_hosts (key: {1})" .format(name, key)) ret.update({'comment': comt, 'result': True, - 'changes': {'new': {'fingerprint': fingerprint, - 'key': key}, 'old': ''}}) + 'changes': {'new': [{'fingerprint': fingerprint, + 'key': key}], 'old': ''}}) self.assertDictEqual(ssh_known_hosts.present(name, user, key=key), ret) @@ -136,14 +136,14 @@ class SshKnownHostsTestCase(TestCase, LoaderModuleMockMixin): mock = MagicMock(return_value=False) with patch.dict(ssh_known_hosts.__salt__, - {'ssh.get_known_host': mock}): + {'ssh.get_known_host_entries': mock}): comt = ('Host is already absent') ret.update({'comment': comt, 'result': True}) self.assertDictEqual(ssh_known_hosts.absent(name, user), ret) mock = MagicMock(return_value=True) with patch.dict(ssh_known_hosts.__salt__, - {'ssh.get_known_host': mock}): + {'ssh.get_known_host_entries': mock}): with patch.dict(ssh_known_hosts.__opts__, {'test': True}): comt = ('Key for github.com is set to be' ' removed from .ssh/known_hosts') diff --git a/tests/unit/templates/test_jinja.py b/tests/unit/templates/test_jinja.py index 146e230747..0975dc2da7 100644 --- a/tests/unit/templates/test_jinja.py +++ 
b/tests/unit/templates/test_jinja.py @@ -2,14 +2,16 @@ # Import python libs from __future__ import absolute_import -import os +from jinja2 import Environment, DictLoader, exceptions import ast import copy -import tempfile -import json import datetime +import json +import os import pprint import re +import tempfile +import yaml # Import Salt Testing libs from tests.support.unit import skipIf, TestCase @@ -17,26 +19,30 @@ from tests.support.case import ModuleCase from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock from tests.support.paths import TMP_CONF_DIR -# Import salt libs +# Import Salt libs import salt.config -from salt.ext import six import salt.loader -import salt.utils.files -from salt.utils import get_context from salt.exceptions import SaltRenderError + +from salt.ext import six from salt.ext.six.moves import builtins + from salt.utils.decorators.jinja import JinjaFilter from salt.utils.jinja import ( SaltCacheLoader, SerializerExtension, ensure_sequence_filter ) -from salt.utils.templates import JINJA, render_jinja_tmpl from salt.utils.odict import OrderedDict +from salt.utils.templates import ( + get_context, + JINJA, + render_jinja_tmpl +) +import salt.utils.files +import salt.utils.stringutils # Import 3rd party libs -import yaml -from jinja2 import Environment, DictLoader, exceptions try: import timelib # pylint: disable=W0611 HAS_TIMELIB = True @@ -176,12 +182,9 @@ class TestGetTemplate(TestCase): with salt.utils.files.fopen(fn_) as fp_: out = render_jinja_tmpl( fp_.read(), - dict( - opts=self.local_opts, - saltenv='test', - salt=self.local_salt - )) - self.assertEqual(out, 'world\n') + dict(opts=self.local_opts, saltenv='test', salt=self.local_salt) + ) + self.assertEqual(out, 'world' + os.linesep) def test_fallback_noloader(self): ''' @@ -192,12 +195,9 @@ class TestGetTemplate(TestCase): with salt.utils.files.fopen(filename) as fp_: out = render_jinja_tmpl( fp_.read(), - dict( - opts=self.local_opts, - saltenv='test', - 
salt=self.local_salt - )) - self.assertEqual(out, 'Hey world !a b !\n') + dict(opts=self.local_opts, saltenv='test', salt=self.local_salt) + ) + self.assertEqual(out, 'Hey world !a b !' + os.linesep) def test_saltenv(self): ''' @@ -216,7 +216,7 @@ class TestGetTemplate(TestCase): 'file_roots': self.local_opts['file_roots'], 'pillar_roots': self.local_opts['pillar_roots']}, a='Hi', b='Salt', saltenv='test', salt=self.local_salt)) - self.assertEqual(out, 'Hey world !Hi Salt !\n') + self.assertEqual(out, 'Hey world !Hi Salt !' + os.linesep) self.assertEqual(fc.requests[0]['path'], 'salt://macro') def test_macro_additional_log_for_generalexc(self): @@ -225,7 +225,7 @@ class TestGetTemplate(TestCase): more output from trace. ''' expected = r'''Jinja error:.*division.* -.*/macrogeneral\(2\): +.*macrogeneral\(2\): --- \{% macro mymacro\(\) -%\} \{\{ 1/0 \}\} <====================== @@ -249,7 +249,7 @@ class TestGetTemplate(TestCase): more output from trace. ''' expected = r'''Jinja variable 'b' is undefined -.*/macroundefined\(2\): +.*macroundefined\(2\): --- \{% macro mymacro\(\) -%\} \{\{b.greetee\}\} <-- error is here <====================== @@ -272,7 +272,7 @@ class TestGetTemplate(TestCase): If we failed in a macro, get more output from trace. ''' expected = r'''Jinja syntax error: expected token .*end.*got '-'.* -.*/macroerror\(2\): +.*macroerror\(2\): --- # macro \{% macro mymacro\(greeting, greetee='world'\) -\} <-- error is here <====================== @@ -302,7 +302,7 @@ class TestGetTemplate(TestCase): 'file_roots': self.local_opts['file_roots'], 'pillar_roots': self.local_opts['pillar_roots']}, a='Hi', b='Sàlt', saltenv='test', salt=self.local_salt)) - self.assertEqual(out, u'Hey world !Hi Sàlt !\n') + self.assertEqual(out, salt.utils.stringutils.to_unicode('Hey world !Hi Sàlt !' 
+ os.linesep)) self.assertEqual(fc.requests[0]['path'], 'salt://macro') filename = os.path.join(TEMPLATES_DIR, 'files', 'test', 'non_ascii') @@ -313,7 +313,7 @@ class TestGetTemplate(TestCase): 'file_roots': self.local_opts['file_roots'], 'pillar_roots': self.local_opts['pillar_roots']}, a='Hi', b='Sàlt', saltenv='test', salt=self.local_salt)) - self.assertEqual(u'Assunção\n', out) + self.assertEqual(u'Assunção' + os.linesep, out) self.assertEqual(fc.requests[0]['path'], 'salt://macro') @skipIf(HAS_TIMELIB is False, 'The `timelib` library is not installed.') @@ -376,8 +376,8 @@ class TestGetTemplate(TestCase): with salt.utils.files.fopen(out['data']) as fp: result = fp.read() if six.PY2: - result = result.decode('utf-8') - self.assertEqual(u'Assunção\n', result) + result = salt.utils.stringutils.to_unicode(result) + self.assertEqual(salt.utils.stringutils.to_unicode('Assunção' + os.linesep), result) def test_get_context_has_enough_context(self): template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf' diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py index 77e75afaf2..5b0d9643e7 100644 --- a/tests/unit/test_auth.py +++ b/tests/unit/test_auth.py @@ -14,6 +14,7 @@ from tests.support.mock import patch, call, NO_MOCK, NO_MOCK_REASON, MagicMock import salt.master from tests.support.case import ModuleCase from salt import auth +import salt.utils.platform @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -150,6 +151,7 @@ class MasterACLTestCase(ModuleCase): } self.addCleanup(delattr, self, 'valid_clear_load') + @skipIf(salt.utils.platform.is_windows(), 'PAM eauth not available on Windows') def test_master_publish_name(self): ''' Test to ensure a simple name can auth against a given function. 
@@ -220,6 +222,7 @@ class MasterACLTestCase(ModuleCase): self.clear.publish(self.valid_clear_load) self.assertEqual(self.fire_event_mock.mock_calls, []) + @skipIf(salt.utils.platform.is_windows(), 'PAM eauth not available on Windows') def test_master_minion_glob(self): ''' Test to ensure we can allow access to a given @@ -257,6 +260,7 @@ class MasterACLTestCase(ModuleCase): # Unimplemented pass + @skipIf(salt.utils.platform.is_windows(), 'PAM eauth not available on Windows') def test_args_empty_spec(self): ''' Test simple arg restriction allowed. @@ -275,6 +279,7 @@ class MasterACLTestCase(ModuleCase): self.clear.publish(self.valid_clear_load) self.assertEqual(self.fire_event_mock.call_args[0][0]['fun'], 'test.empty') + @skipIf(salt.utils.platform.is_windows(), 'PAM eauth not available on Windows') def test_args_simple_match(self): ''' Test simple arg restriction allowed. @@ -296,6 +301,7 @@ class MasterACLTestCase(ModuleCase): self.clear.publish(self.valid_clear_load) self.assertEqual(self.fire_event_mock.call_args[0][0]['fun'], 'test.echo') + @skipIf(salt.utils.platform.is_windows(), 'PAM eauth not available on Windows') def test_args_more_args(self): ''' Test simple arg restriction allowed to pass unlisted args. @@ -356,6 +362,7 @@ class MasterACLTestCase(ModuleCase): self.clear.publish(self.valid_clear_load) self.assertEqual(self.fire_event_mock.mock_calls, []) + @skipIf(salt.utils.platform.is_windows(), 'PAM eauth not available on Windows') def test_args_kwargs_match(self): ''' Test simple kwargs restriction allowed. @@ -429,6 +436,7 @@ class MasterACLTestCase(ModuleCase): self.clear.publish(self.valid_clear_load) self.assertEqual(self.fire_event_mock.mock_calls, []) + @skipIf(salt.utils.platform.is_windows(), 'PAM eauth not available on Windows') def test_args_mixed_match(self): ''' Test mixed args and kwargs restriction allowed. 
@@ -574,6 +582,7 @@ class AuthACLTestCase(ModuleCase): } self.addCleanup(delattr, self, 'valid_clear_load') + @skipIf(salt.utils.platform.is_windows(), 'PAM eauth not available on Windows') def test_acl_simple_allow(self): self.clear.publish(self.valid_clear_load) self.assertEqual(self.auth_check_mock.call_args[0][0], diff --git a/tests/unit/test_daemons.py b/tests/unit/test_daemons.py index a2f3bbc863..cf736acce1 100644 --- a/tests/unit/test_daemons.py +++ b/tests/unit/test_daemons.py @@ -68,6 +68,141 @@ class LoggerMock(object): return False +def _master_exec_test(child_pipe): + def _create_master(): + ''' + Create master instance + :return: + ''' + obj = daemons.Master() + obj.config = {'user': 'dummy', 'hash_type': alg} + for attr in ['start_log_info', 'prepare', 'shutdown', 'master']: + setattr(obj, attr, MagicMock()) + + return obj + + _logger = LoggerMock() + ret = True + with patch('salt.cli.daemons.check_user', MagicMock(return_value=True)): + with patch('salt.cli.daemons.log', _logger): + for alg in ['md5', 'sha1']: + _create_master().start() + ret = ret and _logger.messages \ + and _logger.has_message('Do not use {alg}'.format(alg=alg), + log_type='warning') + + _logger.reset() + + for alg in ['sha224', 'sha256', 'sha384', 'sha512']: + _create_master().start() + ret = ret and _logger.messages \ + and not _logger.has_message('Do not use ') + child_pipe.send(ret) + child_pipe.close() + + +def _minion_exec_test(child_pipe): + def _create_minion(): + ''' + Create minion instance + :return: + ''' + obj = daemons.Minion() + obj.config = {'user': 'dummy', 'hash_type': alg} + for attr in ['start_log_info', 'prepare', 'shutdown']: + setattr(obj, attr, MagicMock()) + setattr(obj, 'minion', MagicMock(restart=False)) + + return obj + + ret = True + _logger = LoggerMock() + with patch('salt.cli.daemons.check_user', MagicMock(return_value=True)): + with patch('salt.cli.daemons.log', _logger): + for alg in ['md5', 'sha1']: + _create_minion().start() + ret = ret and 
_logger.messages \ + and _logger.has_message('Do not use {alg}'.format(alg=alg), + log_type='warning') + _logger.reset() + + for alg in ['sha224', 'sha256', 'sha384', 'sha512']: + _create_minion().start() + ret = ret and _logger.messages \ + and not _logger.has_message('Do not use ') + + child_pipe.send(ret) + child_pipe.close() + + +def _proxy_exec_test(child_pipe): + def _create_proxy_minion(): + ''' + Create proxy minion instance + :return: + ''' + obj = daemons.ProxyMinion() + obj.config = {'user': 'dummy', 'hash_type': alg} + for attr in ['minion', 'start_log_info', 'prepare', 'shutdown', 'tune_in']: + setattr(obj, attr, MagicMock()) + + obj.minion.restart = False + return obj + + ret = True + _logger = LoggerMock() + with patch('salt.cli.daemons.check_user', MagicMock(return_value=True)): + with patch('salt.cli.daemons.log', _logger): + for alg in ['md5', 'sha1']: + _create_proxy_minion().start() + ret = ret and _logger.messages \ + and _logger.has_message('Do not use {alg}'.format(alg=alg), + log_type='warning') + + _logger.reset() + + for alg in ['sha224', 'sha256', 'sha384', 'sha512']: + _create_proxy_minion().start() + ret = ret and _logger.messages \ + and not _logger.has_message('Do not use ') + child_pipe.send(ret) + child_pipe.close() + + +def _syndic_exec_test(child_pipe): + def _create_syndic(): + ''' + Create syndic instance + :return: + ''' + obj = daemons.Syndic() + obj.config = {'user': 'dummy', 'hash_type': alg} + for attr in ['syndic', 'start_log_info', 'prepare', 'shutdown']: + setattr(obj, attr, MagicMock()) + + return obj + + ret = True + _logger = LoggerMock() + with patch('salt.cli.daemons.check_user', MagicMock(return_value=True)): + with patch('salt.cli.daemons.log', _logger): + for alg in ['md5', 'sha1']: + _create_syndic().start() + ret = ret and _logger.messages \ + and _logger.has_message('Do not use {alg}'.format(alg=alg), + log_type='warning') + + _logger.reset() + + for alg in ['sha224', 'sha256', 'sha384', 'sha512']: + 
_create_syndic().start() + ret = ret and _logger.messages \ + and not _logger.has_message('Do not use ') + + child_pipe.send(ret) + child_pipe.close() + + @skipIf(NO_MOCK, NO_MOCK_REASON) class DaemonsStarterTestCase(TestCase, SaltClientTestCaseMixin): ''' @@ -87,38 +222,7 @@ class DaemonsStarterTestCase(TestCase, SaltClientTestCaseMixin): :return: ''' - def exec_test(child_pipe): - def _create_master(): - ''' - Create master instance - :return: - ''' - obj = daemons.Master() - obj.config = {'user': 'dummy', 'hash_type': alg} - for attr in ['start_log_info', 'prepare', 'shutdown', 'master']: - setattr(obj, attr, MagicMock()) - - return obj - - _logger = LoggerMock() - ret = True - with patch('salt.cli.daemons.check_user', MagicMock(return_value=True)): - with patch('salt.cli.daemons.log', _logger): - for alg in ['md5', 'sha1']: - _create_master().start() - ret = ret and _logger.messages \ - and _logger.has_message('Do not use {alg}'.format(alg=alg), - log_type='warning') - - _logger.reset() - - for alg in ['sha224', 'sha256', 'sha384', 'sha512']: - _create_master().start() - ret = ret and _logger.messages \ - and not _logger.has_message('Do not use ') - child_pipe.send(ret) - child_pipe.close() - self._multiproc_exec_test(exec_test) + self._multiproc_exec_test(_master_exec_test) def test_minion_daemon_hash_type_verified(self): ''' @@ -126,41 +230,7 @@ class DaemonsStarterTestCase(TestCase, SaltClientTestCaseMixin): :return: ''' - - def exec_test(child_pipe): - def _create_minion(): - ''' - Create minion instance - :return: - ''' - obj = daemons.Minion() - obj.config = {'user': 'dummy', 'hash_type': alg} - for attr in ['start_log_info', 'prepare', 'shutdown']: - setattr(obj, attr, MagicMock()) - setattr(obj, 'minion', MagicMock(restart=False)) - - return obj - - ret = True - _logger = LoggerMock() - with patch('salt.cli.daemons.check_user', MagicMock(return_value=True)): - with patch('salt.cli.daemons.log', _logger): - for alg in ['md5', 'sha1']: - 
_create_minion().start() - ret = ret and _logger.messages \ - and _logger.has_message('Do not use {alg}'.format(alg=alg), - log_type='warning') - _logger.reset() - - for alg in ['sha224', 'sha256', 'sha384', 'sha512']: - _create_minion().start() - ret = ret and _logger.messages \ - and not _logger.has_message('Do not use ') - - child_pipe.send(ret) - child_pipe.close() - - self._multiproc_exec_test(exec_test) + self._multiproc_exec_test(_minion_exec_test) def test_proxy_minion_daemon_hash_type_verified(self): ''' @@ -168,41 +238,7 @@ class DaemonsStarterTestCase(TestCase, SaltClientTestCaseMixin): :return: ''' - - def exec_test(child_pipe): - def _create_proxy_minion(): - ''' - Create proxy minion instance - :return: - ''' - obj = daemons.ProxyMinion() - obj.config = {'user': 'dummy', 'hash_type': alg} - for attr in ['minion', 'start_log_info', 'prepare', 'shutdown', 'tune_in']: - setattr(obj, attr, MagicMock()) - - obj.minion.restart = False - return obj - - ret = True - _logger = LoggerMock() - with patch('salt.cli.daemons.check_user', MagicMock(return_value=True)): - with patch('salt.cli.daemons.log', _logger): - for alg in ['md5', 'sha1']: - _create_proxy_minion().start() - ret = ret and _logger.messages \ - and _logger.has_message('Do not use {alg}'.format(alg=alg), - log_type='warning') - - _logger.reset() - - for alg in ['sha224', 'sha256', 'sha384', 'sha512']: - _create_proxy_minion().start() - ret = ret and _logger.messages \ - and not _logger.has_message('Do not use ') - child_pipe.send(ret) - child_pipe.close() - - self._multiproc_exec_test(exec_test) + self._multiproc_exec_test(_proxy_exec_test) def test_syndic_daemon_hash_type_verified(self): ''' @@ -210,38 +246,4 @@ class DaemonsStarterTestCase(TestCase, SaltClientTestCaseMixin): :return: ''' - - def exec_test(child_pipe): - def _create_syndic(): - ''' - Create syndic instance - :return: - ''' - obj = daemons.Syndic() - obj.config = {'user': 'dummy', 'hash_type': alg} - for attr in ['syndic', 
'start_log_info', 'prepare', 'shutdown']: - setattr(obj, attr, MagicMock()) - - return obj - - ret = True - _logger = LoggerMock() - with patch('salt.cli.daemons.check_user', MagicMock(return_value=True)): - with patch('salt.cli.daemons.log', _logger): - for alg in ['md5', 'sha1']: - _create_syndic().start() - ret = ret and _logger.messages \ - and _logger.has_message('Do not use {alg}'.format(alg=alg), - log_type='warning') - - _logger.reset() - - for alg in ['sha224', 'sha256', 'sha384', 'sha512']: - _create_syndic().start() - ret = ret and _logger.messages \ - and not _logger.has_message('Do not use ') - - child_pipe.send(ret) - child_pipe.close() - - self._multiproc_exec_test(exec_test) + self._multiproc_exec_test(_syndic_exec_test) diff --git a/tests/unit/test_master.py b/tests/unit/test_master.py index b12fcb6a93..074e337983 100644 --- a/tests/unit/test_master.py +++ b/tests/unit/test_master.py @@ -24,6 +24,8 @@ class ClearFuncsTestCase(TestCase): opts = salt.config.master_config(None) self.clear_funcs = salt.master.ClearFuncs(opts, {}) + # runner tests + def test_runner_token_not_authenticated(self): ''' Asserts that a TokenAuthenticationError is returned when the token can't authenticate. @@ -116,6 +118,8 @@ class ClearFuncsTestCase(TestCase): ret = self.clear_funcs.runner({}) self.assertDictEqual(mock_ret, ret) + # wheel tests + def test_wheel_token_not_authenticated(self): ''' Asserts that a TokenAuthenticationError is returned when the token can't authenticate. @@ -207,3 +211,105 @@ class ClearFuncsTestCase(TestCase): u'message': u'Authentication failure of type "user" occurred'}} ret = self.clear_funcs.wheel({}) self.assertDictEqual(mock_ret, ret) + + # publish tests + + def test_publish_user_is_blacklisted(self): + ''' + Asserts that an empty string is returned when the user has been blacklisted. 
+ ''' + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=True)): + self.assertEqual(u'', self.clear_funcs.publish({u'user': u'foo', u'fun': u'test.arg'})) + + def test_publish_cmd_blacklisted(self): + ''' + Asserts that an empty string returned when the command has been blacklisted. + ''' + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=True)): + self.assertEqual(u'', self.clear_funcs.publish({u'user': u'foo', u'fun': u'test.arg'})) + + def test_publish_token_not_authenticated(self): + ''' + Asserts that an empty string is returned when the token can't authenticate. + ''' + load = {u'user': u'foo', u'fun': u'test.arg', u'tgt': u'test_minion', + u'kwargs': {u'token': u'asdfasdfasdfasdf'}} + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)): + self.assertEqual(u'', self.clear_funcs.publish(load)) + + def test_publish_token_authorization_error(self): + ''' + Asserts that an empty string is returned when the token authenticates, but is not + authorized. + ''' + token = u'asdfasdfasdfasdf' + load = {u'user': u'foo', u'fun': u'test.arg', u'tgt': u'test_minion', + u'arg': u'bar', u'kwargs': {u'token': token}} + mock_token = {u'token': token, u'eauth': u'foo', u'name': u'test'} + + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + self.assertEqual(u'', self.clear_funcs.publish(load)) + + def test_publish_eauth_not_authenticated(self): + ''' + Asserts that an empty string is returned when the user can't authenticate. 
+ ''' + load = {u'user': u'test', u'fun': u'test.arg', u'tgt': u'test_minion', + u'kwargs': {u'eauth': u'foo'}} + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)): + self.assertEqual(u'', self.clear_funcs.publish(load)) + + def test_publish_eauth_authorization_error(self): + ''' + Asserts that an empty string is returned when the user authenticates, but is not + authorized. + ''' + load = {u'user': u'test', u'fun': u'test.arg', u'tgt': u'test_minion', + u'kwargs': {u'eauth': u'foo'}, u'arg': u'bar'} + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + self.assertEqual(u'', self.clear_funcs.publish(load)) + + def test_publish_user_not_authenticated(self): + ''' + Asserts that an empty string is returned when the user can't authenticate. + ''' + load = {u'user': u'test', u'fun': u'test.arg', u'tgt': u'test_minion'} + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)): + self.assertEqual(u'', self.clear_funcs.publish(load)) + + def test_publish_user_authenticated_missing_auth_list(self): + ''' + Asserts that an empty string is returned when the user has an effective user id and is + authenticated, but the auth_list is empty. 
+ ''' + load = {u'user': u'test', u'fun': u'test.arg', u'tgt': u'test_minion', + u'kwargs': {u'user': u'test'}, u'arg': u'foo'} + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.auth.LoadAuth.authenticate_key', MagicMock(return_value='fake-user-key')), \ + patch('salt.utils.master.get_values_of_matching_keys', MagicMock(return_value=[])): + self.assertEqual(u'', self.clear_funcs.publish(load)) + + def test_publish_user_authorization_error(self): + ''' + Asserts that an empty string is returned when the user authenticates, but is not + authorized. + ''' + load = {u'user': u'test', u'fun': u'test.arg', u'tgt': u'test_minion', + u'kwargs': {u'user': u'test'}, u'arg': u'foo'} + with patch('salt.acl.PublisherACL.user_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.acl.PublisherACL.cmd_is_blacklisted', MagicMock(return_value=False)), \ + patch('salt.auth.LoadAuth.authenticate_key', MagicMock(return_value='fake-user-key')), \ + patch('salt.utils.master.get_values_of_matching_keys', MagicMock(return_value=['test'])), \ + patch('salt.utils.minions.CkMinions.auth_check', MagicMock(return_value=False)): + self.assertEqual(u'', self.clear_funcs.publish(load)) diff --git a/tests/unit/test_pillar.py b/tests/unit/test_pillar.py index 4479164232..476941f8f4 100644 --- a/tests/unit/test_pillar.py +++ b/tests/unit/test_pillar.py @@ -439,7 +439,7 @@ class PillarTestCase(TestCase): def _setup_test_topfile_mocks(self, Matcher, get_file_client, nodegroup_order, glob_order): # Write a simple topfile and two pillar state files - self.top_file = tempfile.NamedTemporaryFile(dir=TMP) + self.top_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False) s = ''' base: group: @@ -456,19 +456,19 @@ base: '''.format(nodegroup_order=nodegroup_order, glob_order=glob_order) self.top_file.write(salt.utils.stringutils.to_bytes(s)) 
self.top_file.flush() - self.ssh_file = tempfile.NamedTemporaryFile(dir=TMP) + self.ssh_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False) self.ssh_file.write(b''' ssh: foo ''') self.ssh_file.flush() - self.ssh_minion_file = tempfile.NamedTemporaryFile(dir=TMP) + self.ssh_minion_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False) self.ssh_minion_file.write(b''' ssh: bar ''') self.ssh_minion_file.flush() - self.generic_file = tempfile.NamedTemporaryFile(dir=TMP) + self.generic_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False) self.generic_file.write(b''' generic: key1: @@ -478,7 +478,7 @@ generic: sub_key1: [] ''') self.generic_file.flush() - self.generic_minion_file = tempfile.NamedTemporaryFile(dir=TMP) + self.generic_minion_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False) self.generic_minion_file.write(b''' generic: key1: @@ -507,7 +507,7 @@ generic: client.get_state.side_effect = get_state def _setup_test_include_mocks(self, Matcher, get_file_client): - self.top_file = top_file = tempfile.NamedTemporaryFile(dir=TMP) + self.top_file = top_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False) top_file.write(b''' base: '*': @@ -518,21 +518,21 @@ base: - test ''') top_file.flush() - self.init_sls = init_sls = tempfile.NamedTemporaryFile(dir=TMP) + self.init_sls = init_sls = tempfile.NamedTemporaryFile(dir=TMP, delete=False) init_sls.write(b''' include: - test.sub1 - test.sub2 ''') init_sls.flush() - self.sub1_sls = sub1_sls = tempfile.NamedTemporaryFile(dir=TMP) + self.sub1_sls = sub1_sls = tempfile.NamedTemporaryFile(dir=TMP, delete=False) sub1_sls.write(b''' p1: - value1_1 - value1_2 ''') sub1_sls.flush() - self.sub2_sls = sub2_sls = tempfile.NamedTemporaryFile(dir=TMP) + self.sub2_sls = sub2_sls = tempfile.NamedTemporaryFile(dir=TMP, delete=False) sub2_sls.write(b''' p1: - value1_3 diff --git a/tests/unit/utils/test_gitfs.py b/tests/unit/utils/test_gitfs.py index 070a46fe75..89a8b4fd59 100644 --- a/tests/unit/utils/test_gitfs.py 
+++ b/tests/unit/utils/test_gitfs.py @@ -37,18 +37,19 @@ class TestGitFSProvider(TestCase): MagicMock(return_value=True)): with patch.object(role_class, 'verify_pygit2', MagicMock(return_value=False)): - args = [OPTS] + args = [OPTS, {}] + kwargs = {'init_remotes': False} if role_name == 'winrepo': - args.append('/tmp/winrepo-dir') + kwargs['cache_root'] = '/tmp/winrepo-dir' with patch.dict(OPTS, {key: provider}): # Try to create an instance with uppercase letters in # provider name. If it fails then a # FileserverConfigError will be raised, so no assert is # necessary. - role_class(*args) - # Now try to instantiate an instance with all lowercase - # letters. Again, no need for an assert here. - role_class(*args) + role_class(*args, **kwargs) + # Now try to instantiate an instance with all lowercase + # letters. Again, no need for an assert here. + role_class(*args, **kwargs) def test_valid_provider(self): ''' @@ -73,12 +74,13 @@ class TestGitFSProvider(TestCase): verify = 'verify_pygit2' mock2 = _get_mock(verify, provider) with patch.object(role_class, verify, mock2): - args = [OPTS] + args = [OPTS, {}] + kwargs = {'init_remotes': False} if role_name == 'winrepo': - args.append('/tmp/winrepo-dir') + kwargs['cache_root'] = '/tmp/winrepo-dir' with patch.dict(OPTS, {key: provider}): - role_class(*args) + role_class(*args, **kwargs) with patch.dict(OPTS, {key: 'foo'}): # Set the provider name to a known invalid provider @@ -86,5 +88,5 @@ class TestGitFSProvider(TestCase): self.assertRaises( FileserverConfigError, role_class, - *args - ) + *args, + **kwargs) diff --git a/tests/unit/utils/test_xmlutil.py b/tests/unit/utils/test_xmlutil.py new file mode 100644 index 0000000000..b17b0af3e6 --- /dev/null +++ b/tests/unit/utils/test_xmlutil.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +''' + tests.unit.xmlutil_test + ~~~~~~~~~~~~~~~~~~~~ +''' +from __future__ import absolute_import +# Import Salt Testing libs +from tests.support.unit import TestCase + +# Import Salt libs 
+from salt._compat import ElementTree as ET
+import salt.utils.xmlutil as xml
+
+
+class XMLUtilTestCase(TestCase):
+    '''
+    Tests that salt.utils.xmlutil properly parses XML data and returns as a properly formatted
+    dictionary. The default method of parsing will ignore attributes and return only the child
+    items. The full method will include parsing attributes.
+    '''
+
+    def setUp(self):
+
+        # Populate our use cases for specific XML formats.
+        self.cases = {
+            'a': {
+                'xml': '<parent>data</parent>',
+                'legacy': {'parent': 'data'},
+                'full': 'data'
+            },
+            'b': {
+                'xml': '<parent value="data">data</parent>',
+                'legacy': {'parent': 'data'},
+                'full': {'parent': 'data', 'value': 'data'}
+            },
+            'c': {
+                'xml': '<parent><child>data</child><child value="data">data</child>'
+                       '<child value="data"/><child/></parent>',
+                'legacy': {'child': ['data', {'child': 'data'}, {'child': None}, {'child': None}]},
+                'full': {'child': ['data', {'child': 'data', 'value': 'data'}, {'value': 'data'}, None]}
+            },
+            'd': {
+                'xml': '<parent value="data" another="data"><child>data</child></parent>',
+                'legacy': {'child': 'data'},
+                'full': {'child': 'data', 'another': 'data', 'value': 'data'}
+            },
+            'e': {
+                'xml': '<parent value="data" another="data"><child value="data">data</child></parent>',
+                'legacy': {'child': 'data'},
+                'full': {'child': {'child': 'data', 'value': 'data'}, 'another': 'data', 'value': 'data'}
+            },
+            'f': {
+                'xml': '<parent><child><sub-child value="data">data</sub-child></child>'
+                       '<child>data</child></parent>',
+                'legacy': {'child': [{'sub-child': 'data'}, {'child': 'data'}]},
+                'full': {'child': [{'sub-child': {'value': 'data', 'sub-child': 'data'}}, 'data']}
+            },
+        }
+
+    def test_xml_case_a(self):
+        xmldata = ET.fromstring(self.cases['a']['xml'])
+        defaultdict = xml.to_dict(xmldata)
+        self.assertEqual(defaultdict, self.cases['a']['legacy'])
+
+    def test_xml_case_a_legacy(self):
+        xmldata = ET.fromstring(self.cases['a']['xml'])
+        defaultdict = xml.to_dict(xmldata, False)
+        self.assertEqual(defaultdict, self.cases['a']['legacy'])
+
+    def test_xml_case_a_full(self):
+        xmldata = ET.fromstring(self.cases['a']['xml'])
+        defaultdict = xml.to_dict(xmldata, True)
+        self.assertEqual(defaultdict, self.cases['a']['full'])
+
+    def test_xml_case_b(self):
+        xmldata = ET.fromstring(self.cases['b']['xml'])
+        defaultdict = xml.to_dict(xmldata)
+        self.assertEqual(defaultdict, self.cases['b']['legacy'])
+
+    def test_xml_case_b_legacy(self):
+        xmldata = ET.fromstring(self.cases['b']['xml'])
+        defaultdict = xml.to_dict(xmldata, False)
+        self.assertEqual(defaultdict, self.cases['b']['legacy'])
+
+    def test_xml_case_b_full(self):
+        xmldata = ET.fromstring(self.cases['b']['xml'])
+        defaultdict = xml.to_dict(xmldata, True)
+        self.assertEqual(defaultdict, self.cases['b']['full'])
+
+    def test_xml_case_c(self):
+        xmldata = ET.fromstring(self.cases['c']['xml'])
+        defaultdict = xml.to_dict(xmldata)
+        self.assertEqual(defaultdict, self.cases['c']['legacy'])
+
+    def test_xml_case_c_legacy(self):
+        xmldata = ET.fromstring(self.cases['c']['xml'])
+        defaultdict = xml.to_dict(xmldata, False)
+        self.assertEqual(defaultdict, self.cases['c']['legacy'])
+
+    def test_xml_case_c_full(self):
+        xmldata = ET.fromstring(self.cases['c']['xml'])
+        defaultdict = xml.to_dict(xmldata, True)
+        self.assertEqual(defaultdict, self.cases['c']['full'])
+
+    def test_xml_case_d(self):
+        xmldata = ET.fromstring(self.cases['d']['xml'])
+        defaultdict = xml.to_dict(xmldata)
+        self.assertEqual(defaultdict, self.cases['d']['legacy'])
+
+    def test_xml_case_d_legacy(self):
+        xmldata = ET.fromstring(self.cases['d']['xml'])
+        defaultdict = xml.to_dict(xmldata, False)
+        self.assertEqual(defaultdict, self.cases['d']['legacy'])
+
+    def test_xml_case_d_full(self):
+        xmldata = ET.fromstring(self.cases['d']['xml'])
+        defaultdict = xml.to_dict(xmldata, True)
+        self.assertEqual(defaultdict, self.cases['d']['full'])
+
+    def test_xml_case_e(self):
+        xmldata = ET.fromstring(self.cases['e']['xml'])
+        defaultdict = xml.to_dict(xmldata)
+        self.assertEqual(defaultdict, self.cases['e']['legacy'])
+
+    def test_xml_case_e_legacy(self):
+        xmldata = ET.fromstring(self.cases['e']['xml'])
+        defaultdict = xml.to_dict(xmldata, False)
+        self.assertEqual(defaultdict, self.cases['e']['legacy'])
+
+    def test_xml_case_e_full(self):
+        xmldata = ET.fromstring(self.cases['e']['xml'])
+        defaultdict = xml.to_dict(xmldata, True)
+        self.assertEqual(defaultdict, self.cases['e']['full'])
+
+    def test_xml_case_f(self):
+        xmldata = ET.fromstring(self.cases['f']['xml'])
+        defaultdict = xml.to_dict(xmldata)
+        self.assertEqual(defaultdict, self.cases['f']['legacy'])
+
+    def test_xml_case_f_legacy(self):
+        xmldata = ET.fromstring(self.cases['f']['xml'])
+        defaultdict = xml.to_dict(xmldata, False)
+        self.assertEqual(defaultdict, self.cases['f']['legacy'])
+
+    def test_xml_case_f_full(self):
+        xmldata = ET.fromstring(self.cases['f']['xml'])
+        defaultdict = xml.to_dict(xmldata, True)
+        self.assertEqual(defaultdict, self.cases['f']['full'])