Mirror of https://github.com/valitydev/salt.git

Merge branch 'develop' into develop
Commit 62c0cf70d4

.github/PULL_REQUEST_TEMPLATE.md (vendored): 6 changes
@@ -12,4 +12,10 @@ Remove this section if not relevant
Yes/No

+### Commits signed with GPG?
+
+Yes/No
+
Please review [Salt's Contributing Guide](https://docs.saltstack.com/en/latest/topics/development/contributing.html) for best practices.

+See GitHub's [page on GPG signing](https://help.github.com/articles/signing-commits-using-gpg/) for more information about signing commits with GPG.
.github/stale.yml (vendored): 4 changes
@@ -1,8 +1,8 @@
# Probot Stale configuration file

# Number of days of inactivity before an issue becomes stale
-# 950 is approximately 2 years and 7 months
-daysUntilStale: 950
+# 910 is approximately 2 years and 6 months
+daysUntilStale: 910

# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
@@ -34,6 +34,7 @@ Full list of Salt Cloud modules
scaleway
softlayer
softlayer_hw
+vagrant
virtualbox
vmware
vultrpy
doc/ref/clouds/all/salt.cloud.clouds.vagrant.rst (new file): 6 lines
@@ -0,0 +1,6 @@
=========================
salt.cloud.clouds.vagrant
=========================

.. automodule:: salt.cloud.clouds.vagrant
    :members:
@@ -299,6 +299,7 @@ execution modules
openstack_mng
openvswitch
opkg
+opsgenie
oracle
osquery
out
doc/ref/modules/all/salt.modules.opsgenie.rst (new file): 6 lines
@@ -0,0 +1,6 @@
=====================
salt.modules.opsgenie
=====================

.. automodule:: salt.modules.opsgenie
    :members:
@@ -188,6 +188,7 @@ state modules
openstack_config
openvswitch_bridge
openvswitch_port
+opsgenie
pagerduty
pagerduty_escalation_policy
pagerduty_schedule
@@ -267,6 +268,7 @@ state modules
tuned
uptime
user
+vagrant
vault
vbox_guest
victorops
doc/ref/states/all/salt.states.opsgenie.rst (new file): 6 lines
@@ -0,0 +1,6 @@
=====================
salt.states.opsgenie
=====================

.. automodule:: salt.states.opsgenie
    :members:
doc/ref/states/all/salt.states.vagrant.rst (new file): 6 lines
@@ -0,0 +1,6 @@
===================
salt.states.vagrant
===================

.. automodule:: salt.states.vagrant
    :members:
@@ -6,7 +6,7 @@ Introduced in Salt version ``2017.7.0`` it is now possible to run select states
in parallel. This is accomplished very easily by adding the ``parallel: True``
option to your state declaration:

-.. code_block:: yaml
+.. code-block:: yaml

    nginx:
      service.running:
@@ -24,7 +24,7 @@ state to finish.

Given this example:

-.. code_block:: yaml
+.. code-block:: yaml

    sleep 10:
      cmd.run:
@@ -74,16 +74,16 @@ also complete.
Things to be Careful of
=======================

-Parallel States does not prevent you from creating parallel conflicts on your
+Parallel States do not prevent you from creating parallel conflicts on your
system. This means that if you start multiple package installs using Salt then
the package manager will block or fail. If you attempt to manage the same file
with multiple states in parallel then the result can produce an unexpected
file.

Make sure that the states you choose to run in parallel do not conflict, or
-else, like in and parallel programming environment, the outcome may not be
+else, like in any parallel programming environment, the outcome may not be
what you expect. Doing things like just making all states run in parallel
-will almost certinly result in unexpected behavior.
+will almost certainly result in unexpected behavior.

With that said, running states in parallel should be safe the vast majority
of the time and the most likely culprit for unexpected behavior is running
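As an illustration of the guidance above, here is a minimal sketch of two states that are safe to run concurrently because they manage unrelated resources; the service names are hypothetical, and only the ``parallel: True`` flag is taken from the documentation being changed:

.. code-block:: yaml

    # two independent services; each declares parallel: True,
    # so neither blocks waiting for the other
    nginx:
      service.running:
        - parallel: True

    redis:
      service.running:
        - parallel: True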
@@ -540,6 +540,17 @@ machines which are already installed, but not Salted. For more information about
this driver and for configuration examples, please see the
:ref:`Gettting Started with Saltify <getting-started-with-saltify>` documentation.

+.. _config_vagrant:
+
+Vagrant
+-------
+
+The Vagrant driver is a new, experimental driver for controlling a VagrantBox
+virtual machine, and installing Salt on it. The target host machine must be a
+working salt minion, which is controlled via the salt master using salt-api.
+For more information, see
+:ref:`Getting Started With Vagrant <getting-started-with-vagrant>`.
+

Extending Profiles and Cloud Providers Configuration
====================================================
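For orientation, a provider entry for this new driver might look like the following minimal sketch; the master address is a placeholder, and the full, authoritative example appears in the Getting Started With Vagrant page added later in this commit:

.. code-block:: yaml

    # /etc/salt/cloud.providers.d/ (illustrative entry)
    my-vagrant-config:
      minion:
        master: 203.0.113.10   # placeholder: address of your salt master
      provider: vagrant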
@@ -38,26 +38,30 @@ These are features that are available for almost every cloud host.

.. container:: scrollable

[removed: the previous 14-column version of this table, which had no Vagrant column, left the Saltify column blank for Query, Full Query, Selective Query, List Sizes, List Locations, and destroy ("Yes" for List Images and create), and carried no footnotes]
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+   | |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Vagrant|Softlayer|Softlayer|Aliyun|
+   | |(Legacy)| |Ocean | | | | | | |(Legacy) | | | |Hardware | |
+   +=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=======+=========+=========+======+
+   |Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+   |Full Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+   |Selective Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+   |List Sizes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+   |List Images |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+   |List Locations |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+   |create |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |[1] |Yes |Yes |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+   |destroy |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+
+[1] Yes, if salt-api is enabled.
+
+[2] Always returns `{}`.

Actions
=======
@@ -70,46 +74,46 @@ instance name to be passed in. For example:

.. container:: scrollable

[removed: the previous version of this table, identical to the one added below except that the combined "Saltify& Vagrant" column was a single "Saltify" column]
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |Actions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun|
+   | |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | |
+   +=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+
+   |attach_volume | | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |create_attach_volumes |Yes | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |del_tags |Yes | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |delvol_on_destroy | | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |detach_volume | | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |disable_term_protect |Yes | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |enable_term_protect |Yes | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |get_tags |Yes | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |keepvol_on_destroy | | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |list_keypairs | | |Yes | | | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |rename |Yes | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |set_tags |Yes | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |show_delvol_on_destroy | | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |show_instance | | |Yes |Yes| | |Yes | |Yes | | |Yes |Yes |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |show_term_protect | | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |start |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |stop |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |take_action | | | | | |Yes | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+

Functions
=========
@@ -122,81 +126,83 @@ require the name of the provider to be passed in. For example:

.. container:: scrollable

[removed: the previous version of this table, identical to the one added below except that the combined "Saltify& Vagrant" column was a single "Saltify" column, the reboot row carried no "[1]" entry, and there was no footnote]
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |Functions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun|
+   | |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | |
+   +=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+
+   |block_device_mappings |Yes | | | | | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |create_keypair | | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |create_volume | | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |delete_key | | | | | |Yes | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |delete_keypair | | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |delete_volume | | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |get_image | | |Yes | | |Yes | | |Yes | | | | |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |get_ip | |Yes | | | | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |get_key | |Yes | | | | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |get_keyid | | |Yes | | | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |get_keypair | |Yes | | | | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |get_networkid | |Yes | | | | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |get_node | | | | | |Yes | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |get_password | |Yes | | | | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |get_size | | |Yes | | |Yes | | | | | | | |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |get_spot_config | | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |get_subnetid | | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |iam_profile |Yes | | |Yes| | | | | | | | | |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |import_key | | | | | |Yes | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |key_list | | | | | |Yes | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |keyname |Yes | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |list_availability_zones| | | |Yes| | | | | | | | | |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |list_custom_images | | | | | | | | | | | |Yes | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |list_keys | | | | | |Yes | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |list_nodes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |list_nodes_full |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |list_nodes_select |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |list_vlans | | | | | | | | | | | |Yes |Yes | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |rackconnect | | | | | | | |Yes | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |reboot | | | |Yes| |Yes | | | | |[1] | | |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |reformat_node | | | | | |Yes | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |securitygroup |Yes | | |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |securitygroupid | | | |Yes| | | | | | | | | |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |show_image | | | |Yes| | | | |Yes | | | | |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |show_key | | | | | |Yes | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |show_keypair | | |Yes |Yes| | | | | | | | | | |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+   |show_volume | | | |Yes| | | | | | | | | |Yes |
+   +-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+
+[1] Yes, if salt-api is enabled.
@@ -129,6 +129,7 @@ Cloud Provider Specifics
Getting Started With Scaleway <scaleway>
Getting Started With Saltify <saltify>
Getting Started With SoftLayer <softlayer>
+Getting Started With Vagrant <vagrant>
Getting Started With Vexxhost <vexxhost>
Getting Started With Virtualbox <virtualbox>
Getting Started With VMware <vmware>
@@ -1,3 +1,5 @@
+.. _misc-salt-cloud-options:
+
================================
Miscellaneous Salt Cloud Options
================================
@@ -4,7 +4,7 @@
Getting Started With Saltify
============================

-The Saltify driver is a new, experimental driver for installing Salt on existing
+The Saltify driver is a driver for installing Salt on existing
machines (virtual or bare metal).

@@ -33,20 +33,29 @@ the salt-master:

However, if you wish to use the more advanced capabilities of salt-cloud, such as
rebooting, listing, and disconnecting machines, then the salt master must fill
-the role usually performed by a vendor's cloud management system. In order to do
-that, you must configure your salt master as a salt-api server, and supply credentials
-to use it. (See ``salt-api setup`` below.)
+the role usually performed by a vendor's cloud management system. The salt master
+must be running on the salt-cloud machine, and created nodes must be connected to the
+master.
+
+Additional information about which configuration options apply to which actions
+can be studied in the
+:ref:`Saltify Module documentation <saltify-module>`
+and the
+:ref:`Miscellaneous Salt Cloud Options <misc-salt-cloud-options>`
+document.

Profiles
========

-Saltify requires a profile to be configured for each machine that needs Salt
-installed. The initial profile can be set up at ``/etc/salt/cloud.profiles``
+Saltify requires a separate profile to be configured for each machine that
+needs Salt installed [#]_. The initial profile can be set up at
+``/etc/salt/cloud.profiles``
or in the ``/etc/salt/cloud.profiles.d/`` directory. Each profile requires
both an ``ssh_host`` and an ``ssh_username`` key parameter as well as either
an ``key_filename`` or a ``password``.

+.. [#] Unless you are using a map file to provide the unique parameters.
+
Profile configuration example:

.. code-block:: yaml
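The profile example itself lies outside this hunk; for context, a minimal Saltify profile under the requirements just described might look like this sketch (host, username, and key path are placeholders):

.. code-block:: yaml

    # /etc/salt/cloud.profiles.d/ (illustrative entry)
    salt-this-machine:
      ssh_host: 12.34.56.78                    # address of the existing machine
      ssh_username: root                       # login user
      key_filename: '/etc/salt/mysshkey.pem'   # or use: password: <password>
      provider: my-saltify-config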
@@ -68,40 +77,78 @@ The machine can now be "Salted" with the following command:

This will install salt on the machine specified by the cloud profile,
``salt-this-machine``, and will give the machine the minion id of
``my-machine``. If the command was executed on the salt-master, its Salt
-key will automatically be signed on the master.
+key will automatically be accepted by the master.

Once a salt-minion has been successfully installed on the instance, connectivity
to it can be verified with Salt:

.. code-block:: bash

-    salt my-machine test.ping
+    salt my-machine test.version

Destroy Options
---------------

.. versionadded:: Oxygen

For obvious reasons, the ``destroy`` action does not actually vaporize hardware.
-If the salt master is connected using salt-api, it can tear down parts of
-the client machines. It will remove the client's key from the salt master,
-and will attempt the following options:
+If the salt master is connected, it can tear down parts of the client machines.
+It will remove the client's key from the salt master,
+and can execute the following options:

.. code-block:: yaml

    - remove_config_on_destroy: true
      # default: true
      # Deactivate salt-minion on reboot and
-     # delete the minion config and key files from its ``/etc/salt`` directory,
-     # NOTE: If deactivation is unsuccessful (older Ubuntu machines) then when
+     # delete the minion config and key files from its "/etc/salt" directory,
+     # NOTE: If deactivation was unsuccessful (older Ubuntu machines) then when
      # salt-minion restarts it will automatically create a new, unwanted, set
-     # of key files. The ``force_minion_config`` option must be used in that case.
+     # of key files. Use the "force_minion_config" option to replace them.

    - shutdown_on_destroy: false
      # default: false
-     # send a ``shutdown`` command to the client.
+     # last of all, send a "shutdown" command to the client.

+Wake On LAN
+-----------
+
+.. versionadded:: Oxygen
+
+In addition to connecting a hardware machine to a Salt master,
+you have the option of sending a wake-on-LAN
+`magic packet`_
+to start that machine running.
+
+.. _magic packet: https://en.wikipedia.org/wiki/Wake-on-LAN
+
+The "magic packet" must be sent by an existing salt minion which is on
+the same network segment as the target machine. (Or your router
+must be set up especially to route WoL packets.) Your target machine
+must be set up to listen for WoL and to respond appropriately.
+
+You must provide the Salt node id of the machine which will send
+the WoL packet \(parameter ``wol_sender_node``\), and
+the hardware MAC address of the machine you intend to wake,
+\(parameter ``wake_on_lan_mac``\). If both parameters are defined,
+the WoL will be sent. The cloud master will then sleep a while
+\(parameter ``wol_boot_wait``) to give the target machine time to
+boot up before we start probing its SSH port to begin deploying
+Salt to it. The default sleep time is 30 seconds.
+
+.. code-block:: yaml
+
+    # /etc/salt/cloud.profiles.d/saltify.conf
+
+    salt-this-machine:
+      ssh_host: 12.34.56.78
+      ssh_username: root
+      key_filename: '/etc/salt/mysshkey.pem'
+      provider: my-saltify-config
+      wake_on_lan_mac: '00:e0:4c:70:2a:b2'  # found with ifconfig
+      wol_sender_node: bevymaster           # it's on this network segment
+      wol_boot_wait: 45                     # seconds to sleep

Using Map Files
---------------
The settings explained in the section above may also be set in a map file. An
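The map-file example itself lies outside this hunk; as a sketch of the idea, a map pairs a profile name with one or more machine entries and may override per-machine settings (the address below is hypothetical, the profile and minion names come from the example above):

.. code-block:: yaml

    # illustrative map file pairing the profile with a named machine
    salt-this-machine:
      - my-machine:
          ssh_host: 10.0.0.17   # hypothetical per-machine override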
@@ -165,67 +212,3 @@ Return values:

- ``True``: Credential verification succeeded
- ``False``: Credential verification succeeded
- ``None``: Credential verification was not attempted.
-
-Provisioning salt-api
-=====================
-
-In order to query or control minions it created, saltify needs to send commands
-to the salt master. It does that using the network interface to salt-api.
-
-The salt-api is not enabled by default. The following example will provide a
-simple installation.
-
-.. code-block:: yaml
-
-    # file /etc/salt/cloud.profiles.d/my_saltify_profiles.conf
-    hw_41:                     # a theoretical example hardware machine
-      ssh_host: 10.100.9.41    # the hard address of your target
-      ssh_username: vagrant    # a user name which has passwordless sudo
-      password: vagrant        # on your target machine
-      provider: my_saltify_provider
-
-.. code-block:: yaml
-
-    # file /etc/salt/cloud.providers.d/saltify_provider.conf
-    my_saltify_provider:
-      driver: saltify
-      eauth: pam
-      username: vagrant    # supply some sudo-group-member's name
-      password: vagrant    # and password on the salt master
-      minion:
-        master: 10.100.9.5    # the hard address of the master
-
-.. code-block:: yaml
-
-    # file /etc/salt/master.d/auth.conf
-    # using salt-api ... members of the 'sudo' group can do anything ...
-    external_auth:
-      pam:
-        sudo%:
-          - .*
-          - '@wheel'
-          - '@runner'
-          - '@jobs'
-
-.. code-block:: yaml
-
-    # file /etc/salt/master.d/api.conf
-    # see https://docs.saltstack.com/en/latest/ref/netapi/all/salt.netapi.rest_cherrypy.html
-    rest_cherrypy:
-      host: localhost
-      port: 8000
-      ssl_crt: /etc/pki/tls/certs/localhost.crt
-      ssl_key: /etc/pki/tls/certs/localhost.key
-      thread_pool: 30
-      socket_queue_size: 10
-
-Start your target machine as a Salt minion named "node41" by:
-
-.. code-block:: bash
-
-    $ sudo salt-cloud -p hw_41 node41
@@ -94,6 +94,8 @@ Set up an initial profile at ``/etc/salt/cloud.profiles``:
    private_vlan: 396
    private_network: True
    private_ssh: True
+   # Use a dedicated host instead of cloud
+   dedicated_host_id: 1234
    # May be used _instead_of_ image
    global_identifier: 320d8be5-46c0-dead-cafe-13e3c51

@@ -334,9 +336,21 @@ it can be verified with Salt:

    # salt 'myserver.example.com' test.ping


-Cloud Profiles
+Dedicated Host
~~~~~~~~~~~~~~
+Softlayer allows the creation of new VMs in a dedicated host. This means that
+you can order and pay a fixed amount for a bare metal dedicated host and use
+it to provision as many VMs as you can fit in there. If you want your VMs to
+be launched in a dedicated host, instead of Softlayer's cloud, set the
+``dedicated_host_id`` parameter in your profile.
+
+dedicated_host_id
+-----------------
+The id of the dedicated host where the VMs should be created. If not set, VMs
+will be created in Softlayer's cloud instead.
+
+Bare metal Profiles
+~~~~~~~~~~~~~~~~~~~
Set up an initial profile at ``/etc/salt/cloud.profiles``:

.. code-block:: yaml
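The profile that follows is outside this hunk; to make the new option concrete, a dedicated-host profile differs from an ordinary SoftLayer profile only by carrying the ``dedicated_host_id`` key, roughly like this sketch (the profile and provider names are placeholders):

.. code-block:: yaml

    # illustrative fragment of a SoftLayer profile pinned to a dedicated host
    my-dedicated-profile:
      provider: my-softlayer-config
      dedicated_host_id: 1234   # omit this key to launch in SoftLayer's cloud instead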
doc/topics/cloud/vagrant.rst (new file): 268 lines
@@ -0,0 +1,268 @@
.. _getting-started-with-vagrant:

============================
Getting Started With Vagrant
============================

The Vagrant driver is a new, experimental driver for spinning up a VagrantBox
virtual machine, and installing Salt on it.

Dependencies
============
The Vagrant driver itself has no external dependencies.

The machine which will host the VagrantBox must be an already existing minion
of the cloud server's Salt master.
It must have Vagrant_ installed, and a Vagrant-compatible virtual machine engine,
such as VirtualBox_.
(Note: The Vagrant driver does not depend on the salt-cloud VirtualBox driver in any way.)

.. _Vagrant: https://www.vagrantup.com/
.. _VirtualBox: https://www.virtualbox.org/

\[Caution: The version of Vagrant packaged for ``apt install`` in Ubuntu 16.04 will not connect a bridged
network adapter correctly. Use a version downloaded directly from the web site.\]

Include the Vagrant guest editions plugin:
``vagrant plugin install vagrant-vbguest``.

Configuration
=============

Configuration of the client virtual machine (using VirtualBox, VMware, etc)
will be done by Vagrant as specified in the Vagrantfile on the host machine.

Salt-cloud will push the commands to install and provision a salt minion on
the virtual machine, so you need not (perhaps **should** not) provision salt
in your Vagrantfile, in most cases.

If, however, your cloud master cannot open an SSH connection to the child VM,
you may **need** to let Vagrant provision the VM with Salt, and use some other
method (such as passing a pillar dictionary to the VM) to pass the master's
IP address to the VM. The VM can then attempt to reach the salt master in the
usual way for non-cloud minions. Specify the profile configuration argument
as ``deploy: False`` to prevent the cloud master from trying.

.. code-block:: yaml

    # Note: This example is for /etc/salt/cloud.providers file or any file in
    # the /etc/salt/cloud.providers.d/ directory.

    my-vagrant-config:
      minion:
        master: 111.222.333.444
      provider: vagrant


Because the Vagrant driver needs a place to store the mapping between the
node name you use for Salt commands and the Vagrantfile which controls the VM,
you must configure your salt minion as a Salt sdb server.
(See `host provisioning example`_ below.)

Profiles
========

Vagrant requires a profile to be configured for each machine that needs Salt
installed. The initial profile can be set up at ``/etc/salt/cloud.profiles``
or in the ``/etc/salt/cloud.profiles.d/`` directory.

Each profile requires a ``vagrantfile`` parameter. If the Vagrantfile has
definitions for `multiple machines`_ then you need a ``machine`` parameter,

.. _`multiple machines`: https://www.vagrantup.com/docs/multi-machine/

Salt-cloud uses SSH to provision the minion. There must be a routable path
from the cloud master to the VM. Usually, you will want to use
a bridged network adapter for SSH. The address may not be known until
DHCP assigns it. If ``ssh_host`` is not defined, and ``target_network``
is defined, the driver will attempt to read the address from the output
of an ``ifconfig`` command. Lacking either setting,
the driver will try to use the value Vagrant returns as its ``ssh_host``,
which will work only if the cloud master is running somewhere on the same host.

The ``target_network`` setting should be used
to identify the IP network your bridged adapter is expected to appear on.
Use CIDR notation, like ``target_network: '2001:DB8::/32'``
or ``target_network: '192.0.2.0/24'``.

Profile configuration example:

.. code-block:: yaml

    # /etc/salt/cloud.profiles.d/vagrant.conf

    vagrant-machine:
      host: my-vhost               # the Salt id of the virtual machine's host computer.
      provider: my-vagrant-config
      cwd: /srv/machines           # the path to your Virtualbox file.
      vagrant_runas: my-username   # the username who defined the Vagrantbox on the host
      # vagrant_up_timeout: 300    # (seconds) timeout for cmd.run of the "vagrant up" command
      # vagrant_provider: ''       # option for "vagrant up" like: "--provider vmware_fusion"
      # ssh_host: None             # "None" means try to find the routable IP address from "ifconfig"
      # target_network: None       # Expected CIDR address of your bridged network
      # force_minion_config: false # Set "true" to re-purpose an existing VM

The machine can now be created and configured with the following command:

.. code-block:: bash

    salt-cloud -p vagrant-machine my-id

This will create the machine specified by the cloud profile
``vagrant-machine``, and will give the machine the minion id of
``my-id``. If the cloud master is also the salt-master, its Salt
key will automatically be accepted on the master.

Once a salt-minion has been successfully installed on the instance, connectivity
to it can be verified with Salt:

.. code-block:: bash

    salt my-id test.ping

.. _host provisioning example:

Provisioning a Vagrant cloud host (example)
===========================================

In order to query or control minions it created, each host
minion needs to track the Salt node names associated with
any guest virtual machines on it.
It does that using a Salt sdb database.

The Salt sdb is not configured by default. The following example shows a
simple installation.

This example assumes:

- you are on a large network using the 10.x.x.x IP address space
- your Salt master's Salt id is "bevymaster"
- it will also be your salt-cloud controller
- it is at hardware address 10.124.30.7
- it is running a recent Debian family Linux (raspbian)
- your workstation is a Salt minion of bevymaster
- your workstation's minion id is "my_laptop"
- VirtualBox has been installed on "my_laptop" (apt install is okay)
- Vagrant was installed from vagrantup.com. (not the 16.04 Ubuntu apt)
- "my_laptop" has done "vagrant plugin install vagrant-vbguest"
- the VM you want to start is on "my_laptop" at "/home/my_username/Vagrantfile"

.. code-block:: yaml

    # file /etc/salt/minion.d/vagrant_sdb.conf on host computer "my_laptop"
    # -- this sdb database is required by the Vagrant module --
    vagrant_sdb_data:   # The sdb database must have this name.
      driver: sqlite3   # Let's use SQLite to store the data ...
      database: /var/cache/salt/vagrant.sqlite  # ... in this file ...
      table: sdb        # ... using this table name.
      create_table: True  # if not present

Remember to re-start your minion after changing its configuration files...

``sudo systemctl restart salt-minion``

.. code-block:: ruby

    # -*- mode: ruby -*-
    # file /home/my_username/Vagrantfile on host computer "my_laptop"
    BEVY = "bevy1"
    DOMAIN = BEVY + ".test"  # .test is an ICANN reserved non-public TLD

    # must supply a list of names to avoid Vagrant asking for interactive input
    def get_good_ifc()  # try to find a working Ubuntu network adapter name
      addr_infos = Socket.getifaddrs
      addr_infos.each do |info|
        a = info.addr
        if a and a.ip? and not a.ip_address.start_with?("127.")
          return info.name
        end
      end
      return "eth0"  # fall back to an old reliable name
    end

    Vagrant.configure(2) do |config|
      config.ssh.forward_agent = true  # so you can use git ssh://...

      # add a bridged network interface. (try to detect name, then guess MacOS names, too)
      interface_guesses = [get_good_ifc(), 'en0: Ethernet', 'en1: Wi-Fi (AirPort)']
      config.vm.network "public_network", bridge: interface_guesses
      if ARGV[0] == "up"
        puts "Trying bridge network using interfaces: #{interface_guesses}"
      end
      config.vm.provision "shell", inline: "ip address", run: "always"  # make user feel good

      # . . . . . . . . . . . . Define machine QUAIL1 . . . . . . . . . . . . . .
      config.vm.define "quail1", primary: true do |quail_config|
        quail_config.vm.box = "boxesio/xenial64-standard"  # a public VMware & Virtualbox box
        quail_config.vm.hostname = "quail1." + DOMAIN  # supply a name in our bevy
        quail_config.vm.provider "virtualbox" do |v|
          v.memory = 1024  # limit memory for the virtual box
          v.cpus = 1
          v.linked_clone = true  # make a soft copy of the base Vagrant box
          v.customize ["modifyvm", :id, "--natnet1", "192.168.128.0/24"]  # do not use 10.x network for NAT
        end
      end
    end

.. code-block:: yaml

    # file /etc/salt/cloud.profiles.d/my_vagrant_profiles.conf on bevymaster
    q1:
      host: my_laptop                # the Salt id of your virtual machine host
      machine: quail1                # a machine name in the Vagrantfile (if not primary)
      vagrant_runas: my_username     # owner of Vagrant box files on "my_laptop"
      cwd: '/home/my_username'       # the path (on "my_laptop") of the Vagrantfile
      provider: my_vagrant_provider  # name of entry in provider.conf file
      target_network: '10.0.0.0/8'   # VM external address will be somewhere here

.. code-block:: yaml

    # file /etc/salt/cloud.providers.d/vagrant_provider.conf on bevymaster
    my_vagrant_provider:
      driver: vagrant
      minion:
        master: 10.124.30.7  # the hard address of the master


Create and use your new Salt minion
-----------------------------------

- Typing on the Salt master computer ``bevymaster``, tell it to create a new minion named ``v1`` using profile ``q1``...

.. code-block:: bash

    sudo salt-cloud -p q1 v1
    sudo salt v1 network.ip_addrs
      [ you get a list of IP addresses, including the bridged one ]

- logged in to your laptop (or some other computer known to GitHub)...

    \[NOTE:\] if you are using MacOS, you need to type ``ssh-add -K`` after each boot,
    unless you use one of the methods in `this gist`_.

.. _this gist: https://github.com/jirsbek/SSH-keys-in-macOS-Sierra-keychain

.. code-block:: bash

    ssh -A vagrant@< the bridged network address >
    # [ or, if you are at /home/my_username/ on my_laptop ]
    vagrant ssh quail1

- then typing on your new node "v1" (a.k.a. quail1.bevy1.test)...

.. code-block:: bash

    password: vagrant
    # [ stuff types out ... ]

    ls -al /vagrant
    # [ should be shared /home/my_username from my_laptop ]

    # you can access other network facilities using the ssh authorization
    # as recorded in your ~.ssh/ directory on my_laptop ...

    sudo apt update
    sudo apt install git
    git clone ssh://git@github.com/yourID/your_project
    # etc...
@ -45,11 +45,27 @@ but leave any existing config, cache, and PKI information.
|
||||
Salt Minion Installation
|
||||
========================
|
||||
|
||||
If the system is missing the appropriate version of the Visual C++
|
||||
Redistributable (vcredist) the user will be prompted to install it. Click ``OK``
|
||||
to install the vcredist. Click ``Cancel`` to abort the installation without
|
||||
making modifications to the system.
|
||||
|
||||
If Salt is already installed on the system the user will be prompted to remove
|
||||
the previous installation. Click ``OK`` to uninstall Salt without removing the
|
||||
configuration, PKI information, or cached files. Click ``Cancel`` to abort the
|
||||
installation before making any modifications to the system.
|
||||
|
||||
After the Welcome and the License Agreement, the installer asks for two bits of
|
||||
information to configure the minion: the master hostname and the minion name.
|
||||
The installer will update the minion config with these options. If the installer
|
||||
finds an existing minion config file, these fields will be populated with values
|
||||
from the existing config.
|
||||
The installer will update the minion config with these options.
|
||||
|
||||
If the installer finds an existing minion config file, these fields will be
|
||||
populated with values from the existing config, but they will be grayed out.
|
||||
There will also be a checkbox to use the existing config. If you continue, the
|
||||
existing config will be used. If the checkbox is unchecked, default values are
|
||||
displayed and can be changed. If you continue, the existing config file in
|
||||
``c:\salt\conf`` will be removed along with the ``c:\salt\conf\minion.d``
|
||||
directory. The values entered will be used with the default config.
|
||||
|
||||
The final page allows you to start the minion service and optionally change its
|
||||
startup type. By default, the minion is set to ``Automatic``. You can change the
|
||||
@ -71,11 +87,6 @@ be managed there or from the command line like any other Windows service.
|
||||
sc start salt-minion
|
||||
net start salt-minion
|
||||
|
||||
.. note::
|
||||
If the minion won't start, you may need to install the Microsoft Visual C++
|
||||
2008 x64 SP1 redistributable. Installing all available Windows updates also
helps salt-minion run smoothly.
|
||||
|
||||
Installation Prerequisites
|
||||
--------------------------
|
||||
|
||||
@ -96,15 +107,29 @@ Minion silently:
|
||||
========================= =====================================================
|
||||
Option Description
|
||||
========================= =====================================================
|
||||
``/minion-name=`` A string value to set the minion name. Default is
|
||||
'hostname'
|
||||
``/master=`` A string value to set the IP address or host name of
|
||||
the master. Default value is 'salt'
|
||||
the master. Default value is 'salt'. You can pass a
|
||||
single master or a comma-separated list of masters.
|
||||
Setting the master will replace existing config with
|
||||
the default config. Cannot be used in conjunction
|
||||
with ``/use-existing-config``
|
||||
``/minion-name=`` A string value to set the minion name. Default is
|
||||
'hostname'. Setting the minion name will replace
|
||||
existing config with the default config. Cannot be
|
||||
used in conjunction with ``/use-existing-config``
|
||||
``/start-minion=`` Either a 1 or 0. '1' will start the salt-minion
|
||||
service, '0' will not. Default is to start the
|
||||
service after installation.
|
||||
service after installation
|
||||
``/start-minion-delayed`` Set the minion start type to
|
||||
``Automatic (Delayed Start)``
|
||||
``/use-existing-config`` Either a 1 or 0. '1' will use the existing config if
|
||||
present. '0' will replace existing config with the
|
||||
default config. Default is '1'. If this is set to '1'
|
||||
values passed in ``/master`` and ``/minion-name``
|
||||
will be ignored
|
||||
``/S`` Runs the installation silently. Uses the above
|
||||
settings or the defaults
|
||||
``/?`` Displays command line help
|
||||
========================= =====================================================
|
||||
|
||||
.. note::
|
||||
|
@ -14,23 +14,33 @@ CVE-2017-14695 Directory traversal vulnerability in minion id validation in Salt
|
||||
|
||||
CVE-2017-14696 Remote Denial of Service with a specially crafted authentication request. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)
|
||||
|
||||
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
|
||||
|
||||
Known Issues
|
||||
============
|
||||
|
||||
On 2017.7.2 when using salt-api and cherrypy version 5.6.0, issue `#43581`_ will occur when starting the salt-api service. We have patched the cherry-py packages for python-cherrypy-5.6.0-2 from repo.saltstack.com. If you are using python-cherrypy-5.6.0-1 please ensure to run `yum install python-cherrypy` to install the new patched version.
|
||||
|
||||
*Generated at: 2017-09-26T21:06:19Z*
|
||||
Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
|
||||
|
||||
Statistics:
|
||||
*Generated at: 2017-10-02T21:10:14Z*
|
||||
|
||||
- Total Merges: **326**
|
||||
- Total Issue references: **133**
|
||||
- Total PR references: **389**
|
||||
Statistics
|
||||
==========
|
||||
|
||||
Changes:
|
||||
- Total Merges: **328**
|
||||
- Total Issue references: **134**
|
||||
- Total PR references: **391**
|
||||
|
||||
Changes
|
||||
=======
|
||||
|
||||
- **PR** `#43868`_: (*rallytime*) Back-port `#43847`_ to 2017.7.2
|
||||
* Fix to module.run
|
||||
|
||||
- **PR** `#43756`_: (*gtmanfred*) split build and install for pkg osx
|
||||
@ *2017-09-26T20:51:28Z*
|
||||
|
||||
* 88414d5 Merge pull request `#43756`_ from gtmanfred/2017.7.2
|
||||
* f7df41f split build and install for pkg osx
|
||||
|
||||
- **PR** `#43585`_: (*rallytime*) Back-port `#43330`_ to 2017.7.2
|
||||
@ *2017-09-19T17:33:34Z*
|
||||
@ -3110,6 +3120,12 @@ Changes:
|
||||
.. _`#480`: https://github.com/saltstack/salt/issues/480
|
||||
.. _`#495`: https://github.com/saltstack/salt/issues/495
|
||||
.. _`#43581`: https://github.com/saltstack/salt/issues/43581
|
||||
.. _`#43756`: https://github.com/saltstack/salt/pull/43756
|
||||
.. _`#43847`: https://github.com/saltstack/salt/pull/43847
|
||||
.. _`#43868`: https://github.com/saltstack/salt/pull/43868
|
||||
.. _`#475`: https://github.com/saltstack/salt/issues/475
|
||||
.. _`#480`: https://github.com/saltstack/salt/issues/480
|
||||
.. _`#495`: https://github.com/saltstack/salt/issues/495
|
||||
.. _`bp-37424`: https://github.com/saltstack/salt/pull/37424
|
||||
.. _`bp-39366`: https://github.com/saltstack/salt/pull/39366
|
||||
.. _`bp-41543`: https://github.com/saltstack/salt/pull/41543
|
||||
|
@ -55,6 +55,7 @@ The new grains added are:
|
||||
|
||||
* ``fc_wwn``: Show all fibre channel world wide port names for a host
|
||||
* ``iscsi_iqn``: Show the iSCSI IQN name for a host
|
||||
* ``swap_total``: Show the configured swap_total for Linux, *BSD, OS X and Solaris/SunOS
|
||||
|
||||
Grains Changes
|
||||
--------------
|
||||
@ -116,6 +117,31 @@ The ``state_output`` parameter now supports ``full_id``, ``changes_id`` and ``te
|
||||
Just like ``mixed_id``, these use the state ID as name in the highstate output.
|
||||
For more information on these output modes, see the docs for the :mod:`Highstate Outputter <salt.output.highstate>`.
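
For example, one of the new modes can be selected in the master configuration
(a minimal sketch; ``changes_id`` is just one of the values named above):

.. code-block:: yaml

    state_output: changes_id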

Windows Installer: Changes to existing config handling
------------------------------------------------------
Behavior with existing configuration has changed. With previous installers, the
existing config was used, and the master and minion id could be modified via the
installer. That was problematic in that it did not account for configuration that
may be defined in the ``minion.d`` directory. This change gives you the option,
via a checkbox, to either use the existing config without changes or the default
config using values you pass to the installer. If you choose to use the existing
config, then no changes are made. If not, the existing config is deleted,
including the ``minion.d`` directory, and the default config is used. A
command-line switch (``/use-existing-config``) has also been added to control
this behavior.

Windows Installer: Multi-master configuration
---------------------------------------------
The installer now has the ability to apply a multi-master configuration either
from the gui or the command line. The ``master`` field in the gui can accept
either a single master or a comma-separated list of masters. The command-line
switch (``/master=``) can accept the same.

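For example, a silent install that points the minion at two masters might look
like the following (the installer file name, master host names, and minion id
are illustrative):

.. code-block:: bash

    Salt-Minion-2017.7.2-Py2-AMD64-Setup.exe /S /master=master1.example.com,master2.example.com /minion-name=myminion
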
Windows Installer: Command-line help
------------------------------------
The Windows installer will now display command-line help when a help switch
(``/?``) is passed.

Salt Cloud Features
-------------------

@ -138,6 +164,56 @@ file. For example:

These commands will run in sequence **before** the bootstrap script is executed.

New salt-cloud Grains
=====================

When salt cloud creates a new minion, it will now add grain information
to the minion configuration file, identifying the resources originally used
to create it.

The generated grain information will appear similar to:

.. code-block:: yaml

    grains:
      salt-cloud:
        driver: ec2
        provider: my_ec2:ec2
        profile: ec2-web

The generation of salt-cloud grains can be suppressed by the
option ``enable_cloud_grains: 'False'`` in the cloud configuration file.

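For example, once such a minion exists, it can be inspected or targeted by these
grains from the master (a minimal illustration; the minion id ``web1`` is
hypothetical):

.. code-block:: bash

    salt 'web1' grains.get salt-cloud:profile
    salt -G 'salt-cloud:driver:ec2' test.ping
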
Upgraded Saltify Driver
=======================

The salt-cloud Saltify driver is used to provision machines which
are not controlled by a dedicated cloud supervisor (such as typical hardware
machines) by pushing a salt-bootstrap command to them and accepting them on
the salt master. Previously, creating a node was its only function; no other
salt-cloud commands were implemented.

With this upgrade, it can use the salt-api to provide advanced control,
such as rebooting a machine, querying it along with conventional cloud minions,
and, ultimately, disconnecting it from its master.

After disconnection from ("destroying" on) one master, a machine can be
re-purposed by connecting to ("creating" on) a subsequent master.

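As an illustration, the new capabilities use the usual salt-cloud action and
destroy syntax (the machine name ``mymachine`` is hypothetical):

.. code-block:: bash

    salt-cloud -a reboot mymachine   # reboot via the machine's own minion
    salt-cloud -d mymachine          # disconnect it and remove its key from the master
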
New Vagrant Driver
==================

The salt-cloud Vagrant driver brings virtual machines running in a limited
environment, such as a programmer's workstation, under salt-cloud control.
This can be useful for experimentation, instruction, or testing salt configurations.

Using salt-api on the master, and a salt-minion running on the host computer,
the Vagrant driver can create (``vagrant up``), restart (``vagrant reload``),
and destroy (``vagrant destroy``) VMs, as controlled by salt-cloud profiles
which designate a ``Vagrantfile`` on the host machine.

The master can be a very limited machine, such as a Raspberry Pi, or a small
VagrantBox VM.

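A minimal usage sketch, reusing a profile like the ``q1`` example shown earlier
(the VM name ``v1`` is arbitrary):

.. code-block:: bash

    sudo salt-cloud -p q1 v1   # runs "vagrant up" on the host and accepts the new minion
    sudo salt-cloud -d v1      # runs "vagrant destroy" and removes the minion's key
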
New pillar/master_tops module called saltclass
|
||||
----------------------------------------------
|
||||
|
||||
@ -1020,3 +1096,10 @@ The ``version.py`` file had the following changes:
|
||||
Warnings for moving away from the ``env`` option were removed. ``saltenv`` should be
|
||||
used instead. The removal of these warnings does not have a behavior change. Only
|
||||
the warning text was removed.
|
||||
|
||||
Sentry Log Handler
|
||||
------------------
|
||||
|
||||
Configuring the sentry raven python client via ``project``, ``servers``, ``public_key``,
|
||||
and ``secret_key`` is deprecated and won't work with sentry clients > 3.0.
|
||||
Instead, the ``dsn`` config param must be used.
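
A minimal sketch of the new-style configuration, assuming the handler is
configured under the ``sentry_handler`` key (the DSN value is a placeholder):

.. code-block:: yaml

    sentry_handler:
      dsn: https://<public-key>:<secret-key>@sentry.example.com/<project-id>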
|
||||
|
@ -27,7 +27,7 @@ Installing Dependencies
|
||||
=======================
|
||||
|
||||
Both pygit2_ and GitPython_ are supported Python interfaces to git. If
|
||||
compatible versions of both are installed, pygit2_ will preferred. In these
|
||||
compatible versions of both are installed, pygit2_ will be preferred. In these
|
||||
cases, GitPython_ can be forced using the :conf_master:`gitfs_provider`
|
||||
parameter in the master config file.
|
||||
|
||||
|
@ -88,7 +88,8 @@ sudo $PKGRESOURCES/build_env.sh $PYVER
|
||||
echo -n -e "\033]0;Build: Install Salt\007"
|
||||
sudo rm -rf $SRCDIR/build
|
||||
sudo rm -rf $SRCDIR/dist
|
||||
sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s" install
|
||||
sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s"
|
||||
sudo $PYTHON $SRCDIR/setup.py install
|
||||
|
||||
############################################################################
|
||||
# Build Package
|
||||
|
@ -11,6 +11,7 @@
|
||||
!define PRODUCT_UNINST_KEY "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME}"
|
||||
!define PRODUCT_UNINST_KEY_OTHER "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME_OTHER}"
|
||||
!define PRODUCT_UNINST_ROOT_KEY "HKLM"
|
||||
!define OUTFILE "Salt-Minion-${PRODUCT_VERSION}-Py${PYTHON_VERSION}-${CPUARCH}-Setup.exe"
|
||||
|
||||
# Import Libraries
|
||||
!include "MUI2.nsh"
|
||||
@ -52,6 +53,15 @@ ${StrStrAdv}
|
||||
Pop "${ResultVar}"
|
||||
!macroend
|
||||
|
||||
# Part of the Explode function for Strings
|
||||
!define Explode "!insertmacro Explode"
|
||||
!macro Explode Length Separator String
|
||||
Push `${Separator}`
|
||||
Push `${String}`
|
||||
Call Explode
|
||||
Pop `${Length}`
|
||||
!macroend
|
||||
|
||||
|
||||
###############################################################################
|
||||
# Configure Pages, Ordering, and Configuration
|
||||
@ -92,10 +102,17 @@ Var Dialog
|
||||
Var Label
|
||||
Var CheckBox_Minion_Start
|
||||
Var CheckBox_Minion_Start_Delayed
|
||||
Var ConfigMasterHost
|
||||
Var MasterHost
|
||||
Var MasterHost_State
|
||||
Var ConfigMinionName
|
||||
Var MinionName
|
||||
Var MinionName_State
|
||||
Var ExistingConfigFound
|
||||
Var UseExistingConfig
|
||||
Var UseExistingConfig_State
|
||||
Var WarningExistingConfig
|
||||
Var WarningDefaultConfig
|
||||
Var StartMinion
|
||||
Var StartMinionDelayed
|
||||
Var DeleteInstallDir
|
||||
@ -115,27 +132,105 @@ Function pageMinionConfig
|
||||
Abort
|
||||
${EndIf}
|
||||
|
||||
# Master IP or Hostname Dialog Control
|
||||
${NSD_CreateLabel} 0 0 100% 12u "Master IP or Hostname:"
|
||||
Pop $Label
|
||||
|
||||
${NSD_CreateText} 0 13u 100% 12u $MasterHost_State
|
||||
Pop $MasterHost
|
||||
|
||||
# Minion ID Dialog Control
|
||||
${NSD_CreateLabel} 0 30u 100% 12u "Minion Name:"
|
||||
Pop $Label
|
||||
|
||||
${NSD_CreateText} 0 43u 100% 12u $MinionName_State
|
||||
Pop $MinionName
|
||||
|
||||
# Use Existing Config Checkbox
|
||||
${NSD_CreateCheckBox} 0 65u 100% 12u "&Use Existing Config"
|
||||
Pop $UseExistingConfig
|
||||
${NSD_OnClick} $UseExistingConfig pageMinionConfig_OnClick
|
||||
|
||||
# Add Existing Config Warning Label
|
||||
${NSD_CreateLabel} 0 80u 100% 60u "The values above are taken from an \
|
||||
existing configuration found in `c:\salt\conf\minion`. Configuration \
|
||||
settings defined in the `minion.d` directories, if they exist, are not \
|
||||
shown here.$\r$\n\
|
||||
$\r$\n\
|
||||
Clicking `Install` will leave the existing config unchanged."
|
||||
Pop $WarningExistingConfig
|
||||
CreateFont $0 "Arial" 10 500 /ITALIC
|
||||
SendMessage $WarningExistingConfig ${WM_SETFONT} $0 1
|
||||
SetCtlColors $WarningExistingConfig 0xBB0000 transparent
|
||||
|
||||
# Add Default Config Warning Label
|
||||
${NSD_CreateLabel} 0 80u 100% 60u "Clicking `Install` will remove the \
|
||||
existing minion config file and remove the minion.d directories. \
|
||||
The values above will be used in the new default config."
|
||||
Pop $WarningDefaultConfig
|
||||
CreateFont $0 "Arial" 10 500 /ITALIC
|
||||
SendMessage $WarningDefaultConfig ${WM_SETFONT} $0 1
|
||||
SetCtlColors $WarningDefaultConfig 0xBB0000 transparent
|
||||
|
||||
# If no existing config found, disable the checkbox and stuff
|
||||
# Set UseExistingConfig_State to 0
|
||||
${If} $ExistingConfigFound == 0
|
||||
StrCpy $UseExistingConfig_State 0
|
||||
ShowWindow $UseExistingConfig ${SW_HIDE}
|
||||
ShowWindow $WarningExistingConfig ${SW_HIDE}
|
||||
ShowWindow $WarningDefaultConfig ${SW_HIDE}
|
||||
${Endif}
|
||||
|
||||
${NSD_SetState} $UseExistingConfig $UseExistingConfig_State
|
||||
|
||||
Call pageMinionConfig_OnClick
|
||||
|
||||
nsDialogs::Show
|
||||
|
||||
FunctionEnd
|
||||
|
||||
|
||||
Function pageMinionConfig_OnClick
|
||||
|
||||
# You have to pop the top handle to keep the stack clean
|
||||
Pop $R0
|
||||
|
||||
# Assign the current checkbox state to the variable
|
||||
${NSD_GetState} $UseExistingConfig $UseExistingConfig_State
|
||||
|
||||
# Validate the checkboxes
|
||||
${If} $UseExistingConfig_State == ${BST_CHECKED}
|
||||
# Use Existing Config is checked, show warning
|
||||
ShowWindow $WarningExistingConfig ${SW_SHOW}
|
||||
EnableWindow $MasterHost 0
|
||||
EnableWindow $MinionName 0
|
||||
${NSD_SetText} $MasterHost $ConfigMasterHost
|
||||
${NSD_SetText} $MinionName $ConfigMinionName
|
||||
${If} $ExistingConfigFound == 1
|
||||
ShowWindow $WarningDefaultConfig ${SW_HIDE}
|
||||
${Endif}
|
||||
${Else}
|
||||
# Use Existing Config is not checked, hide the warning
|
||||
ShowWindow $WarningExistingConfig ${SW_HIDE}
|
||||
EnableWindow $MasterHost 1
|
||||
EnableWindow $MinionName 1
|
||||
${NSD_SetText} $MasterHost $MasterHost_State
|
||||
${NSD_SetText} $MinionName $MinionName_State
|
||||
${If} $ExistingConfigFound == 1
|
||||
ShowWindow $WarningDefaultConfig ${SW_SHOW}
|
||||
${Endif}
|
||||
${EndIf}
|
||||
|
||||
FunctionEnd
|
||||
|
||||
|
||||
Function pageMinionConfig_Leave
|
||||
|
||||
${NSD_GetText} $MasterHost $MasterHost_State
|
||||
${NSD_GetText} $MinionName $MinionName_State
|
||||
${NSD_GetState} $UseExistingConfig $UseExistingConfig_State
|
||||
|
||||
Call RemoveExistingConfig
|
||||
|
||||
FunctionEnd
|
||||
|
||||
@ -194,7 +289,7 @@ FunctionEnd
|
||||
!else
|
||||
Name "${PRODUCT_NAME} ${PRODUCT_VERSION}"
|
||||
!endif
|
||||
OutFile "Salt-Minion-${PRODUCT_VERSION}-Py${PYTHON_VERSION}-${CPUARCH}-Setup.exe"
|
||||
OutFile "${OutFile}"
|
||||
InstallDir "c:\salt"
|
||||
InstallDirRegKey HKLM "${PRODUCT_DIR_REGKEY}" ""
|
||||
ShowInstDetails show
|
||||
@ -311,8 +406,6 @@ SectionEnd
|
||||
|
||||
Function .onInit
|
||||
|
||||
Call getMinionConfig
|
||||
|
||||
Call parseCommandLineSwitches
|
||||
|
||||
# Check for existing installation
|
||||
@ -364,6 +457,23 @@ Function .onInit
|
||||
|
||||
skipUninstall:
|
||||
|
||||
Call getMinionConfig
|
||||
|
||||
IfSilent 0 +2
|
||||
Call RemoveExistingConfig
|
||||
|
||||
FunctionEnd
|
||||
|
||||
|
||||
Function RemoveExistingConfig
|
||||
|
||||
${If} $ExistingConfigFound == 1
|
||||
${AndIf} $UseExistingConfig_State == 0
|
||||
# Wipe out the Existing Config
|
||||
Delete "$INSTDIR\conf\minion"
|
||||
RMDir /r "$INSTDIR\conf\minion.d"
|
||||
${EndIf}
|
||||
|
||||
FunctionEnd
|
||||
|
||||
|
||||
@ -407,7 +517,9 @@ Section -Post
|
||||
nsExec::Exec "nssm.exe set salt-minion AppStopMethodConsole 24000"
|
||||
nsExec::Exec "nssm.exe set salt-minion AppStopMethodWindow 2000"
|
||||
|
||||
Call updateMinionConfig
|
||||
${If} $UseExistingConfig_State == 0
|
||||
Call updateMinionConfig
|
||||
${EndIf}
|
||||
|
||||
Push "C:\salt"
|
||||
Call AddToPath
|
||||
@ -534,18 +646,32 @@ FunctionEnd
|
||||
# Helper Functions
|
||||
###############################################################################
|
||||
Function MsiQueryProductState
|
||||
# Used for detecting VCRedist Installation
|
||||
!define INSTALLSTATE_DEFAULT "5"
|
||||
|
||||
!define INSTALLSTATE_DEFAULT "5"
|
||||
|
||||
Pop $R0
|
||||
StrCpy $NeedVcRedist "False"
|
||||
System::Call "msi::MsiQueryProductStateA(t '$R0') i.r0"
|
||||
StrCmp $0 ${INSTALLSTATE_DEFAULT} +2 0
|
||||
StrCpy $NeedVcRedist "True"
|
||||
Pop $R0
|
||||
StrCpy $NeedVcRedist "False"
|
||||
System::Call "msi::MsiQueryProductStateA(t '$R0') i.r0"
|
||||
StrCmp $0 ${INSTALLSTATE_DEFAULT} +2 0
|
||||
StrCpy $NeedVcRedist "True"
|
||||
|
||||
FunctionEnd
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Trim Function
|
||||
# - Trim whitespace from the beginning and end of a string
|
||||
# - Trims spaces, \r, \n, \t
|
||||
#
|
||||
# Usage:
|
||||
# Push " some string " ; String to Trim
|
||||
# Call Trim
|
||||
# Pop $0 ; Trimmed String: "some string"
|
||||
#
|
||||
# or
|
||||
#
|
||||
# ${Trim} $0 $1 ; Trimmed String, String to Trim
|
||||
#------------------------------------------------------------------------------
|
||||
Function Trim
|
||||
|
||||
Exch $R1 # Original string
|
||||
@ -580,6 +706,95 @@ Function Trim
|
||||
FunctionEnd
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Explode Function
|
||||
# - Splits a string based off the passed separator
|
||||
# - Each item in the string is pushed to the stack
|
||||
# - The last item pushed to the stack is the length of the array
|
||||
#
|
||||
# Usage:
|
||||
# Push "," ; Separator
|
||||
# Push "string,to,separate" ; String to explode
|
||||
# Call Explode
|
||||
# Pop $0 ; Number of items in the array
|
||||
#
|
||||
# or
|
||||
#
|
||||
# ${Explode} $0 $1 $2 ; Length, Separator, String
|
||||
#------------------------------------------------------------------------------
|
||||
Function Explode
|
||||
# Initialize variables
|
||||
Var /GLOBAL explString
|
||||
Var /GLOBAL explSeparator
|
||||
Var /GLOBAL explStrLen
|
||||
Var /GLOBAL explSepLen
|
||||
Var /GLOBAL explOffset
|
||||
Var /GLOBAL explTmp
|
||||
Var /GLOBAL explTmp2
|
||||
Var /GLOBAL explTmp3
|
||||
Var /GLOBAL explArrCount
|
||||
|
||||
# Get input from user
|
||||
Pop $explString
|
||||
Pop $explSeparator
|
||||
|
||||
# Calculates initial values
|
||||
StrLen $explStrLen $explString
|
||||
StrLen $explSepLen $explSeparator
|
||||
StrCpy $explArrCount 1
|
||||
|
||||
${If} $explStrLen <= 1 # If we got a single character
|
||||
${OrIf} $explSepLen > $explStrLen # or separator is larger than the string,
|
||||
Push $explString # then we return initial string with no change
|
||||
Push 1 # and set array's length to 1
|
||||
Return
|
||||
${EndIf}
|
||||
|
||||
# Set offset to the last symbol of the string
|
||||
StrCpy $explOffset $explStrLen
|
||||
IntOp $explOffset $explOffset - 1
|
||||
|
||||
# Clear temp string to exclude the possibility of appearance of occasional data
|
||||
StrCpy $explTmp ""
|
||||
StrCpy $explTmp2 ""
|
||||
StrCpy $explTmp3 ""
|
||||
|
||||
# Loop until the offset becomes negative
|
||||
${Do}
|
||||
# If offset becomes negative, it is time to leave the function
|
||||
${IfThen} $explOffset == -1 ${|} ${ExitDo} ${|}
|
||||
|
||||
# Remove everything before and after the searched part ("TempStr")
|
||||
StrCpy $explTmp $explString $explSepLen $explOffset
|
||||
|
||||
${If} $explTmp == $explSeparator
|
||||
# Calculating offset to start copy from
|
||||
IntOp $explTmp2 $explOffset + $explSepLen # Offset equals to the current offset plus length of separator
|
||||
StrCpy $explTmp3 $explString "" $explTmp2
|
||||
|
||||
Push $explTmp3 # Throwing array item to the stack
|
||||
IntOp $explArrCount $explArrCount + 1 # Increasing array's counter
|
||||
|
||||
StrCpy $explString $explString $explOffset 0 # Cutting all characters beginning with the separator entry
|
||||
StrLen $explStrLen $explString
|
||||
${EndIf}
|
||||
|
||||
${If} $explOffset = 0 # If the beginning of the line met and there is no separator,
|
||||
# copying the rest of the string
|
||||
${If} $explSeparator == "" # Fix for the empty separator
|
||||
IntOp $explArrCount $explArrCount - 1
|
||||
${Else}
|
||||
Push $explString
|
||||
${EndIf}
|
||||
${EndIf}
|
||||
|
||||
IntOp $explOffset $explOffset - 1
|
||||
${Loop}
|
||||
|
||||
Push $explArrCount
|
||||
FunctionEnd
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# StrStr Function
|
||||
# - find substring in a string
|
||||
@ -816,6 +1031,9 @@ FunctionEnd
|
||||
###############################################################################
|
||||
Function getMinionConfig
|
||||
|
||||
# Set Config Found Default Value
|
||||
StrCpy $ExistingConfigFound 0
|
||||
|
||||
confFind:
|
||||
IfFileExists "$INSTDIR\conf\minion" confFound confNotFound
|
||||
|
||||
@ -828,24 +1046,42 @@ Function getMinionConfig
|
||||
${EndIf}
|
||||
|
||||
confFound:
|
||||
StrCpy $ExistingConfigFound 1
|
||||
FileOpen $0 "$INSTDIR\conf\minion" r
|
||||
|
||||
ClearErrors
|
||||
confLoop:
|
||||
FileRead $0 $1
|
||||
IfErrors EndOfFile
|
||||
${StrLoc} $2 $1 "master:" ">"
|
||||
${If} $2 == 0
|
||||
${StrStrAdv} $2 $1 "master: " ">" ">" "0" "0" "0"
|
||||
${Trim} $2 $2
|
||||
StrCpy $MasterHost_State $2
|
||||
ClearErrors # Clear Errors
|
||||
FileRead $0 $1 # Read the next line
|
||||
IfErrors EndOfFile # Error is probably EOF
|
||||
${StrLoc} $2 $1 "master:" ">" # Find `master:` starting at the beginning
|
||||
${If} $2 == 0 # If it found it in the first position, then it is defined
|
||||
${StrStrAdv} $2 $1 "master: " ">" ">" "0" "0" "0" # Read everything after `master: `
|
||||
${Trim} $2 $2 # Trim white space
|
||||
${If} $2 == "" # If it's empty, it's probably a list
|
||||
masterLoop:
|
||||
ClearErrors # Clear Errors
|
||||
FileRead $0 $1 # Read the next line
|
||||
IfErrors EndOfFile # Error is probably EOF
|
||||
${StrStrAdv} $2 $1 "- " ">" ">" "0" "0" "0" # Read everything after `- `
|
||||
${Trim} $2 $2 # Trim white space
|
||||
${IfNot} $2 == "" # If it's not empty, we found something
|
||||
${If} $ConfigMasterHost == "" # Is the default `salt` there
|
||||
StrCpy $ConfigMasterHost $2 # If so, make the first item the new entry
|
||||
${Else}
|
||||
StrCpy $ConfigMasterHost "$ConfigMasterHost,$2" # Append the new master, comma separated
|
||||
${EndIf}
|
||||
Goto masterLoop # Check the next one
|
||||
${EndIf}
|
||||
${Else}
|
||||
StrCpy $ConfigMasterHost $2 # A single master entry
|
||||
${EndIf}
|
||||
${EndIf}
|
||||
|
||||
${StrLoc} $2 $1 "id:" ">"
|
||||
${If} $2 == 0
|
||||
${StrStrAdv} $2 $1 "id: " ">" ">" "0" "0" "0"
|
||||
${Trim} $2 $2
|
||||
StrCpy $MinionName_State $2
|
||||
StrCpy $ConfigMinionName $2
|
||||
${EndIf}
|
||||
|
||||
Goto confLoop
|
||||
@ -855,6 +1091,14 @@ Function getMinionConfig
|
||||
|
||||
confReallyNotFound:
|
||||
|
||||
# Set Default Config Values if not found
|
||||
${If} $ConfigMasterHost == ""
|
||||
StrCpy $ConfigMasterHost "salt"
|
||||
${EndIf}
|
||||
${If} $ConfigMinionName == ""
|
||||
StrCpy $ConfigMinionName "hostname"
|
||||
${EndIf}
|
||||
|
||||
FunctionEnd
|
||||
|
||||
|
||||
@ -874,7 +1118,22 @@ Function updateMinionConfig
|
||||
${StrLoc} $3 $2 "master:" ">" # where is 'master:' in this line
|
||||
${If} $3 == 0 # is it in the first...
|
||||
${OrIf} $3 == 1 # or second position (account for comments)
|
||||
StrCpy $2 "master: $MasterHost_State$\r$\n" # write the master
|
||||
|
||||
${Explode} $9 "," $MasterHost_state # Split the hostname on commas, $9 is the number of items found
|
||||
${If} $9 == 1 # 1 means only a single master was passed
|
||||
StrCpy $2 "master: $MasterHost_State$\r$\n" # write the master
|
||||
${Else} # Make a multi-master entry
|
||||
StrCpy $2 "master:" # Make the first line "master:"
|
||||
|
||||
loop_explode: # Start a loop to go through the list in the config
|
||||
pop $8 # Pop the next item off the stack
|
||||
${Trim} $8 $8 # Trim any whitespace
|
||||
StrCpy $2 "$2$\r$\n - $8" # Add it to the master variable ($2)
|
||||
IntOp $9 $9 - 1 # Decrement the list count
|
||||
${If} $9 >= 1 # If it's not 0
|
||||
Goto loop_explode # Do it again
|
||||
${EndIf} # close if statement
|
||||
${EndIf} # close if statement
|
||||
${EndIf} # close if statement
|
||||
${EndIf} # close if statement
|
||||
|
||||
@ -905,6 +1164,67 @@ Function parseCommandLineSwitches
|
||||
# Load the parameters
|
||||
${GetParameters} $R0
|
||||
|
||||
# Display Help
|
||||
ClearErrors
|
||||
${GetOptions} $R0 "/?" $R1
|
||||
IfErrors display_help_not_found
|
||||
|
||||
System::Call 'kernel32::GetStdHandle(i -11)i.r0'
|
||||
System::Call 'kernel32::AttachConsole(i -1)i.r1'
|
||||
${If} $0 = 0
|
||||
${OrIf} $1 = 0
|
||||
System::Call 'kernel32::AllocConsole()'
|
||||
System::Call 'kernel32::GetStdHandle(i -11)i.r0'
|
||||
${EndIf}
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "Help for Salt Minion installation$\n"
|
||||
FileWrite $0 "===============================================================================$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "/minion-name=$\t$\tA string value to set the minion name. Default is$\n"
|
||||
FileWrite $0 "$\t$\t$\t'hostname'. Setting the minion name will replace$\n"
|
||||
FileWrite $0 "$\t$\t$\texisting config with a default config. Cannot be$\n"
|
||||
FileWrite $0 "$\t$\t$\tused in conjunction with /use-existing-config=1$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "/master=$\t$\tA string value to set the IP address or hostname of$\n"
|
||||
FileWrite $0 "$\t$\t$\tthe master. Default value is 'salt'. You may pass a$\n"
|
||||
FileWrite $0 "$\t$\t$\tsingle master, or a comma separated list of masters.$\n"
|
||||
FileWrite $0 "$\t$\t$\tSetting the master will replace existing config with$\n"
|
||||
FileWrite $0 "$\t$\t$\ta default config. Cannot be used in conjunction with$\n"
|
||||
FileWrite $0 "$\t$\t$\t/use-existing-config=1$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "/start-minion=$\t$\t1 will start the service, 0 will not. Default is 1$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "/start-minion-delayed$\tSet the minion start type to 'Automatic (Delayed Start)'$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "/use-existing-config=$\t1 will use the existing config if present, 0 will$\n"
|
||||
FileWrite $0 "$\t$\t$\treplace existing config with a default config. Default$\n"
|
||||
FileWrite $0 "$\t$\t$\tis 1. If this is set to 1, values passed in$\n"
|
||||
FileWrite $0 "$\t$\t$\t/minion-name and /master will be ignored$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "/S$\t$\t$\tInstall Salt silently$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "/?$\t$\t$\tDisplay this help screen$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "-------------------------------------------------------------------------------$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "Examples:$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "${OutFile} /S$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "${OutFile} /S /minion-name=myminion /master=master.mydomain.com /start-minion-delayed$\n"
|
||||
FileWrite $0 "$\n"
|
||||
FileWrite $0 "===============================================================================$\n"
|
||||
FileWrite $0 "Press Enter to continue..."
|
||||
System::Free $0
|
||||
System::Free $1
|
||||
System::Call 'kernel32::FreeConsole()'
|
||||
Abort
|
||||
display_help_not_found:
|
||||
|
||||
# Set default value for Use Existing Config
|
||||
StrCpy $UseExistingConfig_State 1
|
||||
|
||||
# Check for start-minion switches
|
||||
# /start-service is to be deprecated, so we must check for both
|
||||
${GetOptions} $R0 "/start-service=" $R1
|
||||
@ -930,19 +1250,31 @@ Function parseCommandLineSwitches
|
||||
start_minion_delayed_not_found:
|
||||
|
||||
# Minion Config: Master IP/Name
|
||||
# If setting master, we don't want to use existing config
|
||||
${GetOptions} $R0 "/master=" $R1
|
||||
${IfNot} $R1 == ""
|
||||
StrCpy $MasterHost_State $R1
|
||||
StrCpy $UseExistingConfig_State 0
|
||||
${ElseIf} $MasterHost_State == ""
|
||||
StrCpy $MasterHost_State "salt"
|
||||
${EndIf}
|
||||
|
||||
# Minion Config: Minion ID
|
||||
# If setting minion id, we don't want to use existing config
|
||||
${GetOptions} $R0 "/minion-name=" $R1
|
||||
${IfNot} $R1 == ""
|
||||
StrCpy $MinionName_State $R1
|
||||
StrCpy $UseExistingConfig_State 0
|
||||
${ElseIf} $MinionName_State == ""
|
||||
StrCpy $MinionName_State "hostname"
|
||||
${EndIf}
|
||||
|
||||
# Use Existing Config
|
||||
# Overrides above settings with user passed settings
|
||||
${GetOptions} $R0 "/use-existing-config=" $R1
|
||||
${IfNot} $R1 == ""
|
||||
# Use Existing Config was passed something, set it
|
||||
StrCpy $UseExistingConfig_State $R1
|
||||
${EndIf}
|
||||
|
||||
FunctionEnd
|
||||
|
@ -6,8 +6,6 @@ This system allows for authentication to be managed in a module pluggable way
|
||||
so that any external authentication system can be used inside of Salt
|
||||
'''
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
# 1. Create auth loader instance
|
||||
# 2. Accept arguments as a dict
|
||||
# 3. Verify with function introspection
|
||||
@ -16,7 +14,7 @@ from __future__ import absolute_import
|
||||
# 6. Interface to verify tokens
|
||||
|
||||
# Import python libs
|
||||
from __future__ import print_function
|
||||
from __future__ import absolute_import, print_function
|
||||
import collections
|
||||
import time
|
||||
import logging
|
||||
@ -31,6 +29,7 @@ import salt.transport.client
|
||||
import salt.utils.args
|
||||
import salt.utils.dictupdate
|
||||
import salt.utils.files
|
||||
import salt.utils.master
|
||||
import salt.utils.minions
|
||||
import salt.utils.user
|
||||
import salt.utils.versions
|
||||
@ -430,13 +429,26 @@ class LoadAuth(object):
|
||||
|
||||
auth_list = self.get_auth_list(load)
|
||||
elif auth_type == 'user':
|
||||
if not self.authenticate_key(load, key):
|
||||
auth_ret = self.authenticate_key(load, key)
|
||||
msg = 'Authentication failure of type "user" occurred'
|
||||
if not auth_ret: # auth_ret can be a boolean or the effective user id
|
||||
if show_username:
|
||||
msg = 'Authentication failure of type "user" occurred for user {0}.'.format(username)
|
||||
else:
|
||||
msg = 'Authentication failure of type "user" occurred'
|
||||
msg = '{0} for user {1}.'.format(msg, username)
|
||||
ret['error'] = {'name': 'UserAuthenticationError', 'message': msg}
|
||||
return ret
|
||||
|
||||
# Verify that the caller has root on master
|
||||
if auth_ret is not True:
|
||||
if AuthUser(load['user']).is_sudo():
|
||||
if not self.opts['sudo_acl'] or not self.opts['publisher_acl']:
|
||||
auth_ret = True
|
||||
|
||||
if auth_ret is not True:
|
||||
auth_list = salt.utils.master.get_values_of_matching_keys(
|
||||
self.opts['publisher_acl'], auth_ret)
|
||||
if not auth_list:
|
||||
ret['error'] = {'name': 'UserAuthenticationError', 'message': msg}
|
||||
return ret
|
||||
else:
|
||||
ret['error'] = {'name': 'SaltInvocationError',
|
||||
'message': 'Authentication type not supported.'}
|
||||
|
@ -199,13 +199,42 @@ class Beacon(object):
|
||||
else:
|
||||
self.opts['beacons'][name].append({'enabled': enabled_value})
|
||||
|
||||
def list_beacons(self):
|
||||
def _get_beacons(self,
|
||||
include_opts=True,
|
||||
include_pillar=True):
|
||||
'''
|
||||
Return the beacons data structure
|
||||
'''
|
||||
beacons = {}
|
||||
if include_pillar:
|
||||
pillar_beacons = self.opts.get('pillar', {}).get('beacons', {})
|
||||
if not isinstance(pillar_beacons, dict):
|
||||
raise ValueError('Beacons must be of type dict.')
|
||||
beacons.update(pillar_beacons)
|
||||
if include_opts:
|
||||
opts_beacons = self.opts.get('beacons', {})
|
||||
if not isinstance(opts_beacons, dict):
|
||||
raise ValueError('Beacons must be of type dict.')
|
||||
beacons.update(opts_beacons)
|
||||
return beacons
|
||||
|
||||
def list_beacons(self,
|
||||
include_pillar=True,
|
||||
include_opts=True):
|
||||
'''
|
||||
List the beacon items
|
||||
|
||||
include_pillar: Whether to include beacons that are
|
||||
configured in pillar, default is True.
|
||||
|
||||
include_opts: Whether to include beacons that are
|
||||
configured in opts, default is True.
|
||||
'''
|
||||
beacons = self._get_beacons(include_pillar, include_opts)
|
||||
|
||||
# Fire the complete event back along with the list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
|
||||
evt.fire_event({'complete': True, 'beacons': beacons},
|
||||
tag='/salt/minion/minion_beacons_list_complete')
|
||||
|
||||
return True
|
||||
@ -236,8 +265,8 @@ class Beacon(object):
|
||||
del beacon_data['enabled']
|
||||
valid, vcomment = self.beacons[validate_str](beacon_data)
|
||||
else:
|
||||
log.info('Beacon %s does not have a validate'
|
||||
' function, skipping validation.', name)
|
||||
vcomment = 'Beacon {0} does not have a validate' \
|
||||
' function, skipping validation.'.format(name)
|
||||
valid = True
|
||||
|
||||
# Fire the complete event back along with the list of beacons
|
||||
@ -257,16 +286,23 @@ class Beacon(object):
|
||||
data = {}
|
||||
data[name] = beacon_data
|
||||
|
||||
if name in self.opts['beacons']:
|
||||
log.info('Updating settings for beacon '
|
||||
'item: %s', name)
|
||||
if name in self._get_beacons(include_opts=False):
|
||||
comment = 'Cannot update beacon item {0}, ' \
|
||||
'because it is configured in pillar.'.format(name)
|
||||
complete = False
|
||||
else:
|
||||
log.info('Added new beacon item %s', name)
|
||||
self.opts['beacons'].update(data)
|
||||
if name in self.opts['beacons']:
|
||||
comment = 'Updating settings for beacon ' \
|
||||
'item: {0}'.format(name)
|
||||
else:
|
||||
comment = 'Added new beacon item: {0}'.format(name)
|
||||
complete = True
|
||||
self.opts['beacons'].update(data)
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
|
||||
evt.fire_event({'complete': complete, 'comment': comment,
|
||||
'beacons': self.opts['beacons']},
|
||||
tag='/salt/minion/minion_beacon_add_complete')
|
||||
|
||||
return True
|
||||
@ -279,13 +315,20 @@ class Beacon(object):
|
||||
data = {}
|
||||
data[name] = beacon_data
|
||||
|
||||
log.info('Updating settings for beacon '
|
||||
'item: %s', name)
|
||||
self.opts['beacons'].update(data)
|
||||
if name in self._get_beacons(include_opts=False):
|
||||
comment = 'Cannot modify beacon item {0}, ' \
|
||||
'it is configured in pillar.'.format(name)
|
||||
complete = False
|
||||
else:
|
||||
comment = 'Updating settings for beacon ' \
|
||||
'item: {0}'.format(name)
|
||||
complete = True
|
||||
self.opts['beacons'].update(data)
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
|
||||
evt.fire_event({'complete': complete, 'comment': comment,
|
||||
'beacons': self.opts['beacons']},
|
||||
tag='/salt/minion/minion_beacon_modify_complete')
|
||||
|
||||
return True
|
||||
@ -295,13 +338,22 @@ class Beacon(object):
|
||||
Delete a beacon item
|
||||
'''
|
||||
|
||||
if name in self.opts['beacons']:
|
||||
log.info('Deleting beacon item %s', name)
|
||||
del self.opts['beacons'][name]
|
||||
if name in self._get_beacons(include_opts=False):
|
||||
comment = 'Cannot delete beacon item {0}, ' \
|
||||
'it is configured in pillar.'.format(name)
|
||||
complete = False
|
||||
else:
|
||||
if name in self.opts['beacons']:
|
||||
del self.opts['beacons'][name]
|
||||
comment = 'Deleting beacon item: {0}'.format(name)
|
||||
else:
|
||||
comment = 'Beacon item {0} not found.'.format(name)
|
||||
complete = True
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
|
||||
evt.fire_event({'complete': complete, 'comment': comment,
|
||||
'beacons': self.opts['beacons']},
|
||||
tag='/salt/minion/minion_beacon_delete_complete')
|
||||
|
||||
return True
|
||||
@ -339,11 +391,19 @@ class Beacon(object):
|
||||
Enable a beacon
|
||||
'''
|
||||
|
||||
self._update_enabled(name, True)
|
||||
if name in self._get_beacons(include_opts=False):
|
||||
comment = 'Cannot enable beacon item {0}, ' \
|
||||
'it is configured in pillar.'.format(name)
|
||||
complete = False
|
||||
else:
|
||||
self._update_enabled(name, True)
|
||||
comment = 'Enabling beacon item {0}'.format(name)
|
||||
complete = True
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
|
||||
evt.fire_event({'complete': complete, 'comment': comment,
|
||||
'beacons': self.opts['beacons']},
|
||||
tag='/salt/minion/minion_beacon_enabled_complete')
|
||||
|
||||
return True
|
||||
@ -353,11 +413,19 @@ class Beacon(object):
|
||||
Disable a beacon
|
||||
'''
|
||||
|
||||
self._update_enabled(name, False)
|
||||
if name in self._get_beacons(include_opts=False):
|
||||
comment = 'Cannot disable beacon item {0}, ' \
|
||||
'it is configured in pillar.'.format(name)
|
||||
complete = False
|
||||
else:
|
||||
self._update_enabled(name, False)
|
||||
comment = 'Disabling beacon item {0}'.format(name)
|
||||
complete = True
|
||||
|
||||
# Fire the complete event back along with updated list of beacons
|
||||
evt = salt.utils.event.get_event('minion', opts=self.opts)
|
||||
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
|
||||
evt.fire_event({'complete': complete, 'comment': comment,
|
||||
'beacons': self.opts['beacons']},
|
||||
tag='/salt/minion/minion_beacon_disabled_complete')
|
||||
|
||||
return True
|
||||
|
@ -1,6 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Send events covering service status
|
||||
Send events covering process status
|
||||
'''
|
||||
|
||||
# Import Python Libs
|
||||
|
@ -3,6 +3,8 @@
|
||||
Beacon to monitor temperature, humidity and pressure using the SenseHat
|
||||
of a Raspberry Pi.
|
||||
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
:maintainer: Benedikt Werner <1benediktwerner@gmail.com>
|
||||
:maturity: new
|
||||
:depends: sense_hat Python module
|
||||
|
@ -1595,7 +1595,10 @@ class LocalClient(object):
|
||||
timeout=timeout,
|
||||
tgt=tgt,
|
||||
tgt_type=tgt_type,
|
||||
expect_minions=(verbose or show_timeout),
|
||||
# (gtmanfred) expect_minions is popped here incase it is passed from a client
|
||||
# call. If this is not popped, then it would be passed twice to
|
||||
# get_iter_returns.
|
||||
expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout),
|
||||
**kwargs
|
||||
):
|
||||
log.debug(u'return event: %s', ret)
|
||||
|
@ -1417,7 +1417,7 @@ class Cloud(object):
|
||||
if name in vms:
|
||||
prov = vms[name]['provider']
|
||||
driv = vms[name]['driver']
|
||||
msg = six.u('{0} already exists under {1}:{2}').format(
|
||||
msg = u'{0} already exists under {1}:{2}'.format(
|
||||
name, prov, driv
|
||||
)
|
||||
log.error(msg)
|
||||
|
@ -2080,6 +2080,7 @@ def attach_disk(name=None, kwargs=None, call=None):
|
||||
disk_name = kwargs['disk_name']
|
||||
mode = kwargs.get('mode', 'READ_WRITE').upper()
|
||||
boot = kwargs.get('boot', False)
|
||||
auto_delete = kwargs.get('auto_delete', False)
|
||||
if boot and boot.lower() in ['true', 'yes', 'enabled']:
|
||||
boot = True
|
||||
else:
|
||||
@ -2109,7 +2110,8 @@ def attach_disk(name=None, kwargs=None, call=None):
|
||||
transport=__opts__['transport']
|
||||
)
|
||||
|
||||
result = conn.attach_volume(node, disk, ex_mode=mode, ex_boot=boot)
|
||||
result = conn.attach_volume(node, disk, ex_mode=mode, ex_boot=boot,
|
||||
ex_auto_delete=auto_delete)
|
||||
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
@ -2389,6 +2391,8 @@ def create_attach_volumes(name, kwargs, call=None):
|
||||
'type': The disk type, either pd-standard or pd-ssd. Optional, defaults to pd-standard.
|
||||
'image': An image to use for this new disk. Optional.
|
||||
'snapshot': A snapshot to use for this new disk. Optional.
|
||||
'auto_delete': An option(bool) to keep or remove the disk upon
|
||||
instance deletion. Optional, defaults to False.
|
||||
|
||||
Volumes are attached in the order in which they are given, thus on a new
|
||||
node the first volume will be /dev/sdb, the second /dev/sdc, and so on.
|
||||
@ -2416,7 +2420,8 @@ def create_attach_volumes(name, kwargs, call=None):
|
||||
'size': volume['size'],
|
||||
'type': volume.get('type', 'pd-standard'),
|
||||
'image': volume.get('image', None),
|
||||
'snapshot': volume.get('snapshot', None)
|
||||
'snapshot': volume.get('snapshot', None),
|
||||
'auto_delete': volume.get('auto_delete', False)
|
||||
}
|
||||
|
||||
create_disk(volume_dict, 'function')
|
||||
|
@ -465,18 +465,54 @@ def create(vm_):
|
||||
|
||||
return ret
|
||||
except Exception as e: # pylint: disable=broad-except
|
||||
# Try to clean up in as much cases as possible
|
||||
log.info('Cleaning up after exception clean up items: {0}'.format(cleanup))
|
||||
for leftover in cleanup:
|
||||
what = leftover['what']
|
||||
item = leftover['item']
|
||||
if what == 'domain':
|
||||
destroy_domain(conn, item)
|
||||
if what == 'volume':
|
||||
item.delete()
|
||||
do_cleanup(cleanup)
|
||||
# throw the root cause after cleanup
|
||||
raise e
|
||||
|
||||
|
||||
def do_cleanup(cleanup):
|
||||
'''
|
||||
Clean up clone domain leftovers as much as possible.
|
||||
|
||||
Extra robust clean up in order to deal with some small changes in libvirt
|
||||
behavior over time. Passed in volumes and domains are deleted, any errors
|
||||
are ignored. Used when cloning/provisioning a domain fails.
|
||||
|
||||
:param cleanup: list containing dictionaries with two keys: 'what' and 'item'.
|
||||
If 'what' is domain the 'item' is a libvirt domain object.
|
||||
If 'what' is volume then the item is a libvirt volume object.
|
||||
|
||||
Returns:
|
||||
none
|
||||
|
||||
.. versionadded: 2017.7.3
|
||||
'''
|
||||
log.info('Cleaning up after exception')
|
||||
for leftover in cleanup:
|
||||
what = leftover['what']
|
||||
item = leftover['item']
|
||||
if what == 'domain':
|
||||
log.info('Cleaning up {0} {1}'.format(what, item.name()))
|
||||
try:
|
||||
item.destroy()
|
||||
log.debug('{0} {1} forced off'.format(what, item.name()))
|
||||
except libvirtError:
|
||||
pass
|
||||
try:
|
||||
item.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE+
|
||||
libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA+
|
||||
libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
|
||||
log.debug('{0} {1} undefined'.format(what, item.name()))
|
||||
except libvirtError:
|
||||
pass
|
||||
if what == 'volume':
|
||||
try:
|
||||
item.delete()
|
||||
log.debug('{0} {1} cleaned up'.format(what, item.name()))
|
||||
except libvirtError:
|
||||
pass
|
||||
|
||||
|
||||
def destroy(name, call=None):
|
||||
"""
|
||||
This function irreversibly destroys a virtual machine on the cloud provider.
|
||||
|
@ -4572,7 +4572,8 @@ def _list_nodes(full=False):
|
||||
pass
|
||||
|
||||
vms[name]['id'] = vm.find('ID').text
|
||||
vms[name]['image'] = vm.find('TEMPLATE').find('TEMPLATE_ID').text
|
||||
if vm.find('TEMPLATE').find('TEMPLATE_ID'):
|
||||
vms[name]['image'] = vm.find('TEMPLATE').find('TEMPLATE_ID').text
|
||||
vms[name]['name'] = name
|
||||
vms[name]['size'] = {'cpu': cpu_size, 'memory': memory_size}
|
||||
vms[name]['state'] = vm.find('STATE').text
|
||||
|
@ -1,5 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
.. _`saltify-module`:
|
||||
|
||||
Saltify Module
|
||||
==============
|
||||
|
||||
@ -7,6 +9,9 @@ The Saltify module is designed to install Salt on a remote machine, virtual or
|
||||
bare metal, using SSH. This module is useful for provisioning machines which
|
||||
are already installed, but not Salted.
|
||||
|
||||
.. versionchanged:: Oxygen
|
||||
The wake_on_lan capability, and actions destroy, reboot, and query functions were added.
|
||||
|
||||
Use of this module requires some configuration in cloud profile and provider
|
||||
files as described in the
|
||||
:ref:`Getting Started with Saltify <getting-started-with-saltify>` documentation.
|
||||
@ -15,11 +20,12 @@ files as described in the
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import time
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils.cloud
|
||||
import salt.config as config
|
||||
import salt.netapi
|
||||
import salt.client
|
||||
import salt.ext.six as six
|
||||
if six.PY3:
|
||||
import ipaddress
|
||||
@ -32,6 +38,7 @@ from salt.exceptions import SaltCloudException, SaltCloudSystemExit
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
# noinspection PyUnresolvedReferences
|
||||
from impacket.smbconnection import SessionError as smbSessionError
|
||||
from impacket.smb3 import SessionError as smb3SessionError
|
||||
HAS_IMPACKET = True
|
||||
@ -39,7 +46,9 @@ except ImportError:
|
||||
HAS_IMPACKET = False
|
||||
|
||||
try:
|
||||
# noinspection PyUnresolvedReferences
|
||||
from winrm.exceptions import WinRMTransportError
|
||||
# noinspection PyUnresolvedReferences
|
||||
from requests.exceptions import (
|
||||
ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
|
||||
ProxyError, RetryError, InvalidSchema)
|
||||
@ -55,24 +64,6 @@ def __virtual__():
|
||||
return True
|
||||
|
||||
|
||||
def _get_connection_info():
|
||||
'''
|
||||
Return connection information for the passed VM data
|
||||
'''
|
||||
vm_ = get_configured_provider()
|
||||
|
||||
try:
|
||||
ret = {'username': vm_['username'],
|
||||
'password': vm_['password'],
|
||||
'eauth': vm_['eauth'],
|
||||
'vm': vm_,
|
||||
}
|
||||
except KeyError:
|
||||
raise SaltCloudException(
|
||||
'Configuration must define salt-api "username", "password" and "eauth"')
|
||||
return ret
|
||||
|
||||
|
||||
def avail_locations(call=None):
|
||||
'''
|
||||
This function returns a list of locations available.
|
||||
@ -81,7 +72,7 @@ def avail_locations(call=None):
|
||||
|
||||
salt-cloud --list-locations my-cloud-provider
|
||||
|
||||
[ saltify will always returns an empty dictionary ]
|
||||
[ saltify will always return an empty dictionary ]
|
||||
'''
|
||||
|
||||
return {}
|
||||
@ -127,8 +118,6 @@ def list_nodes(call=None):
|
||||
|
||||
returns a list of dictionaries of defined standard fields.
|
||||
|
||||
salt-api setup required for operation.
|
||||
|
||||
..versionadded:: Oxygen
|
||||
|
||||
'''
|
||||
@ -172,8 +161,8 @@ def list_nodes_full(call=None):
|
||||
salt-cloud -F
|
||||
|
||||
returns a list of dictionaries.
|
||||
|
||||
for 'saltify' minions, returns dict of grains (enhanced).
|
||||
salt-api setup required for operation.
|
||||
|
||||
..versionadded:: Oxygen
|
||||
'''
|
||||
@ -200,16 +189,9 @@ def _list_nodes_full(call=None):
|
||||
'''
|
||||
List the nodes, ask all 'saltify' minions, return dict of grains.
|
||||
'''
|
||||
local = salt.netapi.NetapiClient(__opts__)
|
||||
cmd = {'client': 'local',
|
||||
'tgt': 'salt-cloud:driver:saltify',
|
||||
'fun': 'grains.items',
|
||||
'arg': '',
|
||||
'tgt_type': 'grain',
|
||||
}
|
||||
cmd.update(_get_connection_info())
|
||||
|
||||
return local.run(cmd)
|
||||
local = salt.client.LocalClient()
|
||||
return local.cmd('salt-cloud:driver:saltify', 'grains.items', '',
|
||||
tgt_type='grain')
|
||||
|
||||
|
||||
def list_nodes_select(call=None):
|
||||
@ -226,27 +208,69 @@ def show_instance(name, call=None):
|
||||
'''
|
||||
List a single node, return a dict of grains.
|
||||
'''
|
||||
local = salt.netapi.NetapiClient(__opts__)
|
||||
cmd = {'client': 'local',
|
||||
'tgt': 'name',
|
||||
'fun': 'grains.items',
|
||||
'arg': '',
|
||||
'tgt_type': 'glob',
|
||||
}
|
||||
cmd.update(_get_connection_info())
|
||||
ret = local.run(cmd)
|
||||
local = salt.client.LocalClient()
|
||||
ret = local.cmd(name, 'grains.items')
|
||||
ret.update(_build_required_items(ret))
|
||||
return ret
|
||||
|
||||
|
||||
def create(vm_):
|
||||
'''
|
||||
Provision a single machine
|
||||
if configuration parameter ``deploy`` is ``True``,
|
||||
|
||||
Provision a single machine, adding its keys to the salt master
|
||||
|
||||
else,
|
||||
|
||||
Test ssh connections to the machine
|
||||
|
||||
Configuration parameters:
|
||||
|
||||
- deploy: (see above)
|
||||
- provider: name of entry in ``salt/cloud.providers.d/???`` file
|
||||
- ssh_host: IP address or DNS name of the new machine
|
||||
- ssh_username: name used to log in to the new machine
|
||||
- ssh_password: password to log in (unless key_filename is used)
|
||||
- key_filename: (optional) SSH private key for passwordless login
|
||||
- ssh_port: (default=22) TCP port for SSH connection
|
||||
- wake_on_lan_mac: (optional) hardware (MAC) address for wake on lan
|
||||
- wol_sender_node: (optional) salt minion to send wake on lan command
|
||||
- wol_boot_wait: (default=30) seconds to delay while client boots
|
||||
- force_minion_config: (optional) replace the minion configuration files on the new machine
|
||||
|
||||
See also
|
||||
:ref:`Miscellaneous Salt Cloud Options <misc-salt-cloud-options>`
|
||||
and
|
||||
:ref:`Getting Started with Saltify <getting-started-with-saltify>`
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -p mymachine my_new_id
|
||||
'''
|
||||
deploy_config = config.get_cloud_config_value(
|
||||
'deploy', vm_, __opts__, default=False)
|
||||
|
||||
if deploy_config:
|
||||
wol_mac = config.get_cloud_config_value(
|
||||
'wake_on_lan_mac', vm_, __opts__, default='')
|
||||
wol_host = config.get_cloud_config_value(
|
||||
'wol_sender_node', vm_, __opts__, default='')
|
||||
if wol_mac and wol_host:
|
||||
log.info('sending wake-on-lan to %s using node %s',
|
||||
wol_mac, wol_host)
|
||||
local = salt.client.LocalClient()
|
||||
if isinstance(wol_mac, six.string_types):
|
||||
wol_mac = [wol_mac] # a smart user may have passed more params
|
||||
ret = local.cmd(wol_host, 'network.wol', wol_mac)
|
||||
log.info('network.wol returned value %s', ret)
|
||||
if ret and ret[wol_host]:
|
||||
sleep_time = config.get_cloud_config_value(
|
||||
'wol_boot_wait', vm_, __opts__, default=30)
|
||||
if sleep_time > 0.0:
|
||||
log.info('delaying %d seconds for boot', sleep_time)
|
||||
time.sleep(sleep_time)
|
||||
log.info('Provisioning existing machine %s', vm_['name'])
|
||||
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
|
||||
else:
|
||||
@ -365,14 +389,21 @@ def destroy(name, call=None):
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Disconnect a minion from the master, and remove its keys.
|
||||
|
||||
Optionally, (if ``remove_config_on_destroy`` is ``True``),
|
||||
disables salt-minion from running on the minion, and
|
||||
erases the Salt configuration files from it.
|
||||
|
||||
Optionally, (if ``shutdown_on_destroy`` is ``True``),
|
||||
orders the minion to halt.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud --destroy mymachine
|
||||
|
||||
salt-api setup required for operation.
|
||||
|
||||
'''
|
||||
if call == 'function':
|
||||
raise SaltCloudSystemExit(
|
||||
@ -391,15 +422,9 @@ def destroy(name, call=None):
|
||||
transport=opts['transport']
|
||||
)
|
||||
|
||||
local = salt.netapi.NetapiClient(opts)
|
||||
cmd = {'client': 'local',
|
||||
'tgt': name,
|
||||
'fun': 'grains.get',
|
||||
'arg': ['salt-cloud'],
|
||||
}
|
||||
cmd.update(_get_connection_info())
|
||||
vm_ = cmd['vm']
|
||||
my_info = local.run(cmd)
|
||||
vm_ = get_configured_provider()
|
||||
local = salt.client.LocalClient()
|
||||
my_info = local.cmd(name, 'grains.get', ['salt-cloud'])
|
||||
try:
|
||||
vm_.update(my_info[name]) # get profile name to get config value
|
||||
except (IndexError, TypeError):
|
||||
@ -407,25 +432,22 @@ def destroy(name, call=None):
|
||||
if config.get_cloud_config_value(
|
||||
'remove_config_on_destroy', vm_, opts, default=True
|
||||
):
|
||||
cmd.update({'fun': 'service.disable', 'arg': ['salt-minion']})
|
||||
ret = local.run(cmd) # prevent generating new keys on restart
|
||||
ret = local.cmd(name, # prevent generating new keys on restart
|
||||
'service.disable',
|
||||
['salt-minion'])
|
||||
if ret and ret[name]:
|
||||
log.info('disabled salt-minion service on %s', name)
|
||||
cmd.update({'fun': 'config.get', 'arg': ['conf_file']})
|
||||
ret = local.run(cmd)
|
||||
ret = local.cmd(name, 'config.get', ['conf_file'])
|
||||
if ret and ret[name]:
|
||||
confile = ret[name]
|
||||
cmd.update({'fun': 'file.remove', 'arg': [confile]})
|
||||
ret = local.run(cmd)
|
||||
ret = local.cmd(name, 'file.remove', [confile])
|
||||
if ret and ret[name]:
|
||||
log.info('removed minion %s configuration file %s',
|
||||
name, confile)
|
||||
cmd.update({'fun': 'config.get', 'arg': ['pki_dir']})
|
||||
ret = local.run(cmd)
|
||||
ret = local.cmd(name, 'config.get', ['pki_dir'])
|
||||
if ret and ret[name]:
|
||||
pki_dir = ret[name]
|
||||
cmd.update({'fun': 'file.remove', 'arg': [pki_dir]})
|
||||
ret = local.run(cmd)
|
||||
ret = local.cmd(name, 'file.remove', [pki_dir])
|
||||
if ret and ret[name]:
|
||||
log.info(
|
||||
'removed minion %s key files in %s',
|
||||
@ -435,8 +457,7 @@ def destroy(name, call=None):
|
||||
if config.get_cloud_config_value(
|
||||
'shutdown_on_destroy', vm_, opts, default=False
|
||||
):
|
||||
cmd.update({'fun': 'system.shutdown', 'arg': ''})
|
||||
ret = local.run(cmd)
|
||||
ret = local.cmd(name, 'system.shutdown')
|
||||
if ret and ret[name]:
|
||||
log.info('system.shutdown for minion %s successful', name)
|
||||
|
||||
@ -456,8 +477,6 @@ def reboot(name, call=None):
|
||||
'''
|
||||
Reboot a saltify minion.
|
||||
|
||||
salt-api setup required for operation.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
name
|
||||
@ -475,13 +494,5 @@ def reboot(name, call=None):
|
||||
'The reboot action must be called with -a or --action.'
|
||||
)
|
||||
|
||||
local = salt.netapi.NetapiClient(__opts__)
|
||||
cmd = {'client': 'local',
|
||||
'tgt': name,
|
||||
'fun': 'system.reboot',
|
||||
'arg': '',
|
||||
}
|
||||
cmd.update(_get_connection_info())
|
||||
ret = local.run(cmd)
|
||||
|
||||
return ret
|
||||
local = salt.client.LocalClient()
|
||||
return local.cmd(name, 'system.reboot')
|
||||
|
@ -371,6 +371,12 @@ def create(vm_):
|
||||
if post_uri:
|
||||
kwargs['postInstallScriptUri'] = post_uri
|
||||
|
||||
dedicated_host_id = config.get_cloud_config_value(
|
||||
'dedicated_host_id', vm_, __opts__, default=None
|
||||
)
|
||||
if dedicated_host_id:
|
||||
kwargs['dedicatedHost'] = {'id': dedicated_host_id}
|
||||
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'requesting instance',
|
||||
|
338 salt/cloud/clouds/vagrant.py Normal file
@ -0,0 +1,338 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Vagrant Cloud Driver
|
||||
====================
|
||||
|
||||
The Vagrant cloud is designed to "vagrant up" a virtual machine as a
|
||||
Salt minion.
|
||||
|
||||
Use of this module requires some configuration in cloud profile and provider
|
||||
files as described in the
|
||||
:ref:`Getting Started with Vagrant <getting-started-with-vagrant>` documentation.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
|
||||
'''
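For orientation only, the profile keys that ``create()`` below reads include ``host``, ``machine``, ``cwd``, ``vagrant_runas``, ``vagrant_up_timeout`` and ``vagrant_provider``. A minimal sketch with made-up values (the real configuration belongs in the cloud profile files described in the guide above):

.. code-block:: python

    # Illustrative only; see the Getting Started with Vagrant documentation.
    vm_ = {
        'name': 'my_vagrant_vm',
        'host': 'my_vagrant_host_minion',  # minion that runs the Vagrant commands
        'machine': 'default',              # machine name inside the Vagrantfile
        'cwd': '/srv/vagrant/my_box',      # directory containing the Vagrantfile
        'vagrant_runas': 'vagrant_user',
        'vagrant_up_timeout': 300,
    }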
|
||||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
import salt.config as config
|
||||
import salt.client
|
||||
import salt.ext.six as six
|
||||
if six.PY3:
|
||||
import ipaddress
|
||||
else:
|
||||
import salt.ext.ipaddress as ipaddress
|
||||
from salt.exceptions import SaltCloudException, SaltCloudSystemExit
|
||||
|
||||
# Get logging started
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Needs no special configuration
|
||||
'''
|
||||
return True
|
||||
|
||||
|
||||
def avail_locations(call=None):
|
||||
r'''
|
||||
This function returns a list of locations available.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud --list-locations my-cloud-provider
|
||||
|
||||
# \[ vagrant will always return an empty dictionary \]
|
||||
|
||||
'''
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
def avail_images(call=None):
|
||||
'''This function returns a list of images available for this cloud provider.
|
||||
vagrant will return a list of profiles.
|
||||
salt-cloud --list-images my-cloud-provider
|
||||
'''
|
||||
vm_ = get_configured_provider()
|
||||
return {'Profiles': [profile for profile in vm_['profiles']]}
|
||||
|
||||
|
||||
def avail_sizes(call=None):
|
||||
r'''
|
||||
This function returns a list of sizes available for this cloud provider.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud --list-sizes my-cloud-provider
|
||||
|
||||
# \[ vagrant always returns an empty dictionary \]
|
||||
|
||||
'''
|
||||
return {}
|
||||
|
||||
|
||||
def list_nodes(call=None):
|
||||
'''
|
||||
List the nodes which have salt-cloud:driver:vagrant grains.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -Q
|
||||
'''
|
||||
nodes = _list_nodes(call)
|
||||
return _build_required_items(nodes)
|
||||
|
||||
|
||||
def _build_required_items(nodes):
|
||||
ret = {}
|
||||
for name, grains in nodes.items():
|
||||
if grains:
|
||||
private_ips = []
|
||||
public_ips = []
|
||||
ips = grains['ipv4'] + grains['ipv6']
|
||||
for adrs in ips:
|
||||
ip_ = ipaddress.ip_address(adrs)
|
||||
if not ip_.is_loopback:
|
||||
if ip_.is_private:
|
||||
private_ips.append(adrs)
|
||||
else:
|
||||
public_ips.append(adrs)
|
||||
|
||||
ret[name] = {
|
||||
'id': grains['id'],
|
||||
'image': grains['salt-cloud']['profile'],
|
||||
'private_ips': private_ips,
|
||||
'public_ips': public_ips,
|
||||
'size': '',
|
||||
'state': 'running'
|
||||
}
|
||||
|
||||
return ret
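The private/public split above relies on the ``ipaddress`` predicates (the Python 3 standard-library module, or the bundled ``salt.ext.ipaddress`` on Python 2); a quick illustration of how addresses are classified:

.. code-block:: python

    import ipaddress

    ip_ = ipaddress.ip_address(u'10.1.2.3')
    ip_.is_loopback                                  # False -> not skipped
    ip_.is_private                                   # True  -> counted in private_ips
    ipaddress.ip_address(u'8.8.8.8').is_private      # False -> counted in public_ips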
|
||||
|
||||
|
||||
def list_nodes_full(call=None):
|
||||
'''
|
||||
List the nodes, ask all 'vagrant' minions, return dict of grains (enhanced).
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-call -F
|
||||
'''
|
||||
ret = _list_nodes(call)
|
||||
|
||||
for key, grains in list(ret.items()):  # clean up some hyperverbose grains -- everything is too much
|
||||
try:
|
||||
del grains['cpu_flags'], grains['disks'], grains['pythonpath'], grains['dns'], grains['gpus']
|
||||
except KeyError:
|
||||
pass # ignore absence of things we are eliminating
|
||||
except TypeError:
|
||||
del ret[key] # eliminate all reference to unexpected (None) values.
|
||||
|
||||
reqs = _build_required_items(ret)
|
||||
for name in ret:
|
||||
ret[name].update(reqs[name])
|
||||
return ret
|
||||
|
||||
|
||||
def _list_nodes(call=None):
|
||||
'''
|
||||
List the nodes, ask all 'vagrant' minions, return dict of grains.
|
||||
'''
|
||||
local = salt.client.LocalClient()
|
||||
ret = local.cmd('salt-cloud:driver:vagrant', 'grains.items', '', tgt_type='grain')
|
||||
return ret
|
||||
|
||||
|
||||
def list_nodes_select(call=None):
|
||||
'''
|
||||
Return a list of the minions that have salt-cloud grains, with
|
||||
select fields.
|
||||
'''
|
||||
return salt.utils.cloud.list_nodes_select(
|
||||
list_nodes_full('function'), __opts__['query.selection'], call,
|
||||
)
|
||||
|
||||
|
||||
def show_instance(name, call=None):
|
||||
'''
|
||||
List a single node, return a dict of grains.
|
||||
'''
|
||||
local = salt.client.LocalClient()
|
||||
ret = local.cmd(name, 'grains.items', '')
|
||||
reqs = _build_required_items(ret)
|
||||
ret[name].update(reqs[name])
|
||||
return ret
|
||||
|
||||
|
||||
def _get_my_info(name):
|
||||
local = salt.client.LocalClient()
|
||||
return local.cmd(name, 'grains.get', ['salt-cloud'])
|
||||
|
||||
|
||||
def create(vm_):
|
||||
'''
|
||||
Provision a single machine
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
salt-cloud -p my_profile new_node_1
|
||||
|
||||
'''
|
||||
name = vm_['name']
|
||||
machine = config.get_cloud_config_value(
|
||||
'machine', vm_, __opts__, default='')
|
||||
vm_['machine'] = machine
|
||||
host = config.get_cloud_config_value(
|
||||
'host', vm_, __opts__, default=NotImplemented)
|
||||
vm_['cwd'] = config.get_cloud_config_value(
|
||||
'cwd', vm_, __opts__, default='/')
|
||||
vm_['runas'] = config.get_cloud_config_value(
|
||||
'vagrant_runas', vm_, __opts__, default=os.getenv('SUDO_USER'))
|
||||
vm_['timeout'] = config.get_cloud_config_value(
|
||||
'vagrant_up_timeout', vm_, __opts__, default=300)
|
||||
vm_['vagrant_provider'] = config.get_cloud_config_value(
|
||||
'vagrant_provider', vm_, __opts__, default='')
|
||||
vm_['grains'] = {'salt-cloud:vagrant': {'host': host, 'machine': machine}}
|
||||
|
||||
log.info('sending \'vagrant.init %s machine=%s\' command to %s', name, machine, host)
|
||||
|
||||
local = salt.client.LocalClient()
|
||||
ret = local.cmd(host, 'vagrant.init', [name], kwarg={'vm': vm_, 'start': True})
|
||||
log.info('response ==> %s', ret[host])
|
||||
|
||||
network_mask = config.get_cloud_config_value(
|
||||
'network_mask', vm_, __opts__, default='')
|
||||
if 'ssh_host' not in vm_:
|
||||
ret = local.cmd(host,
|
||||
'vagrant.get_ssh_config',
|
||||
[name],
|
||||
kwarg={'network_mask': network_mask,
|
||||
'get_private_key': True})[host]
|
||||
with tempfile.NamedTemporaryFile() as pks:
|
||||
if 'private_key' not in vm_ and ret.get('private_key', False):
|
||||
pks.write(ret['private_key'])
|
||||
pks.flush()
|
||||
log.debug('wrote private key to %s', pks.name)
|
||||
vm_['key_filename'] = pks.name
|
||||
if 'ssh_host' not in vm_:
|
||||
vm_.setdefault('ssh_username', ret['ssh_username'])
|
||||
if ret.get('ip_address'):
|
||||
vm_['ssh_host'] = ret['ip_address']
|
||||
else: # if probe failed or not used, use Vagrant's reported ssh info
|
||||
vm_['ssh_host'] = ret['ssh_host']
|
||||
vm_.setdefault('ssh_port', ret['ssh_port'])
|
||||
|
||||
log.info('Provisioning machine %s as node %s using ssh %s',
|
||||
machine, name, vm_['ssh_host'])
|
||||
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
|
||||
return ret
|
||||
|
||||
|
||||
def get_configured_provider():
|
||||
'''
|
||||
Return the first configured instance.
|
||||
'''
|
||||
ret = config.is_provider_configured(
|
||||
__opts__,
|
||||
__active_provider_name__ or 'vagrant',
|
||||
''
|
||||
)
|
||||
return ret
|
||||
|
||||
|
||||
# noinspection PyTypeChecker
|
||||
def destroy(name, call=None):
|
||||
'''
|
||||
Destroy a node.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud --destroy mymachine
|
||||
'''
|
||||
if call == 'function':
|
||||
raise SaltCloudSystemExit(
|
||||
'The destroy action must be called with -d, --destroy, '
|
||||
'-a, or --action.'
|
||||
)
|
||||
|
||||
opts = __opts__
|
||||
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'destroying instance',
|
||||
'salt/cloud/{0}/destroying'.format(name),
|
||||
args={'name': name},
|
||||
sock_dir=opts['sock_dir'],
|
||||
transport=opts['transport']
|
||||
)
|
||||
my_info = _get_my_info(name)
|
||||
profile_name = my_info[name]['profile']
|
||||
profile = opts['profiles'][profile_name]
|
||||
host = profile['host']
|
||||
local = salt.client.LocalClient()
|
||||
ret = local.cmd(host, 'vagrant.destroy', [name])
|
||||
|
||||
if ret[host]:
|
||||
__utils__['cloud.fire_event'](
|
||||
'event',
|
||||
'destroyed instance',
|
||||
'salt/cloud/{0}/destroyed'.format(name),
|
||||
args={'name': name},
|
||||
sock_dir=opts['sock_dir'],
|
||||
transport=opts['transport']
|
||||
)
|
||||
|
||||
if opts.get('update_cachedir', False) is True:
|
||||
__utils__['cloud.delete_minion_cachedir'](
|
||||
name, __active_provider_name__.split(':')[0], opts)
|
||||
|
||||
return {'Destroyed': '{0} was destroyed.'.format(name)}
|
||||
else:
|
||||
return {'Error': 'Error destroying {}'.format(name)}
|
||||
|
||||
|
||||
# noinspection PyTypeChecker
|
||||
def reboot(name, call=None):
|
||||
'''
|
||||
Reboot a vagrant minion.
|
||||
|
||||
name
|
||||
The name of the VM to reboot.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt-cloud -a reboot vm_name
|
||||
'''
|
||||
if call != 'action':
|
||||
raise SaltCloudException(
|
||||
'The reboot action must be called with -a or --action.'
|
||||
)
|
||||
my_info = _get_my_info(name)
|
||||
profile_name = my_info[name]['profile']
|
||||
profile = __opts__['profiles'][profile_name]
|
||||
host = profile['host']
|
||||
local = salt.client.LocalClient()
|
||||
return local.cmd(host, 'vagrant.reboot', [name])
|
@ -704,7 +704,7 @@ def _manage_devices(devices, vm=None, container_ref=None, new_vm_name=None):
|
||||
network_name = devices['network'][device.deviceInfo.label]['name']
|
||||
adapter_type = devices['network'][device.deviceInfo.label]['adapter_type'] if 'adapter_type' in devices['network'][device.deviceInfo.label] else ''
|
||||
switch_type = devices['network'][device.deviceInfo.label]['switch_type'] if 'switch_type' in devices['network'][device.deviceInfo.label] else ''
|
||||
network_spec = _edit_existing_network_adapter(device, network_name, adapter_type, switch_type)
|
||||
network_spec = _edit_existing_network_adapter(device, network_name, adapter_type, switch_type, container_ref)
|
||||
adapter_mapping = _set_network_adapter_mapping(devices['network'][device.deviceInfo.label])
|
||||
device_specs.append(network_spec)
|
||||
nics_map.append(adapter_mapping)
|
||||
@ -2578,7 +2578,7 @@ def create(vm_):
|
||||
config_spec.memoryMB = memory_mb
|
||||
|
||||
if devices:
|
||||
specs = _manage_devices(devices, vm=object_ref, new_vm_name=vm_name)
|
||||
specs = _manage_devices(devices, vm=object_ref, container_ref=container_ref, new_vm_name=vm_name)
|
||||
config_spec.deviceChange = specs['device_specs']
|
||||
|
||||
if extra_config:
|
||||
|
@ -14,6 +14,7 @@ import time
|
||||
import stat
|
||||
|
||||
# Import salt libs
|
||||
import salt.acl
|
||||
import salt.crypt
|
||||
import salt.cache
|
||||
import salt.client
|
||||
@ -69,12 +70,11 @@ def init_git_pillar(opts):
|
||||
for opts_dict in [x for x in opts.get('ext_pillar', [])]:
|
||||
if 'git' in opts_dict:
|
||||
try:
|
||||
pillar = salt.utils.gitfs.GitPillar(opts)
|
||||
pillar.init_remotes(
|
||||
pillar = salt.utils.gitfs.GitPillar(
|
||||
opts,
|
||||
opts_dict['git'],
|
||||
git_pillar.PER_REMOTE_OVERRIDES,
|
||||
git_pillar.PER_REMOTE_ONLY
|
||||
)
|
||||
per_remote_overrides=git_pillar.PER_REMOTE_OVERRIDES,
|
||||
per_remote_only=git_pillar.PER_REMOTE_ONLY)
|
||||
ret.append(pillar)
|
||||
except FileserverConfigError:
|
||||
if opts.get('git_pillar_verify_config', True):
|
||||
@ -1176,88 +1176,50 @@ class LocalFuncs(object):
|
||||
)
|
||||
minions = _res['minions']
|
||||
|
||||
# Check for external auth calls
|
||||
if extra.get('token', False):
|
||||
# Authenticate
|
||||
token = self.loadauth.authenticate_token(extra)
|
||||
if not token:
|
||||
return ''
|
||||
|
||||
# Get acl from eauth module.
|
||||
auth_list = self.loadauth.get_auth_list(extra, token)
|
||||
|
||||
# Authorize the request
|
||||
if not self.ckminions.auth_check(
|
||||
auth_list,
|
||||
load['fun'],
|
||||
load['arg'],
|
||||
load['tgt'],
|
||||
load.get('tgt_type', 'glob'),
|
||||
minions=minions,
|
||||
# always accept find_job
|
||||
whitelist=['saltutil.find_job'],
|
||||
):
|
||||
log.warning('Authentication failure of type "token" occurred.')
|
||||
return ''
|
||||
load['user'] = token['name']
|
||||
log.debug('Minion tokenized user = "{0}"'.format(load['user']))
|
||||
elif 'eauth' in extra:
|
||||
# Authenticate.
|
||||
if not self.loadauth.authenticate_eauth(extra):
|
||||
return ''
|
||||
|
||||
# Get acl from eauth module.
|
||||
auth_list = self.loadauth.get_auth_list(extra)
|
||||
|
||||
# Authorize the request
|
||||
if not self.ckminions.auth_check(
|
||||
auth_list,
|
||||
load['fun'],
|
||||
load['arg'],
|
||||
load['tgt'],
|
||||
load.get('tgt_type', 'glob'),
|
||||
minions=minions,
|
||||
# always accept find_job
|
||||
whitelist=['saltutil.find_job'],
|
||||
):
|
||||
log.warning('Authentication failure of type "eauth" occurred.')
|
||||
return ''
|
||||
load['user'] = self.loadauth.load_name(extra) # The username we are attempting to auth with
|
||||
# Verify that the caller has root on master
|
||||
# Check for external auth calls and authenticate
|
||||
auth_type, err_name, key = self._prep_auth_info(extra)
|
||||
if auth_type == 'user':
|
||||
auth_check = self.loadauth.check_authentication(load, auth_type, key=key)
|
||||
else:
|
||||
auth_ret = self.loadauth.authenticate_key(load, self.key)
|
||||
if auth_ret is False:
|
||||
auth_check = self.loadauth.check_authentication(extra, auth_type)
|
||||
|
||||
# Setup authorization list variable and error information
|
||||
auth_list = auth_check.get('auth_list', [])
|
||||
error = auth_check.get('error')
|
||||
err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)
|
||||
|
||||
if error:
|
||||
# Authentication error occurred: do not continue.
|
||||
log.warning(err_msg)
|
||||
return ''
|
||||
|
||||
# All Token, Eauth, and non-root users must pass the authorization check
|
||||
if auth_type != 'user' or (auth_type == 'user' and auth_list):
|
||||
# Authorize the request
|
||||
authorized = self.ckminions.auth_check(
|
||||
auth_list,
|
||||
load['fun'],
|
||||
load['arg'],
|
||||
load['tgt'],
|
||||
load.get('tgt_type', 'glob'),
|
||||
minions=minions,
|
||||
# always accept find_job
|
||||
whitelist=['saltutil.find_job'],
|
||||
)
|
||||
|
||||
if not authorized:
|
||||
# Authorization error occurred. Log warning and do not continue.
|
||||
log.warning(err_msg)
|
||||
return ''
|
||||
|
||||
if auth_ret is not True:
|
||||
if salt.auth.AuthUser(load['user']).is_sudo():
|
||||
if not self.opts['sudo_acl'] or not self.opts['publisher_acl']:
|
||||
auth_ret = True
|
||||
|
||||
if auth_ret is not True:
|
||||
# Avoid circular import
|
||||
import salt.utils.master
|
||||
auth_list = salt.utils.master.get_values_of_matching_keys(
|
||||
self.opts['publisher_acl'],
|
||||
auth_ret)
|
||||
if not auth_list:
|
||||
log.warning(
|
||||
'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return ''
|
||||
|
||||
if not self.ckminions.auth_check(
|
||||
auth_list,
|
||||
load['fun'],
|
||||
load['arg'],
|
||||
load['tgt'],
|
||||
load.get('tgt_type', 'glob'),
|
||||
minions=minions,
|
||||
# always accept find_job
|
||||
whitelist=['saltutil.find_job'],
|
||||
):
|
||||
log.warning('Authentication failure of type "user" occurred.')
|
||||
return ''
|
||||
# Perform some specific auth_type tasks after the authorization check
|
||||
if auth_type == 'token':
|
||||
username = auth_check.get('username')
|
||||
load['user'] = username
|
||||
log.debug('Minion tokenized user = "{0}"'.format(username))
|
||||
elif auth_type == 'eauth':
|
||||
# The username we are attempting to auth with
|
||||
load['user'] = self.loadauth.load_name(extra)
|
||||
|
||||
# If we order masters (via a syndic), don't short circuit if no minions
|
||||
# are found
|
||||
|
@ -71,6 +71,15 @@ log = logging.getLogger(__name__)
|
||||
__virtualname__ = 'git'
|
||||
|
||||
|
||||
def _gitfs(init_remotes=True):
|
||||
return salt.utils.gitfs.GitFS(
|
||||
__opts__,
|
||||
__opts__['gitfs_remotes'],
|
||||
per_remote_overrides=PER_REMOTE_OVERRIDES,
|
||||
per_remote_only=PER_REMOTE_ONLY,
|
||||
init_remotes=init_remotes)
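A note for reviewers: the helper is used in two forms throughout the rest of this module, sketched here:

.. code-block:: python

    _gitfs()                     # build GitFS and initialize the configured remotes
    _gitfs(init_remotes=False)   # cheaper construction, as in __virtual__() and clear_cache()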
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only load if the desired provider module is present and gitfs is enabled
|
||||
@ -79,7 +88,7 @@ def __virtual__():
|
||||
if __virtualname__ not in __opts__['fileserver_backend']:
|
||||
return False
|
||||
try:
|
||||
salt.utils.gitfs.GitFS(__opts__)
|
||||
_gitfs(init_remotes=False)
|
||||
# Initialization of the GitFS object did not fail, so we know we have
|
||||
# valid configuration syntax and that a valid provider was detected.
|
||||
return __virtualname__
|
||||
@ -92,18 +101,14 @@ def clear_cache():
|
||||
'''
|
||||
Completely clear gitfs cache
|
||||
'''
|
||||
gitfs = salt.utils.gitfs.GitFS(__opts__)
|
||||
return gitfs.clear_cache()
|
||||
return _gitfs(init_remotes=False).clear_cache()
|
||||
|
||||
|
||||
def clear_lock(remote=None, lock_type='update'):
|
||||
'''
|
||||
Clear update.lk
|
||||
'''
|
||||
gitfs = salt.utils.gitfs.GitFS(__opts__)
|
||||
gitfs.init_remotes(__opts__['gitfs_remotes'],
|
||||
PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
return gitfs.clear_lock(remote=remote, lock_type=lock_type)
|
||||
return _gitfs().clear_lock(remote=remote, lock_type=lock_type)
|
||||
|
||||
|
||||
def lock(remote=None):
|
||||
@ -114,30 +119,21 @@ def lock(remote=None):
|
||||
information, or a pattern. If the latter, then remotes for which the URL
|
||||
matches the pattern will be locked.
|
||||
'''
|
||||
gitfs = salt.utils.gitfs.GitFS(__opts__)
|
||||
gitfs.init_remotes(__opts__['gitfs_remotes'],
|
||||
PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
return gitfs.lock(remote=remote)
|
||||
return _gitfs().lock(remote=remote)
|
||||
|
||||
|
||||
def update():
|
||||
'''
|
||||
Execute a git fetch on all of the repos
|
||||
'''
|
||||
gitfs = salt.utils.gitfs.GitFS(__opts__)
|
||||
gitfs.init_remotes(__opts__['gitfs_remotes'],
|
||||
PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
gitfs.update()
|
||||
_gitfs().update()
|
||||
|
||||
|
||||
def envs(ignore_cache=False):
|
||||
'''
|
||||
Return a list of refs that can be used as environments
|
||||
'''
|
||||
gitfs = salt.utils.gitfs.GitFS(__opts__)
|
||||
gitfs.init_remotes(__opts__['gitfs_remotes'],
|
||||
PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
return gitfs.envs(ignore_cache=ignore_cache)
|
||||
return _gitfs().envs(ignore_cache=ignore_cache)
|
||||
|
||||
|
||||
def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
|
||||
@ -145,10 +141,7 @@ def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
|
||||
Find the first file to match the path and ref, read the file out of git
|
||||
and send the path to the newly cached file
|
||||
'''
|
||||
gitfs = salt.utils.gitfs.GitFS(__opts__)
|
||||
gitfs.init_remotes(__opts__['gitfs_remotes'],
|
||||
PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
return gitfs.find_file(path, tgt_env=tgt_env, **kwargs)
|
||||
return _gitfs().find_file(path, tgt_env=tgt_env, **kwargs)
|
||||
|
||||
|
||||
def init():
|
||||
@ -156,29 +149,21 @@ def init():
|
||||
Initialize remotes. This is only used by the master's pre-flight checks,
|
||||
and is not invoked by GitFS.
|
||||
'''
|
||||
gitfs = salt.utils.gitfs.GitFS(__opts__)
|
||||
gitfs.init_remotes(__opts__['gitfs_remotes'],
|
||||
PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
_gitfs()
|
||||
|
||||
|
||||
def serve_file(load, fnd):
|
||||
'''
|
||||
Return a chunk from a file based on the data received
|
||||
'''
|
||||
gitfs = salt.utils.gitfs.GitFS(__opts__)
|
||||
gitfs.init_remotes(__opts__['gitfs_remotes'],
|
||||
PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
return gitfs.serve_file(load, fnd)
|
||||
return _gitfs().serve_file(load, fnd)
|
||||
|
||||
|
||||
def file_hash(load, fnd):
|
||||
'''
|
||||
Return a file hash, the hash type is set in the master config file
|
||||
'''
|
||||
gitfs = salt.utils.gitfs.GitFS(__opts__)
|
||||
gitfs.init_remotes(__opts__['gitfs_remotes'],
|
||||
PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
return gitfs.file_hash(load, fnd)
|
||||
return _gitfs().file_hash(load, fnd)
|
||||
|
||||
|
||||
def file_list(load):
|
||||
@ -186,10 +171,7 @@ def file_list(load):
|
||||
Return a list of all files on the file server in a specified
|
||||
environment (specified as a key within the load dict).
|
||||
'''
|
||||
gitfs = salt.utils.gitfs.GitFS(__opts__)
|
||||
gitfs.init_remotes(__opts__['gitfs_remotes'],
|
||||
PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
return gitfs.file_list(load)
|
||||
return _gitfs().file_list(load)
|
||||
|
||||
|
||||
def file_list_emptydirs(load): # pylint: disable=W0613
|
||||
@ -204,17 +186,11 @@ def dir_list(load):
|
||||
'''
|
||||
Return a list of all directories on the master
|
||||
'''
|
||||
gitfs = salt.utils.gitfs.GitFS(__opts__)
|
||||
gitfs.init_remotes(__opts__['gitfs_remotes'],
|
||||
PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
return gitfs.dir_list(load)
|
||||
return _gitfs().dir_list(load)
|
||||
|
||||
|
||||
def symlink_list(load):
|
||||
'''
|
||||
Return a dict of all symlinks based on a given path in the repo
|
||||
'''
|
||||
gitfs = salt.utils.gitfs.GitFS(__opts__)
|
||||
gitfs.init_remotes(__opts__['gitfs_remotes'],
|
||||
PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
return gitfs.symlink_list(load)
|
||||
return _gitfs().symlink_list(load)
|
||||
|
@ -395,46 +395,116 @@ def _sunos_cpudata():
|
||||
return grains
|
||||
|
||||
|
||||
def _linux_memdata():
|
||||
'''
|
||||
Return the memory information for Linux-like systems
|
||||
'''
|
||||
grains = {'mem_total': 0, 'swap_total': 0}
|
||||
|
||||
meminfo = '/proc/meminfo'
|
||||
if os.path.isfile(meminfo):
|
||||
with salt.utils.files.fopen(meminfo, 'r') as ifile:
|
||||
for line in ifile:
|
||||
comps = line.rstrip('\n').split(':')
|
||||
if not len(comps) > 1:
|
||||
continue
|
||||
if comps[0].strip() == 'MemTotal':
|
||||
# Use floor division to force output to be an integer
|
||||
grains['mem_total'] = int(comps[1].split()[0]) // 1024
|
||||
if comps[0].strip() == 'SwapTotal':
|
||||
# Use floor division to force output to be an integer
|
||||
grains['swap_total'] = int(comps[1].split()[0]) // 1024
|
||||
return grains
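As a worked example of the conversion above, a hypothetical ``MemTotal`` line reported in kB is floor-divided down to a whole number of megabytes:

.. code-block:: python

    line = 'MemTotal:       16384256 kB'
    comps = line.rstrip('\n').split(':')
    comps[1].split()[0]                # '16384256'
    int(comps[1].split()[0]) // 1024   # 16000, i.e. mem_total in MB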
|
||||
|
||||
|
||||
def _osx_memdata():
|
||||
'''
|
||||
Return the memory information for BSD-like systems
|
||||
'''
|
||||
grains = {'mem_total': 0, 'swap_total': 0}
|
||||
|
||||
sysctl = salt.utils.path.which('sysctl')
|
||||
if sysctl:
|
||||
mem = __salt__['cmd.run']('{0} -n hw.memsize'.format(sysctl))
|
||||
swap_total = __salt__['cmd.run']('{0} -n vm.swapusage'.format(sysctl)).split()[2]
|
||||
if swap_total.endswith('K'):
|
||||
_power = 2**10
|
||||
elif swap_total.endswith('M'):
|
||||
_power = 2**20
|
||||
elif swap_total.endswith('G'):
|
||||
_power = 2**30
|
||||
swap_total = float(swap_total[:-1]) * _power
|
||||
|
||||
grains['mem_total'] = int(mem) // 1024 // 1024
|
||||
grains['swap_total'] = int(swap_total) // 1024 // 1024
|
||||
return grains
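For illustration, with a hypothetical ``sysctl -n vm.swapusage`` output the suffix scaling above works out as follows:

.. code-block:: python

    out = 'total = 2048.00M  used = 1024.00M  free = 1024.00M'   # made-up output
    swap_total = out.split()[2]                                  # '2048.00M'
    _power = 2**20                                               # trailing 'M'
    int(float(swap_total[:-1]) * _power) // 1024 // 1024         # 2048 (MB)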
|
||||
|
||||
|
||||
def _bsd_memdata(osdata):
|
||||
'''
|
||||
Return the memory information for BSD-like systems
|
||||
'''
|
||||
grains = {'mem_total': 0, 'swap_total': 0}
|
||||
|
||||
sysctl = salt.utils.path.which('sysctl')
|
||||
if sysctl:
|
||||
mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl))
|
||||
swap_total = __salt__['cmd.run']('{0} -n vm.swap_total'.format(sysctl))
|
||||
if osdata['kernel'] == 'NetBSD' and mem.startswith('-'):
|
||||
mem = __salt__['cmd.run']('{0} -n hw.physmem64'.format(sysctl))
|
||||
grains['mem_total'] = int(mem) // 1024 // 1024
|
||||
grains['swap_total'] = int(swap_total) // 1024 // 1024
|
||||
return grains
|
||||
|
||||
|
||||
def _sunos_memdata():
|
||||
'''
|
||||
Return the memory information for SunOS-like systems
|
||||
'''
|
||||
grains = {'mem_total': 0, 'swap_total': 0}
|
||||
|
||||
prtconf = '/usr/sbin/prtconf 2>/dev/null'
|
||||
for line in __salt__['cmd.run'](prtconf, python_shell=True).splitlines():
|
||||
comps = line.split(' ')
|
||||
if comps[0].strip() == 'Memory' and comps[1].strip() == 'size:':
|
||||
grains['mem_total'] = int(comps[2].strip())
|
||||
|
||||
swap_cmd = salt.utils.path.which('swap')
|
||||
swap_total = __salt__['cmd.run']('{0} -s'.format(swap_cmd)).split()[1]
|
||||
grains['swap_total'] = int(swap_total) // 1024
|
||||
return grains
|
||||
|
||||
|
||||
def _windows_memdata():
|
||||
'''
|
||||
Return the memory information for Windows systems
|
||||
'''
|
||||
grains = {'mem_total': 0}
|
||||
# get the Total Physical memory as reported by msinfo32
|
||||
tot_bytes = win32api.GlobalMemoryStatusEx()['TotalPhys']
|
||||
# return memory info in megabytes
|
||||
grains['mem_total'] = int(tot_bytes / (1024 ** 2))
|
||||
return grains
|
||||
|
||||
|
||||
def _memdata(osdata):
|
||||
'''
|
||||
Gather information about the system memory
|
||||
'''
|
||||
# Provides:
|
||||
# mem_total
|
||||
# swap_total, for supported systems.
|
||||
grains = {'mem_total': 0}
|
||||
if osdata['kernel'] == 'Linux':
|
||||
meminfo = '/proc/meminfo'
|
||||
|
||||
if os.path.isfile(meminfo):
|
||||
with salt.utils.files.fopen(meminfo, 'r') as ifile:
|
||||
for line in ifile:
|
||||
comps = line.rstrip('\n').split(':')
|
||||
if not len(comps) > 1:
|
||||
continue
|
||||
if comps[0].strip() == 'MemTotal':
|
||||
# Use floor division to force output to be an integer
|
||||
grains['mem_total'] = int(comps[1].split()[0]) // 1024
|
||||
elif osdata['kernel'] in ('FreeBSD', 'OpenBSD', 'NetBSD', 'Darwin'):
|
||||
sysctl = salt.utils.path.which('sysctl')
|
||||
if sysctl:
|
||||
if osdata['kernel'] == 'Darwin':
|
||||
mem = __salt__['cmd.run']('{0} -n hw.memsize'.format(sysctl))
|
||||
else:
|
||||
mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl))
|
||||
if osdata['kernel'] == 'NetBSD' and mem.startswith('-'):
|
||||
mem = __salt__['cmd.run']('{0} -n hw.physmem64'.format(sysctl))
|
||||
grains['mem_total'] = int(mem) // 1024 // 1024
|
||||
grains.update(_linux_memdata())
|
||||
elif osdata['kernel'] in ('FreeBSD', 'OpenBSD', 'NetBSD'):
|
||||
grains.update(_bsd_memdata(osdata))
|
||||
elif osdata['kernel'] == 'Darwin':
|
||||
grains.update(_osx_memdata())
|
||||
elif osdata['kernel'] == 'SunOS':
|
||||
prtconf = '/usr/sbin/prtconf 2>/dev/null'
|
||||
for line in __salt__['cmd.run'](prtconf, python_shell=True).splitlines():
|
||||
comps = line.split(' ')
|
||||
if comps[0].strip() == 'Memory' and comps[1].strip() == 'size:':
|
||||
grains['mem_total'] = int(comps[2].strip())
|
||||
grains.update(_sunos_memdata())
|
||||
elif osdata['kernel'] == 'Windows' and HAS_WMI:
|
||||
# get the Total Physical memory as reported by msinfo32
|
||||
tot_bytes = win32api.GlobalMemoryStatusEx()['TotalPhys']
|
||||
# return memory info in gigabytes
|
||||
grains['mem_total'] = int(tot_bytes / (1024 ** 2))
|
||||
grains.update(_windows_memdata())
|
||||
return grains
|
||||
|
||||
|
||||
@ -1410,7 +1480,10 @@ def os_data():
|
||||
.format(' '.join(init_cmdline))
|
||||
)
|
||||
|
||||
# Add lsb grains on any distro with lsb-release
|
||||
# Add lsb grains on any distro with lsb-release. Note that this import
|
||||
# can fail on systems with lsb-release installed if the system package
|
||||
# does not install the python package for the python interpreter used by
|
||||
# Salt (i.e. python2 or python3)
|
||||
try:
|
||||
import lsb_release # pylint: disable=import-error
|
||||
release = lsb_release.get_distro_information()
|
||||
@ -1459,7 +1532,13 @@ def os_data():
|
||||
if 'VERSION_ID' in os_release:
|
||||
grains['lsb_distrib_release'] = os_release['VERSION_ID']
|
||||
if 'PRETTY_NAME' in os_release:
|
||||
grains['lsb_distrib_codename'] = os_release['PRETTY_NAME']
|
||||
codename = os_release['PRETTY_NAME']
|
||||
# https://github.com/saltstack/salt/issues/44108
|
||||
if os_release['ID'] == 'debian':
|
||||
codename_match = re.search(r'\((\w+)\)$', codename)
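# e.g. PRETTY_NAME 'Debian GNU/Linux 9 (stretch)' yields codename 'stretch'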
|
||||
if codename_match:
|
||||
codename = codename_match.group(1)
|
||||
grains['lsb_distrib_codename'] = codename
|
||||
if 'CPE_NAME' in os_release:
|
||||
if ":suse:" in os_release['CPE_NAME'] or ":opensuse:" in os_release['CPE_NAME']:
|
||||
grains['os'] = "SUSE"
|
||||
|
@ -123,36 +123,25 @@ def setup_handlers():
|
||||
url = urlparse(dsn)
|
||||
if not transport_registry.supported_scheme(url.scheme):
|
||||
raise ValueError('Unsupported Sentry DSN scheme: {0}'.format(url.scheme))
|
||||
dsn_config = {}
|
||||
if (hasattr(transport_registry, 'compute_scope') and
|
||||
callable(transport_registry.compute_scope)):
|
||||
conf_extras = transport_registry.compute_scope(url, dsn_config)
|
||||
dsn_config.update(conf_extras)
|
||||
options.update({
|
||||
'project': dsn_config['SENTRY_PROJECT'],
|
||||
'servers': dsn_config['SENTRY_SERVERS'],
|
||||
'public_key': dsn_config['SENTRY_PUBLIC_KEY'],
|
||||
'secret_key': dsn_config['SENTRY_SECRET_KEY']
|
||||
})
|
||||
except ValueError as exc:
|
||||
log.info(
|
||||
'Raven failed to parse the DSN provided in the '
|
||||
'configuration: {0}'.format(exc)
|
||||
)
|
||||
|
||||
# Allow options to be overridden if previously parsed, or define them
|
||||
for key in ('project', 'servers', 'public_key', 'secret_key'):
|
||||
config_value = get_config_value(key)
|
||||
if config_value is None and key not in options:
|
||||
log.debug(
|
||||
'The required \'sentry_handler\' configuration key, '
|
||||
'\'{0}\', is not properly configured. Not configuring '
|
||||
'the sentry logging handler.'.format(key)
|
||||
)
|
||||
return
|
||||
elif config_value is None:
|
||||
continue
|
||||
options[key] = config_value
|
||||
if not dsn:
|
||||
for key in ('project', 'servers', 'public_key', 'secret_key'):
|
||||
config_value = get_config_value(key)
|
||||
if config_value is None and key not in options:
|
||||
log.debug(
|
||||
'The required \'sentry_handler\' configuration key, '
|
||||
'\'{0}\', is not properly configured. Not configuring '
|
||||
'the sentry logging handler.'.format(key)
|
||||
)
|
||||
return
|
||||
elif config_value is None:
|
||||
continue
|
||||
options[key] = config_value
|
||||
|
||||
# site: An optional, arbitrary string to identify this client installation.
|
||||
options.update({
|
||||
|
129 salt/master.py
@ -486,11 +486,11 @@ class Master(SMaster):
|
||||
for repo in git_pillars:
|
||||
new_opts[u'ext_pillar'] = [repo]
|
||||
try:
|
||||
git_pillar = salt.utils.gitfs.GitPillar(new_opts)
|
||||
git_pillar.init_remotes(
|
||||
git_pillar = salt.utils.gitfs.GitPillar(
|
||||
new_opts,
|
||||
repo[u'git'],
|
||||
salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
|
||||
salt.pillar.git_pillar.PER_REMOTE_ONLY)
|
||||
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
|
||||
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
|
||||
except FileserverConfigError as exc:
|
||||
critical_errors.append(exc.strerror)
|
||||
finally:
|
||||
@ -1840,89 +1840,52 @@ class ClearFuncs(object):
|
||||
clear_load.get(u'tgt_type', u'glob'),
|
||||
delimiter
|
||||
)
|
||||
minions = _res.get('minions', list())
|
||||
missing = _res.get('missing', list())
|
||||
minions = _res.get(u'minions', list())
|
||||
missing = _res.get(u'missing', list())
|
||||
|
||||
# Check for external auth calls
|
||||
if extra.get(u'token', False):
|
||||
# Authenticate.
|
||||
token = self.loadauth.authenticate_token(extra)
|
||||
if not token:
|
||||
return u''
|
||||
|
||||
# Get acl
|
||||
auth_list = self.loadauth.get_auth_list(extra, token)
|
||||
|
||||
# Authorize the request
|
||||
if not self.ckminions.auth_check(
|
||||
auth_list,
|
||||
clear_load[u'fun'],
|
||||
clear_load[u'arg'],
|
||||
clear_load[u'tgt'],
|
||||
clear_load.get(u'tgt_type', u'glob'),
|
||||
minions=minions,
|
||||
# always accept find_job
|
||||
whitelist=[u'saltutil.find_job'],
|
||||
):
|
||||
log.warning(u'Authentication failure of type "token" occurred.')
|
||||
return u''
|
||||
clear_load[u'user'] = token[u'name']
|
||||
log.debug(u'Minion tokenized user = "%s"', clear_load[u'user'])
|
||||
elif u'eauth' in extra:
|
||||
# Authenticate.
|
||||
if not self.loadauth.authenticate_eauth(extra):
|
||||
return u''
|
||||
|
||||
# Get acl from eauth module.
|
||||
auth_list = self.loadauth.get_auth_list(extra)
|
||||
|
||||
# Authorize the request
|
||||
if not self.ckminions.auth_check(
|
||||
auth_list,
|
||||
clear_load[u'fun'],
|
||||
clear_load[u'arg'],
|
||||
clear_load[u'tgt'],
|
||||
clear_load.get(u'tgt_type', u'glob'),
|
||||
minions=minions,
|
||||
# always accept find_job
|
||||
whitelist=[u'saltutil.find_job'],
|
||||
):
|
||||
log.warning(u'Authentication failure of type "eauth" occurred.')
|
||||
return u''
|
||||
clear_load[u'user'] = self.loadauth.load_name(extra) # The username we are attempting to auth with
|
||||
# Verify that the caller has root on master
|
||||
# Check for external auth calls and authenticate
|
||||
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
|
||||
if auth_type == 'user':
|
||||
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
|
||||
else:
|
||||
auth_ret = self.loadauth.authenticate_key(clear_load, self.key)
|
||||
if auth_ret is False:
|
||||
auth_check = self.loadauth.check_authentication(extra, auth_type)
|
||||
|
||||
# Setup authorization list variable and error information
|
||||
auth_list = auth_check.get(u'auth_list', [])
|
||||
err_msg = u'Authentication failure of type "{0}" occurred.'.format(auth_type)
|
||||
|
||||
if auth_check.get(u'error'):
|
||||
# Authentication error occurred: do not continue.
|
||||
log.warning(err_msg)
|
||||
return u''
|
||||
|
||||
# All Token, Eauth, and non-root users must pass the authorization check
|
||||
if auth_type != u'user' or (auth_type == u'user' and auth_list):
|
||||
# Authorize the request
|
||||
authorized = self.ckminions.auth_check(
|
||||
auth_list,
|
||||
clear_load[u'fun'],
|
||||
clear_load[u'arg'],
|
||||
clear_load[u'tgt'],
|
||||
clear_load.get(u'tgt_type', u'glob'),
|
||||
minions=minions,
|
||||
# always accept find_job
|
||||
whitelist=[u'saltutil.find_job'],
|
||||
)
|
||||
|
||||
if not authorized:
|
||||
# Authorization error occurred. Do not continue.
|
||||
log.warning(err_msg)
|
||||
return u''
|
||||
|
||||
if auth_ret is not True:
|
||||
if salt.auth.AuthUser(clear_load[u'user']).is_sudo():
|
||||
if not self.opts[u'sudo_acl'] or not self.opts[u'publisher_acl']:
|
||||
auth_ret = True
|
||||
|
||||
if auth_ret is not True:
|
||||
auth_list = salt.utils.master.get_values_of_matching_keys(
|
||||
self.opts[u'publisher_acl'],
|
||||
auth_ret)
|
||||
if not auth_list:
|
||||
log.warning(
|
||||
u'Authentication failure of type "user" occurred.'
|
||||
)
|
||||
return u''
|
||||
|
||||
if not self.ckminions.auth_check(
|
||||
auth_list,
|
||||
clear_load[u'fun'],
|
||||
clear_load[u'arg'],
|
||||
clear_load[u'tgt'],
|
||||
clear_load.get(u'tgt_type', u'glob'),
|
||||
minions=minions,
|
||||
# always accept find_job
|
||||
whitelist=[u'saltutil.find_job'],
|
||||
):
|
||||
log.warning(u'Authentication failure of type "user" occurred.')
|
||||
return u''
|
||||
# Perform some specific auth_type tasks after the authorization check
|
||||
if auth_type == u'token':
|
||||
username = auth_check.get(u'username')
|
||||
clear_load[u'user'] = username
|
||||
log.debug(u'Minion tokenized user = "%s"', username)
|
||||
elif auth_type == u'eauth':
|
||||
# The username we are attempting to auth with
|
||||
clear_load[u'user'] = self.loadauth.load_name(extra)
|
||||
|
||||
# If we order masters (via a syndic), don't short circuit if no minions
|
||||
# are found
|
||||
|
@ -2063,6 +2063,8 @@ class Minion(MinionBase):
|
||||
func = data.get(u'func', None)
|
||||
name = data.get(u'name', None)
|
||||
beacon_data = data.get(u'beacon_data', None)
|
||||
include_pillar = data.get(u'include_pillar', None)
|
||||
include_opts = data.get(u'include_opts', None)
|
||||
|
||||
if func == u'add':
|
||||
self.beacons.add_beacon(name, beacon_data)
|
||||
@ -2079,7 +2081,7 @@ class Minion(MinionBase):
|
||||
elif func == u'disable_beacon':
|
||||
self.beacons.disable_beacon(name)
|
||||
elif func == u'list':
|
||||
self.beacons.list_beacons()
|
||||
self.beacons.list_beacons(include_opts, include_pillar)
|
||||
elif func == u'list_available':
|
||||
self.beacons.list_available_beacons()
|
||||
elif func == u'validate_beacon':
|
||||
|
@ -29,7 +29,6 @@ import json
|
||||
import yaml
|
||||
# pylint: disable=no-name-in-module,import-error,redefined-builtin
|
||||
from salt.ext import six
|
||||
from salt.ext.six.moves import range
|
||||
from salt.ext.six.moves.urllib.error import HTTPError
|
||||
from salt.ext.six.moves.urllib.request import Request as _Request, urlopen as _urlopen
|
||||
# pylint: enable=no-name-in-module,import-error,redefined-builtin
|
||||
@ -1610,7 +1609,7 @@ def _consolidate_repo_sources(sources):
|
||||
combined_comps = set(repo.comps).union(set(combined.comps))
|
||||
consolidated[key].comps = list(combined_comps)
|
||||
else:
|
||||
consolidated[key] = sourceslist.SourceEntry(_strip_uri(repo.line))
|
||||
consolidated[key] = sourceslist.SourceEntry(salt.utils.pkg.deb.strip_uri(repo.line))
|
||||
|
||||
if repo.file != base_file:
|
||||
delete_files.add(repo.file)
|
||||
@ -1718,7 +1717,7 @@ def list_repos():
|
||||
repo['dist'] = source.dist
|
||||
repo['type'] = source.type
|
||||
repo['uri'] = source.uri.rstrip('/')
|
||||
repo['line'] = _strip_uri(source.line.strip())
|
||||
repo['line'] = salt.utils.pkg.deb.strip_uri(source.line.strip())
|
||||
repo['architectures'] = getattr(source, 'architectures', [])
|
||||
repos.setdefault(source.uri, []).append(repo)
|
||||
return repos
|
||||
@ -2477,18 +2476,6 @@ def file_dict(*packages):
|
||||
return __salt__['lowpkg.file_dict'](*packages)
|
||||
|
||||
|
||||
def _strip_uri(repo):
|
||||
'''
|
||||
Remove the trailing slash from the URI in a repo definition
|
||||
'''
|
||||
splits = repo.split()
|
||||
for idx in range(len(splits)):
|
||||
if any(splits[idx].startswith(x)
|
||||
for x in ('http://', 'https://', 'ftp://')):
|
||||
splits[idx] = splits[idx].rstrip('/')
|
||||
return ' '.join(splits)
|
||||
|
||||
|
||||
def expand_repo_def(**kwargs):
|
||||
'''
|
||||
Take a repository definition and expand it to the full pkg repository dict
|
||||
@ -2504,7 +2491,7 @@ def expand_repo_def(**kwargs):
|
||||
_check_apt()
|
||||
|
||||
sanitized = {}
|
||||
repo = _strip_uri(kwargs['repo'])
|
||||
repo = salt.utils.pkg.deb.strip_uri(kwargs['repo'])
|
||||
if repo.startswith('ppa:') and __grains__['os'] in ('Ubuntu', 'Mint', 'neon'):
|
||||
dist = __grains__['lsb_distrib_codename']
|
||||
owner_name, ppa_name = repo[4:].split('/', 1)
|
||||
|
@ -28,12 +28,22 @@ __func_alias__ = {
|
||||
}
|
||||
|
||||
|
||||
def list_(return_yaml=True):
|
||||
def list_(return_yaml=True,
|
||||
include_pillar=True,
|
||||
include_opts=True):
|
||||
'''
|
||||
List the beacons currently configured on the minion
|
||||
|
||||
:param return_yaml: Whether to return YAML formatted output, default True
|
||||
:return: List of currently configured Beacons.
|
||||
:param return_yaml: Whether to return YAML formatted output,
|
||||
default True
|
||||
|
||||
:param include_pillar: Whether to include beacons that are
|
||||
configured in pillar, default is True.
|
||||
|
||||
:param include_opts: Whether to include beacons that are
|
||||
configured in opts, default is True.
|
||||
|
||||
:return: List of currently configured Beacons.
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -46,7 +56,10 @@ def list_(return_yaml=True):
|
||||
|
||||
try:
|
||||
eventer = salt.utils.event.get_event('minion', opts=__opts__)
|
||||
res = __salt__['event.fire']({'func': 'list'}, 'manage_beacons')
|
||||
res = __salt__['event.fire']({'func': 'list',
|
||||
'include_pillar': include_pillar,
|
||||
'include_opts': include_opts},
|
||||
'manage_beacons')
|
||||
if res:
|
||||
event_ret = eventer.get_event(tag='/salt/minion/minion_beacons_list_complete', wait=30)
|
||||
log.debug('event_ret {0}'.format(event_ret))
|
||||
@ -133,6 +146,10 @@ def add(name, beacon_data, **kwargs):
|
||||
ret['comment'] = 'Beacon {0} is already configured.'.format(name)
|
||||
return ret
|
||||
|
||||
if name not in list_available(return_yaml=False):
|
||||
ret['comment'] = 'Beacon "{0}" is not available.'.format(name)
|
||||
return ret
|
||||
|
||||
if 'test' in kwargs and kwargs['test']:
|
||||
ret['result'] = True
|
||||
ret['comment'] = 'Beacon: {0} would be added.'.format(name)
|
||||
@ -170,7 +187,10 @@ def add(name, beacon_data, **kwargs):
|
||||
if name in beacons and beacons[name] == beacon_data:
|
||||
ret['result'] = True
|
||||
ret['comment'] = 'Added beacon: {0}.'.format(name)
|
||||
return ret
|
||||
else:
|
||||
ret['result'] = False
|
||||
ret['comment'] = event_ret['comment']
|
||||
return ret
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret['comment'] = 'Event module not available. Beacon add failed.'
|
||||
@ -262,7 +282,10 @@ def modify(name, beacon_data, **kwargs):
|
||||
if name in beacons and beacons[name] == beacon_data:
|
||||
ret['result'] = True
|
||||
ret['comment'] = 'Modified beacon: {0}.'.format(name)
|
||||
return ret
|
||||
else:
|
||||
ret['result'] = False
|
||||
ret['comment'] = event_ret['comment']
|
||||
return ret
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret['comment'] = 'Event module not available. Beacon add failed.'
|
||||
@ -299,12 +322,14 @@ def delete(name, **kwargs):
|
||||
if res:
|
||||
event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_delete_complete', wait=30)
|
||||
if event_ret and event_ret['complete']:
|
||||
log.debug('== event_ret {} =='.format(event_ret))
|
||||
beacons = event_ret['beacons']
|
||||
if name not in beacons:
|
||||
ret['result'] = True
|
||||
ret['comment'] = 'Deleted beacon: {0}.'.format(name)
|
||||
return ret
|
||||
else:
|
||||
ret['result'] = False
|
||||
ret['comment'] = event_ret['comment']
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret['comment'] = 'Event module not available. Beacon add failed.'
|
||||
@ -327,7 +352,7 @@ def save():
|
||||
ret = {'comment': [],
|
||||
'result': True}
|
||||
|
||||
beacons = list_(return_yaml=False)
|
||||
beacons = list_(return_yaml=False, include_pillar=False)
|
||||
|
||||
# move this file into an configurable opt
|
||||
sfn = '{0}/{1}/beacons.conf'.format(__opts__['config_dir'],
|
||||
@ -380,7 +405,7 @@ def enable(**kwargs):
|
||||
else:
|
||||
ret['result'] = False
|
||||
ret['comment'] = 'Failed to enable beacons on minion.'
|
||||
return ret
|
||||
return ret
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret['comment'] = 'Event module not available. Beacons enable job failed.'
|
||||
@ -420,7 +445,7 @@ def disable(**kwargs):
|
||||
else:
|
||||
ret['result'] = False
|
||||
ret['comment'] = 'Failed to disable beacons on minion.'
|
||||
return ret
|
||||
return ret
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret['comment'] = 'Event module not available. Beacons enable job failed.'
|
||||
@ -483,7 +508,10 @@ def enable_beacon(name, **kwargs):
|
||||
else:
|
||||
ret['result'] = False
|
||||
ret['comment'] = 'Failed to enable beacon {0} on minion.'.format(name)
|
||||
return ret
|
||||
else:
|
||||
ret['result'] = False
|
||||
ret['comment'] = event_ret['comment']
|
||||
return ret
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret['comment'] = 'Event module not available. Beacon enable job failed.'
|
||||
@ -536,7 +564,10 @@ def disable_beacon(name, **kwargs):
|
||||
else:
|
||||
ret['result'] = False
|
||||
ret['comment'] = 'Failed to disable beacon on minion.'
|
||||
return ret
|
||||
else:
|
||||
ret['result'] = False
|
||||
ret['comment'] = event_ret['comment']
|
||||
return ret
|
||||
except KeyError:
|
||||
# Effectively a no-op, since we can't really return without an event system
|
||||
ret['comment'] = 'Event module not available. Beacon disable job failed.'
|
||||
|
@ -51,6 +51,7 @@ import datetime
|
||||
import logging
|
||||
import json
|
||||
import sys
|
||||
import time
|
||||
import email.mime.multipart
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
@ -677,11 +678,23 @@ def get_scaling_policy_arn(as_group, scaling_policy_name, region=None,
|
||||
salt '*' boto_asg.get_scaling_policy_arn mygroup mypolicy
|
||||
'''
|
||||
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
|
||||
policies = conn.get_all_policies(as_group=as_group)
|
||||
for policy in policies:
|
||||
if policy.name == scaling_policy_name:
|
||||
return policy.policy_arn
|
||||
log.error('Could not convert: {0}'.format(as_group))
|
||||
retries = 30
|
||||
while retries > 0:
|
||||
retries -= 1
|
||||
try:
|
||||
policies = conn.get_all_policies(as_group=as_group)
|
||||
for policy in policies:
|
||||
if policy.name == scaling_policy_name:
|
||||
return policy.policy_arn
|
||||
log.error('Could not convert: {0}'.format(as_group))
|
||||
return None
|
||||
except boto.exception.BotoServerError as e:
|
||||
if e.error_code != 'Throttling':
|
||||
raise
|
||||
log.debug('Throttled by API, will retry in 5 seconds')
|
||||
time.sleep(5)
|
||||
|
||||
log.error('Maximum number of retries exceeded')
|
||||
return None
|
||||
|
||||
|
||||
@ -763,11 +776,18 @@ def get_instances(name, lifecycle_state="InService", health_status="Healthy",
|
||||
# get full instance info, so that we can return the attribute
|
||||
instances = ec2_conn.get_only_instances(instance_ids=instance_ids)
|
||||
if attributes:
|
||||
return [[getattr(instance, attr).encode("ascii") for attr in attributes] for instance in instances]
|
||||
return [[_convert_attribute(instance, attr) for attr in attributes] for instance in instances]
|
||||
else:
|
||||
# properly handle case when not all instances have the requested attribute
|
||||
return [getattr(instance, attribute).encode("ascii") for instance in instances if getattr(instance, attribute)]
|
||||
return [getattr(instance, attribute).encode("ascii") for instance in instances]
|
||||
return [_convert_attribute(instance, attribute) for instance in instances if getattr(instance, attribute)]
|
||||
|
||||
|
||||
def _convert_attribute(instance, attribute):
|
||||
if attribute == "tags":
|
||||
tags = dict(getattr(instance, attribute))
|
||||
return {key.encode("utf-8"): value.encode("utf-8") for key, value in six.iteritems(tags)}
|
||||
|
||||
return getattr(instance, attribute).encode("ascii")
|
||||
|
||||
|
||||
def enter_standby(name, instance_ids, should_decrement_desired_capacity=False,
|
||||
|
@ -154,7 +154,7 @@ def get_unassociated_eip_address(domain='standard', region=None, key=None,
|
||||
Return the first unassociated EIP
|
||||
|
||||
domain
|
||||
Indicates whether the address is a EC2 address or a VPC address
|
||||
Indicates whether the address is an EC2 address or a VPC address
|
||||
(standard|vpc).
|
||||
|
||||
CLI Example:
|
||||
@ -771,9 +771,9 @@ def get_tags(instance_id=None, keyid=None, key=None, profile=None,
|
||||
def exists(instance_id=None, name=None, tags=None, region=None, key=None,
|
||||
keyid=None, profile=None, in_states=None, filters=None):
|
||||
'''
|
||||
Given a instance id, check to see if the given instance id exists.
|
||||
Given an instance id, check to see if the given instance id exists.
|
||||
|
||||
Returns True if the given an instance with the given id, name, or tags
|
||||
Returns True if the given instance with the given id, name, or tags
|
||||
exists; otherwise, False is returned.
|
||||
|
||||
CLI Example:
|
||||
|
@ -75,7 +75,7 @@ def __virtual__():
|
||||
Only load if boto libraries exist.
|
||||
'''
|
||||
if not HAS_BOTO:
|
||||
return (False, 'The modle boto_elasticache could not be loaded: boto libraries not found')
|
||||
return (False, 'The boto_elasticache module could not be loaded: boto libraries not found')
|
||||
__utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__)
|
||||
return True
|
||||
|
||||
|
@ -661,22 +661,29 @@ def get_health_check(name, region=None, key=None, keyid=None, profile=None):
|
||||
salt myminion boto_elb.get_health_check myelb
|
||||
'''
|
||||
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
|
||||
retries = 30
|
||||
|
||||
try:
|
||||
lb = conn.get_all_load_balancers(load_balancer_names=[name])
|
||||
lb = lb[0]
|
||||
ret = odict.OrderedDict()
|
||||
hc = lb.health_check
|
||||
ret['interval'] = hc.interval
|
||||
ret['target'] = hc.target
|
||||
ret['healthy_threshold'] = hc.healthy_threshold
|
||||
ret['timeout'] = hc.timeout
|
||||
ret['unhealthy_threshold'] = hc.unhealthy_threshold
|
||||
return ret
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
log.error('ELB {0} does not exist: {1}'.format(name, error))
|
||||
return {}
|
||||
while True:
|
||||
try:
|
||||
lb = conn.get_all_load_balancers(load_balancer_names=[name])
|
||||
lb = lb[0]
|
||||
ret = odict.OrderedDict()
|
||||
hc = lb.health_check
|
||||
ret['interval'] = hc.interval
|
||||
ret['target'] = hc.target
|
||||
ret['healthy_threshold'] = hc.healthy_threshold
|
||||
ret['timeout'] = hc.timeout
|
||||
ret['unhealthy_threshold'] = hc.unhealthy_threshold
|
||||
return ret
|
||||
except boto.exception.BotoServerError as e:
|
||||
if retries and e.code == 'Throttling':
|
||||
log.debug('Throttled by AWS API, will retry in 5 seconds.')
|
||||
time.sleep(5)
|
||||
retries -= 1
|
||||
continue
|
||||
log.error(e)
|
||||
log.error('ELB {0} not found.'.format(name))
|
||||
return {}
|
||||
|
||||
|
||||
def set_health_check(name, health_check, region=None, key=None, keyid=None,
|
||||
@ -691,16 +698,23 @@ def set_health_check(name, health_check, region=None, key=None, keyid=None,
|
||||
salt myminion boto_elb.set_health_check myelb '{"target": "HTTP:80/"}'
|
||||
'''
|
||||
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
|
||||
retries = 30
|
||||
|
||||
hc = HealthCheck(**health_check)
|
||||
try:
|
||||
conn.configure_health_check(name, hc)
|
||||
log.info('Configured health check on ELB {0}'.format(name))
|
||||
except boto.exception.BotoServerError as error:
|
||||
log.debug(error)
|
||||
log.info('Failed to configure health check on ELB {0}: {1}'.format(name, error))
|
||||
return False
|
||||
return True
|
||||
while True:
|
||||
try:
|
||||
conn.configure_health_check(name, hc)
|
||||
log.info('Configured health check on ELB {0}'.format(name))
|
||||
return True
|
||||
except boto.exception.BotoServerError as error:
|
||||
if retries and error.code == 'Throttling':
|
||||
log.debug('Throttled by AWS API, will retry in 5 seconds.')
|
||||
time.sleep(5)
|
||||
retries -= 1
|
||||
continue
|
||||
log.error(error)
|
||||
log.error('Failed to configure health check on ELB {0}'.format(name))
|
||||
return False


def register_instances(name, instances, region=None, key=None, keyid=None,

@ -763,7 +763,7 @@ def describe_vpcs(vpc_id=None, name=None, cidr=None, tags=None,

'''
Describe all VPCs, matching the filter criteria if provided.

Returns a a list of dictionaries with interesting properties.
Returns a list of dictionaries with interesting properties.

.. versionadded:: 2015.8.0

@ -219,7 +219,7 @@ def _connect(contact_points=None, port=None, cql_user=None, cql_pass=None,

# TODO: Call cluster.shutdown() when the module is unloaded on
# master/minion shutdown. Currently, Master.shutdown() and Minion.shutdown()
# do nothing to allow loaded modules to gracefully handle resources stored
# in __context__ (i.e. connection pools). This means that the the connection
# in __context__ (i.e. connection pools). This means that the connection
# pool is orphaned and Salt relies on Cassandra to reclaim connections.
# Perhaps if Master/Minion daemons could be enhanced to call an "__unload__"
# function, or something similar for each loaded module, connection pools

@ -430,7 +430,7 @@ def cql_query_with_prepare(query, statement_name, statement_arguments, async=Fal

values[key] = value
ret.append(values)

# If this was a synchronous call, then we either have a empty list
# If this was a synchronous call, then we either have an empty list
# because there was no return, or we have a return
# If this was an async call we only return the empty list
return ret
@ -269,6 +269,7 @@ def _run(cmd,

python_shell=False,
env=None,
clean_env=False,
prepend_path=None,
rstrip=True,
template=None,
umask=None,

@ -492,6 +493,9 @@ def _run(cmd,

run_env = os.environ.copy()
run_env.update(env)

if prepend_path:
run_env['PATH'] = ':'.join((prepend_path, run_env['PATH']))

if python_shell is None:
python_shell = False

@ -782,6 +786,7 @@ def run(cmd,

password=None,
encoded_cmd=False,
raise_err=False,
prepend_path=None,
**kwargs):
r'''
Execute the passed command and return the output as a string

@ -864,6 +869,11 @@ def run(cmd,

variables and set only those provided in the 'env' argument to this
function.

:param str prepend_path: $PATH segment to prepend (trailing ':' not necessary)
to $PATH

.. versionadded:: Oxygen

:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported

@ -949,6 +959,7 @@ def run(cmd,

stderr=subprocess.STDOUT,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,

@ -1004,6 +1015,7 @@ def shell(cmd,

use_vt=False,
bg=False,
password=None,
prepend_path=None,
**kwargs):
'''
Execute the passed command and return the output as a string.

@ -1079,6 +1091,11 @@ def shell(cmd,

variables and set only those provided in the 'env' argument to this
function.

:param str prepend_path: $PATH segment to prepend (trailing ':' not necessary)
to $PATH

.. versionadded:: Oxygen

:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported

@ -1157,6 +1174,7 @@ def shell(cmd,

shell=shell,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,

@ -1193,6 +1211,7 @@ def run_stdout(cmd,

saltenv='base',
use_vt=False,
password=None,
prepend_path=None,
**kwargs):
'''
Execute a command, and only return the standard out

@ -1265,6 +1284,11 @@ def run_stdout(cmd,

variables and set only those provided in the 'env' argument to this
function.

:param str prepend_path: $PATH segment to prepend (trailing ':' not necessary)
to $PATH

.. versionadded:: Oxygen

:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported

@ -1319,6 +1343,7 @@ def run_stdout(cmd,

python_shell=python_shell,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,

@ -1374,6 +1399,7 @@ def run_stderr(cmd,

saltenv='base',
use_vt=False,
password=None,
prepend_path=None,
**kwargs):
'''
Execute a command and only return the standard error

@ -1447,6 +1473,11 @@ def run_stderr(cmd,

variables and set only those provided in the 'env' argument to this
function.

:param str prepend_path: $PATH segment to prepend (trailing ':' not necessary)
to $PATH

.. versionadded:: Oxygen

:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported

@ -1501,6 +1532,7 @@ def run_stderr(cmd,

python_shell=python_shell,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,

@ -1558,6 +1590,7 @@ def run_all(cmd,

redirect_stderr=False,
password=None,
encoded_cmd=False,
prepend_path=None,
**kwargs):
'''
Execute the passed command and return a dict of return data

@ -1631,6 +1664,11 @@ def run_all(cmd,

variables and set only those provided in the 'env' argument to this
function.

:param str prepend_path: $PATH segment to prepend (trailing ':' not necessary)
to $PATH

.. versionadded:: Oxygen

:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported

@ -1709,6 +1747,7 @@ def run_all(cmd,

python_shell=python_shell,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
rstrip=rstrip,
umask=umask,

@ -2573,7 +2612,7 @@ def run_chroot(root,

- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}

clean_env:
clean_env:
Attempt to clean out all other shell environment variables and set
only those provided in the 'env' argument to this function.

@ -2773,8 +2812,8 @@ def shell_info(shell, list_modules=False):

'''
regex_shells = {
'bash': [r'version (\d\S*)', 'bash', '--version'],
'bash-test-error': [r'versioZ ([-\w.]+)', 'bash', '--version'], # used to test a error result
'bash-test-env': [r'(HOME=.*)', 'bash', '-c', 'declare'], # used to test a error result
'bash-test-error': [r'versioZ ([-\w.]+)', 'bash', '--version'], # used to test an error result
'bash-test-env': [r'(HOME=.*)', 'bash', '-c', 'declare'], # used to test an error result
'zsh': [r'^zsh (\d\S*)', 'zsh', '--version'],
'tcsh': [r'^tcsh (\d\S*)', 'tcsh', '--version'],
'cmd': [r'Version ([\d.]+)', 'cmd.exe', '/C', 'ver'],

@ -3467,6 +3506,7 @@ def run_bg(cmd,

ignore_retcode=False,
saltenv='base',
password=None,
prepend_path=None,
**kwargs):
r'''
.. versionadded: 2016.3.0

@ -3545,6 +3585,11 @@ def run_bg(cmd,

variables and set only those provided in the 'env' argument to this
function.

:param str prepend_path: $PATH segment to prepend (trailing ':' not necessary)
to $PATH

.. versionadded:: Oxygen

:param str template: If this setting is applied then the named templating
engine will be used to render the downloaded file. Currently jinja, mako,
and wempy are supported

@ -3613,6 +3658,7 @@ def run_bg(cmd,

cwd=cwd,
env=env,
clean_env=clean_env,
prepend_path=prepend_path,
template=template,
umask=umask,
log_callback=log_callback,
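The ``prepend_path`` plumbing added throughout this file reduces to one line of environment handling before the command is spawned: the segment is joined in front of the inherited ``PATH``, so binaries found there win the lookup. A short illustration with an example path:

.. code-block:: python

    # Not part of the commit; the path below is hypothetical.
    import os

    run_env = os.environ.copy()
    prepend_path = '/opt/tools/bin'
    run_env['PATH'] = ':'.join((prepend_path, run_env['PATH']))

    assert run_env['PATH'].startswith('/opt/tools/bin:')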
@ -1953,7 +1953,7 @@ def status_peers(consul_url):

:param consul_url: The Consul server URL.
:return: Retrieves the Raft peers for the
datacenter in which the the agent is running.
datacenter in which the agent is running.

CLI Example:

@ -48,7 +48,7 @@ __virtualname__ = 'pkgbuild'

def __virtual__():
'''
Confirm this module is on a Debian based system, and has required utilities
Confirm this module is on a Debian-based system, and has required utilities
'''
if __grains__.get('os_family', False) in ('Kali', 'Debian'):
missing_util = False

@ -726,7 +726,7 @@ def make_repo(repodir,

if times_looped > number_retries:
raise SaltInvocationError(
'Attemping to sign file {0} failed, timed out after {1} seconds'
'Attempting to sign file {0} failed, timed out after {1} seconds'
.format(abs_file, int(times_looped * interval))
)
time.sleep(interval)

@ -770,7 +770,7 @@ def make_repo(repodir,

if times_looped > number_retries:
raise SaltInvocationError(
'Attemping to reprepro includedsc for file {0} failed, timed out after {1} loops'.format(abs_file, times_looped)
'Attempting to reprepro includedsc for file {0} failed, timed out after {1} loops'.format(abs_file, times_looped)
)
time.sleep(interval)
@ -1,6 +1,6 @@

# -*- coding: utf-8 -*-
'''
The networking module for Debian based distros
The networking module for Debian-based distros

References:

@ -46,7 +46,7 @@ __virtualname__ = 'ip'

def __virtual__():
'''
Confine this module to Debian based distros
Confine this module to Debian-based distros
'''
if __grains__['os_family'] == 'Debian':
return __virtualname__

@ -1389,7 +1389,7 @@ def _parse_settings_eth(opts, iface_type, enabled, iface):

for opt in ['up_cmds', 'pre_up_cmds', 'post_up_cmds',
'down_cmds', 'pre_down_cmds', 'post_down_cmds']:
if opt in opts:
iface_data['inet'][opt] = opts[opt]
iface_data[def_addrfam][opt] = opts[opt]

for addrfam in ['inet', 'inet6']:
if 'addrfam' in iface_data[addrfam] and iface_data[addrfam]['addrfam'] == addrfam:

@ -1562,7 +1562,7 @@ def _read_temp_ifaces(iface, data):

return ''

ifcfg = template.render({'name': iface, 'data': data})
# Return as a array so the difflib works
# Return as an array so the difflib works
return [item + '\n' for item in ifcfg.split('\n')]

@ -1616,7 +1616,7 @@ def _write_file_ifaces(iface, data, **settings):

else:
fout.write(ifcfg)

# Return as a array so the difflib works
# Return as an array so the difflib works
return saved_ifcfg.split('\n')

@ -1646,7 +1646,7 @@ def _write_file_ppp_ifaces(iface, data):

with salt.utils.files.fopen(filename, 'w') as fout:
fout.write(ifcfg)

# Return as a array so the difflib works
# Return as an array so the difflib works
return filename
@ -686,7 +686,7 @@ def ps(path):

def up(path, service_names=None):
'''
Create and start containers defined in the the docker-compose.yml file
Create and start containers defined in the docker-compose.yml file
located in path, service_names is a python list, if omitted create and
start all containers

@ -915,8 +915,8 @@ def compare_container(first, second, ignore=None):

ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2}
else:
if item == 'Links':
val1 = _scrub_links(val1, first)
val2 = _scrub_links(val2, second)
val1 = sorted(_scrub_links(val1, first))
val2 = sorted(_scrub_links(val2, second))
if val1 != val2:
ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
# Check for optionally-present items that were in the second container

@ -938,8 +938,8 @@ def compare_container(first, second, ignore=None):

ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2}
else:
if item == 'Links':
val1 = _scrub_links(val1, first)
val2 = _scrub_links(val2, second)
val1 = sorted(_scrub_links(val1, first))
val2 = sorted(_scrub_links(val2, second))
if val1 != val2:
ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
return ret

@ -465,5 +465,5 @@ def server_pxe():

log.warning('failed to set boot order')
return False

log.warning('failed to to configure PXE boot')
log.warning('failed to configure PXE boot')
return False

@ -923,7 +923,7 @@ def server_pxe(host=None,

log.warning('failed to set boot order')
return False

log.warning('failed to to configure PXE boot')
log.warning('failed to configure PXE boot')
return False
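The switch to ``sorted()`` in ``compare_container`` avoids false diffs when Docker returns the same links in a different order. A tiny illustration, not taken from the module:

.. code-block:: python

    # Link lists with identical members but different ordering.
    val1 = ['/db:/web/db', '/cache:/web/cache']
    val2 = ['/cache:/web/cache', '/db:/web/db']

    assert val1 != val2                    # naive comparison flags a change
    assert sorted(val1) == sorted(val2)    # sorted comparison does not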
@ -45,7 +45,7 @@ def setval(key, val, false_unsets=False, permanent=False):

permanent
On Windows minions this will set the environment variable in the
registry so that it is always added as a environment variable when
registry so that it is always added as an environment variable when
applications open. If you want to set the variable to HKLM instead of
HKCU just pass in "HKLM" for this parameter. On all other minion types
this will be ignored. Note: This will only take affect on applications

@ -144,7 +144,7 @@ def setenv(environ, false_unsets=False, clear_all=False, update_minion=False, pe

permanent
On Windows minions this will set the environment variable in the
registry so that it is always added as a environment variable when
registry so that it is always added as an environment variable when
applications open. If you want to set the variable to HKLM instead of
HKCU just pass in "HKLM" for this parameter. On all other minion types
this will be ignored. Note: This will only take affect on applications
|
||||
contents
|
||||
File contents
|
||||
'''
|
||||
lsattr_cmd = salt.utils.path.which('lsattr')
|
||||
changes = {}
|
||||
if not source_sum:
|
||||
source_sum = {}
|
||||
@ -4764,13 +4765,14 @@ def check_file_meta(
|
||||
if mode is not None and mode != smode:
|
||||
changes['mode'] = mode
|
||||
|
||||
diff_attrs = _cmp_attrs(name, attrs)
|
||||
if (
|
||||
attrs is not None and
|
||||
diff_attrs[0] is not None or
|
||||
diff_attrs[1] is not None
|
||||
):
|
||||
changes['attrs'] = attrs
|
||||
if lsattr_cmd:
|
||||
diff_attrs = _cmp_attrs(name, attrs)
|
||||
if (
|
||||
attrs is not None and
|
||||
diff_attrs[0] is not None or
|
||||
diff_attrs[1] is not None
|
||||
):
|
||||
changes['attrs'] = attrs
|
||||
|
||||
return changes
|
||||
|
||||
|
@ -6,7 +6,7 @@ Install software from the FreeBSD ``ports(7)`` system
|
||||
|
||||
This module allows you to install ports using ``BATCH=yes`` to bypass
|
||||
configuration prompts. It is recommended to use the :mod:`ports state
|
||||
<salt.states.freebsdports>` to install ports, but it it also possible to use
|
||||
<salt.states.freebsdports>` to install ports, but it is also possible to use
|
||||
this module exclusively from the command line.
|
||||
|
||||
.. code-block:: bash
|
||||
|
128
salt/modules/gcp_addon.py
Normal file
128
salt/modules/gcp_addon.py
Normal file
@ -0,0 +1,128 @@
# -*- coding: utf-8 -*-
'''
A route is a rule that specifies how certain packets should be handled by the
virtual network. Routes are associated with virtual machine instances by tag,
and the set of routes for a particular VM is called its routing table.
For each packet leaving a virtual machine, the system searches that machine's
routing table for a single best matching route.

This module will create a route to send traffic destined to the Internet
through your gateway instance.

:codeauthor: :email:`Pratik Bandarkar <pratik.bandarkar@gmail.com>`
:maturity: new
:depends: google-api-python-client
:platform: Linux
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)

try:
import googleapiclient.discovery
import oauth2client.service_account
HAS_LIB = True
except ImportError:
HAS_LIB = False

__virtualname__ = 'gcp'


def __virtual__():
'''
Check for googleapiclient api
'''
if HAS_LIB is False:
log.info("Required google API's(googleapiclient, oauth2client) not found")
return (HAS_LIB, "Required google API's(googleapiclient, oauth2client) not found")


def _get_network(project_id, network_name, service):
'''
Fetch network selfLink from network name.
'''
return service.networks().get(project=project_id,
network=network_name).execute()


def _get_instance(project_id, instance_zone, name, service):
'''
Get instance details
'''
return service.instances().get(project=project_id,
zone=instance_zone,
instance=name).execute()


def route_create(credential_file=None,
project_id=None,
name=None,
dest_range=None,
next_hop_instance=None,
instance_zone=None,
tags=None,
network=None,
priority=None
):
'''
Create a route to send traffic destined to the Internet through your
gateway instance

credential_file : string
File location of application default credential. For more information,
refer: https://developers.google.com/identity/protocols/application-default-credentials
project_id : string
Project ID where instance and network resides.
name : string
name of the route to create
next_hop_instance : string
the name of an instance that should handle traffic matching this route.
instance_zone : string
zone where instance("next_hop_instance") resides
network : string
Specifies the network to which the route will be applied.
dest_range : string
The destination range of outgoing packets that the route will apply to.
tags : list
(optional) Identifies the set of instances that this route will apply to.
priority : int
(optional) Specifies the priority of this route relative to other routes.
default=1000

CLI Example:

salt 'salt-master.novalocal' gcp.route_create
credential_file=/root/secret_key.json
project_id=cp100-170315
name=derby-db-route1
next_hop_instance=instance-1
instance_zone=us-central1-a
network=default
dest_range=0.0.0.0/0
tags=['no-ip']
priority=700

In above example, the instances which are having tag "no-ip" will route the
packet to instance "instance-1"(if packet is intended to other network)
'''

credentials = oauth2client.service_account.ServiceAccountCredentials.\
from_json_keyfile_name(credential_file)
service = googleapiclient.discovery.build('compute', 'v1',
credentials=credentials)
routes = service.routes()

routes_config = {
'name': str(name),
'network': _get_network(project_id, str(network),
service=service)['selfLink'],
'destRange': str(dest_range),
'nextHopInstance': _get_instance(project_id, instance_zone,
next_hop_instance,
service=service)['selfLink'],
'tags': tags,
'priority': priority
}
route_create_request = routes.insert(project=project_id,
body=routes_config)
return route_create_request.execute()
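For context, a minimal sketch of driving the new ``gcp.route_create`` function through the Salt Python API; the minion id, credential path, and route values below are placeholders lifted from the docstring example, not fixtures shipped with the commit:

.. code-block:: python

    import salt.client

    local = salt.client.LocalClient()
    result = local.cmd(
        'salt-master.novalocal',          # hypothetical minion id
        'gcp.route_create',
        kwarg={
            'credential_file': '/root/secret_key.json',  # hypothetical path
            'project_id': 'cp100-170315',
            'name': 'derby-db-route1',
            'next_hop_instance': 'instance-1',
            'instance_zone': 'us-central1-a',
            'network': 'default',
            'dest_range': '0.0.0.0/0',
            'tags': ['no-ip'],
            'priority': 700,
        },
    )
    print(result)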
@ -306,7 +306,7 @@ def _bootstrap_yum(

root
The root of the image to install to. Will be created as a directory if
if does not exist. (e.x.: /root/arch)
it does not exist. (e.x.: /root/arch)

pkg_confs
The location of the conf files to copy into the image, to point yum

@ -374,7 +374,7 @@ def _bootstrap_deb(

root
The root of the image to install to. Will be created as a directory if
if does not exist. (e.x.: /root/wheezy)
it does not exist. (e.x.: /root/wheezy)

arch
Architecture of the target image. (e.x.: amd64)

@ -472,7 +472,7 @@ def _bootstrap_pacman(

root
The root of the image to install to. Will be created as a directory if
if does not exist. (e.x.: /root/arch)
it does not exist. (e.x.: /root/arch)

pkg_confs
The location of the conf files to copy into the image, to point pacman

@ -480,7 +480,7 @@ def _bootstrap_pacman(

img_format
The image format to be used. The ``dir`` type needs no special
treatment, but others need special treatement.
treatment, but others need special treatment.

pkgs
A list of packages to be installed on this image. For Arch Linux, this
@ -65,6 +65,9 @@ try:

import keystoneclient.exceptions
HAS_KEYSTONE = True
from keystoneclient.v3 import client as client3
from keystoneclient import discover
from keystoneauth1 import session
from keystoneauth1.identity import generic
# pylint: enable=import-error
except ImportError:
pass

@ -111,7 +114,8 @@ def _get_kwargs(profile=None, **connection_args):

insecure = get('insecure', False)
token = get('token')
endpoint = get('endpoint', 'http://127.0.0.1:35357/v2.0')

user_domain_name = get('user_domain_name', 'Default')
project_domain_name = get('project_domain_name', 'Default')
if token:
kwargs = {'token': token,
'endpoint': endpoint}

@ -120,7 +124,9 @@ def _get_kwargs(profile=None, **connection_args):

'password': password,
'tenant_name': tenant,
'tenant_id': tenant_id,
'auth_url': auth_url}
'auth_url': auth_url,
'user_domain_name': user_domain_name,
'project_domain_name': project_domain_name}
# 'insecure' keyword not supported by all v2.0 keystone clients
# this ensures it's only passed in when defined
if insecure:

@ -159,14 +165,23 @@ def auth(profile=None, **connection_args):

'''
kwargs = _get_kwargs(profile=profile, **connection_args)

if float(api_version(profile=profile, **connection_args).strip('v')) >= 3:
disc = discover.Discover(auth_url=kwargs['auth_url'])
v2_auth_url = disc.url_for('v2.0')
v3_auth_url = disc.url_for('v3.0')
if v3_auth_url:
global _OS_IDENTITY_API_VERSION
global _TENANTS
_OS_IDENTITY_API_VERSION = 3
_TENANTS = 'projects'
return client3.Client(**kwargs)
kwargs['auth_url'] = v3_auth_url
else:
return client.Client(**kwargs)
kwargs['auth_url'] = v2_auth_url
kwargs.pop('user_domain_name')
kwargs.pop('project_domain_name')
auth = generic.Password(**kwargs)
sess = session.Session(auth=auth)
ks_cl = disc.create_client(session=sess)
return ks_cl


def ec2_credentials_create(user_id=None, name=None,
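The v3 branch above leans on ``keystoneauth1`` sessions. A standalone sketch of that flow, using only the imports that appear in the hunk; the endpoint and credentials are hypothetical:

.. code-block:: python

    from keystoneauth1.identity import generic
    from keystoneauth1 import session
    from keystoneclient.v3 import client as client3

    auth = generic.Password(
        auth_url='http://127.0.0.1:5000/v3',   # hypothetical endpoint
        username='admin',
        password='secret',
        project_name='admin',
        user_domain_name='Default',
        project_domain_name='Default',
    )
    sess = session.Session(auth=auth)       # handles token fetch/refresh
    keystone = client3.Client(session=sess) # v3 client bound to the session
    print(keystone.projects.list())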
@ -155,6 +155,7 @@ def _config_logic(napalm_device,

loaded_result['diff'] = None
loaded_result['result'] = False
loaded_result['comment'] = _compare.get('comment')
__context__['retcode'] = 1
return loaded_result

_loaded_res = loaded_result.get('result', False)

@ -174,12 +175,15 @@ def _config_logic(napalm_device,

# make sure it notifies
# that something went wrong
_explicit_close(napalm_device)
__context__['retcode'] = 1
return loaded_result

loaded_result['comment'] += 'Configuration discarded.'
# loaded_result['result'] = False not necessary
# as the result can be true when test=True
_explicit_close(napalm_device)
if not loaded_result['result']:
__context__['retcode'] = 1
return loaded_result

if not test and commit_config:

@ -210,10 +214,13 @@ def _config_logic(napalm_device,

loaded_result['result'] = False
# notify if anything goes wrong
_explicit_close(napalm_device)
__context__['retcode'] = 1
return loaded_result
loaded_result['already_configured'] = True
loaded_result['comment'] = 'Already configured.'
_explicit_close(napalm_device)
if not loaded_result['result']:
__context__['retcode'] = 1
return loaded_result
@ -132,7 +132,7 @@ def version(*names, **kwargs):

return __salt__['pkg_resource.version'](*names, **kwargs)


def refresh_db(failhard=False):
def refresh_db(failhard=False, **kwargs):  # pylint: disable=unused-argument
'''
Updates the opkg database to latest packages based upon repositories

@ -514,7 +514,7 @@ def purge(name=None, pkgs=None, **kwargs):  # pylint: disable=unused-argument

return remove(name=name, pkgs=pkgs)


def upgrade(refresh=True):
def upgrade(refresh=True, **kwargs):  # pylint: disable=unused-argument
'''
Upgrades all packages via ``opkg upgrade``

@ -803,7 +803,7 @@ def list_pkgs(versions_as_list=False, **kwargs):

return ret


def list_upgrades(refresh=True):
def list_upgrades(refresh=True, **kwargs):  # pylint: disable=unused-argument
'''
List all available package upgrades.

@ -976,7 +976,7 @@ def info_installed(*names, **kwargs):

return ret


def upgrade_available(name):
def upgrade_available(name, **kwargs):  # pylint: disable=unused-argument
'''
Check whether or not an upgrade is available for a given package

@ -989,7 +989,7 @@ def upgrade_available(name):

return latest_version(name) != ''


def version_cmp(pkg1, pkg2, ignore_epoch=False):
def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs):  # pylint: disable=unused-argument
'''
Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem

@ -1038,7 +1038,7 @@ def version_cmp(pkg1, pkg2, ignore_epoch=False):

return None


def list_repos():
def list_repos(**kwargs):  # pylint: disable=unused-argument
'''
Lists all repos on /etc/opkg/*.conf

@ -1075,7 +1075,7 @@ def list_repos():

return repos


def get_repo(alias):
def get_repo(alias, **kwargs):  # pylint: disable=unused-argument
'''
Display a repo from the /etc/opkg/*.conf

@ -1146,7 +1146,7 @@ def _mod_repo_in_file(alias, repostr, filepath):

fhandle.writelines(output)


def del_repo(alias):
def del_repo(alias, **kwargs):  # pylint: disable=unused-argument
'''
Delete a repo from /etc/opkg/*.conf

@ -1260,7 +1260,7 @@ def mod_repo(alias, **kwargs):

refresh_db()


def file_list(*packages):
def file_list(*packages, **kwargs):  # pylint: disable=unused-argument
'''
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not

@ -1281,7 +1281,7 @@ def file_list(*packages):

return {'errors': output['errors'], 'files': files}


def file_dict(*packages):
def file_dict(*packages, **kwargs):  # pylint: disable=unused-argument
'''
List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of _every_ file on the system's

@ -1323,7 +1323,7 @@ def file_dict(*packages):

return {'errors': errors, 'packages': ret}


def owner(*paths):
def owner(*paths, **kwargs):  # pylint: disable=unused-argument
'''
Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.opkg.version`, if a single
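The repeated ``**kwargs`` additions above exist so these functions tolerate extra keyword arguments that generic ``pkg`` callers pass along to every provider (for example ``saltenv``). A toy illustration of the failure mode being avoided, not opkg code:

.. code-block:: python

    # Without **kwargs an unexpected keyword raises TypeError.
    def list_repos(**kwargs):  # pylint: disable=unused-argument
        '''Accept and ignore loader-supplied keyword arguments.'''
        return {'example_repo': 'http://example.com/packages'}  # placeholder data

    list_repos(saltenv='base')  # extra kwarg is silently ignored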
98
salt/modules/opsgenie.py
Normal file
98
salt/modules/opsgenie.py
Normal file
@ -0,0 +1,98 @@

# -*- coding: utf-8 -*-
'''
Module for sending data to OpsGenie

.. versionadded:: Oxygen

:configuration: This module can be used in Reactor System for
posting data to OpsGenie as a remote-execution function.

For example:

.. code-block:: yaml

opsgenie_event_poster:
local.opsgenie.post_data:
- tgt: 'salt-minion'
- kwarg:
name: event.reactor
api_key: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
reason: {{ data['data']['reason'] }}
action_type: Create
'''
# Import Python libs
from __future__ import absolute_import
import json
import logging
import requests

# Import Salt libs
import salt.exceptions

API_ENDPOINT = "https://api.opsgenie.com/v1/json/saltstack?apiKey="

log = logging.getLogger(__name__)


def post_data(api_key=None, name='OpsGenie Execution Module', reason=None,
action_type=None):
'''
Post data to OpsGenie. It's designed for Salt's Event Reactor.

After configuring the sls reaction file as shown above, you can trigger the
module with your designated tag (og-tag in this case).

CLI Example:

.. code-block:: bash

salt-call event.send 'og-tag' '{"reason" : "Overheating CPU!"}'

Required parameters:

api_key
It's the API Key you've copied while adding integration in OpsGenie.

reason
It will be used as alert's default message in OpsGenie.

action_type
OpsGenie supports the default values Create/Close for action_type. You
can customize this field with OpsGenie's custom actions for other
purposes like adding notes or acknowledging alerts.

Optional parameters:

name
It will be used as alert's alias. If you want to use the close
functionality you must provide name field for both states like in
this case.
'''
if api_key is None or reason is None or action_type is None:
raise salt.exceptions.SaltInvocationError(
'API Key or Reason or Action Type cannot be None.')

data = dict()
data['name'] = name
data['reason'] = reason
data['actionType'] = action_type
data['cpuModel'] = __grains__['cpu_model']
data['cpuArch'] = __grains__['cpuarch']
data['fqdn'] = __grains__['fqdn']
data['host'] = __grains__['host']
data['id'] = __grains__['id']
data['kernel'] = __grains__['kernel']
data['kernelRelease'] = __grains__['kernelrelease']
data['master'] = __grains__['master']
data['os'] = __grains__['os']
data['saltPath'] = __grains__['saltpath']
data['saltVersion'] = __grains__['saltversion']
data['username'] = __grains__['username']
data['uuid'] = __grains__['uuid']

log.debug('Below data will be posted:\n' + str(data))
log.debug('API Key:' + api_key + '\t API Endpoint:' + API_ENDPOINT)

response = requests.post(url=API_ENDPOINT + api_key, data=json.dumps(data),
headers={'Content-Type': 'application/json'})
return response.status_code, response.text
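A small sketch of the HTTP request ``post_data`` ends up issuing, useful when exercising the integration by hand; the API key and payload values are placeholders, and the grain-derived fields are omitted:

.. code-block:: python

    import json
    import requests

    API_ENDPOINT = 'https://api.opsgenie.com/v1/json/saltstack?apiKey='
    api_key = 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'  # hypothetical key

    payload = {
        'name': 'event.reactor',       # used as the alert alias
        'reason': 'Overheating CPU!',  # becomes the alert message
        'actionType': 'Create',        # or 'Close' / a custom action
    }
    response = requests.post(
        url=API_ENDPOINT + api_key,
        data=json.dumps(payload),
        headers={'Content-Type': 'application/json'},
    )
    print(response.status_code, response.text)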
@ -68,10 +68,10 @@ def _get_job_results(query=None):
|
||||
response = __proxy__['panos.call'](query)
|
||||
|
||||
# If the response contains a job, we will wait for the results
|
||||
if 'job' in response:
|
||||
jid = response['job']
|
||||
if 'result' in response and 'job' in response['result']:
|
||||
jid = response['result']['job']
|
||||
|
||||
while get_job(jid)['job']['status'] != 'FIN':
|
||||
while get_job(jid)['result']['job']['status'] != 'FIN':
|
||||
time.sleep(5)
|
||||
|
||||
return get_job(jid)
|
||||
@ -321,6 +321,56 @@ def fetch_license(auth_code=None):
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_address(address=None, vsys='1'):
|
||||
'''
|
||||
Get the candidate configuration for the specified get_address object. This will not return address objects that are
|
||||
marked as pre-defined objects.
|
||||
|
||||
address(str): The name of the address object.
|
||||
|
||||
vsys(str): The string representation of the VSYS ID.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_address myhost
|
||||
salt '*' panos.get_address myhost 3
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'get',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/'
|
||||
'address/entry[@name=\'{1}\']'.format(vsys, address)}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_address_group(addressgroup=None, vsys='1'):
|
||||
'''
|
||||
Get the candidate configuration for the specified address group. This will not return address groups that are
|
||||
marked as pre-defined objects.
|
||||
|
||||
addressgroup(str): The name of the address group.
|
||||
|
||||
vsys(str): The string representation of the VSYS ID.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_address_group foobar
|
||||
salt '*' panos.get_address_group foobar 3
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'get',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/'
|
||||
'address-group/entry[@name=\'{1}\']'.format(vsys, addressgroup)}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_admins_active():
|
||||
'''
|
||||
Show active administrators.
|
||||
@ -588,7 +638,7 @@ def get_hostname():
|
||||
'action': 'get',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/hostname'}
|
||||
|
||||
return __proxy__['panos.call'](query)['hostname']
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_interface_counters(name='all'):
|
||||
@ -930,9 +980,30 @@ def get_platform():
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_predefined_application(application=None):
|
||||
'''
|
||||
Get the configuration for the specified pre-defined application object. This will only return pre-defined
|
||||
application objects.
|
||||
|
||||
application(str): The name of the pre-defined application object.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_predefined_application saltstack
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'get',
|
||||
'xpath': '/config/predefined/application/entry[@name=\'{0}\']'.format(application)}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_security_rule(rulename=None, vsys='1'):
|
||||
'''
|
||||
Get the candidate configuration for the specified rule.
|
||||
Get the candidate configuration for the specified security rule.
|
||||
|
||||
rulename(str): The name of the security rule.
|
||||
|
||||
@ -954,6 +1025,56 @@ def get_security_rule(rulename=None, vsys='1'):
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_service(service=None, vsys='1'):
|
||||
'''
|
||||
Get the candidate configuration for the specified service object. This will not return services that are marked
|
||||
as pre-defined objects.
|
||||
|
||||
service(str): The name of the service object.
|
||||
|
||||
vsys(str): The string representation of the VSYS ID.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_service tcp-443
|
||||
salt '*' panos.get_service tcp-443 3
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'get',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/'
|
||||
'service/entry[@name=\'{1}\']'.format(vsys, service)}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_service_group(servicegroup=None, vsys='1'):
|
||||
'''
|
||||
Get the candidate configuration for the specified service group. This will not return service groups that are
|
||||
marked as pre-defined objects.
|
||||
|
||||
servicegroup(str): The name of the service group.
|
||||
|
||||
vsys(str): The string representation of the VSYS ID.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_service_group foobar
|
||||
salt '*' panos.get_service_group foobar 3
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'get',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/'
|
||||
'service-group/entry[@name=\'{1}\']'.format(vsys, servicegroup)}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_session_info():
|
||||
'''
|
||||
Show device session statistics.
|
||||
@ -1069,11 +1190,11 @@ def get_system_services():
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_system_state(filter=None):
|
||||
def get_system_state(mask=None):
|
||||
'''
|
||||
Show the system state variables.
|
||||
|
||||
filter
|
||||
mask
|
||||
Filters by a subtree or a wildcard.
|
||||
|
||||
CLI Example:
|
||||
@ -1081,13 +1202,13 @@ def get_system_state(filter=None):
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_system_state
|
||||
salt '*' panos.get_system_state filter=cfg.ha.config.enabled
|
||||
salt '*' panos.get_system_state filter=cfg.ha.*
|
||||
salt '*' panos.get_system_state mask=cfg.ha.config.enabled
|
||||
salt '*' panos.get_system_state mask=cfg.ha.*
|
||||
|
||||
'''
|
||||
if filter:
|
||||
if mask:
|
||||
query = {'type': 'op',
|
||||
'cmd': '<show><system><state><filter>{0}</filter></state></system></show>'.format(filter)}
|
||||
'cmd': '<show><system><state><filter>{0}</filter></state></system></show>'.format(mask)}
|
||||
else:
|
||||
query = {'type': 'op', 'cmd': '<show><system><state></state></system></show>'}
|
||||
|
||||
@ -1097,6 +1218,7 @@ def get_system_state(filter=None):
|
||||
def get_uncommitted_changes():
|
||||
'''
|
||||
Retrieve a list of all uncommitted changes on the device.
|
||||
Requires PANOS version 8.0.0 or greater.
|
||||
|
||||
CLI Example:
|
||||
|
||||
@ -1105,6 +1227,10 @@ def get_uncommitted_changes():
|
||||
salt '*' panos.get_uncommitted_changes
|
||||
|
||||
'''
|
||||
_required_version = '8.0.0'
|
||||
if not __proxy__['panos.is_required_version'](_required_version):
|
||||
return False, 'The panos device requires version {0} or greater for this command.'.format(_required_version)
|
||||
|
||||
query = {'type': 'op',
|
||||
'cmd': '<show><config><list><changes></changes></list></config></show>'}
|
||||
|
||||
@ -1145,6 +1271,72 @@ def get_vlans():
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_xpath(xpath=''):
|
||||
'''
|
||||
Retrieve a specified xpath from the candidate configuration.
|
||||
|
||||
xpath(str): The specified xpath in the candidate configuration.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_xpath /config/shared/service
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'get',
|
||||
'xpath': xpath}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_zone(zone='', vsys='1'):
|
||||
'''
|
||||
Get the candidate configuration for the specified zone.
|
||||
|
||||
zone(str): The name of the zone.
|
||||
|
||||
vsys(str): The string representation of the VSYS ID.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_zone trust
|
||||
salt '*' panos.get_zone trust 2
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'get',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/'
|
||||
'zone/entry[@name=\'{1}\']'.format(vsys, zone)}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def get_zones(vsys='1'):
|
||||
'''
|
||||
Get all the zones in the candidate configuration.
|
||||
|
||||
vsys(str): The string representation of the VSYS ID.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' panos.get_zones
|
||||
salt '*' panos.get_zones 2
|
||||
|
||||
'''
|
||||
query = {'type': 'config',
|
||||
'action': 'get',
|
||||
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/'
|
||||
'zone'.format(vsys)}
|
||||
|
||||
return __proxy__['panos.call'](query)
|
||||
|
||||
|
||||
def install_antivirus(version=None, latest=False, synch=False, skip_commit=False,):
|
||||
'''
|
||||
Install anti-virus packages.
|
||||
|
@ -93,7 +93,7 @@ def _validate_partition_boundary(boundary):
|
||||
'''
|
||||
try:
|
||||
for unit in VALID_UNITS:
|
||||
if boundary.endswith(unit):
|
||||
if str(boundary).endswith(unit):
|
||||
return
|
||||
int(boundary)
|
||||
except Exception:
|
||||
|
@ -9,7 +9,7 @@ Module to provide redis functionality to Salt
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
redis.host: 'localhost'
|
||||
redis.host: 'salt'
|
||||
redis.port: 6379
|
||||
redis.db: 0
|
||||
redis.password: None
|
||||
|
@ -2,6 +2,8 @@
|
||||
'''
|
||||
Module for controlling the LED matrix or reading environment data on the SenseHat of a Raspberry Pi.
|
||||
|
||||
.. versionadded:: 2017.7.0
|
||||
|
||||
:maintainer: Benedikt Werner <1benediktwerner@gmail.com>, Joachim Werner <joe@suse.com>
|
||||
:maturity: new
|
||||
:depends: sense_hat Python module
|
||||
|
@ -24,6 +24,7 @@ import salt.utils.decorators.path
|
||||
import salt.utils.files
|
||||
import salt.utils.path
|
||||
import salt.utils.platform
|
||||
import salt.utils.versions
|
||||
from salt.exceptions import (
|
||||
SaltInvocationError,
|
||||
CommandExecutionError,
|
||||
@ -794,6 +795,22 @@ def set_auth_key(
|
||||
return 'new'
|
||||
|
||||
|
||||
def _get_matched_host_line_numbers(lines, enc):
|
||||
'''
|
||||
Helper function which parses ssh-keygen -F function output and yield line
|
||||
number of known_hosts entries with encryption key type matching enc,
|
||||
one by one.
|
||||
'''
|
||||
enc = enc if enc else "rsa"
|
||||
for i, line in enumerate(lines):
|
||||
if i % 2 == 0:
|
||||
line_no = int(line.strip().split()[-1])
|
||||
line_enc = lines[i + 1].strip().split()[-2]
|
||||
if line_enc != enc:
|
||||
continue
|
||||
yield line_no
|
||||
|
||||
|
||||
def _parse_openssh_output(lines, fingerprint_hash_type=None):
|
||||
'''
|
||||
Helper function which parses ssh-keygen -F and ssh-keyscan function output
|
||||
@ -830,12 +847,42 @@ def get_known_host(user,
|
||||
Return information about known host from the configfile, if any.
|
||||
If there is no such key, return None.
|
||||
|
||||
.. deprecated:: Oxygen
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' ssh.get_known_host <user> <hostname>
|
||||
'''
|
||||
salt.utils.versions.warn_until(
|
||||
'Neon',
|
||||
'\'get_known_host\' has been deprecated in favour of '
|
||||
'\'get_known_host_entries\'. \'get_known_host\' will be '
|
||||
'removed in Salt Neon.'
|
||||
)
|
||||
known_hosts = get_known_host_entries(user, hostname, config, port, fingerprint_hash_type)
|
||||
return known_hosts[0] if known_hosts else None
|
||||
|
||||
|
||||
@salt.utils.decorators.path.which('ssh-keygen')
|
||||
def get_known_host_entries(user,
|
||||
hostname,
|
||||
config=None,
|
||||
port=None,
|
||||
fingerprint_hash_type=None):
|
||||
'''
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Return information about known host entries from the configfile, if any.
|
||||
If there are no entries for a matching hostname, return None.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' ssh.get_known_host_entries <user> <hostname>
|
||||
'''
|
||||
full = _get_known_hosts_file(config=config, user=user)
|
||||
|
||||
if isinstance(full, dict):
|
||||
@ -846,11 +893,11 @@ def get_known_host(user,
|
||||
lines = __salt__['cmd.run'](cmd,
|
||||
ignore_retcode=True,
|
||||
python_shell=False).splitlines()
|
||||
known_hosts = list(
|
||||
known_host_entries = list(
|
||||
_parse_openssh_output(lines,
|
||||
fingerprint_hash_type=fingerprint_hash_type)
|
||||
)
|
||||
return known_hosts[0] if known_hosts else None
|
||||
return known_host_entries if known_host_entries else None
|
||||
|
||||
|
||||
@salt.utils.decorators.path.which('ssh-keyscan')
|
||||
@ -863,6 +910,8 @@ def recv_known_host(hostname,
|
||||
'''
|
||||
Retrieve information about host public key from remote server
|
||||
|
||||
.. deprecated:: Oxygen
|
||||
|
||||
hostname
|
||||
The name of the remote host (e.g. "github.com")
|
||||
|
||||
@ -871,9 +920,8 @@ def recv_known_host(hostname,
|
||||
or ssh-dss
|
||||
|
||||
port
|
||||
optional parameter, denoting the port of the remote host, which will be
|
||||
used in case, if the public key will be requested from it. By default
|
||||
the port 22 is used.
|
||||
Optional parameter, denoting the port of the remote host on which an
|
||||
SSH daemon is running. By default the port 22 is used.
|
||||
|
||||
hash_known_hosts : True
|
||||
Hash all hostnames and addresses in the known hosts file.
|
||||
@ -887,8 +935,8 @@ def recv_known_host(hostname,
|
||||
.. versionadded:: 2016.3.0
|
||||
|
||||
fingerprint_hash_type
|
||||
The public key fingerprint hash type that the public key fingerprint
|
||||
was originally hashed with. This defaults to ``sha256`` if not specified.
|
||||
The fingerprint hash type that the public key fingerprints were
|
||||
originally hashed with. This defaults to ``sha256`` if not specified.
|
||||
|
||||
.. versionadded:: 2016.11.4
|
||||
.. versionchanged:: 2017.7.0: default changed from ``md5`` to ``sha256``
|
||||
@ -899,6 +947,61 @@ def recv_known_host(hostname,
|
||||
|
||||
salt '*' ssh.recv_known_host <hostname> enc=<enc> port=<port>
|
||||
'''
|
||||
salt.utils.versions.warn_until(
|
||||
'Neon',
|
||||
'\'recv_known_host\' has been deprecated in favour of '
|
||||
'\'recv_known_host_entries\'. \'recv_known_host\' will be '
|
||||
'removed in Salt Neon.'
|
||||
)
|
||||
known_hosts = recv_known_host_entries(hostname, enc, port, hash_known_hosts, timeout, fingerprint_hash_type)
|
||||
return known_hosts[0] if known_hosts else None
|
||||
|
||||
|
||||
@salt.utils.decorators.path.which('ssh-keyscan')
|
||||
def recv_known_host_entries(hostname,
|
||||
enc=None,
|
||||
port=None,
|
||||
hash_known_hosts=True,
|
||||
timeout=5,
|
||||
fingerprint_hash_type=None):
|
||||
'''
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
Retrieve information about host public keys from remote server
|
||||
|
||||
hostname
|
||||
The name of the remote host (e.g. "github.com")
|
||||
|
||||
enc
|
||||
Defines what type of key is being used, can be ed25519, ecdsa ssh-rsa
|
||||
or ssh-dss
|
||||
|
||||
port
|
||||
Optional parameter, denoting the port of the remote host on which an
|
||||
SSH daemon is running. By default the port 22 is used.
|
||||
|
||||
hash_known_hosts : True
|
||||
Hash all hostnames and addresses in the known hosts file.
|
||||
|
||||
timeout : int
|
||||
Set the timeout for connection attempts. If ``timeout`` seconds have
|
||||
elapsed since a connection was initiated to a host or since the last
|
||||
time anything was read from that host, then the connection is closed
|
||||
and the host in question considered unavailable. Default is 5 seconds.
|
||||
|
||||
fingerprint_hash_type
|
||||
The fingerprint hash type that the public key fingerprints were
|
||||
originally hashed with. This defaults to ``sha256`` if not specified.
|
||||
|
||||
.. versionadded:: 2016.11.4
|
||||
.. versionchanged:: 2017.7.0: default changed from ``md5`` to ``sha256``
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' ssh.recv_known_host_entries <hostname> enc=<enc> port=<port>
|
||||
'''
|
||||
# The following list of OSes have an old version of openssh-clients
|
||||
# and thus require the '-t' option for ssh-keyscan
|
||||
need_dash_t = ('CentOS-5',)
|
||||
@ -919,9 +1022,9 @@ def recv_known_host(hostname,
|
||||
while not lines and attempts > 0:
|
||||
attempts = attempts - 1
|
||||
lines = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
|
||||
known_hosts = list(_parse_openssh_output(lines,
|
||||
known_host_entries = list(_parse_openssh_output(lines,
|
||||
fingerprint_hash_type=fingerprint_hash_type))
|
||||
return known_hosts[0] if known_hosts else None
|
||||
return known_host_entries if known_host_entries else None
|
||||
|
||||
|
||||
def check_known_host(user=None, hostname=None, key=None, fingerprint=None,
|
||||
@ -952,18 +1055,20 @@ def check_known_host(user=None, hostname=None, key=None, fingerprint=None,
|
||||
else:
|
||||
config = config or '.ssh/known_hosts'
|
||||
|
||||
known_host = get_known_host(user,
|
||||
known_host_entries = get_known_host_entries(user,
|
||||
hostname,
|
||||
config=config,
|
||||
port=port,
|
||||
fingerprint_hash_type=fingerprint_hash_type)
|
||||
known_keys = [h['key'] for h in known_host_entries] if known_host_entries else []
|
||||
known_fingerprints = [h['fingerprint'] for h in known_host_entries] if known_host_entries else []
|
||||
|
||||
if not known_host or 'fingerprint' not in known_host:
|
||||
if not known_host_entries:
|
||||
return 'add'
|
||||
if key:
|
||||
return 'exists' if key == known_host['key'] else 'update'
|
||||
return 'exists' if key in known_keys else 'update'
|
||||
elif fingerprint:
|
||||
return ('exists' if fingerprint == known_host['fingerprint']
|
||||
return ('exists' if fingerprint in known_fingerprints
|
||||
else 'update')
|
||||
else:
|
||||
return 'exists'
|
||||
@ -1083,70 +1188,99 @@ def set_known_host(user=None,
|
||||
|
||||
update_required = False
|
||||
check_required = False
|
||||
stored_host = get_known_host(user,
|
||||
stored_host_entries = get_known_host_entries(user,
|
||||
hostname,
|
||||
config=config,
|
||||
port=port,
|
||||
fingerprint_hash_type=fingerprint_hash_type)
|
||||
stored_keys = [h['key'] for h in stored_host_entries] if stored_host_entries else []
|
||||
stored_fingerprints = [h['fingerprint'] for h in stored_host_entries] if stored_host_entries else []
|
||||
|
||||
if not stored_host:
|
||||
if not stored_host_entries:
|
||||
update_required = True
|
||||
elif fingerprint and fingerprint != stored_host['fingerprint']:
|
||||
elif fingerprint and fingerprint not in stored_fingerprints:
|
||||
update_required = True
|
||||
elif key and key != stored_host['key']:
|
||||
elif key and key not in stored_keys:
|
||||
update_required = True
|
||||
elif key != stored_host['key']:
|
||||
elif key is None and fingerprint is None:
|
||||
check_required = True
|
||||
|
||||
if not update_required and not check_required:
|
||||
return {'status': 'exists', 'key': stored_host['key']}
|
||||
return {'status': 'exists', 'keys': stored_keys}
|
||||
|
||||
if not key:
|
||||
remote_host = recv_known_host(hostname,
|
||||
remote_host_entries = recv_known_host_entries(hostname,
|
||||
enc=enc,
|
||||
port=port,
|
||||
hash_known_hosts=hash_known_hosts,
|
||||
timeout=timeout,
|
||||
fingerprint_hash_type=fingerprint_hash_type)
|
||||
if not remote_host:
|
||||
known_keys = [h['key'] for h in remote_host_entries] if remote_host_entries else []
|
||||
known_fingerprints = [h['fingerprint'] for h in remote_host_entries] if remote_host_entries else []
|
||||
if not remote_host_entries:
|
||||
return {'status': 'error',
|
||||
'error': 'Unable to receive remote host key'}
|
||||
'error': 'Unable to receive remote host keys'}
|
||||
|
||||
if fingerprint and fingerprint != remote_host['fingerprint']:
|
||||
if fingerprint and fingerprint not in known_fingerprints:
|
||||
return {'status': 'error',
|
||||
'error': ('Remote host public key found but its fingerprint '
|
||||
'does not match one you have provided')}
|
||||
'error': ('Remote host public keys found but none of their'
|
||||
'fingerprints match the one you have provided')}
|
||||
|
||||
if check_required:
|
||||
if remote_host['key'] == stored_host['key']:
|
||||
return {'status': 'exists', 'key': stored_host['key']}
|
||||
for key in known_keys:
|
||||
if key in stored_keys:
|
||||
return {'status': 'exists', 'keys': stored_keys}
|
||||
|
||||
full = _get_known_hosts_file(config=config, user=user)
|
||||
|
||||
if isinstance(full, dict):
|
||||
return full
|
||||
|
||||
# Get information about the known_hosts file before rm_known_host()
|
||||
# because it will create a new file with mode 0600
|
||||
orig_known_hosts_st = None
|
||||
try:
|
||||
orig_known_hosts_st = os.stat(full)
|
||||
except OSError as exc:
|
||||
if exc.args[1] == 'No such file or directory':
|
||||
log.debug('{0} doesnt exist. Nothing to preserve.'.format(full))
|
||||
if os.path.isfile(full):
|
||||
origmode = os.stat(full).st_mode
|
||||
|
||||
# remove everything we had in the config so far
|
||||
rm_known_host(user, hostname, config=config)
|
||||
# remove existing known_host entry with matching hostname and encryption key type
|
||||
# use ssh-keygen -F to find the specific line(s) for this host + enc combo
|
||||
ssh_hostname = _hostname_and_port_to_ssh_hostname(hostname, port)
|
||||
cmd = ['ssh-keygen', '-F', ssh_hostname, '-f', full]
|
||||
lines = __salt__['cmd.run'](cmd,
|
||||
ignore_retcode=True,
|
||||
python_shell=False).splitlines()
|
||||
remove_lines = list(
|
||||
_get_matched_host_line_numbers(lines, enc)
|
||||
)
|
||||
|
||||
if remove_lines:
|
||||
try:
|
||||
with salt.utils.files.fopen(full, 'r+') as ofile:
|
||||
known_hosts_lines = list(ofile)
|
||||
# Delete from last line to first to avoid invalidating earlier indexes
|
||||
for line_no in sorted(remove_lines, reverse=True):
|
||||
del known_hosts_lines[line_no - 1]
|
||||
# Write out changed known_hosts file
|
||||
ofile.seek(0)
|
||||
ofile.truncate()
|
||||
for line in known_hosts_lines:
|
||||
ofile.write(line)
|
||||
except (IOError, OSError) as exception:
|
||||
raise CommandExecutionError(
|
||||
"Couldn't remove old entry(ies) from known hosts file: '{0}'".format(exception)
|
||||
)
|
||||
else:
|
||||
origmode = None
|
||||
|
||||
# set up new value
|
||||
if key:
|
||||
remote_host = {'hostname': hostname, 'enc': enc, 'key': key}
|
||||
remote_host_entries = [{'hostname': hostname, 'enc': enc, 'key': key}]
|
||||
|
||||
if hash_known_hosts or port in [DEFAULT_SSH_PORT, None] or ':' in remote_host['hostname']:
|
||||
line = '{hostname} {enc} {key}\n'.format(**remote_host)
|
||||
else:
|
||||
remote_host['port'] = port
|
||||
line = '[{hostname}]:{port} {enc} {key}\n'.format(**remote_host)
|
||||
lines = []
|
||||
for entry in remote_host_entries:
|
||||
if hash_known_hosts or port in [DEFAULT_SSH_PORT, None] or ':' in entry['hostname']:
|
||||
line = '{hostname} {enc} {key}\n'.format(**entry)
|
||||
else:
|
||||
entry['port'] = port
|
||||
line = '[{hostname}]:{port} {enc} {key}\n'.format(**entry)
|
||||
lines.append(line)
|
||||
|
||||
# ensure ~/.ssh exists
|
||||
ssh_dir = os.path.dirname(full)
|
||||
@ -1172,27 +1306,25 @@ def set_known_host(user=None,
|
||||
# write line to known_hosts file
|
||||
try:
|
||||
with salt.utils.files.fopen(full, 'a') as ofile:
|
||||
ofile.write(line)
|
||||
for line in lines:
|
||||
ofile.write(line)
|
||||
except (IOError, OSError) as exception:
|
||||
raise CommandExecutionError(
|
||||
"Couldn't append to known hosts file: '{0}'".format(exception)
|
||||
)
|
||||
|
||||
if os.geteuid() == 0:
|
||||
if user:
|
||||
os.chown(full, uinfo['uid'], uinfo['gid'])
|
||||
elif orig_known_hosts_st:
|
||||
os.chown(full, orig_known_hosts_st.st_uid, orig_known_hosts_st.st_gid)
|
||||
|
||||
if orig_known_hosts_st:
|
||||
os.chmod(full, orig_known_hosts_st.st_mode)
|
||||
if os.geteuid() == 0 and user:
|
||||
os.chown(full, uinfo['uid'], uinfo['gid'])
|
||||
if origmode:
|
||||
os.chmod(full, origmode)
|
||||
else:
|
||||
os.chmod(full, 0o600)
|
||||
|
||||
if key and hash_known_hosts:
|
||||
cmd_result = __salt__['ssh.hash_known_hosts'](user=user, config=full)
|
||||
|
||||
return {'status': 'updated', 'old': stored_host, 'new': remote_host}
|
||||
rval = {'status': 'updated', 'old': stored_host_entries, 'new': remote_host_entries}
|
||||
return rval
|
||||
|
||||
|
||||
def user_keys(user=None, pubfile=None, prvfile=None):
|
||||
|
@ -894,8 +894,8 @@ def highstate(test=None, queue=False, **kwargs):

finally:
st_.pop_active()

if __salt__['config.option']('state_data', '') == 'terse' or \
kwargs.get('terse'):
if isinstance(ret, dict) and (__salt__['config.option']('state_data', '') == 'terse' or
kwargs.get('terse')):
ret = _filter_running(ret)

serial = salt.payload.Serial(__opts__)

@ -923,8 +923,9 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):

salt '*' state.apply test pillar='{"foo": "bar"}'

.. note::
Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source.
Values passed this way will override existing Pillar values set via
``pillar_roots`` or an external Pillar source. Pillar values that
are not included in the kwarg will not be overwritten.

.. versionchanged:: 2016.3.0
GPG-encrypted CLI Pillar data is now supported via the GPG

@ -1379,6 +1380,20 @@ def sls_id(id_, mods, test=None, queue=False, **kwargs):

:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.

pillar
Custom Pillar values, passed as a dictionary of key-value pairs

.. code-block:: bash

salt '*' state.sls_id my_state my_module pillar='{"foo": "bar"}'

.. note::
Values passed this way will override existing Pillar values set via
``pillar_roots`` or an external Pillar source. Pillar values that
are not included in the kwarg will not be overwritten.

.. versionadded:: Oxygen

CLI Example:

.. code-block:: bash

@ -1399,12 +1414,26 @@ def sls_id(id_, mods, test=None, queue=False, **kwargs):

if opts['environment'] is None:
opts['environment'] = 'base'

pillar_override = kwargs.get('pillar')
pillar_enc = kwargs.get('pillar_enc')
if pillar_enc is None \
and pillar_override is not None \
and not isinstance(pillar_override, dict):
raise SaltInvocationError(
'Pillar data must be formatted as a dictionary, unless pillar_enc '
'is specified.'
)

try:
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts))

if not _check_pillar(kwargs, st_.opts['pillar']):
@ -596,7 +596,7 @@ def set_computer_name(hostname):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' system.set_conputer_name master.saltstack.com
|
||||
salt '*' system.set_computer_name master.saltstack.com
|
||||
'''
|
||||
return __salt__['network.mod_hostname'](hostname)
|
||||
|
||||
|
@ -55,7 +55,6 @@ def __virtual__():
|
||||
'''
|
||||
run Vagrant commands if possible
|
||||
'''
|
||||
# noinspection PyUnresolvedReferences
|
||||
if salt.utils.path.which('vagrant') is None:
|
||||
return False, 'The vagrant module could not be loaded: vagrant command not found'
|
||||
return __virtualname__
|
||||
@ -298,6 +297,11 @@ def vm_state(name='', cwd=None):
|
||||
'provider': _, # the Vagrant VM provider
|
||||
'name': _} # salt_id name
|
||||
|
||||
Known bug: if there are multiple machines in your Vagrantfile, and you request
|
||||
the status of the ``primary`` machine, which you defined by leaving the ``machine``
|
||||
parameter blank, then you may receive the status of all of them.
Please specify the actual machine name for each VM if there is more than one.
|
||||
|
||||
'''
|
||||
|
||||
if name:
|
||||
@ -321,7 +325,7 @@ def vm_state(name='', cwd=None):
|
||||
datum = {'machine': tokens[0],
|
||||
'state': ' '.join(tokens[1:-1]),
|
||||
'provider': tokens[-1].lstrip('(').rstrip(')'),
|
||||
'name': name or get_machine_id(tokens[0], cwd)
|
||||
'name': get_machine_id(tokens[0], cwd)
|
||||
}
|
||||
info.append(datum)
|
||||
except IndexError:
|
||||
@ -365,7 +369,7 @@ def init(name, # Salt_id for created VM
|
||||
# passed-in keyword arguments overwrite vm dictionary values
|
||||
vm_['cwd'] = cwd or vm_.get('cwd')
|
||||
if not vm_['cwd']:
|
||||
raise SaltInvocationError('Path to Vagrantfile must be defined by \'cwd\' argument')
|
||||
raise SaltInvocationError('Path to Vagrantfile must be defined by "cwd" argument')
|
||||
vm_['machine'] = machine or vm_.get('machine', machine)
|
||||
vm_['runas'] = runas or vm_.get('runas', runas)
|
||||
vm_['vagrant_provider'] = vagrant_provider or vm_.get('vagrant_provider', '')
|
||||
@ -423,7 +427,7 @@ def shutdown(name):
|
||||
'''
|
||||
Send a soft shutdown (vagrant halt) signal to the named vm.
|
||||
|
||||
This does the same thing as vagrant.stop. Other VM control
|
||||
This does the same thing as vagrant.stop. Other-VM control
|
||||
modules use "stop" and "shutdown" to differentiate between
|
||||
hard and soft shutdowns.
|
||||
|
||||
@ -476,7 +480,7 @@ def pause(name):
|
||||
return ret == 0
|
||||
|
||||
|
||||
def reboot(name):
|
||||
def reboot(name, provision=False):
|
||||
'''
|
||||
Reboot a VM. (vagrant reload)
|
||||
|
||||
@ -484,12 +488,16 @@ def reboot(name):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt <host> vagrant.reboot <salt_id>
|
||||
salt <host> vagrant.reboot <salt_id> provision=True
|
||||
|
||||
:param name: The salt_id name you will use to control this VM
|
||||
:param provision: (False) also re-run the Vagrant provisioning scripts.
|
||||
'''
|
||||
vm_ = get_vm_info(name)
|
||||
machine = vm_['machine']
|
||||
prov = '--provision' if provision else ''
|
||||
|
||||
cmd = 'vagrant reload {}'.format(machine)
|
||||
cmd = 'vagrant reload {} {}'.format(machine, prov)
|
||||
ret = __salt__['cmd.retcode'](cmd,
|
||||
runas=vm_.get('runas'),
|
||||
cwd=vm_.get('cwd'))
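
A quick illustration, with invented values, of how the reload command above is assembled: the ``--provision`` flag is only appended when ``provision=True`` (the string is stripped here purely for display; the module passes it to ``cmd.retcode`` as built).

.. code-block:: python

    machine = 'web1'    # invented machine name
    for provision in (False, True):
        prov = '--provision' if provision else ''
        print('vagrant reload {} {}'.format(machine, prov).strip())
    # vagrant reload web1
    # vagrant reload web1 --provision
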
|
@ -21,6 +21,7 @@ Functions to interact with Hashicorp Vault.
|
||||
|
||||
vault:
|
||||
url: https://vault.service.domain:8200
|
||||
verify: /etc/ssl/certs/ca-certificates.crt
|
||||
auth:
|
||||
method: token
|
||||
token: 11111111-2222-3333-4444-555555555555
|
||||
@ -32,6 +33,12 @@ Functions to interact with Hashicorp Vault.
|
||||
url
|
||||
Url to your Vault installation. Required.
|
||||
|
||||
verify
|
||||
For details please see
|
||||
http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
auth
|
||||
Currently only token auth is supported. The token must be able to create
|
||||
tokens with the policies that should be assigned to minions. Required.
|
||||
|
@ -656,7 +656,7 @@ def _nic_profile(profile_name, hypervisor, **kwargs):
|
||||
if key not in attributes or not attributes[key]:
|
||||
attributes[key] = value
|
||||
|
||||
def _assign_mac(attributes):
|
||||
def _assign_mac(attributes, hypervisor):
|
||||
dmac = kwargs.get('dmac', None)
|
||||
if dmac is not None:
|
||||
log.debug('DMAC address is {0}'.format(dmac))
|
||||
@ -666,11 +666,15 @@ def _nic_profile(profile_name, hypervisor, **kwargs):
|
||||
msg = 'Malformed MAC address: {0}'.format(dmac)
|
||||
raise CommandExecutionError(msg)
|
||||
else:
|
||||
attributes['mac'] = salt.utils.network.gen_mac()
|
||||
if hypervisor in ['qemu', 'kvm']:
|
||||
attributes['mac'] = salt.utils.network.gen_mac(
|
||||
prefix='52:54:00')
|
||||
else:
|
||||
attributes['mac'] = salt.utils.network.gen_mac()
|
||||
|
||||
for interface in interfaces:
|
||||
_normalize_net_types(interface)
|
||||
_assign_mac(interface)
|
||||
_assign_mac(interface, hypervisor)
if hypervisor in overlays:
_apply_default_overlay(interface)
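
A hedged sketch of the behaviour introduced above: qemu/kvm guests receive MAC addresses under the libvirt-reserved ``52:54:00`` prefix, while other hypervisors keep a fully generated address. ``gen_mac`` below is a simplified stand-in for ``salt.utils.network.gen_mac``.

.. code-block:: python

    import random

    def gen_mac(prefix='AC:DE:48'):
        # simplified stand-in for salt.utils.network.gen_mac
        return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(
            prefix,
            random.randint(0, 0xff),
            random.randint(0, 0xff),
            random.randint(0, 0xff))

    def pick_mac(hypervisor):
        if hypervisor in ['qemu', 'kvm']:
            return gen_mac(prefix='52:54:00')
        return gen_mac()
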
|
@ -36,6 +36,60 @@ def __virtual__():
|
||||
return (False, "Module win_groupadd: module only works on Windows systems")
|
||||
|
||||
|
||||
def _get_computer_object():
|
||||
'''
|
||||
A helper function to get the object for the local machine
|
||||
|
||||
Returns:
|
||||
object: Returns the computer object for the local machine
|
||||
'''
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
return nt.GetObject('', 'WinNT://.,computer')
|
||||
|
||||
|
||||
def _get_group_object(name):
|
||||
'''
|
||||
A helper function to get a specified group object
|
||||
|
||||
Args:
|
||||
|
||||
name (str): The name of the object
|
||||
|
||||
Returns:
|
||||
object: The specified group object
|
||||
'''
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
return nt.GetObject('', 'WinNT://./' + name + ',group')
|
||||
|
||||
|
||||
def _get_all_groups():
|
||||
'''
|
||||
A helper function that gets a list of group objects for all groups on the
|
||||
machine
|
||||
|
||||
Returns:
|
||||
iter: A list of objects for all groups on the machine
|
||||
'''
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
results = nt.GetObject('', 'WinNT://.')
|
||||
results.Filter = ['group']
|
||||
return results
|
||||
|
||||
|
||||
def _get_username(member):
|
||||
'''
|
||||
Resolve the username from the member object returned from a group query
|
||||
|
||||
Returns:
|
||||
str: The username converted to domain\\username format
|
||||
'''
|
||||
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\').encode('ascii', 'backslashreplace')
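
The conversion ``_get_username`` performs, shown in isolation on a hypothetical ADSPath value (the trailing byte-encoding step is omitted here):

.. code-block:: python

    ads_path = 'WinNT://MYDOMAIN/someuser'    # hypothetical member.ADSPath
    username = ads_path.replace('WinNT://', '').replace('/', '\\')
    assert username == 'MYDOMAIN\\someuser'
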
||||
def add(name, **kwargs):
|
||||
'''
|
||||
Add the specified group
|
||||
@ -60,10 +114,8 @@ def add(name, **kwargs):
|
||||
'comment': ''}
|
||||
|
||||
if not info(name):
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
compObj = _get_computer_object()
|
||||
try:
|
||||
compObj = nt.GetObject('', 'WinNT://.,computer')
|
||||
newGroup = compObj.Create('group', name)
|
||||
newGroup.SetInfo()
|
||||
ret['changes'].append('Successfully created group {0}'.format(name))
|
||||
@ -104,10 +156,8 @@ def delete(name, **kwargs):
|
||||
'comment': ''}
|
||||
|
||||
if info(name):
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
compObj = _get_computer_object()
|
||||
try:
|
||||
compObj = nt.GetObject('', 'WinNT://.,computer')
|
||||
compObj.Delete('group', name)
|
||||
ret['changes'].append(('Successfully removed group {0}').format(name))
|
||||
except pywintypes.com_error as com_err:
|
||||
@ -144,17 +194,10 @@ def info(name):
|
||||
|
||||
salt '*' group.info foo
|
||||
'''
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
|
||||
try:
|
||||
groupObj = nt.GetObject('', 'WinNT://./' + name + ',group')
|
||||
groupObj = _get_group_object(name)
|
||||
gr_name = groupObj.Name
|
||||
gr_mem = []
|
||||
for member in groupObj.members():
|
||||
gr_mem.append(
|
||||
member.ADSPath.replace('WinNT://', '').replace(
|
||||
'/', '\\').encode('ascii', 'backslashreplace'))
|
||||
gr_mem = [_get_username(x) for x in groupObj.members()]
|
||||
except pywintypes.com_error:
|
||||
return False
|
||||
|
||||
@ -193,20 +236,12 @@ def getent(refresh=False):
|
||||
|
||||
ret = []
|
||||
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
results = _get_all_groups()
|
||||
|
||||
results = nt.GetObject('', 'WinNT://.')
|
||||
results.Filter = ['group']
|
||||
for result in results:
|
||||
member_list = []
|
||||
for member in result.members():
|
||||
member_list.append(
|
||||
member.AdsPath.replace('WinNT://', '').replace(
|
||||
'/', '\\').encode('ascii', 'backslashreplace'))
|
||||
group = {'gid': __salt__['file.group_to_gid'](result.name),
|
||||
'members': member_list,
|
||||
'name': result.name,
|
||||
group = {'gid': __salt__['file.group_to_gid'](result.Name),
|
||||
'members': [_get_username(x) for x in result.members()],
|
||||
'name': result.Name,
|
||||
'passwd': 'x'}
|
||||
ret.append(group)
|
||||
__context__['group.getent'] = ret
|
||||
@ -240,17 +275,21 @@ def adduser(name, username, **kwargs):
|
||||
'changes': {'Users Added': []},
|
||||
'comment': ''}
|
||||
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
groupObj = nt.GetObject('', 'WinNT://./' + name + ',group')
|
||||
existingMembers = []
|
||||
for member in groupObj.members():
|
||||
existingMembers.append(
|
||||
member.ADSPath.replace('WinNT://', '').replace(
|
||||
'/', '\\').encode('ascii', 'backslashreplace').lower())
|
||||
try:
|
||||
groupObj = _get_group_object(name)
|
||||
except pywintypes.com_error as com_err:
|
||||
if len(com_err.excepinfo) >= 2:
|
||||
friendly_error = com_err.excepinfo[2].rstrip('\r\n')
|
||||
ret['result'] = False
|
||||
ret['comment'] = 'Failure accessing group {0}. {1}' \
|
||||
''.format(name, friendly_error)
|
||||
return ret
|
||||
|
||||
existingMembers = [_get_username(x) for x in groupObj.members()]
|
||||
username = salt.utils.win_functions.get_sam_name(username)
|
||||
|
||||
try:
|
||||
if salt.utils.win_functions.get_sam_name(username) not in existingMembers:
|
||||
if username not in existingMembers:
|
||||
if not __opts__['test']:
|
||||
groupObj.Add('WinNT://' + username.replace('\\', '/'))
|
||||
|
||||
@ -299,14 +338,17 @@ def deluser(name, username, **kwargs):
|
||||
'changes': {'Users Removed': []},
|
||||
'comment': ''}
|
||||
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
groupObj = nt.GetObject('', 'WinNT://./' + name + ',group')
|
||||
existingMembers = []
|
||||
for member in groupObj.members():
|
||||
existingMembers.append(
|
||||
member.ADSPath.replace('WinNT://', '').replace(
|
||||
'/', '\\').encode('ascii', 'backslashreplace').lower())
|
||||
try:
|
||||
groupObj = _get_group_object(name)
|
||||
except pywintypes.com_error as com_err:
|
||||
if len(com_err.excepinfo) >= 2:
|
||||
friendly_error = com_err.excepinfo[2].rstrip('\r\n')
|
||||
ret['result'] = False
|
||||
ret['comment'] = 'Failure accessing group {0}. {1}' \
|
||||
''.format(name, friendly_error)
|
||||
return ret
|
||||
|
||||
existingMembers = [_get_username(x) for x in groupObj.members()]
|
||||
|
||||
try:
|
||||
if salt.utils.win_functions.get_sam_name(username) in existingMembers:
|
||||
@ -365,10 +407,8 @@ def members(name, members_list, **kwargs):
|
||||
ret['comment'].append('Members is not a list object')
|
||||
return ret
|
||||
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
try:
|
||||
groupObj = nt.GetObject('', 'WinNT://./' + name + ',group')
|
||||
groupObj = _get_group_object(name)
|
||||
except pywintypes.com_error as com_err:
|
||||
if len(com_err.excepinfo) >= 2:
|
||||
friendly_error = com_err.excepinfo[2].rstrip('\r\n')
|
||||
@ -377,12 +417,7 @@ def members(name, members_list, **kwargs):
|
||||
'Failure accessing group {0}. {1}'
|
||||
).format(name, friendly_error))
|
||||
return ret
|
||||
existingMembers = []
|
||||
for member in groupObj.members():
|
||||
existingMembers.append(
|
||||
member.ADSPath.replace('WinNT://', '').replace(
|
||||
'/', '\\').encode('ascii', 'backslashreplace').lower())
|
||||
|
||||
existingMembers = [_get_username(x) for x in groupObj.members()]
|
||||
existingMembers.sort()
|
||||
members_list.sort()
|
||||
|
||||
@ -448,18 +483,14 @@ def list_groups(refresh=False):
|
||||
salt '*' group.list_groups
|
||||
'''
|
||||
if 'group.list_groups' in __context__ and not refresh:
|
||||
return __context__['group.getent']
|
||||
return __context__['group.list_groups']
|
||||
|
||||
results = _get_all_groups()
|
||||
|
||||
ret = []
|
||||
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
|
||||
results = nt.GetObject('', 'WinNT://.')
|
||||
results.Filter = ['group']
|
||||
|
||||
for result in results:
|
||||
ret.append(result.name)
|
||||
ret.append(result.Name)
|
||||
|
||||
__context__['group.list_groups'] = ret
|
||||
|
||||
|
@ -34,15 +34,18 @@ Current known limitations
|
||||
- pywin32 Python module
|
||||
- lxml
|
||||
- uuid
|
||||
- codecs
|
||||
- struct
|
||||
- salt.modules.reg
|
||||
'''
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import io
|
||||
import os
|
||||
import logging
|
||||
import re
|
||||
import locale
|
||||
import ctypes
|
||||
import time
|
||||
|
||||
# Import Salt libs
|
||||
import salt.utils.files
|
||||
@ -89,7 +92,6 @@ try:
|
||||
import win32net
|
||||
import win32security
|
||||
import uuid
|
||||
import codecs
|
||||
import lxml
|
||||
import struct
|
||||
from lxml import etree
|
||||
@ -116,6 +118,16 @@ try:
|
||||
ADMX_DISPLAYNAME_SEARCH_XPATH = etree.XPath('//*[local-name() = "policy" and @*[local-name() = "displayName"] = $display_name and (@*[local-name() = "class"] = "Both" or @*[local-name() = "class"] = $registry_class) ]')
|
||||
PRESENTATION_ANCESTOR_XPATH = etree.XPath('ancestor::*[local-name() = "presentation"]')
|
||||
TEXT_ELEMENT_XPATH = etree.XPath('.//*[local-name() = "text"]')
|
||||
# Get the System Install Language
|
||||
# https://msdn.microsoft.com/en-us/library/dd318123(VS.85).aspx
|
||||
# local.windows_locale is a dict
|
||||
# GetSystemDefaultUILanguage() returns a 4 digit language code that
|
||||
# corresponds to an entry in the dict
|
||||
# Not available in win32api, so we have to use ctypes
|
||||
# Default to `en-US` (1033)
|
||||
windll = ctypes.windll.kernel32
|
||||
INSTALL_LANGUAGE = locale.windows_locale.get(
|
||||
windll.GetSystemDefaultUILanguage(), 1033).replace('_', '-')
|
||||
except ImportError:
|
||||
HAS_WINDOWS_MODULES = False
|
||||
|
||||
@ -2708,7 +2720,8 @@ def _processPolicyDefinitions(policy_def_path='c:\\Windows\\PolicyDefinitions',
|
||||
helper function to process all ADMX files in the specified policy_def_path
|
||||
and build a single XML doc that we can search/use for ADMX policy processing
|
||||
'''
|
||||
display_language_fallback = 'en-US'
|
||||
# Fallback to the System Install Language
|
||||
display_language_fallback = INSTALL_LANGUAGE
|
||||
t_policy_definitions = lxml.etree.Element('policyDefinitions')
|
||||
t_policy_definitions.append(lxml.etree.Element('categories'))
|
||||
t_policy_definitions.append(lxml.etree.Element('policies'))
|
||||
@ -2772,22 +2785,44 @@ def _processPolicyDefinitions(policy_def_path='c:\\Windows\\PolicyDefinitions',
|
||||
temp_ns = policy_ns
|
||||
temp_ns = _updateNamespace(temp_ns, this_namespace)
|
||||
policydefs_policyns_xpath(t_policy_definitions)[0].append(temp_ns)
|
||||
adml_file = os.path.join(root, display_language, os.path.splitext(t_admfile)[0] + '.adml')
|
||||
|
||||
# We need to make sure the adml file exists. First we'll check
|
||||
# the passed display_language (eg: en-US). Then we'll try the
|
||||
# abbreviated version (en) to account for alternate locations.
|
||||
# We'll do the same for the display_language_fallback (en_US).
|
||||
adml_file = os.path.join(root, display_language,
|
||||
os.path.splitext(t_admfile)[0] + '.adml')
|
||||
if not __salt__['file.file_exists'](adml_file):
|
||||
msg = ('An ADML file in the specified ADML language "{0}" '
|
||||
'does not exist for the ADMX "{1}", the fallback '
|
||||
'language will be tried.')
|
||||
'does not exist for the ADMX "{1}", the abbreviated '
|
||||
'language code will be tried.')
|
||||
log.info(msg.format(display_language, t_admfile))
|
||||
adml_file = os.path.join(root,
|
||||
display_language_fallback,
|
||||
os.path.splitext(t_admfile)[0] + '.adml')
|
||||
|
||||
adml_file = os.path.join(root, display_language.split('-')[0],
|
||||
os.path.splitext(t_admfile)[0] + '.adml')
|
||||
if not __salt__['file.file_exists'](adml_file):
|
||||
msg = ('An ADML file in the specified ADML language '
|
||||
'"{0}" and the fallback language "{1}" do not '
|
||||
'exist for the ADMX "{2}".')
|
||||
raise SaltInvocationError(msg.format(display_language,
|
||||
display_language_fallback,
|
||||
t_admfile))
|
||||
msg = ('An ADML file in the specified ADML language code "{0}" '
|
||||
'does not exist for the ADMX "{1}", the fallback '
|
||||
'language will be tried.')
|
||||
log.info(msg.format(display_language[:2], t_admfile))
|
||||
|
||||
adml_file = os.path.join(root, display_language_fallback,
|
||||
os.path.splitext(t_admfile)[0] + '.adml')
|
||||
if not __salt__['file.file_exists'](adml_file):
|
||||
msg = ('An ADML file in the specified ADML fallback language "{0}" '
|
||||
'does not exist for the ADMX "{1}", the abbreviated '
|
||||
'fallback language code will be tried.')
|
||||
log.info(msg.format(display_language_fallback, t_admfile))
|
||||
|
||||
adml_file = os.path.join(root, display_language_fallback.split('-')[0],
|
||||
os.path.splitext(t_admfile)[0] + '.adml')
|
||||
if not __salt__['file.file_exists'](adml_file):
|
||||
msg = ('An ADML file in the specified ADML language '
|
||||
'"{0}" and the fallback language "{1}" do not '
|
||||
'exist for the ADMX "{2}".')
|
||||
raise SaltInvocationError(msg.format(display_language,
|
||||
display_language_fallback,
|
||||
t_admfile))
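
A compact sketch of the lookup order the cascade above implements: the requested language tag, its abbreviation, then the fallback (system install) language and its abbreviation. The helper name is invented for illustration.

.. code-block:: python

    import os

    def candidate_adml_paths(root, admx_file, display_language, fallback_language):
        adml_name = os.path.splitext(admx_file)[0] + '.adml'
        languages = [
            display_language,                   # e.g. 'fr-FR'
            display_language.split('-')[0],     # e.g. 'fr'
            fallback_language,                  # e.g. 'en-US' (install language)
            fallback_language.split('-')[0],    # e.g. 'en'
        ]
        return [os.path.join(root, lang, adml_name) for lang in languages]
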
try:
|
||||
xmltree = lxml.etree.parse(adml_file)
|
||||
except lxml.etree.XMLSyntaxError:
|
||||
@ -2795,8 +2830,8 @@ def _processPolicyDefinitions(policy_def_path='c:\\Windows\\PolicyDefinitions',
|
||||
try:
|
||||
xmltree = _remove_unicode_encoding(adml_file)
|
||||
except Exception:
|
||||
msg = ('An error was found while processing adml file {0}, all policy'
|
||||
' languange data from this file will be unavailable via this module')
|
||||
msg = ('An error was found while processing adml file {0}, all policy '
|
||||
'language data from this file will be unavailable via this module')
|
||||
log.error(msg.format(adml_file))
|
||||
continue
|
||||
if None in namespaces:
|
||||
@ -2827,15 +2862,23 @@ def _findOptionValueInSeceditFile(option):
|
||||
'''
|
||||
try:
|
||||
_d = uuid.uuid4().hex
|
||||
_tfile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'),
|
||||
_tfile = '{0}\\{1}'.format(__opts__['cachedir'],
|
||||
'salt-secedit-dump-{0}.txt'.format(_d))
|
||||
_ret = __salt__['cmd.run']('secedit /export /cfg {0}'.format(_tfile))
|
||||
if _ret:
|
||||
_reader = codecs.open(_tfile, 'r', encoding='utf-16')
|
||||
_secdata = _reader.readlines()
|
||||
_reader.close()
|
||||
with io.open(_tfile, encoding='utf-16') as _reader:
|
||||
_secdata = _reader.readlines()
|
||||
if __salt__['file.file_exists'](_tfile):
|
||||
_ret = __salt__['file.remove'](_tfile)
|
||||
for _ in range(5):
|
||||
try:
|
||||
__salt__['file.remove'](_tfile)
|
||||
except CommandExecutionError:
|
||||
time.sleep(.1)
|
||||
continue
|
||||
else:
|
||||
break
|
||||
else:
|
||||
log.error('error occurred removing {0}'.format(_tfile))
|
||||
for _line in _secdata:
|
||||
if _line.startswith(option):
|
||||
return True, _line.split('=')[1].strip()
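
The temp-file cleanup above retries a few times before logging a failure; the same pattern in isolation, with a generic callable standing in for ``__salt__['file.remove']``:

.. code-block:: python

    import time

    def remove_with_retries(remove_func, path, attempts=5, delay=0.1):
        # retry transient failures, e.g. the file still being held open
        for _ in range(attempts):
            try:
                remove_func(path)
            except Exception:
                time.sleep(delay)
                continue
            return True
        return False
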
@ -2851,9 +2894,9 @@ def _importSeceditConfig(infdata):
|
||||
'''
|
||||
try:
|
||||
_d = uuid.uuid4().hex
|
||||
_tSdbfile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'),
|
||||
_tSdbfile = '{0}\\{1}'.format(__opts__['cachedir'],
|
||||
'salt-secedit-import-{0}.sdb'.format(_d))
|
||||
_tInfFile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'),
|
||||
_tInfFile = '{0}\\{1}'.format(__opts__['cachedir'],
|
||||
'salt-secedit-config-{0}.inf'.format(_d))
|
||||
# make sure our temp files don't already exist
|
||||
if __salt__['file.file_exists'](_tSdbfile):
|
||||
|
@ -1531,24 +1531,26 @@ def install(name=None,
|
||||
to_install.append((pkgname, pkgstr))
|
||||
break
|
||||
else:
|
||||
if re.match('kernel(-.+)?', name):
|
||||
# kernel and its subpackages support multiple
|
||||
# installs as their paths do not conflict.
|
||||
# Performing a yum/dnf downgrade will be a no-op
|
||||
# so just do an install instead. It will fail if
|
||||
# there are other interdependencies that have
|
||||
# conflicts, and that's OK. We don't want to force
|
||||
# anything, we just want to properly handle it if
|
||||
# someone tries to install a kernel/kernel-devel of
|
||||
# a lower version than the currently-installed one.
|
||||
# TODO: find a better way to determine if a package
|
||||
# supports multiple installs.
|
||||
to_install.append((pkgname, pkgstr))
|
||||
else:
|
||||
# None of the currently-installed versions are
|
||||
# greater than the specified version, so this is a
|
||||
# downgrade.
|
||||
to_downgrade.append((pkgname, pkgstr))
|
||||
if pkgname is not None:
|
||||
if re.match('kernel(-.+)?', pkgname):
|
||||
# kernel and its subpackages support multiple
|
||||
# installs as their paths do not conflict.
|
||||
# Performing a yum/dnf downgrade will be a
|
||||
# no-op so just do an install instead. It will
|
||||
# fail if there are other interdependencies
|
||||
# that have conflicts, and that's OK. We don't
|
||||
# want to force anything, we just want to
|
||||
# properly handle it if someone tries to
|
||||
# install a kernel/kernel-devel of a lower
|
||||
# version than the currently-installed one.
|
||||
# TODO: find a better way to determine if a
|
||||
# package supports multiple installs.
|
||||
to_install.append((pkgname, pkgstr))
|
||||
else:
|
||||
# None of the currently-installed versions are
|
||||
# greater than the specified version, so this
|
||||
# is a downgrade.
|
||||
to_downgrade.append((pkgname, pkgstr))
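
A quick check of the package-name test used in this block: ``re.match`` anchors at the start of the string, so ``kernel`` and its subpackages match while unrelated names do not.

.. code-block:: python

    import re

    assert re.match('kernel(-.+)?', 'kernel')
    assert re.match('kernel(-.+)?', 'kernel-devel')
    assert not re.match('kernel(-.+)?', 'kmod-kvdo')
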
||||
def _add_common_args(cmd):
|
||||
'''
|
||||
|
@ -909,10 +909,7 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): # pylint: disable=W
|
||||
|
||||
f_call = self._format_call_run_job_async(chunk)
|
||||
# fire a job off
|
||||
try:
|
||||
pub_data = yield self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {}))
|
||||
except EauthAuthenticationError:
|
||||
raise tornado.gen.Return('Not authorized to run this job')
|
||||
pub_data = yield self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {}))
|
||||
|
||||
# if the job didn't publish, lets not wait around for nothing
|
||||
# TODO: set header??
|
||||
|
@ -891,11 +891,11 @@ class Pillar(object):
|
||||
# Avoid circular import
|
||||
import salt.utils.gitfs
|
||||
import salt.pillar.git_pillar
|
||||
git_pillar = salt.utils.gitfs.GitPillar(self.opts)
|
||||
git_pillar.init_remotes(
|
||||
git_pillar = salt.utils.gitfs.GitPillar(
|
||||
self.opts,
|
||||
self.ext['git'],
|
||||
salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
|
||||
salt.pillar.git_pillar.PER_REMOTE_ONLY)
|
||||
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
|
||||
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
|
||||
git_pillar.fetch_remotes()
|
||||
except TypeError:
|
||||
# Handle malformed ext_pillar
|
||||
|
@ -348,12 +348,6 @@ from salt.ext import six
|
||||
PER_REMOTE_OVERRIDES = ('env', 'root', 'ssl_verify', 'refspecs')
|
||||
PER_REMOTE_ONLY = ('name', 'mountpoint')
|
||||
|
||||
# Fall back to default per-remote-only. This isn't technically needed since
|
||||
# salt.utils.gitfs.GitBase.init_remotes() will default to
|
||||
# salt.utils.gitfs.PER_REMOTE_ONLY for this value, so this is mainly for
|
||||
# runners and other modules that import salt.pillar.git_pillar.
|
||||
PER_REMOTE_ONLY = salt.utils.gitfs.PER_REMOTE_ONLY
|
||||
|
||||
# Set up logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@ -371,7 +365,7 @@ def __virtual__():
|
||||
return False
|
||||
|
||||
try:
|
||||
salt.utils.gitfs.GitPillar(__opts__)
|
||||
salt.utils.gitfs.GitPillar(__opts__, init_remotes=False)
|
||||
# Initialization of the GitPillar object did not fail, so we
|
||||
# know we have valid configuration syntax and that a valid
|
||||
# provider was detected.
|
||||
@ -387,8 +381,11 @@ def ext_pillar(minion_id, pillar, *repos): # pylint: disable=unused-argument
|
||||
opts = copy.deepcopy(__opts__)
|
||||
opts['pillar_roots'] = {}
|
||||
opts['__git_pillar'] = True
|
||||
git_pillar = salt.utils.gitfs.GitPillar(opts)
|
||||
git_pillar.init_remotes(repos, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
git_pillar = salt.utils.gitfs.GitPillar(
|
||||
opts,
|
||||
repos,
|
||||
per_remote_overrides=PER_REMOTE_OVERRIDES,
|
||||
per_remote_only=PER_REMOTE_ONLY)
|
||||
if __opts__.get('__role') == 'minion':
|
||||
# If masterless, fetch the remotes. We'll need to remove this once
|
||||
# we make the minion daemon able to run standalone.
|
||||
|
@ -191,7 +191,9 @@ from __future__ import absolute_import
|
||||
import logging
|
||||
|
||||
# Import Salt Libs
|
||||
from salt._compat import ElementTree as ET
|
||||
import salt.exceptions
|
||||
import salt.utils.xmlutil as xml
|
||||
|
||||
# This must be present or the Salt loader won't load this module.
|
||||
__proxyenabled__ = ['panos']
|
||||
@ -214,6 +216,22 @@ def __virtual__():
|
||||
return __virtualname__
|
||||
|
||||
|
||||
def _strip_dirty(xmltree):
|
||||
'''
|
||||
Removes dirtyID tags from the candidate config result. Palo Alto devices will make the candidate configuration with
|
||||
a dirty ID after a change. This can cause unexpected results when parsing.
|
||||
'''
|
||||
dirty = xmltree.attrib.pop('dirtyId', None)
|
||||
if dirty:
|
||||
xmltree.attrib.pop('admin', None)
|
||||
xmltree.attrib.pop('time', None)
|
||||
|
||||
for child in xmltree:
|
||||
child = _strip_dirty(child)
|
||||
|
||||
return xmltree
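
A toy run of the recursive cleanup above on made-up XML, using the standard-library ``ElementTree`` in place of the module's ``salt._compat`` import:

.. code-block:: python

    import xml.etree.ElementTree as ET

    def strip_dirty(node):
        # any element carrying a dirtyId also loses its admin/time attributes
        if node.attrib.pop('dirtyId', None):
            node.attrib.pop('admin', None)
            node.attrib.pop('time', None)
        for child in node:
            strip_dirty(child)
        return node

    tree = ET.fromstring(
        '<result><entry dirtyId="7" admin="admin" time="t"><a/></entry></result>')
    strip_dirty(tree)
    assert tree.find('entry').attrib == {}
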
||||
def init(opts):
|
||||
'''
|
||||
This function gets called when the proxy starts up. For
|
||||
@ -271,7 +289,7 @@ def call(payload=None):
|
||||
'''
|
||||
This function captures the query string and sends it to the Palo Alto device.
|
||||
'''
|
||||
ret = {}
|
||||
r = None
|
||||
try:
|
||||
if DETAILS['method'] == 'dev_key':
|
||||
# Pass the api key without the target declaration
|
||||
@ -280,11 +298,10 @@ def call(payload=None):
|
||||
r = __utils__['http.query'](DETAILS['url'],
|
||||
data=payload,
|
||||
method='POST',
|
||||
decode_type='xml',
|
||||
decode_type='plain',
|
||||
decode=True,
|
||||
verify_ssl=False,
|
||||
raise_error=True)
|
||||
ret = r['dict'][0]
|
||||
elif DETAILS['method'] == 'dev_pass':
|
||||
# Pass credentials without the target declaration
|
||||
r = __utils__['http.query'](DETAILS['url'],
|
||||
@ -292,11 +309,10 @@ def call(payload=None):
|
||||
password=DETAILS['password'],
|
||||
data=payload,
|
||||
method='POST',
|
||||
decode_type='xml',
|
||||
decode_type='plain',
|
||||
decode=True,
|
||||
verify_ssl=False,
|
||||
raise_error=True)
|
||||
ret = r['dict'][0]
|
||||
elif DETAILS['method'] == 'pan_key':
|
||||
# Pass the api key with the target declaration
|
||||
conditional_payload = {'key': DETAILS['apikey'],
|
||||
@ -305,11 +321,10 @@ def call(payload=None):
|
||||
r = __utils__['http.query'](DETAILS['url'],
|
||||
data=payload,
|
||||
method='POST',
|
||||
decode_type='xml',
|
||||
decode_type='plain',
|
||||
decode=True,
|
||||
verify_ssl=False,
|
||||
raise_error=True)
|
||||
ret = r['dict'][0]
|
||||
elif DETAILS['method'] == 'pan_pass':
|
||||
# Pass credentials with the target declaration
|
||||
conditional_payload = {'target': DETAILS['serial']}
|
||||
@ -319,14 +334,23 @@ def call(payload=None):
|
||||
password=DETAILS['password'],
|
||||
data=payload,
|
||||
method='POST',
|
||||
decode_type='xml',
|
||||
decode_type='plain',
|
||||
decode=True,
|
||||
verify_ssl=False,
|
||||
raise_error=True)
|
||||
ret = r['dict'][0]
|
||||
except KeyError as err:
|
||||
raise salt.exceptions.CommandExecutionError("Did not receive a valid response from host.")
|
||||
return ret
|
||||
|
||||
if not r:
|
||||
raise salt.exceptions.CommandExecutionError("Did not receive a valid response from host.")
|
||||
|
||||
xmldata = ET.fromstring(r['text'])
|
||||
|
||||
# If we are pulling the candidate configuration, we need to strip the dirtyId
|
||||
if payload['type'] == 'config' and payload['action'] == 'get':
|
||||
xmldata = (_strip_dirty(xmldata))
|
||||
|
||||
return xml.to_dict(xmldata, True)
|
||||
|
||||
|
||||
def is_required_version(required_version='0.0.0'):
|
||||
@ -382,7 +406,7 @@ def grains():
|
||||
DETAILS['grains_cache'] = GRAINS_CACHE
|
||||
try:
|
||||
query = {'type': 'op', 'cmd': '<show><system><info></info></system></show>'}
|
||||
DETAILS['grains_cache'] = call(query)['system']
|
||||
DETAILS['grains_cache'] = call(query)['result']['system']
|
||||
except Exception as err:
|
||||
pass
|
||||
return DETAILS['grains_cache']
|
||||
@ -402,7 +426,7 @@ def ping():
|
||||
'''
|
||||
try:
|
||||
query = {'type': 'op', 'cmd': '<show><system><info></info></system></show>'}
|
||||
if 'system' in call(query):
|
||||
if 'result' in call(query):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
@ -3,7 +3,9 @@
|
||||
The local returner is used to test the returner interface, it just prints the
|
||||
return data to the console to verify that it is being passed properly
|
||||
|
||||
To use the local returner, append '--return local' to the salt command. ex:
|
||||
To use the local returner, append '--return local' to the salt command. ex:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' test.ping --return local
|
||||
'''
|
||||
|
@ -328,11 +328,14 @@ def clear_git_lock(role, remote=None, **kwargs):
|
||||
salt.utils.args.invalid_kwargs(kwargs)
|
||||
|
||||
if role == 'gitfs':
|
||||
git_objects = [salt.utils.gitfs.GitFS(__opts__)]
|
||||
git_objects[0].init_remotes(
|
||||
__opts__['gitfs_remotes'],
|
||||
salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
|
||||
salt.fileserver.gitfs.PER_REMOTE_ONLY)
|
||||
git_objects = [
|
||||
salt.utils.gitfs.GitFS(
|
||||
__opts__,
|
||||
__opts__['gitfs_remotes'],
|
||||
per_remote_overrides=salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
|
||||
per_remote_only=salt.fileserver.gitfs.PER_REMOTE_ONLY
|
||||
)
|
||||
]
|
||||
elif role == 'git_pillar':
|
||||
git_objects = []
|
||||
for ext_pillar in __opts__['ext_pillar']:
|
||||
@ -340,11 +343,11 @@ def clear_git_lock(role, remote=None, **kwargs):
|
||||
if key == 'git':
|
||||
if not isinstance(ext_pillar['git'], list):
|
||||
continue
|
||||
obj = salt.utils.gitfs.GitPillar(__opts__)
|
||||
obj.init_remotes(
|
||||
obj = salt.utils.gitfs.GitPillar(
|
||||
__opts__,
|
||||
ext_pillar['git'],
|
||||
salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
|
||||
salt.pillar.git_pillar.PER_REMOTE_ONLY)
|
||||
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
|
||||
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
|
||||
git_objects.append(obj)
|
||||
elif role == 'winrepo':
|
||||
winrepo_dir = __opts__['winrepo_dir']
|
||||
@ -355,11 +358,12 @@ def clear_git_lock(role, remote=None, **kwargs):
|
||||
(winrepo_remotes, winrepo_dir),
|
||||
(__opts__['winrepo_remotes_ng'], __opts__['winrepo_dir_ng'])
|
||||
):
|
||||
obj = salt.utils.gitfs.WinRepo(__opts__, base_dir)
|
||||
obj.init_remotes(
|
||||
obj = salt.utils.gitfs.WinRepo(
|
||||
__opts__,
|
||||
remotes,
|
||||
salt.runners.winrepo.PER_REMOTE_OVERRIDES,
|
||||
salt.runners.winrepo.PER_REMOTE_ONLY)
|
||||
per_remote_overrides=salt.runners.winrepo.PER_REMOTE_OVERRIDES,
|
||||
per_remote_only=salt.runners.winrepo.PER_REMOTE_ONLY,
|
||||
cache_root=base_dir)
|
||||
git_objects.append(obj)
|
||||
else:
|
||||
raise SaltInvocationError('Invalid role \'{0}\''.format(role))
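
A hedged sketch of the calling convention these hunks move to: the remotes and per-remote tunables are handed to the constructor, replacing the separate ``init_remotes()`` call (keyword names mirror the diff; the helper itself is invented).

.. code-block:: python

    import salt.utils.gitfs
    import salt.pillar.git_pillar

    def build_git_pillar(opts, repos):
        return salt.utils.gitfs.GitPillar(
            opts,
            repos,
            per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
            per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
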
|
@ -66,10 +66,11 @@ def update(branch=None, repo=None):
|
||||
if pillar_type != 'git':
|
||||
continue
|
||||
pillar_conf = ext_pillar[pillar_type]
|
||||
pillar = salt.utils.gitfs.GitPillar(__opts__)
|
||||
pillar.init_remotes(pillar_conf,
|
||||
salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
|
||||
salt.pillar.git_pillar.PER_REMOTE_ONLY)
|
||||
pillar = salt.utils.gitfs.GitPillar(
|
||||
__opts__,
|
||||
pillar_conf,
|
||||
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
|
||||
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
|
||||
for remote in pillar.remotes:
|
||||
# Skip this remote if it doesn't match the search criteria
|
||||
if branch is not None:
|
||||
|
@ -56,14 +56,20 @@ def generate_token(minion_id, signature, impersonated_by_master=False):
|
||||
'metadata': audit_data
|
||||
}
|
||||
|
||||
verify = config.get('verify', None)
|
||||
|
||||
log.trace('Sending token creation request to Vault')
|
||||
response = requests.post(url, headers=headers, json=payload)
|
||||
response = requests.post(url, headers=headers, json=payload, verify=verify)
|
||||
|
||||
if response.status_code != 200:
|
||||
return {'error': response.reason}
|
||||
|
||||
authData = response.json()['auth']
|
||||
return {'token': authData['client_token'], 'url': config['url']}
|
||||
return {
|
||||
'token': authData['client_token'],
|
||||
'url': config['url'],
|
||||
'verify': verify,
|
||||
}
|
||||
except Exception as e:
|
||||
return {'error': str(e)}
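
A minimal sketch of how the new ``verify`` setting is threaded through to ``requests``: it may be a boolean or a path to a CA bundle, and ``requests.post`` accepts either form for SSL certificate verification. The wrapper function is invented for illustration.

.. code-block:: python

    import requests

    def post_to_vault(url, headers, payload, config):
        # e.g. config['verify'] == '/etc/ssl/certs/ca-certificates.crt'
        verify = config.get('verify', None)
        return requests.post(url, headers=headers, json=payload, verify=verify)
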
|
@ -32,7 +32,7 @@ log = logging.getLogger(__name__)
|
||||
PER_REMOTE_OVERRIDES = ('ssl_verify', 'refspecs')
|
||||
|
||||
# Fall back to default per-remote-only. This isn't technically needed since
|
||||
# salt.utils.gitfs.GitBase.init_remotes() will default to
|
||||
# salt.utils.gitfs.GitBase.__init__ will default to
|
||||
# salt.utils.gitfs.PER_REMOTE_ONLY for this value, so this is mainly for
|
||||
# runners and other modules that import salt.runners.winrepo.
|
||||
PER_REMOTE_ONLY = salt.utils.gitfs.PER_REMOTE_ONLY
|
||||
@ -216,9 +216,12 @@ def update_git_repos(opts=None, clean=False, masterless=False):
|
||||
else:
|
||||
# New winrepo code utilizing salt.utils.gitfs
|
||||
try:
|
||||
winrepo = salt.utils.gitfs.WinRepo(opts, base_dir)
|
||||
winrepo.init_remotes(
|
||||
remotes, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
|
||||
winrepo = salt.utils.gitfs.WinRepo(
|
||||
opts,
|
||||
remotes,
|
||||
per_remote_overrides=PER_REMOTE_OVERRIDES,
|
||||
per_remote_only=PER_REMOTE_ONLY,
|
||||
cache_root=base_dir)
|
||||
winrepo.fetch_remotes()
|
||||
# Since we're not running update(), we need to manually call
|
||||
# clear_old_remotes() to remove directories from remotes that
|
||||
|
@ -159,7 +159,7 @@ def formatted(name, fs_type='ext4', force=False, **kwargs):
|
||||
ret['result'] = None
|
||||
return ret
|
||||
|
||||
__salt__['disk.format_'](name, fs_type, force=force, **kwargs)
|
||||
__salt__['disk.format'](name, fs_type, force=force, **kwargs)
|
||||
|
||||
# Repeat fstype check up to 10 times with 3s sleeping between each
|
||||
# to avoid detection failing although mkfs has succeeded
|
||||
|
@ -126,6 +126,8 @@ def present(
|
||||
vpc_name=None,
|
||||
rules=None,
|
||||
rules_egress=None,
|
||||
delete_ingress_rules=True,
|
||||
delete_egress_rules=True,
|
||||
region=None,
|
||||
key=None,
|
||||
keyid=None,
|
||||
@ -160,6 +162,16 @@ def present(
|
||||
the egress rules will be unmanaged. If set to an empty list, ``[]``,
|
||||
then all egress rules will be removed.
|
||||
|
||||
delete_ingress_rules
|
||||
Some tools (EMR comes to mind) insist on adding rules on-the-fly, which
|
||||
salt will happily remove on the next run. Set this param to False to
|
||||
avoid deleting rules which were added outside of salt.
|
||||
|
||||
delete_egress_rules
|
||||
Some tools (EMR comes to mind) insist on adding rules on-the-fly, which
|
||||
salt will happily remove on the next run. Set this param to False to
|
||||
avoid deleting rules which were added outside of salt.
|
||||
|
||||
region
|
||||
Region to connect to.
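
A made-up illustration of what ``delete_ingress_rules`` and ``delete_egress_rules`` change: the computed deletions are simply discarded when the flag is ``False``, so rules added outside of Salt survive. ``plan_changes`` is an invented helper, not part of the module.

.. code-block:: python

    def plan_changes(desired, existing, delete_unmanaged=True):
        to_create = [r for r in desired if r not in existing]
        to_delete = [r for r in existing if r not in desired]
        return (to_delete if delete_unmanaged else []), to_create

    existing = [{'ip_protocol': 'tcp', 'from_port': 22, 'to_port': 22}]
    desired = [{'ip_protocol': 'tcp', 'from_port': 443, 'to_port': 443}]

    assert plan_changes(desired, existing, delete_unmanaged=False)[0] == []
    assert plan_changes(desired, existing)[0] == existing
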
|
||||
|
||||
@ -191,17 +203,18 @@ def present(
|
||||
elif ret['result'] is None:
|
||||
return ret
|
||||
if rules is not None:
|
||||
_ret = _rules_present(name, rules, vpc_id=vpc_id, vpc_name=vpc_name,
|
||||
region=region, key=key, keyid=keyid,
|
||||
profile=profile)
|
||||
_ret = _rules_present(name, rules, delete_ingress_rules, vpc_id=vpc_id,
|
||||
vpc_name=vpc_name, region=region, key=key,
|
||||
keyid=keyid, profile=profile)
|
||||
ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
|
||||
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
|
||||
if not _ret['result']:
|
||||
ret['result'] = _ret['result']
|
||||
if rules_egress is not None:
|
||||
_ret = _rules_egress_present(name, rules_egress, vpc_id=vpc_id,
|
||||
vpc_name=vpc_name, region=region, key=key,
|
||||
keyid=keyid, profile=profile)
|
||||
_ret = _rules_egress_present(name, rules_egress, delete_egress_rules,
|
||||
vpc_id=vpc_id, vpc_name=vpc_name,
|
||||
region=region, key=key, keyid=keyid,
|
||||
profile=profile)
|
||||
ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
|
||||
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
|
||||
if not _ret['result']:
|
||||
@ -389,13 +402,14 @@ def _get_rule_changes(rules, _rules):
|
||||
return (to_delete, to_create)
|
||||
|
||||
|
||||
def _rules_present(name, rules, vpc_id=None, vpc_name=None,
|
||||
region=None, key=None, keyid=None, profile=None):
|
||||
def _rules_present(name, rules, delete_ingress_rules=True, vpc_id=None,
|
||||
vpc_name=None, region=None, key=None, keyid=None, profile=None):
|
||||
'''
|
||||
given a group name or group name and vpc_id (or vpc name):
|
||||
1. get lists of desired rule changes (using _get_rule_changes)
|
||||
2. delete/revoke or authorize/create rules
|
||||
3. return 'old' and 'new' group rules
|
||||
2. authorize/create rules missing rules
|
||||
3. if delete_ingress_rules is True, delete/revoke non-requested rules
|
||||
4. return 'old' and 'new' group rules
|
||||
'''
|
||||
ret = {'result': True, 'comment': '', 'changes': {}}
|
||||
sg = __salt__['boto_secgroup.get_config'](name=name, group_id=None, region=region, key=key,
|
||||
@ -424,11 +438,13 @@ def _rules_present(name, rules, vpc_id=None, vpc_name=None,
|
||||
# rules = rules that exist in salt state
|
||||
# sg['rules'] = that exist in present group
|
||||
to_delete, to_create = _get_rule_changes(rules, sg['rules'])
|
||||
to_delete = to_delete if delete_ingress_rules else []
|
||||
if to_create or to_delete:
|
||||
if __opts__['test']:
|
||||
msg = """Security group {0} set to have rules modified.
|
||||
To be created: {1}
|
||||
To be deleted: {2}""".format(name, pprint.pformat(to_create), pprint.pformat(to_delete))
|
||||
To be deleted: {2}""".format(name, pprint.pformat(to_create),
|
||||
pprint.pformat(to_delete))
|
||||
ret['comment'] = msg
|
||||
ret['result'] = None
|
||||
return ret
|
||||
@ -470,13 +486,14 @@ def _rules_present(name, rules, vpc_id=None, vpc_name=None,
|
||||
return ret
|
||||
|
||||
|
||||
def _rules_egress_present(name, rules_egress, vpc_id=None, vpc_name=None,
|
||||
region=None, key=None, keyid=None, profile=None):
|
||||
def _rules_egress_present(name, rules_egress, delete_egress_rules=True, vpc_id=None,
|
||||
vpc_name=None, region=None, key=None, keyid=None, profile=None):
|
||||
'''
|
||||
given a group name or group name and vpc_id (or vpc name):
|
||||
1. get lists of desired rule changes (using _get_rule_changes)
|
||||
2. delete/revoke or authorize/create rules
|
||||
3. return 'old' and 'new' group rules
|
||||
2. authorize/create missing rules
|
||||
3. if delete_egress_rules is True, delete/revoke non-requested rules
|
||||
4. return 'old' and 'new' group rules
|
||||
'''
|
||||
ret = {'result': True, 'comment': '', 'changes': {}}
|
||||
sg = __salt__['boto_secgroup.get_config'](name=name, group_id=None, region=region, key=key,
|
||||
@ -504,20 +521,20 @@ def _rules_egress_present(name, rules_egress, vpc_id=None, vpc_name=None,
|
||||
rule['source_group_group_id'] = _group_id
|
||||
# rules_egress = rules that exist in salt state
|
||||
# sg['rules_egress'] = that exist in present group
|
||||
to_delete_egress, to_create_egress = _get_rule_changes(
|
||||
rules_egress, sg['rules_egress']
|
||||
)
|
||||
if to_create_egress or to_delete_egress:
|
||||
to_delete, to_create = _get_rule_changes(rules_egress, sg['rules_egress'])
|
||||
to_delete = to_delete if delete_egress_rules else []
|
||||
if to_create or to_delete:
|
||||
if __opts__['test']:
|
||||
msg = """Security group {0} set to have rules modified.
|
||||
To be created: {1}
|
||||
To be deleted: {2}""".format(name, pprint.pformat(to_create_egress), pprint.pformat(to_delete_egress))
|
||||
To be deleted: {2}""".format(name, pprint.pformat(to_create),
|
||||
pprint.pformat(to_delete))
|
||||
ret['comment'] = msg
|
||||
ret['result'] = None
|
||||
return ret
|
||||
if to_delete_egress:
|
||||
if to_delete:
|
||||
deleted = True
|
||||
for rule in to_delete_egress:
|
||||
for rule in to_delete:
|
||||
_deleted = __salt__['boto_secgroup.revoke'](
|
||||
name, vpc_id=vpc_id, vpc_name=vpc_name, region=region,
|
||||
key=key, keyid=keyid, profile=profile, egress=True, **rule)
|
||||
@ -530,9 +547,9 @@ def _rules_egress_present(name, rules_egress, vpc_id=None, vpc_name=None,
|
||||
msg = 'Failed to remove egress rule on {0} security group.'
|
||||
ret['comment'] = ' '.join([ret['comment'], msg.format(name)])
|
||||
ret['result'] = False
|
||||
if to_create_egress:
|
||||
if to_create:
|
||||
created = True
|
||||
for rule in to_create_egress:
|
||||
for rule in to_create:
|
||||
_created = __salt__['boto_secgroup.authorize'](
|
||||
name, vpc_id=vpc_id, vpc_name=vpc_name, region=region,
|
||||
key=key, keyid=keyid, profile=profile, egress=True, **rule)
|
||||
|
@ -638,6 +638,7 @@ def run(name,
|
||||
runas=None,
|
||||
shell=None,
|
||||
env=None,
|
||||
prepend_path=None,
|
||||
stateful=False,
|
||||
umask=None,
|
||||
output_loglevel='debug',
|
||||
@ -712,6 +713,12 @@ def run(name,
|
||||
- env:
|
||||
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
|
||||
|
||||
prepend_path
|
||||
$PATH segment to prepend (trailing ':' not necessary) to $PATH. This is
|
||||
an easier alternative to the Jinja workaround.
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
stateful
|
||||
The command being executed is expected to return data about executing
|
||||
a state. For more information, see the :ref:`stateful-argument` section.
|
||||
@ -807,6 +814,7 @@ def run(name,
|
||||
'use_vt': use_vt,
|
||||
'shell': shell or __grains__['shell'],
|
||||
'env': env,
|
||||
'prepend_path': prepend_path,
|
||||
'umask': umask,
|
||||
'output_loglevel': output_loglevel,
|
||||
'quiet': quiet})
|
||||
|
@ -1853,7 +1853,7 @@ def stopped(name=None,
|
||||
.. code-block:: yaml
|
||||
|
||||
stopped_containers:
|
||||
docker.stopped:
|
||||
docker_container.stopped:
|
||||
- names:
|
||||
- foo
|
||||
- bar
|
||||
@ -1862,7 +1862,7 @@ def stopped(name=None,
|
||||
.. code-block:: yaml
|
||||
|
||||
stopped_containers:
|
||||
docker.stopped:
|
||||
docker_container.stopped:
|
||||
- containers:
|
||||
- foo
|
||||
- bar
|
||||
@ -1998,10 +1998,10 @@ def absent(name, force=False):
|
||||
.. code-block:: yaml
|
||||
|
||||
mycontainer:
|
||||
docker.absent
|
||||
docker_container.absent
|
||||
|
||||
multiple_containers:
|
||||
docker.absent:
|
||||
docker_container.absent:
|
||||
- names:
|
||||
- foo
|
||||
- bar
|
||||
|
@ -108,7 +108,8 @@ def present(name,
|
||||
# Build out all dashboard fields
|
||||
new_dashboard = _inherited_dashboard(
|
||||
dashboard, base_dashboards_from_pillar, ret)
|
||||
new_dashboard['title'] = name
|
||||
if 'title' not in new_dashboard:
|
||||
new_dashboard['title'] = name
|
||||
rows = new_dashboard.get('rows', [])
|
||||
for i, row in enumerate(rows):
|
||||
rows[i] = _inherited_row(row, base_rows_from_pillar, ret)
|
||||
|
@ -151,6 +151,12 @@ def present(name,
|
||||
ret['changes'] = data
|
||||
return ret
|
||||
|
||||
# At this stage, the datasource exists; however, the object provided by
|
||||
# Grafana may lack some null keys compared to our "data" dict:
|
||||
for key in data:
|
||||
if key not in datasource:
|
||||
datasource[key] = None
|
||||
|
||||
if data == datasource:
|
||||
ret['changes'] = None
|
||||
ret['comment'] = 'Data source {0} already up-to-date'.format(name)
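
The normalisation above, reduced to made-up dictionaries: keys the Grafana API omits are treated as explicit ``None`` before comparing, so an unchanged data source is not reported as changed.

.. code-block:: python

    desired = {'name': 'influxdb', 'basicAuthUser': None}    # made-up values
    returned = {'name': 'influxdb'}    # the API dropped the null key

    for key in desired:
        if key not in returned:
            returned[key] = None

    assert desired == returned
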
|
@ -65,11 +65,11 @@ def _changes(name,
|
||||
if lgrp['members']:
|
||||
lgrp['members'] = [user.lower() for user in lgrp['members']]
|
||||
if members:
|
||||
members = [salt.utils.win_functions.get_sam_name(user) for user in members]
|
||||
members = [salt.utils.win_functions.get_sam_name(user).lower() for user in members]
|
||||
if addusers:
|
||||
addusers = [salt.utils.win_functions.get_sam_name(user) for user in addusers]
|
||||
addusers = [salt.utils.win_functions.get_sam_name(user).lower() for user in addusers]
|
||||
if delusers:
|
||||
delusers = [salt.utils.win_functions.get_sam_name(user) for user in delusers]
|
||||
delusers = [salt.utils.win_functions.get_sam_name(user).lower() for user in delusers]
|
||||
|
||||
change = {}
|
||||
if gid:
|
||||
@ -244,9 +244,7 @@ def present(name,
|
||||
return ret
|
||||
|
||||
# Group is not present, make it.
|
||||
if __salt__['group.add'](name,
|
||||
gid,
|
||||
system=system):
|
||||
if __salt__['group.add'](name, gid=gid, system=system):
|
||||
# if members to be added
|
||||
grp_members = None
|
||||
if members:
|
||||
@ -269,7 +267,7 @@ def present(name,
|
||||
ret['result'] = False
|
||||
ret['comment'] = (
|
||||
'Group {0} has been created but, some changes could not'
|
||||
' be applied')
|
||||
' be applied'.format(name))
|
||||
ret['changes'] = {'Failed': changes}
|
||||
else:
|
||||
ret['result'] = False
|
||||
|
@ -131,7 +131,7 @@ def absent(name, ip): # pylint: disable=C0103
|
||||
comments.append('Host {0} ({1}) already absent'.format(name, _ip))
|
||||
else:
|
||||
if __opts__['test']:
|
||||
comments.append('Host {0} ({1} needs to be removed'.format(name, _ip))
|
||||
comments.append('Host {0} ({1}) needs to be removed'.format(name, _ip))
|
||||
else:
|
||||
if __salt__['hosts.rm_host'](_ip, name):
|
||||
ret['changes'] = {'host': name}
|
||||
|
157
salt/states/opsgenie.py
Normal file
157
salt/states/opsgenie.py
Normal file
@ -0,0 +1,157 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Create/Close an alert in OpsGenie
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. versionadded:: Oxygen
|
||||
|
||||
This state is useful for creating or closing alerts in OpsGenie
|
||||
during state runs.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
used_space:
|
||||
disk.status:
|
||||
- name: /
|
||||
- maximum: 79%
|
||||
- minimum: 20%
|
||||
|
||||
opsgenie_create_action_sender:
|
||||
opsgenie.create_alert:
|
||||
- api_key: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
|
||||
- reason: 'Disk capacity is out of designated range.'
|
||||
- name: disk.status
|
||||
- onfail:
|
||||
- disk: used_space
|
||||
|
||||
opsgenie_close_action_sender:
|
||||
opsgenie.close_alert:
|
||||
- api_key: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
|
||||
- name: disk.status
|
||||
- require:
|
||||
- disk: used_space
|
||||
|
||||
'''
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
import inspect
|
||||
|
||||
# Import Salt libs
|
||||
import salt.exceptions
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def create_alert(name=None, api_key=None, reason=None, action_type="Create"):
|
||||
'''
|
||||
Create an alert in OpsGenie. Example usage with Salt's requisites and other
|
||||
global state arguments could be found above.
|
||||
|
||||
Required Parameters:
|
||||
|
||||
api_key
|
||||
It's the API Key you've copied while adding integration in OpsGenie.
|
||||
|
||||
reason
|
||||
It will be used as alert's default message in OpsGenie.
|
||||
|
||||
Optional Parameters:
|
||||
|
||||
name
|
||||
It will be used as alert's alias. If you want to use the close
|
||||
functionality you must provide name field for both states like
|
||||
in above case.
|
||||
|
||||
action_type
|
||||
OpsGenie supports the default values Create/Close for action_type.
|
||||
You can customize this field with OpsGenie's custom actions for
|
||||
other purposes like adding notes or acknowledging alerts.
|
||||
'''
|
||||
|
||||
_, _, _, values = inspect.getargvalues(inspect.currentframe())
|
||||
log.info("Arguments values:" + str(values))
|
||||
|
||||
ret = {
|
||||
'result': '',
|
||||
'name': '',
|
||||
'changes': '',
|
||||
'comment': ''
|
||||
}
|
||||
|
||||
if api_key is None or reason is None:
|
||||
raise salt.exceptions.SaltInvocationError(
|
||||
'API Key or Reason cannot be None.')
|
||||
|
||||
if __opts__['test'] is True:
|
||||
ret[
|
||||
'comment'] = 'Test: {0} alert request will be processed ' \
|
||||
'using the API Key="{1}".'.format(
|
||||
action_type,
|
||||
api_key)
|
||||
|
||||
# Return ``None`` when running with ``test=true``.
|
||||
ret['result'] = None
|
||||
|
||||
return ret
|
||||
|
||||
response_status_code, response_text = __salt__['opsgenie.post_data'](
|
||||
api_key=api_key,
|
||||
name=name,
|
||||
reason=reason,
|
||||
action_type=action_type
|
||||
)
|
||||
|
||||
if 200 <= response_status_code < 300:
|
||||
log.info(
|
||||
"POST Request has succeeded with message:" +
|
||||
response_text + " status code:" + str(
|
||||
response_status_code))
|
||||
ret[
|
||||
'comment'] = '{0} alert request has been processed' \
|
||||
' using the API Key="{1}".'.format(
|
||||
action_type,
|
||||
api_key)
|
||||
ret['result'] = True
|
||||
else:
|
||||
log.error(
|
||||
"POST Request has failed with error:" +
|
||||
response_text + " status code:" + str(
|
||||
response_status_code))
|
||||
ret['result'] = False
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def close_alert(name=None, api_key=None, reason="Conditions are met.",
|
||||
action_type="Close"):
|
||||
'''
|
||||
Close an alert in OpsGenie. It's a wrapper function for create_alert.
|
||||
Example usage with Salt's requisites and other global state arguments
|
||||
could be found above.
|
||||
|
||||
Required Parameters:
|
||||
|
||||
name
|
||||
It will be used as alert's alias. If you want to use the close
|
||||
functionality you must provide name field for both states like
|
||||
in above case.
|
||||
|
||||
Optional Parameters:
|
||||
|
||||
api_key
|
||||
It's the API Key you've copied while adding integration in OpsGenie.
|
||||
|
||||
reason
|
||||
It will be used as alert's default message in OpsGenie.
|
||||
|
||||
action_type
|
||||
OpsGenie supports the default values Create/Close for action_type.
|
||||
You can customize this field with OpsGenie's custom actions for
|
||||
other purposes like adding notes or acknowledging alerts.
|
||||
'''
|
||||
if name is None:
|
||||
raise salt.exceptions.SaltInvocationError(
|
||||
'Name cannot be None.')
|
||||
|
||||
return create_alert(name, api_key, reason, action_type)
|
@ -87,6 +87,10 @@ greater than the passed version. For example, proxy['panos.is_required_version']
|
||||
from __future__ import absolute_import
|
||||
import logging
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils.xmlutil as xml
|
||||
from salt._compat import ElementTree as ET
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -135,7 +139,7 @@ def _edit_config(xpath, element):
|
||||
query = {'type': 'config',
|
||||
'action': 'edit',
|
||||
'xpath': xpath,
|
||||
'element': element}
|
||||
'element': element}
|
||||
|
||||
response = __proxy__['panos.call'](query)
|
||||
|
||||
@ -239,21 +243,26 @@ def _validate_response(response):
|
||||
|
||||
'''
|
||||
if not response:
|
||||
return False, "Error during move configuration. Verify connectivity to device."
|
||||
return False, 'Unable to validate response from device.'
|
||||
elif 'msg' in response:
|
||||
if response['msg'] == 'command succeeded':
|
||||
return True, response['msg']
|
||||
if 'line' in response['msg']:
|
||||
if response['msg']['line'] == 'already at the top':
|
||||
return True, response
|
||||
elif response['msg']['line'] == 'already at the bottom':
|
||||
return True, response
|
||||
else:
|
||||
return False, response
|
||||
elif response['msg'] == 'command succeeded':
|
||||
return True, response
|
||||
else:
|
||||
return False, response['msg']
|
||||
elif 'line' in response:
|
||||
if response['line'] == 'already at the top':
|
||||
return True, response['line']
|
||||
elif response['line'] == 'already at the bottom':
|
||||
return True, response['line']
|
||||
return False, response
|
||||
elif 'status' in response:
|
||||
if response['status'] == "success":
|
||||
return True, response
|
||||
else:
|
||||
return False, response['line']
|
||||
return False, response
|
||||
else:
|
||||
return False, "Error during move configuration. Verify connectivity to device."
|
||||
return False, response
|
||||
|
||||
|
||||
def add_config_lock(name):
|
||||
@ -280,6 +289,247 @@ def add_config_lock(name):
|
||||
return ret
|
||||
|
||||
|
||||
def address_exists(name,
|
||||
addressname=None,
|
||||
vsys=1,
|
||||
ipnetmask=None,
|
||||
iprange=None,
|
||||
fqdn=None,
|
||||
description=None,
|
||||
commit=False):
|
||||
'''
|
||||
Ensures that an address object exists in the configured state. If it does not exist or is not configured with the
|
||||
specified attributes, it will be adjusted to match the specified values.
|
||||
|
||||
This module will only process a single address type (ip-netmask, ip-range, or fqdn). It will process the specified
|
||||
value in the following order: ip-netmask, ip-range, fqdn. For proper execution, only specify a single address
|
||||
type.
|
||||
|
||||
name: The name of the module function to execute.
|
||||
|
||||
addressname(str): The name of the address object. The name is case-sensitive and can have up to 31 characters,
|
||||
which can be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on
|
||||
Panorama, unique within its device group and any ancestor or descendant device groups.
|
||||
|
||||
vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.
|
||||
|
||||
ipnetmask(str): The IPv4 or IPv6 address or IP address range using the format ip_address/mask or ip_address where
|
||||
the mask is the number of significant binary digits used for the network portion of the address. Ideally, for IPv6,
|
||||
you specify only the network portion, not the host portion.
|
||||
|
||||
iprange(str): A range of addresses using the format ip_address-ip_address where both addresses can be IPv4 or both
|
||||
can be IPv6.
|
||||
|
||||
fqdn(str): A fully qualified domain name format. The FQDN initially resolves at commit time. Entries are
|
||||
subsequently refreshed when the firewall performs a check every 30 minutes; all changes in the IP address for the
|
||||
entries are picked up at the refresh cycle.
|
||||
|
||||
description(str): A description for the address object (up to 255 characters).
|
||||
|
||||
commit(bool): If true, the firewall will commit the changes; if false, do not commit changes.
|
||||
|
||||
SLS Example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
panos/address/h-10.10.10.10:
|
||||
panos.address_exists:
|
||||
- addressname: h-10.10.10.10
|
||||
- vsys: 1
|
||||
- ipnetmask: 10.10.10.10
|
||||
- commit: False
|
||||
|
||||
panos/address/10.0.0.1-10.0.0.50:
|
||||
panos.address_exists:
|
||||
- addressname: r-10.0.0.1-10.0.0.50
|
||||
- vsys: 1
|
||||
- iprange: 10.0.0.1-10.0.0.50
|
||||
- commit: False
|
||||
|
||||
panos/address/foo.bar.com:
|
||||
panos.address_exists:
|
||||
- addressname: foo.bar.com
|
||||
- vsys: 1
|
||||
- fqdn: foo.bar.com
|
||||
- description: My fqdn object
|
||||
- commit: False
|
||||
|
||||
'''
|
||||
ret = _default_ret(name)
|
||||
|
||||
if not addressname:
|
||||
ret.update({'comment': "The service name field must be provided."})
|
||||
return ret
|
||||
|
||||
# Check if address object currently exists
|
||||
address = __salt__['panos.get_address'](addressname, vsys)['result']
|
||||
|
||||
if address and 'entry' in address:
|
||||
address = address['entry']
|
||||
else:
|
||||
address = {}
|
||||
|
||||
element = ""
|
||||
|
||||
# Verify the arguments
|
||||
if ipnetmask:
|
||||
element = "<ip-netmask>{0}</ip-netmask>".format(ipnetmask)
|
||||
elif iprange:
|
||||
element = "<ip-range>{0}</ip-range>".format(iprange)
|
||||
elif fqdn:
|
||||
element = "<fqdn>{0}</fqdn>".format(fqdn)
|
||||
else:
|
||||
ret.update({'comment': "A valid address type must be specified."})
|
||||
return ret
|
||||
|
||||
if description:
|
||||
element += "<description>{0}</description>".format(description)
|
||||
|
||||
full_element = "<entry name='{0}'>{1}</entry>".format(addressname, element)
|
||||
|
||||
new_address = xml.to_dict(ET.fromstring(full_element), True)
|
||||
|
||||
if address == new_address:
|
||||
ret.update({
|
||||
'comment': 'Address object already exists. No changes required.',
|
||||
'result': True
|
||||
})
|
||||
return ret
|
||||
else:
|
||||
xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/address/" \
|
||||
"entry[@name=\'{1}\']".format(vsys, addressname)
|
||||
|
||||
result, msg = _edit_config(xpath, full_element)
|
||||
|
||||
if not result:
|
||||
ret.update({
|
||||
'comment': msg
|
||||
})
|
||||
return ret
|
||||
|
||||
if commit is True:
|
||||
ret.update({
|
||||
'changes': {'before': address, 'after': new_address},
|
||||
'commit': __salt__['panos.commit'](),
|
||||
'comment': 'Address object successfully configured.',
|
||||
'result': True
|
||||
})
|
||||
else:
|
||||
ret.update({
|
||||
'changes': {'before': address, 'after': new_address},
|
||||
'comment': 'Address object successfully configured.',
|
||||
'result': True
|
||||
})
|
||||
|
||||
return ret
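The idempotency check in ``address_exists()`` above hinges on
``xml.to_dict(ET.fromstring(...), True)`` producing comparable dictionaries for the element
already on the device and the element the state wants to push. A rough standalone sketch of
that comparison follows; the sample XML strings are invented for illustration, while
``xml.to_dict()`` and ``ElementTree`` are the same helpers this state module imports.

.. code-block:: python

    import salt.utils.xmlutil as xml
    from salt._compat import ElementTree as ET

    desired = "<entry name='h-10.10.10.10'><ip-netmask>10.10.10.10</ip-netmask></entry>"
    running = "<entry name='h-10.10.10.10'><ip-netmask>10.10.10.10</ip-netmask></entry>"

    # Both sides are parsed into dictionaries; equal dictionaries mean the object
    # is already in the desired state and no edit or commit is needed.
    if xml.to_dict(ET.fromstring(desired), True) == xml.to_dict(ET.fromstring(running), True):
        print('Address object already exists. No changes required.')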
|
||||
|
||||
|
||||
def address_group_exists(name,
|
||||
groupname=None,
|
||||
vsys=1,
|
||||
members=None,
|
||||
description=None,
|
||||
commit=False):
|
||||
'''
|
||||
Ensures that an address group object exists in the configured state. If it does not exist or is not configured with
|
||||
the specified attributes, it will be adjusted to match the specified values.
|
||||
|
||||
This module will enforce group membership. If a group exists and contains members this state does not include,
|
||||
those members will be removed and replaced with the specified members in the state.
|
||||
|
||||
name: The name of the module function to execute.
|
||||
|
||||
groupname(str): The name of the address group object. The name is case-sensitive and can have up to 31 characters,
|
||||
which can be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on
|
||||
Panorama, unique within its device group and any ancestor or descendant device groups.
|
||||
|
||||
vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.
|
||||
|
||||
members(str, list): The members of the address group. These must be valid address objects or address groups on the
|
||||
system that already exist prior to the execution of this state.
|
||||
|
||||
description(str): A description for the address group object (up to 255 characters).
|
||||
|
||||
commit(bool): If true, the firewall will commit the changes; if false, do not commit changes.
|
||||
|
||||
SLS Example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
panos/address-group/my-group:
|
||||
panos.address_group_exists:
|
||||
- groupname: my-group
|
||||
- vsys: 1
|
||||
- members:
|
||||
- my-address-object
|
||||
- my-other-address-group
|
||||
- description: A group that needs to exist
|
||||
- commit: False
|
||||
|
||||
'''
|
||||
ret = _default_ret(name)
|
||||
|
||||
if not groupname:
|
||||
ret.update({'comment': "The group name field must be provided."})
|
||||
return ret
|
||||
|
||||
# Check if address group object currently exists
|
||||
group = __salt__['panos.get_address_group'](groupname, vsys)['result']
|
||||
|
||||
if group and 'entry' in group:
|
||||
group = group['entry']
|
||||
else:
|
||||
group = {}
|
||||
|
||||
# Verify the arguments
|
||||
if members:
|
||||
element = "<static>{0}</static>".format(_build_members(members, True))
|
||||
else:
|
||||
ret.update({'comment': "The group members must be provided."})
|
||||
return ret
|
||||
|
||||
if description:
|
||||
element += "<description>{0}</description>".format(description)
|
||||
|
||||
full_element = "<entry name='{0}'>{1}</entry>".format(groupname, element)
|
||||
|
||||
new_group = xml.to_dict(ET.fromstring(full_element), True)
|
||||
|
||||
if group == new_group:
|
||||
ret.update({
|
||||
'comment': 'Address group object already exists. No changes required.',
|
||||
'result': True
|
||||
})
|
||||
return ret
|
||||
else:
|
||||
xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/address-group/" \
|
||||
"entry[@name=\'{1}\']".format(vsys, groupname)
|
||||
|
||||
result, msg = _edit_config(xpath, full_element)
|
||||
|
||||
if not result:
|
||||
ret.update({
|
||||
'comment': msg
|
||||
})
|
||||
return ret
|
||||
|
||||
if commit is True:
|
||||
ret.update({
|
||||
'changes': {'before': group, 'after': new_group},
|
||||
'commit': __salt__['panos.commit'](),
|
||||
'comment': 'Address group object successfully configured.',
|
||||
'result': True
|
||||
})
|
||||
else:
|
||||
ret.update({
|
||||
'changes': {'before': group, 'after': new_group},
|
||||
'comment': 'Address group object successfully configured.',
|
||||
'result': True
|
||||
})
|
||||
|
||||
return ret
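For illustration, the element that ``address_group_exists()`` builds for the ``my-group`` SLS
example above would look roughly like the snippet below. The one-``<member>``-node-per-entry
layout is an assumption about ``_build_members()``, which is not shown in this hunk; only the
surrounding ``<static>``, ``<description>`` and ``<entry>`` wrappers come from the state code
in this diff.

.. code-block:: python

    # Hypothetical rendering of the group element for the 'my-group' example above.
    members = ['my-address-object', 'my-other-address-group']
    inner = ''.join('<member>{0}</member>'.format(m) for m in members)  # assumed layout

    element = "<static>{0}</static>".format(inner)
    element += "<description>{0}</description>".format('A group that needs to exist')
    full_element = "<entry name='{0}'>{1}</entry>".format('my-group', element)

    print(full_element)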
|
||||
|
||||
|
||||
def clone_config(name, xpath=None, newname=None, commit=False):
|
||||
'''
|
||||
Clone a specific XPATH and set it to a new name.
|
||||
@ -317,13 +567,16 @@ def clone_config(name, xpath=None, newname=None, commit=False):
|
||||
'xpath': xpath,
|
||||
'newname': newname}
|
||||
|
||||
response = __proxy__['panos.call'](query)
|
||||
result, response = _validate_response(__proxy__['panos.call'](query))
|
||||
|
||||
ret.update({
|
||||
'changes': response,
|
||||
'result': True
|
||||
'result': result
|
||||
})
|
||||
|
||||
if not result:
|
||||
return ret
|
||||
|
||||
if commit is True:
|
||||
ret.update({
|
||||
'commit': __salt__['panos.commit'](),
|
||||
@ -386,15 +639,18 @@ def delete_config(name, xpath=None, commit=False):
|
||||
|
||||
query = {'type': 'config',
|
||||
'action': 'delete',
|
||||
'xpath': xpath}
|
||||
'xpath': xpath}
|
||||
|
||||
response = __proxy__['panos.call'](query)
|
||||
result, response = _validate_response(__proxy__['panos.call'](query))
|
||||
|
||||
ret.update({
|
||||
'changes': response,
|
||||
'result': True
|
||||
'result': result
|
||||
})
|
||||
|
||||
if not result:
|
||||
return ret
|
||||
|
||||
if commit is True:
|
||||
ret.update({
|
||||
'commit': __salt__['panos.commit'](),
|
||||
@ -434,7 +690,7 @@ def download_software(name, version=None, synch=False, check=False):
|
||||
if check is True:
|
||||
__salt__['panos.check_software']()
|
||||
|
||||
versions = __salt__['panos.get_software_info']()
|
||||
versions = __salt__['panos.get_software_info']()['result']
|
||||
|
||||
if 'sw-updates' not in versions \
|
||||
or 'versions' not in versions['sw-updates'] \
|
||||
@ -457,7 +713,7 @@ def download_software(name, version=None, synch=False, check=False):
|
||||
'changes': __salt__['panos.download_software_version'](version=version, synch=synch)
|
||||
})
|
||||
|
||||
versions = __salt__['panos.get_software_info']()
|
||||
versions = __salt__['panos.get_software_info']()['result']
|
||||
|
||||
if 'sw-updates' not in versions \
|
||||
or 'versions' not in versions['sw-updates'] \
|
||||
@ -508,6 +764,32 @@ def edit_config(name, xpath=None, value=None, commit=False):
|
||||
'''
|
||||
ret = _default_ret(name)
|
||||
|
||||
# Verify if the current XPATH is equal to the specified value.
|
||||
# If we are equal, no changes required.
|
||||
xpath_split = xpath.split("/")
|
||||
|
||||
# Retrieve the head of the xpath for validation.
|
||||
if len(xpath_split) > 0:
|
||||
head = xpath_split[-1]
|
||||
if "[" in head:
|
||||
head = head.split("[")[0]
|
||||
|
||||
current_element = __salt__['panos.get_xpath'](xpath)['result']
|
||||
|
||||
if head and current_element and head in current_element:
|
||||
current_element = current_element[head]
|
||||
else:
|
||||
current_element = {}
|
||||
|
||||
new_element = xml.to_dict(ET.fromstring(value), True)
|
||||
|
||||
if current_element == new_element:
|
||||
ret.update({
|
||||
'comment': 'XPATH is already equal to the specified value.',
|
||||
'result': True
|
||||
})
|
||||
return ret
|
||||
|
||||
result, msg = _edit_config(xpath, value)
|
||||
|
||||
ret.update({
|
||||
@ -515,15 +797,20 @@ def edit_config(name, xpath=None, value=None, commit=False):
|
||||
'result': result
|
||||
})
|
||||
|
||||
# Ensure we do not commit after a failed action
|
||||
if not result:
|
||||
return ret
|
||||
|
||||
if commit is True:
|
||||
ret.update({
|
||||
'changes': {'before': current_element, 'after': new_element},
|
||||
'commit': __salt__['panos.commit'](),
|
||||
'result': True
|
||||
})
|
||||
else:
|
||||
ret.update({
|
||||
'changes': {'before': current_element, 'after': new_element},
|
||||
'result': True
|
||||
})
|
||||
|
||||
return ret
|
||||
|
||||
@ -585,7 +872,8 @@ def move_config(name, xpath=None, where=None, dst=None, commit=False):
|
||||
result, msg = _move_bottom(xpath)
|
||||
|
||||
ret.update({
|
||||
'result': result
|
||||
'result': result,
|
||||
'comment': msg
|
||||
})
|
||||
|
||||
if not result:
|
||||
@ -660,13 +948,16 @@ def rename_config(name, xpath=None, newname=None, commit=False):
|
||||
'xpath': xpath,
|
||||
'newname': newname}
|
||||
|
||||
response = __proxy__['panos.call'](query)
|
||||
result, response = _validate_response(__proxy__['panos.call'](query))
|
||||
|
||||
ret.update({
|
||||
'changes': response,
|
||||
'result': True
|
||||
'result': result
|
||||
})
|
||||
|
||||
if not result:
|
||||
return ret
|
||||
|
||||
if commit is True:
|
||||
ret.update({
|
||||
'commit': __salt__['panos.commit'](),
|
||||
@ -854,7 +1145,12 @@ def security_rule_exists(name,
|
||||
return ret
|
||||
|
||||
# Check if rule currently exists
|
||||
rule = __salt__['panos.get_security_rule'](rulename, vsys)
|
||||
rule = __salt__['panos.get_security_rule'](rulename, vsys)['result']
|
||||
|
||||
if rule and 'entry' in rule:
|
||||
rule = rule['entry']
|
||||
else:
|
||||
rule = {}
|
||||
|
||||
# Build the rule element
|
||||
element = ""
|
||||
@ -964,29 +1260,32 @@ def security_rule_exists(name,
|
||||
|
||||
full_element = "<entry name='{0}'>{1}</entry>".format(rulename, element)
|
||||
|
||||
create_rule = False
|
||||
new_rule = xml.to_dict(ET.fromstring(full_element), True)
|
||||
|
||||
if 'result' in rule:
|
||||
if rule['result'] == "None":
|
||||
create_rule = True
|
||||
config_change = False
|
||||
|
||||
if create_rule:
|
||||
xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/rulebase/" \
|
||||
"security/rules".format(vsys)
|
||||
|
||||
result, msg = _set_config(xpath, full_element)
|
||||
if not result:
|
||||
ret['changes']['set'] = msg
|
||||
return ret
|
||||
if rule == new_rule:
|
||||
ret.update({
|
||||
'comment': 'Security rule already exists. No changes required.'
|
||||
})
|
||||
else:
|
||||
config_change = True
|
||||
xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/rulebase/" \
|
||||
"security/rules/entry[@name=\'{1}\']".format(vsys, rulename)
|
||||
|
||||
result, msg = _edit_config(xpath, full_element)
|
||||
|
||||
if not result:
|
||||
ret['changes']['edit'] = msg
|
||||
ret.update({
|
||||
'comment': msg
|
||||
})
|
||||
return ret
|
||||
|
||||
ret.update({
|
||||
'changes': {'before': rule, 'after': new_rule},
|
||||
'comment': 'Security rule verified successfully.'
|
||||
})
|
||||
|
||||
if move:
|
||||
movepath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/rulebase/" \
|
||||
"security/rules/entry[@name=\'{1}\']".format(vsys, rulename)
|
||||
@ -1001,19 +1300,244 @@ def security_rule_exists(name,
|
||||
elif move == "bottom":
|
||||
move_result, move_msg = _move_bottom(movepath)
|
||||
|
||||
if config_change:
|
||||
ret.update({
|
||||
'changes': {'before': rule, 'after': new_rule, 'move': move_msg}
|
||||
})
|
||||
else:
|
||||
ret.update({
|
||||
'changes': {'move': move_msg}
|
||||
})
|
||||
|
||||
if not move_result:
|
||||
ret['changes']['move'] = move_msg
|
||||
ret.update({
|
||||
'comment': move_msg
|
||||
})
|
||||
return ret
|
||||
|
||||
if commit is True:
|
||||
ret.update({
|
||||
'commit': __salt__['panos.commit'](),
|
||||
'comment': 'Security rule verified successfully.',
|
||||
'result': True
|
||||
})
|
||||
else:
|
||||
ret.update({
|
||||
'comment': 'Security rule verified successfully.',
|
||||
'result': True
|
||||
})
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def service_exists(name, servicename=None, vsys=1, protocol=None, port=None, description=None, commit=False):
|
||||
'''
|
||||
Ensures that a service object exists in the configured state. If it does not exist or is not configured with the
|
||||
specified attributes, it will be adjusted to match the specified values.
|
||||
|
||||
name: The name of the module function to execute.
|
||||
|
||||
servicename(str): The name of the service object. The name is case-sensitive and can have up to 31 characters,
|
||||
which can be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on
|
||||
Panorama, unique within its device group and any ancestor or descendant device groups.
|
||||
|
||||
vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.
|
||||
|
||||
protocol(str): The protocol that is used by the service object. The only valid options are tcp and udp.
|
||||
|
||||
port(str): The port number that is used by the service object. This can be specified as a single integer or a
|
||||
valid range of ports.
|
||||
|
||||
description(str): A description for the service object (up to 255 characters).
|
||||
|
||||
commit(bool): If true, the firewall will commit the changes; if false, do not commit changes.
|
||||
|
||||
SLS Example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
panos/service/tcp-80:
|
||||
panos.service_exists:
|
||||
- servicename: tcp-80
|
||||
- vsys: 1
|
||||
- protocol: tcp
|
||||
- port: 80
|
||||
- description: Hypertext Transfer Protocol
|
||||
- commit: False
|
||||
|
||||
panos/service/udp-500-550:
|
||||
panos.service_exists:
|
||||
- servicename: udp-500-550
|
||||
- vsys: 3
|
||||
- protocol: udp
|
||||
- port: 500-550
|
||||
- commit: False
|
||||
|
||||
'''
|
||||
ret = _default_ret(name)
|
||||
|
||||
if not servicename:
|
||||
ret.update({'comment': "The service name field must be provided."})
|
||||
return ret
|
||||
|
||||
# Check if service object currently exists
|
||||
service = __salt__['panos.get_service'](servicename, vsys)['result']
|
||||
|
||||
if service and 'entry' in service:
|
||||
service = service['entry']
|
||||
else:
|
||||
service = {}
|
||||
|
||||
# Verify the arguments
|
||||
if not protocol or protocol not in ['tcp', 'udp']:
|
||||
ret.update({'comment': "The protocol must be provided and must be tcp or udp."})
|
||||
return ret
|
||||
if not port:
|
||||
ret.update({'comment': "The port field must be provided."})
|
||||
return ret
|
||||
|
||||
element = "<protocol><{0}><port>{1}</port></{0}></protocol>".format(protocol, port)
|
||||
|
||||
if description:
|
||||
element += "<description>{0}</description>".format(description)
|
||||
|
||||
full_element = "<entry name='{0}'>{1}</entry>".format(servicename, element)
|
||||
|
||||
new_service = xml.to_dict(ET.fromstring(full_element), True)
|
||||
|
||||
if service == new_service:
|
||||
ret.update({
|
||||
'comment': 'Service object already exists. No changes required.',
|
||||
'result': True
|
||||
})
|
||||
return ret
|
||||
else:
|
||||
xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/service/" \
|
||||
"entry[@name=\'{1}\']".format(vsys, servicename)
|
||||
|
||||
result, msg = _edit_config(xpath, full_element)
|
||||
|
||||
if not result:
|
||||
ret.update({
|
||||
'comment': msg
|
||||
})
|
||||
return ret
|
||||
|
||||
if commit is True:
|
||||
ret.update({
|
||||
'changes': {'before': service, 'after': new_service},
|
||||
'commit': __salt__['panos.commit'](),
|
||||
'comment': 'Service object successfully configured.',
|
||||
'result': True
|
||||
})
|
||||
else:
|
||||
ret.update({
|
||||
'changes': {'before': service, 'after': new_service},
|
||||
'comment': 'Service object successfully configured.',
|
||||
'result': True
|
||||
})
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def service_group_exists(name,
|
||||
groupname=None,
|
||||
vsys=1,
|
||||
members=None,
|
||||
description=None,
|
||||
commit=False):
|
||||
'''
|
||||
Ensures that a service group object exists in the configured state. If it does not exist or is not configured with
|
||||
the specified attributes, it will be adjusted to match the specified values.
|
||||
|
||||
This module will enforce group membership. If a group exists and contains members this state does not include,
|
||||
those members will be removed and replaced with the specified members in the state.
|
||||
|
||||
name: The name of the module function to execute.
|
||||
|
||||
groupname(str): The name of the service group object. The name is case-sensitive and can have up to 31 characters,
|
||||
which can be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on
|
||||
Panorama, unique within its device group and any ancestor or descendant device groups.
|
||||
|
||||
vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.
|
||||
|
||||
members(str, list): The members of the service group. These must be valid service objects or service groups on the
|
||||
system that already exist prior to the execution of this state.
|
||||
|
||||
description(str): A description for the service group object (up to 255 characters).
|
||||
|
||||
commit(bool): If true, the firewall will commit the changes; if false, do not commit changes.
|
||||
|
||||
SLS Example:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
panos/service-group/my-group:
|
||||
panos.service_group_exists:
|
||||
- groupname: my-group
|
||||
- vsys: 1
|
||||
- members:
|
||||
- tcp-80
|
||||
- custom-port-group
|
||||
- description: A group that needs to exist
|
||||
- commit: False
|
||||
|
||||
'''
|
||||
ret = _default_ret(name)
|
||||
|
||||
if not groupname:
|
||||
ret.update({'comment': "The group name field must be provided."})
|
||||
return ret
|
||||
|
||||
# Check if service group object currently exists
|
||||
group = __salt__['panos.get_service_group'](groupname, vsys)['result']
|
||||
|
||||
if group and 'entry' in group:
|
||||
group = group['entry']
|
||||
else:
|
||||
group = {}
|
||||
|
||||
# Verify the arguments
|
||||
if members:
|
||||
element = "<members>{0}</members>".format(_build_members(members, True))
|
||||
else:
|
||||
ret.update({'comment': "The group members must be provided."})
|
||||
return ret
|
||||
|
||||
if description:
|
||||
element += "<description>{0}</description>".format(description)
|
||||
|
||||
full_element = "<entry name='{0}'>{1}</entry>".format(groupname, element)
|
||||
|
||||
new_group = xml.to_dict(ET.fromstring(full_element), True)
|
||||
|
||||
if group == new_group:
|
||||
ret.update({
|
||||
'comment': 'Service group object already exists. No changes required.',
|
||||
'result': True
|
||||
})
|
||||
return ret
|
||||
else:
|
||||
xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/service-group/" \
|
||||
"entry[@name=\'{1}\']".format(vsys, groupname)
|
||||
|
||||
result, msg = _edit_config(xpath, full_element)
|
||||
|
||||
if not result:
|
||||
ret.update({
|
||||
'comment': msg
|
||||
})
|
||||
return ret
|
||||
|
||||
if commit is True:
|
||||
ret.update({
|
||||
'changes': {'before': group, 'after': new_group},
|
||||
'commit': __salt__['panos.commit'](),
|
||||
'comment': 'Service group object successfully configured.',
|
||||
'result': True
|
||||
})
|
||||
else:
|
||||
ret.update({
|
||||
'changes': {'before': group, 'after': new_group},
|
||||
'comment': 'Service group object successfully configured.',
|
||||
'result': True
|
||||
})
|
||||
|
||||
@ -1056,7 +1580,6 @@ def set_config(name, xpath=None, value=None, commit=False):
|
||||
'result': result
|
||||
})
|
||||
|
||||
# Ensure we do not commit after a failed action
|
||||
if not result:
|
||||
return ret
|
||||
|
||||
|
@ -91,7 +91,6 @@ import sys
|
||||
|
||||
# Import salt libs
|
||||
from salt.exceptions import CommandExecutionError, SaltInvocationError
|
||||
from salt.modules.aptpkg import _strip_uri
|
||||
from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS
|
||||
import salt.utils.data
|
||||
import salt.utils.files
|
||||
@ -406,7 +405,7 @@ def managed(name, ppa=None, **kwargs):
|
||||
sanitizedkwargs = kwargs
|
||||
|
||||
if os_family == 'debian':
|
||||
repo = _strip_uri(repo)
|
||||
repo = salt.utils.pkg.deb.strip_uri(repo)
|
||||
|
||||
if pre:
|
||||
for kwarg in sanitizedkwargs:
|
||||
|