Merge branch 'develop' into boto_asg
This commit is contained in:
commit 23deab2f4d
@@ -34,6 +34,7 @@ Full list of Salt Cloud modules
     scaleway
     softlayer
     softlayer_hw
+    vagrant
     virtualbox
     vmware
     vultrpy
doc/ref/clouds/all/salt.cloud.clouds.vagrant.rst (new file, 6 lines)

=========================
salt.cloud.clouds.vagrant
=========================

.. automodule:: salt.cloud.clouds.vagrant
    :members:
@@ -267,6 +267,7 @@ state modules
     tuned
     uptime
     user
+    vagrant
     vault
     vbox_guest
     victorops
doc/ref/states/all/salt.states.vagrant.rst (new file, 6 lines)

===================
salt.states.vagrant
===================

.. automodule:: salt.states.vagrant
    :members:
@@ -6,7 +6,7 @@ Introduced in Salt version ``2017.7.0`` it is now possible to run select states
 in parallel. This is accomplished very easily by adding the ``parallel: True``
 option to your state declaration:

-.. code_block:: yaml
+.. code-block:: yaml

     nginx:
       service.running:

@@ -24,7 +24,7 @@ state to finish.

 Given this example:

-.. code_block:: yaml
+.. code-block:: yaml

     sleep 10:
       cmd.run:
@@ -74,16 +74,16 @@ also complete.
 Things to be Careful of
 =======================

-Parallel States does not prevent you from creating parallel conflicts on your
+Parallel States do not prevent you from creating parallel conflicts on your
 system. This means that if you start multiple package installs using Salt then
 the package manager will block or fail. If you attempt to manage the same file
 with multiple states in parallel then the result can produce an unexpected
 file.

 Make sure that the states you choose to run in parallel do not conflict, or
-else, like in and parallel programming environment, the outcome may not be
+else, like in any parallel programming environment, the outcome may not be
 what you expect. Doing things like just making all states run in parallel
-will almost certinly result in unexpected behavior.
+will almost certainly result in unexpected behavior.

 With that said, running states in parallel should be safe the vast majority
 of the time and the most likely culprit for unexpected behavior is running
@@ -540,6 +540,17 @@ machines which are already installed, but not Salted. For more information about
 this driver and for configuration examples, please see the
 :ref:`Getting Started with Saltify <getting-started-with-saltify>` documentation.

+.. _config_vagrant:
+
+Vagrant
+-------
+
+The Vagrant driver is a new, experimental driver for controlling a VagrantBox
+virtual machine, and installing Salt on it. The target host machine must be a
+working salt minion, which is controlled via the salt master using salt-api.
+For more information, see
+:ref:`Getting Started With Vagrant <getting-started-with-vagrant>`.
+

 Extending Profiles and Cloud Providers Configuration
 ====================================================
@@ -38,26 +38,30 @@ These are features that are available for almost every cloud host.

 .. container:: scrollable

-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-| |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Softlayer|Softlayer|Aliyun|
-| |(Legacy)| |Ocean | | | | | | |(Legacy) | | |Hardware | |
-+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=========+=========+======+
-|Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|Full Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|Selective Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|List Sizes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|List Images |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|List Locations |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|create |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|destroy |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes | |Yes |Yes |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+| |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Vagrant|Softlayer|Softlayer|Aliyun|
+| |(Legacy)| |Ocean | | | | | | |(Legacy) | | | |Hardware | |
++=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=======+=========+=========+======+
+|Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+|Full Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+|Selective Query |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+|List Sizes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+|List Images |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+|List Locations |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[2] |[2] |Yes |Yes |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+|create |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |[1] |Yes |Yes |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+
+|destroy |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |[1] |[1] |Yes |Yes |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+-------+---------+---------+------+

+[1] Yes, if salt-api is enabled.
+
+[2] Always returns `{}`.
+
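The query rows in this table correspond to ``salt-cloud`` query switches; a minimal sketch (the provider name is hypothetical):

.. code-block:: bash

    salt-cloud -Q                                # Query
    salt-cloud -F                                # Full Query
    salt-cloud -S                                # Selective Query
    salt-cloud --list-sizes my-vagrant-provider  # returns {} for Vagrant, per [2]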
 Actions
 =======
@@ -70,46 +74,46 @@ instance name to be passed in. For example:

 .. container:: scrollable

-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|Actions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Softlayer|Softlayer|Aliyun|
-| |(Legacy)| |Ocean | | | | | | |(Legacy) | | |Hardware | |
-+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=========+=========+======+
-|attach_volume | | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|create_attach_volumes |Yes | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|del_tags |Yes | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|delvol_on_destroy | | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|detach_volume | | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|disable_term_protect |Yes | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|enable_term_protect |Yes | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|get_tags |Yes | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|keepvol_on_destroy | | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|list_keypairs | | |Yes | | | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|rename |Yes | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|set_tags |Yes | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|show_delvol_on_destroy | | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|show_instance | | |Yes |Yes| | |Yes | |Yes | | |Yes |Yes |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|show_term_protect | | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|start |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|stop |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|take_action | | | | | |Yes | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|Actions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun|
+| |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | |
++=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+
+|attach_volume | | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|create_attach_volumes |Yes | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|del_tags |Yes | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|delvol_on_destroy | | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|detach_volume | | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|disable_term_protect |Yes | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|enable_term_protect |Yes | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|get_tags |Yes | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|keepvol_on_destroy | | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|list_keypairs | | |Yes | | | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|rename |Yes | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|set_tags |Yes | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|show_delvol_on_destroy | | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|show_instance | | |Yes |Yes| | |Yes | |Yes | | |Yes |Yes |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|show_term_protect | | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|start |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|stop |Yes | | |Yes| |Yes |Yes | |Yes | | | | |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|take_action | | | | | |Yes | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+

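An action is invoked with ``-a`` and an instance name; a minimal sketch (the instance name is hypothetical):

.. code-block:: bash

    salt-cloud -a show_instance my-vm   # one of the actions listed above
    salt-cloud -a reboot my-vm          # works where the table shows Yes (or [1])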
 Functions
 =========
@@ -122,81 +126,83 @@ require the name of the provider to be passed in. For example:

 .. container:: scrollable

-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|Functions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Softlayer|Softlayer|Aliyun|
-| |(Legacy)| |Ocean | | | | | | |(Legacy) | | |Hardware | |
-+=======================+========+==========+=======+===+======+======+======+=========+=========+=========+=======+=========+=========+======+
-|block_device_mappings |Yes | | | | | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|create_keypair | | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|create_volume | | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|delete_key | | | | | |Yes | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|delete_keypair | | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|delete_volume | | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|get_image | | |Yes | | |Yes | | |Yes | | | | |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|get_ip | |Yes | | | | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|get_key | |Yes | | | | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|get_keyid | | |Yes | | | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|get_keypair | |Yes | | | | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|get_networkid | |Yes | | | | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|get_node | | | | | |Yes | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|get_password | |Yes | | | | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|get_size | | |Yes | | |Yes | | | | | | | |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|get_spot_config | | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|get_subnetid | | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|iam_profile |Yes | | |Yes| | | | | | | | | |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|import_key | | | | | |Yes | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|key_list | | | | | |Yes | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|keyname |Yes | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|list_availability_zones| | | |Yes| | | | | | | | | |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|list_custom_images | | | | | | | | | | | |Yes | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|list_keys | | | | | |Yes | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|list_nodes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|list_nodes_full |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|list_nodes_select |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|list_vlans | | | | | | | | | | | |Yes |Yes | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|rackconnect | | | | | | | |Yes | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|reboot | | | |Yes| |Yes | | | | | | | |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|reformat_node | | | | | |Yes | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|securitygroup |Yes | | |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|securitygroupid | | | |Yes| | | | | | | | | |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|show_image | | | |Yes| | | | |Yes | | | | |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|show_key | | | | | |Yes | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|show_keypair | | |Yes |Yes| | | | | | | | | | |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
-|show_volume | | | |Yes| | | | | | | | | |Yes |
-+-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+-------+---------+---------+------+
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|Functions |AWS |CloudStack|Digital|EC2|GoGrid|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify&|Softlayer|Softlayer|Aliyun|
+| |(Legacy)| |Ocean | | | | | | |(Legacy) | Vagrant| |Hardware | |
++=======================+========+==========+=======+===+======+======+======+=========+=========+=========+========+=========+=========+======+
+|block_device_mappings |Yes | | | | | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|create_keypair | | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|create_volume | | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|delete_key | | | | | |Yes | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|delete_keypair | | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|delete_volume | | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|get_image | | |Yes | | |Yes | | |Yes | | | | |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|get_ip | |Yes | | | | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|get_key | |Yes | | | | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|get_keyid | | |Yes | | | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|get_keypair | |Yes | | | | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|get_networkid | |Yes | | | | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|get_node | | | | | |Yes | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|get_password | |Yes | | | | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|get_size | | |Yes | | |Yes | | | | | | | |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|get_spot_config | | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|get_subnetid | | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|iam_profile |Yes | | |Yes| | | | | | | | | |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|import_key | | | | | |Yes | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|key_list | | | | | |Yes | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|keyname |Yes | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|list_availability_zones| | | |Yes| | | | | | | | | |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|list_custom_images | | | | | | | | | | | |Yes | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|list_keys | | | | | |Yes | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|list_nodes |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|list_nodes_full |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|list_nodes_select |Yes |Yes |Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|list_vlans | | | | | | | | | | | |Yes |Yes | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|rackconnect | | | | | | | |Yes | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|reboot | | | |Yes| |Yes | | | | |[1] | | |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|reformat_node | | | | | |Yes | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|securitygroup |Yes | | |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|securitygroupid | | | |Yes| | | | | | | | | |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|show_image | | | |Yes| | | | |Yes | | | | |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|show_key | | | | | |Yes | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|show_keypair | | |Yes |Yes| | | | | | | | | | |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+
+|show_volume | | | |Yes| | | | | | | | | |Yes |
++-----------------------+--------+----------+-------+---+------+------+------+---------+---------+---------+--------+---------+---------+------+

+[1] Yes, if salt-api is enabled.
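A function is invoked with ``-f`` and a provider name rather than an instance name; a minimal sketch (the provider name is hypothetical):

.. code-block:: bash

    salt-cloud -f list_nodes my-vagrant-provider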
@@ -129,6 +129,7 @@ Cloud Provider Specifics
     Getting Started With Scaleway <scaleway>
     Getting Started With Saltify <saltify>
     Getting Started With SoftLayer <softlayer>
+    Getting Started With Vagrant <vagrant>
     Getting Started With Vexxhost <vexxhost>
     Getting Started With Virtualbox <virtualbox>
     Getting Started With VMware <vmware>
@@ -183,6 +183,8 @@ simple installation.
     ssh_username: vagrant   # a user name which has passwordless sudo
     password: vagrant       # on your target machine
     provider: my_saltify_provider
+    shutdown_on_destroy: true   # halt the target on "salt-cloud -d" command
+


 .. code-block:: yaml
doc/topics/cloud/vagrant.rst (new file, 268 lines)

.. _getting-started-with-vagrant:

============================
Getting Started With Vagrant
============================

The Vagrant driver is a new, experimental driver for spinning up a VagrantBox
virtual machine, and installing Salt on it.

Dependencies
============
The Vagrant driver itself has no external dependencies.

The machine which will host the VagrantBox must be an already existing minion
of the cloud server's Salt master.
It must have Vagrant_ installed, and a Vagrant-compatible virtual machine engine,
such as VirtualBox_.
(Note: The Vagrant driver does not depend on the salt-cloud VirtualBox driver in any way.)

.. _Vagrant: https://www.vagrantup.com/
.. _VirtualBox: https://www.virtualbox.org/

\[Caution: The version of Vagrant packaged for ``apt install`` in Ubuntu 16.04 will not connect a bridged
network adapter correctly. Use a version downloaded directly from the web site.\]

Include the Vagrant guest editions plugin:
``vagrant plugin install vagrant-vbguest``.
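A quick way to confirm the host's toolchain before pointing salt-cloud at it (run on the host minion):

.. code-block:: bash

    vagrant --version     # should be a build from vagrantup.com, per the caution above
    vagrant plugin list   # expect vagrant-vbguest in the output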
Configuration
=============

Configuration of the client virtual machine (using VirtualBox, VMware, etc)
will be done by Vagrant as specified in the Vagrantfile on the host machine.

Salt-cloud will push the commands to install and provision a salt minion on
the virtual machine, so you need not (perhaps **should** not) provision salt
in your Vagrantfile, in most cases.

If, however, your cloud master cannot open an SSH connection to the child VM,
you may **need** to let Vagrant provision the VM with Salt, and use some other
method (such as passing a pillar dictionary to the VM) to pass the master's
IP address to the VM. The VM can then attempt to reach the salt master in the
usual way for non-cloud minions. Specify the profile configuration argument
as ``deploy: False`` to prevent the cloud master from trying.

.. code-block:: yaml

    # Note: This example is for /etc/salt/cloud.providers file or any file in
    # the /etc/salt/cloud.providers.d/ directory.

    my-vagrant-config:
      minion:
        master: 111.222.333.444
      provider: vagrant


Because the Vagrant driver needs a place to store the mapping between the
node name you use for Salt commands and the Vagrantfile which controls the VM,
you must configure your salt minion as a Salt sdb server.
(See `host provisioning example`_ below.)

Profiles
========

Vagrant requires a profile to be configured for each machine that needs Salt
installed. The initial profile can be set up at ``/etc/salt/cloud.profiles``
or in the ``/etc/salt/cloud.profiles.d/`` directory.

Each profile requires a ``vagrantfile`` parameter. If the Vagrantfile has
definitions for `multiple machines`_ then you need a ``machine`` parameter.

.. _`multiple machines`: https://www.vagrantup.com/docs/multi-machine/

Salt-cloud uses SSH to provision the minion. There must be a routable path
from the cloud master to the VM. Usually, you will want to use
a bridged network adapter for SSH. The address may not be known until
DHCP assigns it. If ``ssh_host`` is not defined, and ``target_network``
is defined, the driver will attempt to read the address from the output
of an ``ifconfig`` command. Lacking either setting,
the driver will try to use the value Vagrant returns as its ``ssh_host``,
which will work only if the cloud master is running somewhere on the same host.

The ``target_network`` setting should be used
to identify the IP network your bridged adapter is expected to appear on.
Use CIDR notation, like ``target_network: '2001:DB8::/32'``
or ``target_network: '192.0.2.0/24'``.

Profile configuration example:

.. code-block:: yaml

    # /etc/salt/cloud.profiles.d/vagrant.conf

    vagrant-machine:
      host: my-vhost  # the Salt id of the virtual machine's host computer.
      provider: my-vagrant-config
      cwd: /srv/machines  # the path to the directory holding your Vagrantfile.
      vagrant_runas: my-username  # the username who defined the Vagrantbox on the host
      # vagrant_up_timeout: 300  # (seconds) timeout for cmd.run of the "vagrant up" command
      # vagrant_provider: ''  # option for "vagrant up" like: "--provider vmware_fusion"
      # ssh_host: None  # "None" means try to find the routable IP address from "ifconfig"
      # target_network: None  # Expected CIDR address of your bridged network
      # force_minion_config: false  # Set "true" to re-purpose an existing VM

The machine can now be created and configured with the following command:

.. code-block:: bash

    salt-cloud -p vagrant-machine my-id

This will create the machine specified by the cloud profile
``vagrant-machine``, and will give the machine the minion id of
``my-id``. If the cloud master is also the salt-master, its Salt
key will automatically be accepted on the master.

Once a salt-minion has been successfully installed on the instance, connectivity
to it can be verified with Salt:

.. code-block:: bash

    salt my-id test.ping

.. _host provisioning example:

Provisioning a Vagrant cloud host (example)
===========================================

In order to query or control the minions it has created, each host
minion needs to track the Salt node names associated with
any guest virtual machines on it.
It does that using a Salt sdb database.

The Salt sdb is not configured by default. The following example shows a
simple installation.

This example assumes:

- you are on a large network using the 10.x.x.x IP address space
- your Salt master's Salt id is "bevymaster"
- it will also be your salt-cloud controller
- it is at hardware address 10.124.30.7
- it is running a recent Debian family Linux (Raspbian)
- your workstation is a Salt minion of bevymaster
- your workstation's minion id is "my_laptop"
- VirtualBox has been installed on "my_laptop" (apt install is okay)
- Vagrant was installed from vagrantup.com (not the 16.04 Ubuntu apt)
- "my_laptop" has done "vagrant plugin install vagrant-vbguest"
- the VM you want to start is on "my_laptop" at "/home/my_username/Vagrantfile"

.. code-block:: yaml

    # file /etc/salt/minion.d/vagrant_sdb.conf on host computer "my_laptop"
    #  -- this sdb database is required by the Vagrant module --
    vagrant_sdb_data:  # The sdb database must have this name.
      driver: sqlite3  # Let's use SQLite to store the data ...
      database: /var/cache/salt/vagrant.sqlite  # ... in this file ...
      table: sdb  # ... using this table name.
      create_table: True  # if not present

Remember to restart your minion after changing its configuration files...

``sudo systemctl restart salt-minion``

.. code-block:: ruby

    # -*- mode: ruby -*-
    # file /home/my_username/Vagrantfile on host computer "my_laptop"
    require 'socket'  # used by get_good_ifc below

    BEVY = "bevy1"
    DOMAIN = BEVY + ".test"  # .test is an ICANN reserved non-public TLD

    # must supply a list of names to avoid Vagrant asking for interactive input
    def get_good_ifc()  # try to find a working Ubuntu network adapter name
      addr_infos = Socket.getifaddrs
      addr_infos.each do |info|
        a = info.addr
        if a and a.ip? and not a.ip_address.start_with?("127.")
          return info.name
        end
      end
      return "eth0"  # fall back to an old reliable name
    end

    Vagrant.configure(2) do |config|
      config.ssh.forward_agent = true  # so you can use git ssh://...

      # add a bridged network interface. (try to detect name, then guess MacOS names, too)
      interface_guesses = [get_good_ifc(), 'en0: Ethernet', 'en1: Wi-Fi (AirPort)']
      config.vm.network "public_network", bridge: interface_guesses
      if ARGV[0] == "up"
        puts "Trying bridge network using interfaces: #{interface_guesses}"
      end
      config.vm.provision "shell", inline: "ip address", run: "always"  # make user feel good

      # . . . . . . . . . . . . Define machine QUAIL1 . . . . . . . . . . . . . .
      config.vm.define "quail1", primary: true do |quail_config|
        quail_config.vm.box = "boxesio/xenial64-standard"  # a public VMware & Virtualbox box
        quail_config.vm.hostname = "quail1." + DOMAIN  # supply a name in our bevy
        quail_config.vm.provider "virtualbox" do |v|
          v.memory = 1024  # limit memory for the virtual box
          v.cpus = 1
          v.linked_clone = true  # make a soft copy of the base Vagrant box
          v.customize ["modifyvm", :id, "--natnet1", "192.168.128.0/24"]  # do not use 10.x network for NAT
        end
      end
    end

.. code-block:: yaml

    # file /etc/salt/cloud.profiles.d/my_vagrant_profiles.conf on bevymaster
    q1:
      host: my_laptop  # the Salt id of your virtual machine host
      machine: quail1  # a machine name in the Vagrantfile (if not primary)
      vagrant_runas: my_username  # owner of Vagrant box files on "my_laptop"
      cwd: '/home/my_username'  # the path (on "my_laptop") of the Vagrantfile
      provider: my_vagrant_provider  # name of entry in provider.conf file
      target_network: '10.0.0.0/8'  # VM external address will be somewhere here

.. code-block:: yaml

    # file /etc/salt/cloud.providers.d/vagrant_provider.conf on bevymaster
    my_vagrant_provider:
      driver: vagrant
      minion:
        master: 10.124.30.7  # the hard address of the master


Create and use your new Salt minion
-----------------------------------

- Typing on the Salt master computer ``bevymaster``, tell it to create a new minion named ``v1`` using profile ``q1``...

.. code-block:: bash

    sudo salt-cloud -p q1 v1
    sudo salt v1 network.ip_addrs
    [ you get a list of IP addresses, including the bridged one ]

- Logged in to your laptop (or some other computer known to GitHub)...

\[NOTE:\] if you are using MacOS, you need to type ``ssh-add -K`` after each boot,
unless you use one of the methods in `this gist`_.

.. _this gist: https://github.com/jirsbek/SSH-keys-in-macOS-Sierra-keychain

.. code-block:: bash

    ssh -A vagrant@< the bridged network address >
    # [ or, if you are at /home/my_username/ on my_laptop ]
    vagrant ssh quail1

- Then typing on your new node "v1" (a.k.a. quail1.bevy1.test)...

.. code-block:: bash

    password: vagrant
    # [ stuff types out ... ]

    ls -al /vagrant
    # [ should be shared /home/my_username from my_laptop ]

    # you can access other network facilities using the ssh authorization
    # as recorded in your ~/.ssh/ directory on my_laptop ...

    sudo apt update
    sudo apt install git
    git clone ssh://git@github.com/yourID/your_project
    # etc...
@@ -14,23 +14,33 @@ CVE-2017-14695 Directory traversal vulnerability in minion id validation in Salt

 CVE-2017-14696 Remote Denial of Service with a specially crafted authentication request. Credit for discovering the security flaw goes to: Julian Brost (julian@0x4a42.net)

-Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):
+Known Issues
+============

+On 2017.7.2 when using salt-api and cherrypy version 5.6.0, issue `#43581`_ will occur when starting the salt-api service. We have patched the cherry-py packages for python-cherrypy-5.6.0-2 from repo.saltstack.com. If you are using python-cherrypy-5.6.0-1 please ensure to run `yum install python-cherrypy` to install the new patched version.
+
-*Generated at: 2017-09-26T21:06:19Z*
+Extended changelog courtesy of Todd Stansell (https://github.com/tjstansell/salt-changelogs):

-Statistics:
+*Generated at: 2017-10-02T21:10:14Z*

-- Total Merges: **326**
-- Total Issue references: **133**
-- Total PR references: **389**
+Statistics
+==========

-Changes:
+- Total Merges: **328**
+- Total Issue references: **134**
+- Total PR references: **391**

+Changes
+=======

+- **PR** `#43868`_: (*rallytime*) Back-port `#43847`_ to 2017.7.2
+  * Fix to module.run
+
+- **PR** `#43756`_: (*gtmanfred*) split build and install for pkg osx
+  @ *2017-09-26T20:51:28Z*
+
+  * 88414d5 Merge pull request `#43756`_ from gtmanfred/2017.7.2
+  * f7df41f split build and install for pkg osx
+
 - **PR** `#43585`_: (*rallytime*) Back-port `#43330`_ to 2017.7.2
 @ *2017-09-19T17:33:34Z*

@@ -3110,6 +3120,12 @@ Changes:
 .. _`#480`: https://github.com/saltstack/salt/issues/480
 .. _`#495`: https://github.com/saltstack/salt/issues/495
+.. _`#43581`: https://github.com/saltstack/salt/issues/43581
+.. _`#43756`: https://github.com/saltstack/salt/pull/43756
+.. _`#43847`: https://github.com/saltstack/salt/pull/43847
+.. _`#43868`: https://github.com/saltstack/salt/pull/43868
 .. _`#475`: https://github.com/saltstack/salt/issues/475
 .. _`#480`: https://github.com/saltstack/salt/issues/480
 .. _`#495`: https://github.com/saltstack/salt/issues/495
 .. _`bp-37424`: https://github.com/saltstack/salt/pull/37424
 .. _`bp-39366`: https://github.com/saltstack/salt/pull/39366
 .. _`bp-41543`: https://github.com/saltstack/salt/pull/41543
@@ -138,6 +138,56 @@ file. For example:

 These commands will run in sequence **before** the bootstrap script is executed.

+New salt-cloud Grains
+=====================
+
+When salt cloud creates a new minion, it will now add grain information
+to the minion configuration file, identifying the resources originally used
+to create it.
+
+The generated grain information will appear similar to:
+
+.. code-block:: yaml
+
+    grains:
+      salt-cloud:
+        driver: ec2
+        provider: my_ec2:ec2
+        profile: ec2-web
+
+The generation of salt-cloud grains can be suppressed by the
+option ``enable_cloud_grains: 'False'`` in the cloud configuration file.
+
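Once such a minion is up, the new grains can be read back to verify them; a minimal sketch (the minion id is hypothetical):

.. code-block:: bash

    salt my-id grains.get salt-cloud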
+Upgraded Saltify Driver
+=======================
+
+The salt-cloud Saltify driver is used to provision machines which
+are not controlled by a dedicated cloud supervisor (such as typical hardware
+machines) by pushing a salt-bootstrap command to them and accepting them on
+the salt master. Creation of a node has been its only function and no other
+salt-cloud commands were implemented.
+
+With this upgrade, it can use the salt-api to provide advanced control,
+such as rebooting a machine, querying it along with conventional cloud minions,
+and, ultimately, disconnecting it from its master.
+
+After disconnection from ("destroying" on) one master, a machine can be
+re-purposed by connecting to ("creating" on) a subsequent master.
+
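Assuming salt-api is configured, the newly supported operations look like the usual salt-cloud calls; a sketch with a hypothetical node name:

.. code-block:: bash

    salt-cloud -a reboot my-saltified-node   # reboot via the host's salt-api
    salt-cloud -Q                            # query it along with cloud minions
    salt-cloud -d my-saltified-node          # disconnect ("destroy") from this master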
+New Vagrant Driver
+==================
+
+The salt-cloud Vagrant driver brings virtual machines running in a limited
+environment, such as a programmer's workstation, under salt-cloud control.
+This can be useful for experimentation, instruction, or testing salt configurations.
+
+Using salt-api on the master, and a salt-minion running on the host computer,
+the Vagrant driver can create (``vagrant up``), restart (``vagrant reload``),
+and destroy (``vagrant destroy``) VMs, as controlled by salt-cloud profiles
+which designate a ``Vagrantfile`` on the host machine.
+
+The master can be a very limited machine, such as a Raspberry Pi, or a small
+VagrantBox VM.
+
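The life cycle maps onto the ordinary salt-cloud commands; a sketch with hypothetical profile and minion names:

.. code-block:: bash

    salt-cloud -p vagrant-machine my-id   # "vagrant up" on the host minion
    salt-cloud -a reboot my-id            # "vagrant reload"
    salt-cloud -d my-id                   # "vagrant destroy"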
 New pillar/master_tops module called saltclass
 ----------------------------------------------
@@ -27,7 +27,7 @@ Installing Dependencies
 =======================

 Both pygit2_ and GitPython_ are supported Python interfaces to git. If
-compatible versions of both are installed, pygit2_ will preferred. In these
+compatible versions of both are installed, pygit2_ will be preferred. In these
 cases, GitPython_ can be forced using the :conf_master:`gitfs_provider`
 parameter in the master config file.
@@ -88,7 +88,8 @@ sudo $PKGRESOURCES/build_env.sh $PYVER
 echo -n -e "\033]0;Build: Install Salt\007"
 sudo rm -rf $SRCDIR/build
 sudo rm -rf $SRCDIR/dist
-sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s" install
+sudo $PYTHON $SRCDIR/setup.py build -e "$PYTHON -E -s"
+sudo $PYTHON $SRCDIR/setup.py install

 ############################################################################
 # Build Package
@@ -1417,7 +1417,7 @@ class Cloud(object):
         if name in vms:
             prov = vms[name]['provider']
             driv = vms[name]['driver']
-            msg = six.u('{0} already exists under {1}:{2}').format(
+            msg = u'{0} already exists under {1}:{2}'.format(
                 name, prov, driv
             )
             log.error(msg)
@@ -2080,6 +2080,7 @@ def attach_disk(name=None, kwargs=None, call=None):
     disk_name = kwargs['disk_name']
     mode = kwargs.get('mode', 'READ_WRITE').upper()
     boot = kwargs.get('boot', False)
+    auto_delete = kwargs.get('auto_delete', False)
     if boot and boot.lower() in ['true', 'yes', 'enabled']:
         boot = True
     else:

@@ -2109,7 +2110,8 @@ def attach_disk(name=None, kwargs=None, call=None):
         transport=__opts__['transport']
     )

-    result = conn.attach_volume(node, disk, ex_mode=mode, ex_boot=boot)
+    result = conn.attach_volume(node, disk, ex_mode=mode, ex_boot=boot,
+                                ex_auto_delete=auto_delete)

    __utils__['cloud.fire_event'](
        'event',

@@ -2389,6 +2391,8 @@ def create_attach_volumes(name, kwargs, call=None):
         'type': The disk type, either pd-standard or pd-ssd. Optional, defaults to pd-standard.
         'image': An image to use for this new disk. Optional.
         'snapshot': A snapshot to use for this new disk. Optional.
+        'auto_delete': An option(bool) to keep or remove the disk upon
+            instance deletion. Optional, defaults to False.

     Volumes are attached in the order in which they are given, thus on a new
     node the first volume will be /dev/sdb, the second /dev/sdc, and so on.

@@ -2416,7 +2420,8 @@ def create_attach_volumes(name, kwargs, call=None):
             'size': volume['size'],
             'type': volume.get('type', 'pd-standard'),
             'image': volume.get('image', None),
-            'snapshot': volume.get('snapshot', None)
+            'snapshot': volume.get('snapshot', None),
+            'auto_delete': volume.get('auto_delete', False)
         }

         create_disk(volume_dict, 'function')
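Since ``attach_disk`` is exposed as a salt-cloud action, the new ``auto_delete`` flag should be passable from the command line as well; a hedged sketch (instance and disk names hypothetical):

.. code-block:: bash

    salt-cloud -a attach_disk my-instance disk_name=my-disk mode=READ_WRITE auto_delete=True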
338
salt/cloud/clouds/vagrant.py
Normal file
338
salt/cloud/clouds/vagrant.py
Normal file
@ -0,0 +1,338 @@
# -*- coding: utf-8 -*-
'''
Vagrant Cloud Driver
====================

The Vagrant cloud is designed to "vagrant up" a virtual machine as a
Salt minion.

Use of this module requires some configuration in cloud profile and provider
files as described in the
:ref:`Getting Started with Vagrant <getting-started-with-vagrant>` documentation.

.. versionadded:: Oxygen


'''

# Import python libs
from __future__ import absolute_import
import logging
import os
import tempfile

# Import salt libs
import salt.utils
import salt.config as config
import salt.client
import salt.ext.six as six
if six.PY3:
    import ipaddress
else:
    import salt.ext.ipaddress as ipaddress
from salt.exceptions import SaltCloudException, SaltCloudSystemExit

# Get logging started
log = logging.getLogger(__name__)


def __virtual__():
    '''
    Needs no special configuration
    '''
    return True


def avail_locations(call=None):
    r'''
    This function returns a list of locations available.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-cloud-provider

    # \[ vagrant will always return an empty dictionary \]
    '''
    return {}


def avail_images(call=None):
    '''This function returns a list of images available for this cloud provider.
    vagrant will return a list of profiles.
    salt-cloud --list-images my-cloud-provider
    '''
    vm_ = get_configured_provider()
    return {'Profiles': [profile for profile in vm_['profiles']]}


def avail_sizes(call=None):
    r'''
    This function returns a list of sizes available for this cloud provider.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-cloud-provider

    # \[ vagrant always returns an empty dictionary \]
    '''
    return {}


def list_nodes(call=None):
    '''
    List the nodes which have salt-cloud:driver:vagrant grains.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
    '''
    nodes = _list_nodes(call)
    return _build_required_items(nodes)


def _build_required_items(nodes):
    ret = {}
    for name, grains in nodes.items():
        if grains:
            private_ips = []
            public_ips = []
            ips = grains['ipv4'] + grains['ipv6']
            for adrs in ips:
                ip_ = ipaddress.ip_address(adrs)
                if not ip_.is_loopback:
                    if ip_.is_private:
                        private_ips.append(adrs)
                    else:
                        public_ips.append(adrs)

            ret[name] = {
                'id': grains['id'],
                'image': grains['salt-cloud']['profile'],
                'private_ips': private_ips,
                'public_ips': public_ips,
                'size': '',
                'state': 'running'
            }

    return ret
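# Illustrative sketch (not part of the driver): how the ipaddress calls
# above classify addresses. The sample addresses are hypothetical.
#
#     import ipaddress
#
#     for adrs in [u'127.0.0.1', u'10.1.2.3', u'203.0.113.5']:
#         ip_ = ipaddress.ip_address(adrs)
#         if not ip_.is_loopback:
#             print(adrs, 'private' if ip_.is_private else 'public')
#     # prints: 10.1.2.3 private / 203.0.113.5 public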


def list_nodes_full(call=None):
    '''
    List the nodes, ask all 'vagrant' minions, return dict of grains (enhanced).

    CLI Example:

    .. code-block:: bash

        salt-call -F
    '''
    ret = _list_nodes(call)

    for key, grains in ret.items():  # clean up some hyperverbose grains -- everything is too much
        try:
            del grains['cpu_flags'], grains['disks'], grains['pythonpath'], grains['dns'], grains['gpus']
        except KeyError:
            pass  # ignore absence of things we are eliminating
        except TypeError:
            del ret[key]  # eliminate all reference to unexpected (None) values.

    reqs = _build_required_items(ret)
    for name in ret:
        ret[name].update(reqs[name])
    return ret


def _list_nodes(call=None):
    '''
    List the nodes, ask all 'vagrant' minions, return dict of grains.
    '''
    local = salt.client.LocalClient()
    ret = local.cmd('salt-cloud:driver:vagrant', 'grains.items', '', tgt_type='grain')
    return ret


def list_nodes_select(call=None):
    '''
    Return a list of the minions that have salt-cloud grains, with
    select fields.
    '''
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full('function'), __opts__['query.selection'], call,
    )


def show_instance(name, call=None):
    '''
    List a single node, return dict of grains.
    '''
    local = salt.client.LocalClient()
    ret = local.cmd(name, 'grains.items', '')
    reqs = _build_required_items(ret)
    ret[name].update(reqs[name])
    return ret


def _get_my_info(name):
    local = salt.client.LocalClient()
    return local.cmd(name, 'grains.get', ['salt-cloud'])


def create(vm_):
    '''
    Provision a single machine

    CLI Example:

    .. code-block:: bash

        salt-cloud -p my_profile new_node_1

    '''
    name = vm_['name']
    machine = config.get_cloud_config_value(
        'machine', vm_, __opts__, default='')
    vm_['machine'] = machine
    host = config.get_cloud_config_value(
        'host', vm_, __opts__, default=NotImplemented)
    vm_['cwd'] = config.get_cloud_config_value(
        'cwd', vm_, __opts__, default='/')
    vm_['runas'] = config.get_cloud_config_value(
        'vagrant_runas', vm_, __opts__, default=os.getenv('SUDO_USER'))
    vm_['timeout'] = config.get_cloud_config_value(
        'vagrant_up_timeout', vm_, __opts__, default=300)
    vm_['vagrant_provider'] = config.get_cloud_config_value(
        'vagrant_provider', vm_, __opts__, default='')
    vm_['grains'] = {'salt-cloud:vagrant': {'host': host, 'machine': machine}}

    log.info('sending \'vagrant.init %s machine=%s\' command to %s', name, machine, host)

    local = salt.client.LocalClient()
    ret = local.cmd(host, 'vagrant.init', [name], kwarg={'vm': vm_, 'start': True})
    log.info('response ==> %s', ret[host])

    network_mask = config.get_cloud_config_value(
        'network_mask', vm_, __opts__, default='')
    if 'ssh_host' not in vm_:
        ret = local.cmd(host,
                        'vagrant.get_ssh_config',
                        [name],
                        kwarg={'network_mask': network_mask,
                               'get_private_key': True})[host]
    with tempfile.NamedTemporaryFile() as pks:
        if 'private_key' not in vm_ and ret.get('private_key', False):
            pks.write(ret['private_key'])
            pks.flush()
            log.debug('wrote private key to %s', pks.name)
            vm_['key_filename'] = pks.name
        if 'ssh_host' not in vm_:
            vm_.setdefault('ssh_username', ret['ssh_username'])
            if ret.get('ip_address'):
                vm_['ssh_host'] = ret['ip_address']
            else:  # if probe failed or not used, use Vagrant's reported ssh info
                vm_['ssh_host'] = ret['ssh_host']
                vm_.setdefault('ssh_port', ret['ssh_port'])

        log.info('Provisioning machine %s as node %s using ssh %s',
                 machine, name, vm_['ssh_host'])
        ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    return ret
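# Illustrative sketch (not part of the driver): the master-side call that
# create() issues, assuming a host minion id of 'my_vagrant_host' and a
# hypothetical new node name 'new_node_1'.
#
#     import salt.client
#
#     local = salt.client.LocalClient()
#     ret = local.cmd('my_vagrant_host', 'vagrant.init', ['new_node_1'],
#                     kwarg={'vm': {'machine': '', 'cwd': '/projects/my_project'},
#                            'start': True})
#     print(ret)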


def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    ret = config.is_provider_configured(
        __opts__,
        __active_provider_name__ or 'vagrant',
        ''
    )
    return ret


# noinspection PyTypeChecker
def destroy(name, call=None):
    '''
    Destroy a node.

    CLI Example:

    .. code-block:: bash

        salt-cloud --destroy mymachine
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a, or --action.'
        )

    opts = __opts__

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=opts['sock_dir'],
        transport=opts['transport']
    )
    my_info = _get_my_info(name)
    profile_name = my_info[name]['profile']
    profile = opts['profiles'][profile_name]
    host = profile['host']
    local = salt.client.LocalClient()
    ret = local.cmd(host, 'vagrant.destroy', [name])

    if ret[host]:
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=opts['sock_dir'],
            transport=opts['transport']
        )

        if opts.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name, __active_provider_name__.split(':')[0], opts)

        return {'Destroyed': '{0} was destroyed.'.format(name)}
    else:
        return {'Error': 'Error destroying {}'.format(name)}


# noinspection PyTypeChecker
def reboot(name, call=None):
    '''
    Reboot a vagrant minion.

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    my_info = _get_my_info(name)
    profile_name = my_info[name]['profile']
    profile = __opts__['profiles'][profile_name]
    host = profile['host']
    local = salt.client.LocalClient()
    return local.cmd(host, 'vagrant.reboot', [name])
@ -69,12 +69,11 @@ def init_git_pillar(opts):
    for opts_dict in [x for x in opts.get('ext_pillar', [])]:
        if 'git' in opts_dict:
            try:
                pillar = salt.utils.gitfs.GitPillar(opts)
                pillar.init_remotes(
                pillar = salt.utils.gitfs.GitPillar(
                    opts,
                    opts_dict['git'],
                    git_pillar.PER_REMOTE_OVERRIDES,
                    git_pillar.PER_REMOTE_ONLY
                )
                    per_remote_overrides=git_pillar.PER_REMOTE_OVERRIDES,
                    per_remote_only=git_pillar.PER_REMOTE_ONLY)
                ret.append(pillar)
            except FileserverConfigError:
                if opts.get('git_pillar_verify_config', True):
@ -71,6 +71,15 @@ log = logging.getLogger(__name__)
__virtualname__ = 'git'


def _gitfs(init_remotes=True):
    return salt.utils.gitfs.GitFS(
        __opts__,
        __opts__['gitfs_remotes'],
        per_remote_overrides=PER_REMOTE_OVERRIDES,
        per_remote_only=PER_REMOTE_ONLY,
        init_remotes=init_remotes)


def __virtual__():
    '''
    Only load if the desired provider module is present and gitfs is enabled
@ -79,7 +88,7 @@ def __virtual__():
    if __virtualname__ not in __opts__['fileserver_backend']:
        return False
    try:
        salt.utils.gitfs.GitFS(__opts__)
        _gitfs(init_remotes=False)
        # Initialization of the GitFS object did not fail, so we know we have
        # valid configuration syntax and that a valid provider was detected.
        return __virtualname__
@ -92,18 +101,14 @@ def clear_cache():
    '''
    Completely clear gitfs cache
    '''
    gitfs = salt.utils.gitfs.GitFS(__opts__)
    return gitfs.clear_cache()
    return _gitfs(init_remotes=False).clear_cache()


def clear_lock(remote=None, lock_type='update'):
    '''
    Clear update.lk
    '''
    gitfs = salt.utils.gitfs.GitFS(__opts__)
    gitfs.init_remotes(__opts__['gitfs_remotes'],
                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
    return gitfs.clear_lock(remote=remote, lock_type=lock_type)
    return _gitfs().clear_lock(remote=remote, lock_type=lock_type)


def lock(remote=None):
@ -114,30 +119,21 @@ def lock(remote=None):
    information, or a pattern. If the latter, then remotes for which the URL
    matches the pattern will be locked.
    '''
    gitfs = salt.utils.gitfs.GitFS(__opts__)
    gitfs.init_remotes(__opts__['gitfs_remotes'],
                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
    return gitfs.lock(remote=remote)
    return _gitfs().lock(remote=remote)


def update():
    '''
    Execute a git fetch on all of the repos
    '''
    gitfs = salt.utils.gitfs.GitFS(__opts__)
    gitfs.init_remotes(__opts__['gitfs_remotes'],
                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
    gitfs.update()
    _gitfs().update()


def envs(ignore_cache=False):
    '''
    Return a list of refs that can be used as environments
    '''
    gitfs = salt.utils.gitfs.GitFS(__opts__)
    gitfs.init_remotes(__opts__['gitfs_remotes'],
                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
    return gitfs.envs(ignore_cache=ignore_cache)
    return _gitfs().envs(ignore_cache=ignore_cache)


def find_file(path, tgt_env='base', **kwargs):  # pylint: disable=W0613
@ -145,10 +141,7 @@ def find_file(path, tgt_env='base', **kwargs):  # pylint: disable=W0613
    Find the first file to match the path and ref, read the file out of git
    and send the path to the newly cached file
    '''
    gitfs = salt.utils.gitfs.GitFS(__opts__)
    gitfs.init_remotes(__opts__['gitfs_remotes'],
                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
    return gitfs.find_file(path, tgt_env=tgt_env, **kwargs)
    return _gitfs().find_file(path, tgt_env=tgt_env, **kwargs)


def init():
@ -156,29 +149,21 @@ def init():
    Initialize remotes. This is only used by the master's pre-flight checks,
    and is not invoked by GitFS.
    '''
    gitfs = salt.utils.gitfs.GitFS(__opts__)
    gitfs.init_remotes(__opts__['gitfs_remotes'],
                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
    _gitfs()


def serve_file(load, fnd):
    '''
    Return a chunk from a file based on the data received
    '''
    gitfs = salt.utils.gitfs.GitFS(__opts__)
    gitfs.init_remotes(__opts__['gitfs_remotes'],
                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
    return gitfs.serve_file(load, fnd)
    return _gitfs().serve_file(load, fnd)


def file_hash(load, fnd):
    '''
    Return a file hash, the hash type is set in the master config file
    '''
    gitfs = salt.utils.gitfs.GitFS(__opts__)
    gitfs.init_remotes(__opts__['gitfs_remotes'],
                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
    return gitfs.file_hash(load, fnd)
    return _gitfs().file_hash(load, fnd)


def file_list(load):
@ -186,10 +171,7 @@ def file_list(load):
    Return a list of all files on the file server in a specified
    environment (specified as a key within the load dict).
    '''
    gitfs = salt.utils.gitfs.GitFS(__opts__)
    gitfs.init_remotes(__opts__['gitfs_remotes'],
                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
    return gitfs.file_list(load)
    return _gitfs().file_list(load)


def file_list_emptydirs(load):  # pylint: disable=W0613
@ -204,17 +186,11 @@ def dir_list(load):
    '''
    Return a list of all directories on the master
    '''
    gitfs = salt.utils.gitfs.GitFS(__opts__)
    gitfs.init_remotes(__opts__['gitfs_remotes'],
                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
    return gitfs.dir_list(load)
    return _gitfs().dir_list(load)


def symlink_list(load):
    '''
    Return a dict of all symlinks based on a given path in the repo
    '''
    gitfs = salt.utils.gitfs.GitFS(__opts__)
    gitfs.init_remotes(__opts__['gitfs_remotes'],
                       PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
    return gitfs.symlink_list(load)
    return _gitfs().symlink_list(load)
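The refactor above replaces the repeated three-call construction with a single
``_gitfs()`` helper. A hedged sketch of the equivalent direct call, assuming a
loaded ``__opts__`` dictionary such as the loader provides:

.. code-block:: python

    import salt.utils.gitfs
    from salt.fileserver.gitfs import PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY

    # init_remotes=False loads config only (enough for clear_cache());
    # the default init_remotes=True also sets up the remotes.
    gitfs = salt.utils.gitfs.GitFS(
        __opts__,
        __opts__['gitfs_remotes'],
        per_remote_overrides=PER_REMOTE_OVERRIDES,
        per_remote_only=PER_REMOTE_ONLY)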
@ -486,11 +486,11 @@ class Master(SMaster):
                for repo in git_pillars:
                    new_opts[u'ext_pillar'] = [repo]
                    try:
                        git_pillar = salt.utils.gitfs.GitPillar(new_opts)
                        git_pillar.init_remotes(
                        git_pillar = salt.utils.gitfs.GitPillar(
                            new_opts,
                            repo[u'git'],
                            salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
                            salt.pillar.git_pillar.PER_REMOTE_ONLY)
                            per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
                            per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
                    except FileserverConfigError as exc:
                        critical_errors.append(exc.strerror)
                    finally:
@ -132,7 +132,7 @@ def version(*names, **kwargs):
    return __salt__['pkg_resource.version'](*names, **kwargs)


def refresh_db(failhard=False):
def refresh_db(failhard=False, **kwargs):  # pylint: disable=unused-argument
    '''
    Updates the opkg database to latest packages based upon repositories

@ -514,7 +514,7 @@ def purge(name=None, pkgs=None, **kwargs):  # pylint: disable=unused-argument
    return remove(name=name, pkgs=pkgs)


def upgrade(refresh=True):
def upgrade(refresh=True, **kwargs):  # pylint: disable=unused-argument
    '''
    Upgrades all packages via ``opkg upgrade``

@ -803,7 +803,7 @@ def list_pkgs(versions_as_list=False, **kwargs):
    return ret


def list_upgrades(refresh=True):
def list_upgrades(refresh=True, **kwargs):  # pylint: disable=unused-argument
    '''
    List all available package upgrades.

@ -976,7 +976,7 @@ def info_installed(*names, **kwargs):
    return ret


def upgrade_available(name):
def upgrade_available(name, **kwargs):  # pylint: disable=unused-argument
    '''
    Check whether or not an upgrade is available for a given package

@ -989,7 +989,7 @@ def upgrade_available(name):
    return latest_version(name) != ''


def version_cmp(pkg1, pkg2, ignore_epoch=False):
def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs):  # pylint: disable=unused-argument
    '''
    Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
    pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
@ -1038,7 +1038,7 @@ def version_cmp(pkg1, pkg2, ignore_epoch=False):
    return None


def list_repos():
def list_repos(**kwargs):  # pylint: disable=unused-argument
    '''
    Lists all repos on /etc/opkg/*.conf

@ -1075,7 +1075,7 @@ def list_repos():
    return repos


def get_repo(alias):
def get_repo(alias, **kwargs):  # pylint: disable=unused-argument
    '''
    Display a repo from the /etc/opkg/*.conf

@ -1146,7 +1146,7 @@ def _mod_repo_in_file(alias, repostr, filepath):
        fhandle.writelines(output)


def del_repo(alias):
def del_repo(alias, **kwargs):  # pylint: disable=unused-argument
    '''
    Delete a repo from /etc/opkg/*.conf

@ -1260,7 +1260,7 @@ def mod_repo(alias, **kwargs):
        refresh_db()


def file_list(*packages):
def file_list(*packages, **kwargs):  # pylint: disable=unused-argument
    '''
    List the files that belong to a package. Not specifying any packages will
    return a list of _every_ file on the system's package database (not
@ -1281,7 +1281,7 @@ def file_list(*packages):
    return {'errors': output['errors'], 'files': files}


def file_dict(*packages):
def file_dict(*packages, **kwargs):  # pylint: disable=unused-argument
    '''
    List the files that belong to a package, grouped by package. Not
    specifying any packages will return a list of _every_ file on the system's
@ -1323,7 +1323,7 @@ def file_dict(*packages):
    return {'errors': errors, 'packages': ret}


def owner(*paths):
def owner(*paths, **kwargs):  # pylint: disable=unused-argument
    '''
    Return the name of the package that owns the file. Multiple file paths can
    be passed. Like :mod:`pkg.version <salt.modules.opkg.version>`, if a single
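The pattern repeated above (a catch-all ``**kwargs`` with a pylint waiver)
exists because Salt's pkg states pass extra keyword arguments, such as
``saltenv``, straight through to the execution module. A minimal sketch of the
failure mode and the fix:

.. code-block:: python

    def refresh_db(failhard=False):
        return failhard

    try:
        refresh_db(failhard=False, saltenv='base')  # extra kwarg from a state
    except TypeError as err:
        print(err)  # unexpected keyword argument 'saltenv'

    def refresh_db_fixed(failhard=False, **kwargs):  # pylint: disable=unused-argument
        return failhard

    refresh_db_fixed(failhard=False, saltenv='base')  # now tolerated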
@ -93,7 +93,7 @@ def _validate_partition_boundary(boundary):
    '''
    try:
        for unit in VALID_UNITS:
            if boundary.endswith(unit):
            if str(boundary).endswith(unit):
                return
        int(boundary)
    except Exception:
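The ``str()`` cast above matters because the boundary value may arrive from
YAML as an integer, and ``int`` has no ``endswith``. A quick sketch:

.. code-block:: python

    VALID_UNITS = ('MB', 'GB')  # illustrative subset of the real list

    for boundary in ('10MB', 10):
        matched = any(str(boundary).endswith(unit) for unit in VALID_UNITS)
        print(boundary, matched)  # '10MB' True, then 10 False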
@ -923,8 +923,9 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
        salt '*' state.apply test pillar='{"foo": "bar"}'

    .. note::
        Values passed this way will override Pillar values set via
        ``pillar_roots`` or an external Pillar source.
        Values passed this way will override existing Pillar values set via
        ``pillar_roots`` or an external Pillar source. Pillar values that
        are not included in the kwarg will not be overwritten.

    .. versionchanged:: 2016.3.0
        GPG-encrypted CLI Pillar data is now supported via the GPG
@ -1379,6 +1380,20 @@ def sls_id(id_, mods, test=None, queue=False, **kwargs):
        :conf_minion:`pillarenv` minion config option nor this CLI argument is
        used, all Pillar environments will be merged together.

    pillar
        Custom Pillar values, passed as a dictionary of key-value pairs

        .. code-block:: bash

            salt '*' state.sls_id my_state my_module pillar='{"foo": "bar"}'

        .. note::
            Values passed this way will override existing Pillar values set via
            ``pillar_roots`` or an external Pillar source. Pillar values that
            are not included in the kwarg will not be overwritten.

        .. versionadded:: Oxygen

    CLI Example:

    .. code-block:: bash

@ -1399,12 +1414,26 @@ def sls_id(id_, mods, test=None, queue=False, **kwargs):
    if opts['environment'] is None:
        opts['environment'] = 'base'

    pillar_override = kwargs.get('pillar')
    pillar_enc = kwargs.get('pillar_enc')
    if pillar_enc is None \
            and pillar_override is not None \
            and not isinstance(pillar_override, dict):
        raise SaltInvocationError(
            'Pillar data must be formatted as a dictionary, unless pillar_enc '
            'is specified.'
        )

    try:
        st_ = salt.state.HighState(opts,
                                   pillar_override,
                                   pillar_enc=pillar_enc,
                                   proxy=__proxy__,
                                   initial_pillar=_get_initial_pillar(opts))
    except NameError:
        st_ = salt.state.HighState(opts,
                                   pillar_override,
                                   pillar_enc=pillar_enc,
                                   initial_pillar=_get_initial_pillar(opts))

    if not _check_pillar(kwargs, st_.opts['pillar']):
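A hedged sketch of the dictionary check introduced above, runnable outside
Salt with a stand-in exception class:

.. code-block:: python

    class SaltInvocationError(Exception):
        # stand-in for salt.exceptions.SaltInvocationError
        pass

    def check_pillar_override(pillar_override, pillar_enc=None):
        if pillar_enc is None \
                and pillar_override is not None \
                and not isinstance(pillar_override, dict):
            raise SaltInvocationError(
                'Pillar data must be formatted as a dictionary, unless '
                'pillar_enc is specified.')
        return pillar_override

    check_pillar_override({'foo': 'bar'})   # accepted
    # check_pillar_override('foo=bar')      # would raise SaltInvocationError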
@ -55,7 +55,6 @@ def __virtual__():
    '''
    run Vagrant commands if possible
    '''
    # noinspection PyUnresolvedReferences
    if salt.utils.path.which('vagrant') is None:
        return False, 'The vagrant module could not be loaded: vagrant command not found'
    return __virtualname__
@ -298,6 +297,11 @@ def vm_state(name='', cwd=None):
          'provider': _,  # the Vagrant VM provider
          'name': _}      # salt_id name

    Known bug: if there are multiple machines in your Vagrantfile, and you request
    the status of the ``primary`` machine, which you defined by leaving the ``machine``
    parameter blank, then you may receive the status of all of them.
    Please specify the actual machine name for each VM if there is more than one.

    '''

    if name:
@ -321,7 +325,7 @@ def vm_state(name='', cwd=None):
            datum = {'machine': tokens[0],
                     'state': ' '.join(tokens[1:-1]),
                     'provider': tokens[-1].lstrip('(').rstrip(')'),
                     'name': name or get_machine_id(tokens[0], cwd)
                     'name': get_machine_id(tokens[0], cwd)
                     }
            info.append(datum)
        except IndexError:
@ -365,7 +369,7 @@ def init(name,  # Salt_id for created VM
    # passed-in keyword arguments overwrite vm dictionary values
    vm_['cwd'] = cwd or vm_.get('cwd')
    if not vm_['cwd']:
        raise SaltInvocationError('Path to Vagrantfile must be defined by \'cwd\' argument')
        raise SaltInvocationError('Path to Vagrantfile must be defined by "cwd" argument')
    vm_['machine'] = machine or vm_.get('machine', machine)
    vm_['runas'] = runas or vm_.get('runas', runas)
    vm_['vagrant_provider'] = vagrant_provider or vm_.get('vagrant_provider', '')
@ -423,7 +427,7 @@ def shutdown(name):
    '''
    Send a soft shutdown (vagrant halt) signal to the named vm.

    This does the same thing as vagrant.stop. Other VM control
    This does the same thing as vagrant.stop. Other-VM control
    modules use "stop" and "shutdown" to differentiate between
    hard and soft shutdowns.

@ -476,7 +480,7 @@ def pause(name):
    return ret == 0


def reboot(name):
def reboot(name, provision=False):
    '''
    Reboot a VM. (vagrant reload)

@ -484,12 +488,16 @@ def reboot(name):

    .. code-block:: bash

        salt <host> vagrant.reboot <salt_id>
        salt <host> vagrant.reboot <salt_id> provision=True

    :param name: The salt_id name you will use to control this VM
    :param provision: (False) also re-run the Vagrant provisioning scripts.
    '''
    vm_ = get_vm_info(name)
    machine = vm_['machine']
    prov = '--provision' if provision else ''

    cmd = 'vagrant reload {}'.format(machine)
    cmd = 'vagrant reload {} {}'.format(machine, prov)
    ret = __salt__['cmd.retcode'](cmd,
                                  runas=vm_.get('runas'),
                                  cwd=vm_.get('cwd'))
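The command construction above, in isolation, with a hypothetical machine name:

.. code-block:: python

    machine = 'default'   # hypothetical Vagrantfile machine name
    for provision in (False, True):
        prov = '--provision' if provision else ''
        print('vagrant reload {} {}'.format(machine, prov).strip())
    # vagrant reload default
    # vagrant reload default --provision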
@ -656,7 +656,7 @@ def _nic_profile(profile_name, hypervisor, **kwargs):
            if key not in attributes or not attributes[key]:
                attributes[key] = value

    def _assign_mac(attributes):
    def _assign_mac(attributes, hypervisor):
        dmac = kwargs.get('dmac', None)
        if dmac is not None:
            log.debug('DMAC address is {0}'.format(dmac))
@ -666,11 +666,15 @@ def _nic_profile(profile_name, hypervisor, **kwargs):
                msg = 'Malformed MAC address: {0}'.format(dmac)
                raise CommandExecutionError(msg)
        else:
            attributes['mac'] = salt.utils.network.gen_mac()
            if hypervisor in ['qemu', 'kvm']:
                attributes['mac'] = salt.utils.network.gen_mac(
                    prefix='52:54:00')
            else:
                attributes['mac'] = salt.utils.network.gen_mac()

    for interface in interfaces:
        _normalize_net_types(interface)
        _assign_mac(interface)
        _assign_mac(interface, hypervisor)
        if hypervisor in overlays:
            _apply_default_overlay(interface)

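The change above seeds QEMU/KVM NICs with the ``52:54:00`` prefix those
hypervisors conventionally use for locally administered MACs. A stand-in for
``salt.utils.network.gen_mac`` to show the effect:

.. code-block:: python

    import random

    def gen_mac(prefix='AC:DE:48'):
        # mimics salt.utils.network.gen_mac: random host bytes after a prefix
        return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(
            prefix, random.randint(0, 0xff),
            random.randint(0, 0xff), random.randint(0, 0xff))

    hypervisor = 'kvm'
    if hypervisor in ['qemu', 'kvm']:
        print(gen_mac(prefix='52:54:00'))
    else:
        print(gen_mac())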
@ -891,11 +891,11 @@ class Pillar(object):
                # Avoid circular import
                import salt.utils.gitfs
                import salt.pillar.git_pillar
                git_pillar = salt.utils.gitfs.GitPillar(self.opts)
                git_pillar.init_remotes(
                git_pillar = salt.utils.gitfs.GitPillar(
                    self.opts,
                    self.ext['git'],
                    salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
                    salt.pillar.git_pillar.PER_REMOTE_ONLY)
                    per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
                    per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
                git_pillar.fetch_remotes()
            except TypeError:
                # Handle malformed ext_pillar
@ -348,12 +348,6 @@ from salt.ext import six
PER_REMOTE_OVERRIDES = ('env', 'root', 'ssl_verify', 'refspecs')
PER_REMOTE_ONLY = ('name', 'mountpoint')

# Fall back to default per-remote-only. This isn't technically needed since
# salt.utils.gitfs.GitBase.init_remotes() will default to
# salt.utils.gitfs.PER_REMOTE_ONLY for this value, so this is mainly for
# runners and other modules that import salt.pillar.git_pillar.
PER_REMOTE_ONLY = salt.utils.gitfs.PER_REMOTE_ONLY

# Set up logging
log = logging.getLogger(__name__)

@ -371,7 +365,7 @@ def __virtual__():
        return False

    try:
        salt.utils.gitfs.GitPillar(__opts__)
        salt.utils.gitfs.GitPillar(__opts__, init_remotes=False)
        # Initialization of the GitPillar object did not fail, so we
        # know we have valid configuration syntax and that a valid
        # provider was detected.
@ -387,8 +381,11 @@ def ext_pillar(minion_id, pillar, *repos):  # pylint: disable=unused-argument
    opts = copy.deepcopy(__opts__)
    opts['pillar_roots'] = {}
    opts['__git_pillar'] = True
    git_pillar = salt.utils.gitfs.GitPillar(opts)
    git_pillar.init_remotes(repos, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
    git_pillar = salt.utils.gitfs.GitPillar(
        opts,
        repos,
        per_remote_overrides=PER_REMOTE_OVERRIDES,
        per_remote_only=PER_REMOTE_ONLY)
    if __opts__.get('__role') == 'minion':
        # If masterless, fetch the remotes. We'll need to remove this once
        # we make the minion daemon able to run standalone.
@ -328,11 +328,14 @@ def clear_git_lock(role, remote=None, **kwargs):
        salt.utils.args.invalid_kwargs(kwargs)

    if role == 'gitfs':
        git_objects = [salt.utils.gitfs.GitFS(__opts__)]
        git_objects[0].init_remotes(
            __opts__['gitfs_remotes'],
            salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
            salt.fileserver.gitfs.PER_REMOTE_ONLY)
        git_objects = [
            salt.utils.gitfs.GitFS(
                __opts__,
                __opts__['gitfs_remotes'],
                per_remote_overrides=salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
                per_remote_only=salt.fileserver.gitfs.PER_REMOTE_ONLY
            )
        ]
    elif role == 'git_pillar':
        git_objects = []
        for ext_pillar in __opts__['ext_pillar']:
@ -340,11 +343,11 @@ def clear_git_lock(role, remote=None, **kwargs):
            if key == 'git':
                if not isinstance(ext_pillar['git'], list):
                    continue
                obj = salt.utils.gitfs.GitPillar(__opts__)
                obj.init_remotes(
                obj = salt.utils.gitfs.GitPillar(
                    __opts__,
                    ext_pillar['git'],
                    salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
                    salt.pillar.git_pillar.PER_REMOTE_ONLY)
                    per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
                    per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
                git_objects.append(obj)
    elif role == 'winrepo':
        winrepo_dir = __opts__['winrepo_dir']
@ -355,11 +358,12 @@ def clear_git_lock(role, remote=None, **kwargs):
            (winrepo_remotes, winrepo_dir),
            (__opts__['winrepo_remotes_ng'], __opts__['winrepo_dir_ng'])
        ):
            obj = salt.utils.gitfs.WinRepo(__opts__, base_dir)
            obj.init_remotes(
            obj = salt.utils.gitfs.WinRepo(
                __opts__,
                remotes,
                salt.runners.winrepo.PER_REMOTE_OVERRIDES,
                salt.runners.winrepo.PER_REMOTE_ONLY)
                per_remote_overrides=salt.runners.winrepo.PER_REMOTE_OVERRIDES,
                per_remote_only=salt.runners.winrepo.PER_REMOTE_ONLY,
                cache_root=base_dir)
            git_objects.append(obj)
    else:
        raise SaltInvocationError('Invalid role \'{0}\''.format(role))
@ -66,10 +66,11 @@ def update(branch=None, repo=None):
            if pillar_type != 'git':
                continue
            pillar_conf = ext_pillar[pillar_type]
            pillar = salt.utils.gitfs.GitPillar(__opts__)
            pillar.init_remotes(pillar_conf,
                                salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
                                salt.pillar.git_pillar.PER_REMOTE_ONLY)
            pillar = salt.utils.gitfs.GitPillar(
                __opts__,
                pillar_conf,
                per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
                per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
            for remote in pillar.remotes:
                # Skip this remote if it doesn't match the search criteria
                if branch is not None:
@ -32,7 +32,7 @@ log = logging.getLogger(__name__)
PER_REMOTE_OVERRIDES = ('ssl_verify', 'refspecs')

# Fall back to default per-remote-only. This isn't technically needed since
# salt.utils.gitfs.GitBase.init_remotes() will default to
# salt.utils.gitfs.GitBase.__init__ will default to
# salt.utils.gitfs.PER_REMOTE_ONLY for this value, so this is mainly for
# runners and other modules that import salt.runners.winrepo.
PER_REMOTE_ONLY = salt.utils.gitfs.PER_REMOTE_ONLY
@ -216,9 +216,12 @@ def update_git_repos(opts=None, clean=False, masterless=False):
    else:
        # New winrepo code utilizing salt.utils.gitfs
        try:
            winrepo = salt.utils.gitfs.WinRepo(opts, base_dir)
            winrepo.init_remotes(
                remotes, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
            winrepo = salt.utils.gitfs.WinRepo(
                opts,
                remotes,
                per_remote_overrides=PER_REMOTE_OVERRIDES,
                per_remote_only=PER_REMOTE_ONLY,
                cache_root=base_dir)
            winrepo.fetch_remotes()
            # Since we're not running update(), we need to manually call
            # clear_old_remotes() to remove directories from remotes that
@ -159,7 +159,7 @@ def formatted(name, fs_type='ext4', force=False, **kwargs):
        ret['result'] = None
        return ret

    __salt__['disk.format_'](name, fs_type, force=force, **kwargs)
    __salt__['disk.format'](name, fs_type, force=force, **kwargs)

    # Repeat fstype check up to 10 times with 3s sleeping between each
    # to avoid detection failing although mkfs has succeeded
369
salt/states/vagrant.py
Normal file
@ -0,0 +1,369 @@
# -*- coding: utf-8 -*-
r'''
.. index:: Vagrant state function

Manage Vagrant VMs
==================

Manage execution of Vagrant virtual machines on Salt minions.

Vagrant_ is a tool for building and managing virtual machine environments.
It can use various providers, such as VirtualBox_, Docker_, or VMware_, to run its VMs.
Vagrant provides some of the functionality of a light-weight hypervisor.
The combination of Salt modules, Vagrant running on the host, and a
virtual machine provider, gives hypervisor-like functionality for
developers who use Vagrant to quickly define their virtual environments.

.. _Vagrant: http://www.vagrantup.com/
.. _VirtualBox: https://www.virtualbox.org/
.. _Docker: https://www.docker.io/
.. _VMWare: https://www.vmware.com/

.. versionadded:: Oxygen

The configuration of each virtual machine is defined in a file named
``Vagrantfile`` which must exist on the VM host machine.
The essential parameters which must be defined to start a Vagrant VM
are the directory where the ``Vagrantfile`` is located \(argument ``cwd:``\),
and the username which will own the ``Vagrant box`` created for the VM \(
argument ``vagrant_runas:``\).

A single ``Vagrantfile`` may define one or more virtual machines.
Use the ``machine`` argument to choose among them. The default (blank)
value will select the ``primary`` (or only) machine in the Vagrantfile.

\[NOTE:\] Each virtual machine host must have the following:

- a working salt-minion
- a Salt sdb database configured for ``vagrant_sdb_data``.
- Vagrant installed and the ``vagrant`` command working
- a suitable VM provider

.. code-block:: yaml

    # EXAMPLE:
    # file /etc/salt/minion.d/vagrant_sdb.conf on the host computer
    #   -- this sdb database is required by the Vagrant module --
    vagrant_sdb_data:  # The sdb database must have this name.
      driver: sqlite3  # Let's use SQLite to store the data ...
      database: /var/cache/salt/vagrant.sqlite  # ... in this file ...
      table: sdb  # ... using this table name.
      create_table: True  # if not present

'''
from __future__ import absolute_import

# Import Python libs
import fnmatch

# Import Salt libs
import salt.utils.args
from salt.exceptions import CommandExecutionError, SaltInvocationError
import salt.ext.six as six

__virtualname__ = 'vagrant'


def __virtual__():
    '''
    Only if vagrant module is available.

    :return:
    '''

    if 'vagrant.version' in __salt__:
        return __virtualname__
    return False


def _vagrant_call(node, function, section, comment, status_when_done=None, **kwargs):
    '''
    Helper to call the vagrant functions. Wildcards supported.

    :param node: The Salt-id or wildcard
    :param function: the vagrant submodule to call
    :param section: the name for the state call.
    :param comment: what the state reply should say
    :param status_when_done: the Vagrant status expected for this state
    :return: the dictionary for the state reply
    '''
    ret = {'name': node, 'changes': {}, 'result': True, 'comment': ''}

    targeted_nodes = []
    if isinstance(node, six.string_types):
        try:  # use shortcut if a single node name
            if __salt__['vagrant.get_vm_info'](node):
                targeted_nodes = [node]
        except SaltInvocationError:
            pass

    if not targeted_nodes:  # the shortcut failed, do this the hard way
        all_domains = __salt__['vagrant.list_domains']()
        targeted_nodes = fnmatch.filter(all_domains, node)
    changed_nodes = []
    ignored_nodes = []
    for node in targeted_nodes:
        if status_when_done:
            try:
                present_state = __salt__['vagrant.vm_state'](node)[0]
                if present_state['state'] == status_when_done:
                    continue  # no change is needed
            except (IndexError, SaltInvocationError, CommandExecutionError):
                pass
        try:
            response = __salt__['vagrant.{0}'.format(function)](node, **kwargs)
            if isinstance(response, dict):
                response = response['name']
            changed_nodes.append({'node': node, function: response})
        except (SaltInvocationError, CommandExecutionError) as err:
            ignored_nodes.append({'node': node, 'issue': str(err)})
    if not changed_nodes:
        ret['result'] = True
        ret['comment'] = 'No changes seen'
        if ignored_nodes:
            ret['changes'] = {'ignored': ignored_nodes}
    else:
        ret['changes'] = {section: changed_nodes}
        ret['comment'] = comment

    return ret
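# Illustrative sketch (not part of the module): how the wildcard targeting
# above resolves names with fnmatch. The salt_ids are hypothetical.
#
#     import fnmatch
#
#     all_domains = ['node_name1', 'node_name2', 'test_vm']
#     print(fnmatch.filter(all_domains, 'node_name?'))
#     # ['node_name1', 'node_name2']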


def running(name, **kwargs):
    r'''
    Defines and starts a new VM with specified arguments, or restarts a
    VM (or group of VMs). (Runs ``vagrant up``.)

    :param name: the Salt_id node name you wish your VM to have.

    If ``name`` contains a "?" or "*" then it will re-start a group of VMs
    which have been paused or stopped.

    Each machine must be initially started individually using this function
    or the vagrant.init execution module call.

    \[NOTE:\] Keyword arguments are silently ignored when re-starting an existing VM.

    Possible keyword arguments:

    - cwd: The directory (path) containing the Vagrantfile
    - machine: ('') the name of the machine (in the Vagrantfile) if not default
    - vagrant_runas: ('root') the username who owns the vagrantbox file
    - vagrant_provider: the provider to run the VM (usually 'virtualbox')
    - vm: ({}) a dictionary containing these or other keyword arguments

    .. code-block:: yaml

        node_name:
          vagrant.running

    .. code-block:: yaml

        node_name:
          vagrant.running:
            - cwd: /projects/my_project
            - vagrant_runas: my_username
            - machine: machine1

    '''
    if '*' in name or '?' in name:

        return _vagrant_call(name, 'start', 'restarted',
                             "Machine has been restarted", "running")

    else:

        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': '{0} is already running'.format(name)
               }

        try:
            info = __salt__['vagrant.vm_state'](name)
            if info[0]['state'] != 'running':
                __salt__['vagrant.start'](name)
                ret['changes'][name] = 'Machine started'
                ret['comment'] = 'Node {0} started'.format(name)
        except (SaltInvocationError, CommandExecutionError):
            # there was no viable existing machine to start
            ret, kwargs = _find_init_change(name, ret, **kwargs)
            kwargs['start'] = True
            __salt__['vagrant.init'](name, **kwargs)
            ret['changes'][name] = 'Node defined and started'
            ret['comment'] = 'Node {0} defined and started'.format(name)

        return ret


def _find_init_change(name, ret, **kwargs):
    '''
    look for changes from any previous init of machine.

    :return: modified ret and kwargs
    '''
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    if 'vm' in kwargs:
        kwargs.update(kwargs.pop('vm'))
    # the state processing eats 'runas' so we rename
    kwargs['runas'] = kwargs.pop('vagrant_runas', '')
    try:
        vm_ = __salt__['vagrant.get_vm_info'](name)
    except SaltInvocationError:
        vm_ = {}
        for key, value in kwargs.items():
            ret['changes'][key] = {'old': None, 'new': value}
    if vm_:  # test for changed values
        for key in vm_:
            value = vm_[key] or ''  # supply a blank if value is None
            if key != 'name':  # will be missing in kwargs
                new = kwargs.get(key, '')
                if new != value:
                    if key == 'machine' and new == '':
                        continue  # we don't know the default machine name
                    ret['changes'][key] = {'old': value, 'new': new}
    return ret, kwargs


def initialized(name, **kwargs):
    r'''
    Defines a new VM with specified arguments, but does not start it.

    :param name: the Salt_id node name you wish your VM to have.

    Each machine must be initialized individually using this function
    or the "vagrant.running" function, or the vagrant.init execution module call.

    This command will not change the state of a running or paused machine.

    Possible keyword arguments:

    - cwd: The directory (path) containing the Vagrantfile
    - machine: ('') the name of the machine (in the Vagrantfile) if not default
    - vagrant_runas: ('root') the username who owns the vagrantbox file
    - vagrant_provider: the provider to run the VM (usually 'virtualbox')
    - vm: ({}) a dictionary containing these or other keyword arguments

    .. code-block:: yaml

        node_name1:
          vagrant.initialized:
            - cwd: /projects/my_project
            - vagrant_runas: my_username
            - machine: machine1

        node_name2:
          vagrant.initialized:
            - cwd: /projects/my_project
            - vagrant_runas: my_username
            - machine: machine2

        start_nodes:
          vagrant.start:
            - name: node_name?
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'The VM is already correctly defined'
           }

    # define a machine to start later
    ret, kwargs = _find_init_change(name, ret, **kwargs)

    if ret['changes'] == {}:
        return ret

    kwargs['start'] = False
    __salt__['vagrant.init'](name, **kwargs)
    ret['changes'][name] = 'Node initialized'
    ret['comment'] = 'Node {0} defined but not started.'.format(name)

    return ret


def stopped(name):
    '''
    Stops a VM (or VMs) by shutting it (them) down nicely. (Runs ``vagrant halt``)

    :param name: May be a Salt_id node, or a POSIX-style wildcard string.

    .. code-block:: yaml

        node_name:
          vagrant.stopped
    '''

    return _vagrant_call(name, 'shutdown', 'stopped',
                         'Machine has been shut down', 'poweroff')


def powered_off(name):
    '''
    Stops a VM (or VMs) by power off. (Runs ``vagrant halt``.)

    This method is provided for compatibility with other VM-control
    state modules. For Vagrant, the action is identical with ``stopped``.

    :param name: May be a Salt_id node or a POSIX-style wildcard string.

    .. code-block:: yaml

        node_name:
          vagrant.powered_off
    '''

    return _vagrant_call(name, 'stop', 'unpowered',
                         'Machine has been powered off', 'poweroff')


def destroyed(name):
    '''
    Stops a VM (or VMs) and removes all references to it (them). (Runs ``vagrant destroy``.)

    Subsequent re-use of the same machine will require another operation of ``vagrant.running``
    or a call to the ``vagrant.init`` execution module.

    :param name: May be a Salt_id node or a POSIX-style wildcard string.

    .. code-block:: yaml

        node_name:
          vagrant.destroyed
    '''

    return _vagrant_call(name, 'destroy', 'destroyed',
                         'Machine has been removed')


def paused(name):
    '''
    Stores the state of a VM (or VMs) for fast restart. (Runs ``vagrant suspend``.)

    :param name: May be a Salt_id node or a POSIX-style wildcard string.

    .. code-block:: yaml

        node_name:
          vagrant.paused
    '''

    return _vagrant_call(name, 'pause', 'paused',
                         'Machine has been suspended', 'saved')


def rebooted(name):
    '''
    Reboots a running, paused, or stopped VM (or VMs). (Runs ``vagrant reload``.)

    This will re-run the provisioning.

    :param name: May be a Salt_id node or a POSIX-style wildcard string.

    .. code-block:: yaml

        node_name:
          vagrant.rebooted
    '''

    return _vagrant_call(name, 'reboot', 'rebooted', 'Machine has been reloaded')
@ -67,6 +67,10 @@ def managed(name,
    name
        Path to the virtualenv.

    venv_bin: virtualenv
        The name (and optionally path) of the virtualenv command. This can also
        be set globally in the minion config file as ``virtualenv.venv_bin``.

    requirements: None
        Path to a pip requirements file. If the path begins with ``salt://``
        the file will be transferred from the master file server.
@ -398,13 +398,14 @@ def bootstrap(vm_, opts=None):

    # NOTE: deploy_kwargs is also used to pass inline_script variable content
    # to run_inline_script function
    host = salt.config.get_cloud_config_value('ssh_host', vm_, opts)
    deploy_kwargs = {
        'opts': opts,
        'host': vm_['ssh_host'],
        'host': host,
        'port': salt.config.get_cloud_config_value(
            'ssh_port', vm_, opts, default=22
        ),
        'salt_host': vm_.get('salt_host', vm_['ssh_host']),
        'salt_host': vm_.get('salt_host', host),
        'username': ssh_username,
        'script': deploy_script_code,
        'inline_script': inline_script_config,
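The change above resolves ``ssh_host`` once through the cloud-config lookup
(so profile and provider overrides apply) and reuses it for ``salt_host``. A
small sketch of the fallback logic, with hypothetical addresses:

.. code-block:: python

    vm_ = {'salt_host': '10.0.0.5'}   # hypothetical profile values
    host = '203.0.113.10'             # result of the config lookup

    deploy_kwargs = {
        'host': host,
        'salt_host': vm_.get('salt_host', host),  # falls back to host
    }
    print(deploy_kwargs)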
@ -20,6 +20,8 @@ import shutil
import stat
import subprocess
import time
import tornado.ioloop
import weakref
from datetime import datetime

# Import salt libs
@ -1925,12 +1927,47 @@ class GitBase(object):
    '''
    Base class for gitfs/git_pillar
    '''
    def __init__(self, opts, git_providers=None, cache_root=None):
    def __init__(self, opts, remotes=None, per_remote_overrides=(),
                 per_remote_only=PER_REMOTE_ONLY, git_providers=None,
                 cache_root=None, init_remotes=True):
        '''
        IMPORTANT: If specifying a cache_root, understand that this is also
        where the remotes will be cloned. A non-default cache_root is only
        really designed right now for winrepo, as its repos need to be checked
        out into the winrepo locations and not within the cachedir.

        As of the Oxygen release cycle, the classes used to interface with
        Pygit2 and GitPython can be overridden by passing the git_providers
        argument when spawning a class instance. This allows for one to write
        classes which inherit from salt.utils.gitfs.Pygit2 or
        salt.utils.gitfs.GitPython, and then direct one of the GitBase
        subclasses (GitFS, GitPillar, WinRepo) to use the custom class. For
        example:

        .. code-block:: Python

            import salt.utils.gitfs
            from salt.fileserver.gitfs import PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY

            class CustomPygit2(salt.utils.gitfs.Pygit2):
                def fetch_remotes(self):
                    ...
                    Alternate fetch behavior here
                    ...

            git_providers = {
                'pygit2': CustomPygit2,
                'gitpython': salt.utils.gitfs.GitPython,
            }

            gitfs = salt.utils.gitfs.GitFS(
                __opts__,
                __opts__['gitfs_remotes'],
                per_remote_overrides=PER_REMOTE_OVERRIDES,
                per_remote_only=PER_REMOTE_ONLY,
                git_providers=git_providers)

            gitfs.fetch_remotes()
        '''
        self.opts = opts
        self.git_providers = git_providers if git_providers is not None \
@ -1946,8 +1983,13 @@ class GitBase(object):
        self.hash_cachedir = salt.utils.path.join(self.cache_root, 'hash')
        self.file_list_cachedir = salt.utils.path.join(
            self.opts['cachedir'], 'file_lists', self.role)
        if init_remotes:
            self.init_remotes(
                remotes if remotes is not None else [],
                per_remote_overrides,
                per_remote_only)

    def init_remotes(self, remotes, per_remote_overrides,
    def init_remotes(self, remotes, per_remote_overrides=(),
                     per_remote_only=PER_REMOTE_ONLY):
        '''
        Initialize remotes
@ -2471,9 +2513,51 @@ class GitFS(GitBase):
    '''
    Functionality specific to the git fileserver backend
    '''
    def __init__(self, opts):
        self.role = 'gitfs'
        super(GitFS, self).__init__(opts)
    role = 'gitfs'
    instance_map = weakref.WeakKeyDictionary()

    def __new__(cls, opts, remotes=None, per_remote_overrides=(),
                per_remote_only=PER_REMOTE_ONLY, git_providers=None,
                cache_root=None, init_remotes=True):
        '''
        If we are not initializing remotes (such as in cases where we just want
        to load the config so that we can run clear_cache), then just return a
        new __init__'ed object. Otherwise, check the instance map and re-use an
        instance if one exists for the current process. Weak references are
        used to ensure that we garbage collect instances for threads which have
        exited.
        '''
        # No need to get the ioloop reference if we're not initializing remotes
        io_loop = tornado.ioloop.IOLoop.current() if init_remotes else None
        if not init_remotes or io_loop not in cls.instance_map:
            # We only evaluate the second condition in this if statement if
            # we're initializing remotes, so we won't get here unless io_loop
            # is something other than None.
            obj = object.__new__(cls)
            super(GitFS, obj).__init__(
                opts,
                remotes if remotes is not None else [],
                per_remote_overrides=per_remote_overrides,
                per_remote_only=per_remote_only,
                git_providers=git_providers if git_providers is not None
                    else GIT_PROVIDERS,
                cache_root=cache_root,
                init_remotes=init_remotes)
            if not init_remotes:
                log.debug('Created gitfs object with uninitialized remotes')
            else:
                log.debug('Created gitfs object for process %s', os.getpid())
                # Add to the instance map so we can re-use later
                cls.instance_map[io_loop] = obj
            return obj
        log.debug('Re-using gitfs object for process %s', os.getpid())
        return cls.instance_map[io_loop]

    def __init__(self, opts, remotes, per_remote_overrides=(),  # pylint: disable=super-init-not-called
                 per_remote_only=PER_REMOTE_ONLY, git_providers=None,
                 cache_root=None, init_remotes=True):
        # Initialization happens above in __new__(), so don't do anything here
        pass

    def dir_list(self, load):
        '''
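The ``__new__`` override above implements a per-IOLoop instance cache. A toy
illustration of the same pattern, detached from Salt:

.. code-block:: python

    import weakref

    class Cached(object):
        # one instance per key object; entries vanish when the key is
        # garbage collected, mirroring the WeakKeyDictionary use above
        instance_map = weakref.WeakKeyDictionary()

        def __new__(cls, key):
            if key not in cls.instance_map:
                cls.instance_map[key] = object.__new__(cls)
            return cls.instance_map[key]

    class Key(object):
        pass

    k = Key()
    assert Cached(k) is Cached(k)  # re-used for the same key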
@ -2755,9 +2839,7 @@ class GitPillar(GitBase):
    '''
    Functionality specific to the git external pillar
    '''
    def __init__(self, opts):
        self.role = 'git_pillar'
        super(GitPillar, self).__init__(opts)
    role = 'git_pillar'

    def checkout(self):
        '''
@ -2845,9 +2927,7 @@ class WinRepo(GitBase):
    '''
    Functionality specific to the winrepo runner
    '''
    def __init__(self, opts, winrepo_dir):
        self.role = 'winrepo'
        super(WinRepo, self).__init__(opts, cache_root=winrepo_dir)
    role = 'winrepo'

    def checkout(self):
        '''
@ -206,8 +206,13 @@ def wrap_tmpl_func(render_str):
                if six.PY2:
                    output = output.encode(SLS_ENCODING)
                if salt.utils.platform.is_windows():
                    newline = False
                    if output.endswith(('\n', os.linesep)):
                        newline = True
                    # Write out with Windows newlines
                    output = os.linesep.join(output.splitlines())
                    if newline:
                        output += os.linesep

        except SaltRenderError as exc:
            log.error("Rendering exception occurred: {0}".format(exc))
@ -331,7 +336,7 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None):
        # http://jinja.pocoo.org/docs/api/#unicode
        tmplstr = tmplstr.decode(SLS_ENCODING)

    if tmplstr.endswith('\n'):
    if tmplstr.endswith(os.linesep):
        newline = True

    if not saltenv:
@ -441,7 +446,7 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None):
        # Workaround a bug in Jinja that removes the final newline
        # (https://github.com/mitsuhiko/jinja2/issues/75)
        if newline:
            output += '\n'
            output += os.linesep

    return output

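The hunks above keep trailing-newline handling consistent with the platform
line separator. A short sketch of the round trip (the template text is
illustrative):

.. code-block:: python

    import os

    tmplstr = 'rendered text' + os.linesep
    newline = tmplstr.endswith(os.linesep)  # remember the trailing newline

    output = tmplstr.rstrip()               # Jinja may drop the final newline
    if newline:
        output += os.linesep                # restore it, platform-correctly
    print(repr(output))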
@ -281,7 +281,10 @@ def vb_get_network_addresses(machine_name=None, machine=None):
    # We can't trust virtualbox to give us up to date guest properties if the machine isn't running
    # For some reason it may give us outdated (cached?) values
    if machine.state == _virtualboxManager.constants.MachineState_Running:
        total_slots = int(machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/Count'))
        try:
            total_slots = int(machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/Count'))
        except ValueError:
            total_slots = 0
        for i in range(total_slots):
            try:
                address = machine.getGuestPropertyValue('/VirtualBox/GuestInfo/Net/{0}/V4/IP'.format(i))
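The guard added above covers guest properties that come back empty or
non-numeric. The same fallback in isolation:

.. code-block:: python

    def slots_from_property(value):
        # '' or other non-numeric strings from the guest API must not crash int()
        try:
            return int(value)
        except ValueError:
            return 0

    print(slots_from_property('2'))  # 2
    print(slots_from_property(''))   # 0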
@ -2,18 +2,18 @@

# Import python libs
from __future__ import absolute_import
import string
import grp
import os
import random
import string

# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import destructiveTest, skip_if_not_root

# Import 3rd-party libs
# Import Salt libs
from salt.ext.six.moves import range
import os
import grp
from salt import utils
import salt.utils.files


@skip_if_not_root
@ -66,7 +66,7 @@ class GroupModuleTest(ModuleCase):
        '''
        defs_file = '/etc/login.defs'
        if os.path.exists(defs_file):
            with utils.fopen(defs_file) as defs_fd:
            with salt.utils.files.fopen(defs_file) as defs_fd:
                login_defs = dict([x.split()
                                   for x in defs_fd.readlines()
                                   if x.strip()
@ -102,12 +102,12 @@ class GroupModuleTest(ModuleCase):
        '''
        Test the add group function
        '''
        #add a new group
        # add a new group
        self.assertTrue(self.run_function('group.add', [self._group, self._gid]))
        group_info = self.run_function('group.info', [self._group])
        self.assertEqual(group_info['name'], self._group)
        self.assertEqual(group_info['gid'], self._gid)
        #try adding the group again
        # try adding the group again
        self.assertFalse(self.run_function('group.add', [self._group, self._gid]))

    @destructiveTest
@ -124,7 +124,7 @@ class GroupModuleTest(ModuleCase):
        group_info = self.run_function('group.info', [self._group])
        self.assertEqual(group_info['name'], self._group)
        self.assertTrue(gid_min <= group_info['gid'] <= gid_max)
        #try adding the group again
        # try adding the group again
        self.assertFalse(self.run_function('group.add',
                                           [self._group]))

@ -142,7 +142,7 @@ class GroupModuleTest(ModuleCase):
        group_info = self.run_function('group.info', [self._group])
        self.assertEqual(group_info['name'], self._group)
        self.assertEqual(group_info['gid'], gid)
        #try adding the group again
        # try adding the group again
        self.assertFalse(self.run_function('group.add',
                                           [self._group, gid]))

@ -153,10 +153,10 @@ class GroupModuleTest(ModuleCase):
        '''
        self.assertTrue(self.run_function('group.add', [self._group]))

        #correct functionality
        # correct functionality
        self.assertTrue(self.run_function('group.delete', [self._group]))

        #group does not exist
        # group does not exist
        self.assertFalse(self.run_function('group.delete', [self._no_group]))

    @destructiveTest
@ -193,11 +193,11 @@ class GroupModuleTest(ModuleCase):
        self.assertTrue(self.run_function('group.adduser', [self._group, self._user]))
        group_info = self.run_function('group.info', [self._group])
        self.assertIn(self._user, group_info['members'])
        #try add a non existing user
        # try to add a non existing user
        self.assertFalse(self.run_function('group.adduser', [self._group, self._no_user]))
        #try add a user to non existing group
        # try to add a user to non existing group
        self.assertFalse(self.run_function('group.adduser', [self._no_group, self._user]))
        #try add a non existing user to a non existing group
        # try to add a non existing user to a non existing group
        self.assertFalse(self.run_function('group.adduser', [self._no_group, self._no_user]))

    @destructiveTest
@ -5,10 +5,12 @@

# Import Python libs
from __future__ import absolute_import
import errno
import os
import shutil
import tempfile
import textwrap
import tornado.ioloop
import logging
import stat
try:
@ -40,18 +42,26 @@ import salt.utils.win_functions

log = logging.getLogger(__name__)

TMP_SOCK_DIR = tempfile.mkdtemp(dir=TMP)
TMP_REPO_DIR = os.path.join(TMP, 'gitfs_root')
INTEGRATION_BASE_FILES = os.path.join(FILES, 'file', 'base')


def _rmtree_error(func, path, excinfo):
    os.chmod(path, stat.S_IWRITE)
    func(path)


@skipIf(not HAS_GITPYTHON, 'GitPython is not installed')
class GitfsConfigTestCase(TestCase, LoaderModuleMockMixin):

    def setup_loader_modules(self):
        self.tmp_cachedir = tempfile.mkdtemp(dir=TMP)
        self.tmp_sock_dir = tempfile.mkdtemp(dir=TMP)
        return {
            gitfs: {
                '__opts__': {
                    'cachedir': self.tmp_cachedir,
                    'sock_dir': self.tmp_sock_dir,
                    'sock_dir': TMP_SOCK_DIR,
                    'gitfs_root': 'salt',
                    'fileserver_backend': ['git'],
                    'gitfs_base': 'master',
@ -81,9 +91,17 @@ class GitfsConfigTestCase(TestCase, LoaderModuleMockMixin):
                }
            }

    @classmethod
    def setUpClass(cls):
        # Clear the instance map so that we make sure to create a new instance
        # for this test class.
        try:
            del salt.utils.gitfs.GitFS.instance_map[tornado.ioloop.IOLoop.current()]
        except KeyError:
            pass

    def tearDown(self):
        shutil.rmtree(self.tmp_cachedir)
        shutil.rmtree(self.tmp_sock_dir)

    def test_per_saltenv_config(self):
        opts_override = textwrap.dedent('''
@ -109,10 +127,11 @@ class GitfsConfigTestCase(TestCase, LoaderModuleMockMixin):
            - mountpoint: abc
        ''')
        with patch.dict(gitfs.__opts__, yaml.safe_load(opts_override)):
            git_fs = salt.utils.gitfs.GitFS(gitfs.__opts__)
            git_fs.init_remotes(
            git_fs = salt.utils.gitfs.GitFS(
                gitfs.__opts__,
                gitfs.__opts__['gitfs_remotes'],
                gitfs.PER_REMOTE_OVERRIDES, gitfs.PER_REMOTE_ONLY)
                per_remote_overrides=gitfs.PER_REMOTE_OVERRIDES,
                per_remote_only=gitfs.PER_REMOTE_ONLY)

        # repo1 (branch: foo)
        # The mountpoint should take the default (from gitfs_mountpoint), while
@ -169,14 +188,12 @@ class GitFSTest(TestCase, LoaderModuleMockMixin):

    def setup_loader_modules(self):
        self.tmp_cachedir = tempfile.mkdtemp(dir=TMP)
        self.tmp_sock_dir = tempfile.mkdtemp(dir=TMP)
        self.tmp_repo_dir = os.path.join(TMP, 'gitfs_root')
        return {
            gitfs: {
                '__opts__': {
                    'cachedir': self.tmp_cachedir,
                    'sock_dir': self.tmp_sock_dir,
                    'gitfs_remotes': ['file://' + self.tmp_repo_dir],
                    'sock_dir': TMP_SOCK_DIR,
                    'gitfs_remotes': ['file://' + TMP_REPO_DIR],
                    'gitfs_root': '',
                    'fileserver_backend': ['git'],
                    'gitfs_base': 'master',
@ -206,26 +223,26 @@ class GitFSTest(TestCase, LoaderModuleMockMixin):
                }
            }

    def setUp(self):
        '''
        We don't want to check in another .git dir into GH because that just gets messy.
        Instead, we'll create a temporary repo on the fly for the tests to examine.
        '''
        if not gitfs.__virtual__():
            self.skipTest("GitFS could not be loaded. Skipping GitFS tests!")
        self.integration_base_files = os.path.join(FILES, 'file', 'base')
    @classmethod
    def setUpClass(cls):
        # Clear the instance map so that we make sure to create a new instance
        # for this test class.
        try:
            del salt.utils.gitfs.GitFS.instance_map[tornado.ioloop.IOLoop.current()]
        except KeyError:
            pass

        # Create the dir if it doesn't already exist
        try:
            shutil.copytree(self.integration_base_files, self.tmp_repo_dir + '/')
            shutil.copytree(INTEGRATION_BASE_FILES, TMP_REPO_DIR + '/')
        except OSError:
            # We probably caught an error because files already exist. Ignore
            pass

        try:
            repo = git.Repo(self.tmp_repo_dir)
            repo = git.Repo(TMP_REPO_DIR)
        except git.exc.InvalidGitRepositoryError:
            repo = git.Repo.init(self.tmp_repo_dir)
            repo = git.Repo.init(TMP_REPO_DIR)

        if 'USERNAME' not in os.environ:
            try:
@ -238,9 +255,19 @@ class GitFSTest(TestCase, LoaderModuleMockMixin):
                          '\'root\'.')
                os.environ['USERNAME'] = 'root'

        repo.index.add([x for x in os.listdir(self.tmp_repo_dir)
        repo.index.add([x for x in os.listdir(TMP_REPO_DIR)
                        if x != '.git'])
        repo.index.commit('Test')

    def setUp(self):
        '''
        We don't want to check in another .git dir into GH because that just
        gets messy. Instead, we'll create a temporary repo on the fly for the
        tests to examine.
        '''
        if not gitfs.__virtual__():
            self.skipTest("GitFS could not be loaded. Skipping GitFS tests!")
        self.tmp_cachedir = tempfile.mkdtemp(dir=TMP)
        gitfs.update()

    def tearDown(self):
@ -248,17 +275,11 @@ class GitFSTest(TestCase, LoaderModuleMockMixin):
        Remove the temporary git repository and gitfs cache directory to ensure
        a clean environment for each test.
        '''
        shutil.rmtree(self.tmp_repo_dir, onerror=self._rmtree_error)
        shutil.rmtree(self.tmp_cachedir, onerror=self._rmtree_error)
        shutil.rmtree(self.tmp_sock_dir, onerror=self._rmtree_error)
        del self.tmp_repo_dir
        del self.tmp_cachedir
        del self.tmp_sock_dir
        del self.integration_base_files

    def _rmtree_error(self, func, path, excinfo):
        os.chmod(path, stat.S_IWRITE)
        func(path)
        try:
            shutil.rmtree(self.tmp_cachedir, onerror=_rmtree_error)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise

    def test_file_list(self):
        ret = gitfs.file_list(LOAD)

@ -428,6 +428,8 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
        controllers = root.findall('.//devices/controller')
        # There should be no controller
        self.assertTrue(len(controllers) == 0)
        # kvm mac address should start with 52:54:00
        self.assertTrue("mac address='52:54:00" in xml_data)

    def test_mixed_dict_and_list_as_profile_objects(self):

@ -100,7 +100,7 @@ class BlockdevTestCase(TestCase, LoaderModuleMockMixin):

        # Test state return when block device format fails
        with patch.dict(blockdev.__salt__, {'cmd.run': MagicMock(return_value=mock_ext4),
                                            'disk.format_': MagicMock(return_value=True)}):
                                            'disk.format': MagicMock(return_value=True)}):
            comt = ('Failed to format {0}'.format(name))
            ret.update({'comment': comt, 'result': False})
            with patch.object(salt.utils.path, 'which',

@ -33,6 +33,7 @@ from salt.utils.jinja import (
)
from salt.utils.templates import JINJA, render_jinja_tmpl
from salt.utils.odict import OrderedDict
import salt.utils.stringutils

# Import 3rd party libs
import yaml
@ -176,12 +177,9 @@ class TestGetTemplate(TestCase):
        with salt.utils.files.fopen(fn_) as fp_:
            out = render_jinja_tmpl(
                fp_.read(),
                dict(
                    opts=self.local_opts,
                    saltenv='test',
                    salt=self.local_salt
                ))
        self.assertEqual(out, 'world\n')
                dict(opts=self.local_opts, saltenv='test', salt=self.local_salt)
            )
        self.assertEqual(out, 'world' + os.linesep)

    def test_fallback_noloader(self):
        '''
@ -192,12 +190,9 @@ class TestGetTemplate(TestCase):
        with salt.utils.files.fopen(filename) as fp_:
            out = render_jinja_tmpl(
                fp_.read(),
                dict(
                    opts=self.local_opts,
                    saltenv='test',
                    salt=self.local_salt
                ))
        self.assertEqual(out, 'Hey world !a b !\n')
                dict(opts=self.local_opts, saltenv='test', salt=self.local_salt)
            )
        self.assertEqual(out, 'Hey world !a b !' + os.linesep)

    def test_saltenv(self):
        '''
@ -216,7 +211,7 @@ class TestGetTemplate(TestCase):
                 'file_roots': self.local_opts['file_roots'],
                 'pillar_roots': self.local_opts['pillar_roots']},
                a='Hi', b='Salt', saltenv='test', salt=self.local_salt))
        self.assertEqual(out, 'Hey world !Hi Salt !\n')
        self.assertEqual(out, 'Hey world !Hi Salt !' + os.linesep)
        self.assertEqual(fc.requests[0]['path'], 'salt://macro')

    def test_macro_additional_log_for_generalexc(self):
@ -225,7 +220,7 @@ class TestGetTemplate(TestCase):
        more output from trace.
        '''
        expected = r'''Jinja error:.*division.*
.*/macrogeneral\(2\):
.*macrogeneral\(2\):
---
\{% macro mymacro\(\) -%\}
\{\{ 1/0 \}\} <======================
@ -249,7 +244,7 @@ class TestGetTemplate(TestCase):
        more output from trace.
        '''
        expected = r'''Jinja variable 'b' is undefined
.*/macroundefined\(2\):
.*macroundefined\(2\):
---
\{% macro mymacro\(\) -%\}
\{\{b.greetee\}\} <-- error is here <======================
@ -272,7 +267,7 @@ class TestGetTemplate(TestCase):
        If we failed in a macro, get more output from trace.
        '''
        expected = r'''Jinja syntax error: expected token .*end.*got '-'.*
.*/macroerror\(2\):
.*macroerror\(2\):
---
# macro
\{% macro mymacro\(greeting, greetee='world'\) -\} <-- error is here <======================
@ -302,7 +297,7 @@ class TestGetTemplate(TestCase):
                 'file_roots': self.local_opts['file_roots'],
                 'pillar_roots': self.local_opts['pillar_roots']},
                a='Hi', b='Sàlt', saltenv='test', salt=self.local_salt))
        self.assertEqual(out, u'Hey world !Hi Sàlt !\n')
        self.assertEqual(out, salt.utils.stringutils.to_unicode('Hey world !Hi Sàlt !' + os.linesep))
        self.assertEqual(fc.requests[0]['path'], 'salt://macro')

        filename = os.path.join(TEMPLATES_DIR, 'files', 'test', 'non_ascii')
@ -313,7 +308,7 @@ class TestGetTemplate(TestCase):
                 'file_roots': self.local_opts['file_roots'],
                 'pillar_roots': self.local_opts['pillar_roots']},
                a='Hi', b='Sàlt', saltenv='test', salt=self.local_salt))
        self.assertEqual(u'Assunção\n', out)
        self.assertEqual(u'Assunção' + os.linesep, out)
        self.assertEqual(fc.requests[0]['path'], 'salt://macro')

    @skipIf(HAS_TIMELIB is False, 'The `timelib` library is not installed.')
@ -376,8 +371,8 @@ class TestGetTemplate(TestCase):
        with salt.utils.files.fopen(out['data']) as fp:
            result = fp.read()
            if six.PY2:
                result = result.decode('utf-8')
            self.assertEqual(u'Assunção\n', result)
                result = salt.utils.stringutils.to_unicode(result)
            self.assertEqual(salt.utils.stringutils.to_unicode('Assunção' + os.linesep), result)

    def test_get_context_has_enough_context(self):
        template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf'

@ -439,7 +439,7 @@ class PillarTestCase(TestCase):
    def _setup_test_topfile_mocks(self, Matcher, get_file_client,
                                  nodegroup_order, glob_order):
        # Write a simple topfile and two pillar state files
        self.top_file = tempfile.NamedTemporaryFile(dir=TMP)
        self.top_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
        s = '''
base:
    group:
@ -456,19 +456,19 @@ base:
'''.format(nodegroup_order=nodegroup_order, glob_order=glob_order)
        self.top_file.write(salt.utils.stringutils.to_bytes(s))
        self.top_file.flush()
        self.ssh_file = tempfile.NamedTemporaryFile(dir=TMP)
        self.ssh_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
        self.ssh_file.write(b'''
ssh:
    foo
''')
        self.ssh_file.flush()
        self.ssh_minion_file = tempfile.NamedTemporaryFile(dir=TMP)
        self.ssh_minion_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
        self.ssh_minion_file.write(b'''
ssh:
    bar
''')
        self.ssh_minion_file.flush()
        self.generic_file = tempfile.NamedTemporaryFile(dir=TMP)
        self.generic_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
        self.generic_file.write(b'''
generic:
    key1:
@ -478,7 +478,7 @@ generic:
      sub_key1: []
''')
        self.generic_file.flush()
        self.generic_minion_file = tempfile.NamedTemporaryFile(dir=TMP)
        self.generic_minion_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
        self.generic_minion_file.write(b'''
generic:
    key1:
@ -507,7 +507,7 @@ generic:
        client.get_state.side_effect = get_state

    def _setup_test_include_mocks(self, Matcher, get_file_client):
        self.top_file = top_file = tempfile.NamedTemporaryFile(dir=TMP)
        self.top_file = top_file = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
        top_file.write(b'''
base:
    '*':
@ -518,21 +518,21 @@ base:
        - test
''')
        top_file.flush()
        self.init_sls = init_sls = tempfile.NamedTemporaryFile(dir=TMP)
        self.init_sls = init_sls = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
        init_sls.write(b'''
include:
   - test.sub1
   - test.sub2
''')
        init_sls.flush()
        self.sub1_sls = sub1_sls = tempfile.NamedTemporaryFile(dir=TMP)
        self.sub1_sls = sub1_sls = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
        sub1_sls.write(b'''
p1:
   - value1_1
   - value1_2
''')
        sub1_sls.flush()
        self.sub2_sls = sub2_sls = tempfile.NamedTemporaryFile(dir=TMP)
        self.sub2_sls = sub2_sls = tempfile.NamedTemporaryFile(dir=TMP, delete=False)
        sub2_sls.write(b'''
p1:
   - value1_3

@ -37,18 +37,19 @@ class TestGitFSProvider(TestCase):
                           MagicMock(return_value=True)):
            with patch.object(role_class, 'verify_pygit2',
                              MagicMock(return_value=False)):
                args = [OPTS]
                args = [OPTS, {}]
                kwargs = {'init_remotes': False}
                if role_name == 'winrepo':
                    args.append('/tmp/winrepo-dir')
                    kwargs['cache_root'] = '/tmp/winrepo-dir'
                with patch.dict(OPTS, {key: provider}):
                    # Try to create an instance with uppercase letters in
                    # provider name. If it fails then a
                    # FileserverConfigError will be raised, so no assert is
                    # necessary.
                    role_class(*args)
                    # Now try to instantiate an instance with all lowercase
                    # letters. Again, no need for an assert here.
                    role_class(*args)
                    role_class(*args, **kwargs)
                    # Now try to instantiate an instance with all lowercase
                    # letters. Again, no need for an assert here.
                    role_class(*args, **kwargs)

    def test_valid_provider(self):
        '''
@ -73,12 +74,13 @@ class TestGitFSProvider(TestCase):
                verify = 'verify_pygit2'
                mock2 = _get_mock(verify, provider)
                with patch.object(role_class, verify, mock2):
                    args = [OPTS]
                    args = [OPTS, {}]
                    kwargs = {'init_remotes': False}
                    if role_name == 'winrepo':
                        args.append('/tmp/winrepo-dir')
                        kwargs['cache_root'] = '/tmp/winrepo-dir'

                    with patch.dict(OPTS, {key: provider}):
                        role_class(*args)
                        role_class(*args, **kwargs)

                    with patch.dict(OPTS, {key: 'foo'}):
                        # Set the provider name to a known invalid provider
@ -86,5 +88,5 @@ class TestGitFSProvider(TestCase):
                    self.assertRaises(
                        FileserverConfigError,
                        role_class,
                        *args
                    )
                        *args,
                        **kwargs)