mirror of
https://github.com/valitydev/salt.git
synced 2024-11-07 08:58:59 +00:00
Merge branch '2015.5' of https://github.com/saltstack/salt into fix_21041_again
This commit is contained in:
commit
5008bfee96
7
doc/ref/clouds/all/salt.cloud.clouds.vmware.rst
Normal file
7
doc/ref/clouds/all/salt.cloud.clouds.vmware.rst
Normal file
@ -0,0 +1,7 @@
|
||||
========================
|
||||
salt.cloud.clouds.vmware
|
||||
========================
|
||||
|
||||
.. automodule:: salt.cloud.clouds.vmware
|
||||
:members:
|
||||
:exclude-members: get_configured_provider, script
|
@ -329,7 +329,7 @@ to enable set grains_cache to ``True``.
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
cache_jobs: False
|
||||
grains_cache: False
|
||||
|
||||
|
||||
.. conf_minion:: sock_dir
|
||||
|
@ -76,9 +76,8 @@ Extend declaration
|
||||
------------------
|
||||
|
||||
Extends a :ref:`name-declaration` from an included ``SLS module``. The
|
||||
keys of the extend declaration always define existing :ref`ID declaration`
|
||||
which have been defined in included
|
||||
``SLS modules``.
|
||||
keys of the extend declaration always refer to an existing
|
||||
:ref:`id-declaration` which has been defined in included ``SLS modules``.
|
||||
|
||||
Occurs only in the top level and defines a dictionary.
|
||||
|
||||
|
@ -451,7 +451,7 @@ accessible by the appropriate hosts:
|
||||
.. code-block:: yaml
|
||||
|
||||
testdb:
|
||||
mysql_database.present::
|
||||
mysql_database.present:
|
||||
- name: testerdb
|
||||
|
||||
``/srv/salt/mysql/user.sls``:
|
||||
|
379
doc/topics/cloud/vmware.rst
Normal file
379
doc/topics/cloud/vmware.rst
Normal file
@ -0,0 +1,379 @@
|
||||
===========================
|
||||
Getting Started With VMware
|
||||
===========================
|
||||
|
||||
.. versionadded:: 2015.5.4
|
||||
|
||||
**Author**: Nitin Madhok <nmadhok@clemson.edu>
|
||||
|
||||
The VMware cloud module allows you to manage VMware ESX, ESXi, and vCenter.
|
||||
|
||||
|
||||
Dependencies
|
||||
============
|
||||
The vmware module for Salt Cloud requires the ``pyVmomi`` package, which is
|
||||
available at PyPI:
|
||||
|
||||
https://pypi.python.org/pypi/pyvmomi
|
||||
|
||||
This package can be installed using `pip` or `easy_install`:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
pip install pyvmomi
|
||||
easy_install pyvmomi
|
||||
|
||||
|
||||
Configuration
|
||||
=============
|
||||
The VMware cloud module needs the vCenter URL, username and password to be
|
||||
set up in the cloud configuration at
|
||||
``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/vmware.conf``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
my-vmware-config:
|
||||
provider: vmware
|
||||
    user: 'DOMAIN\user'
|
||||
password: "verybadpass"
|
||||
url: "vcenter01.domain.com"
|
||||
|
||||
vmware-vcenter02:
|
||||
provider: vmware
|
||||
    user: 'DOMAIN\user'
|
||||
password: "verybadpass"
|
||||
url: "vcenter02.domain.com"
|
||||
|
||||
vmware-vcenter03:
|
||||
provider: vmware
|
||||
    user: 'DOMAIN\user'
|
||||
password: "verybadpass"
|
||||
url: "vcenter03.domain.com"
|
||||
protocol: "http"
|
||||
port: 80
|
||||
|
||||
.. note::
|
||||
|
||||
Optionally, ``protocol`` and ``port`` can be specified if the vCenter
|
||||
server is not using the defaults. Default is ``protocol: https`` and
|
||||
``port: 443``.
|
||||
|
||||
.. _vmware-cloud-profile:
|
||||
|
||||
Profiles
|
||||
========
|
||||
Set up an initial profile at ``/etc/salt/cloud.profiles`` or
|
||||
``/etc/salt/cloud.profiles.d/vmware.conf``:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
vmware-centos6.5:
|
||||
    provider: my-vmware-config
|
||||
clonefrom: test-vm
|
||||
|
||||
## Optional arguments
|
||||
num_cpus: 4
|
||||
memory: 8GB
|
||||
devices:
|
||||
cd:
|
||||
CD/DVD drive 1:
|
||||
device_type: datastore_iso_file
|
||||
iso_path: "[nap004-1] vmimages/tools-isoimages/linux.iso"
|
||||
CD/DVD drive 2:
|
||||
device_type: client_device
|
||||
mode: atapi
|
||||
CD/DVD drive 3:
|
||||
device_type: client_device
|
||||
mode: passthrough
|
||||
disk:
|
||||
Hard disk 1:
|
||||
size: 30
|
||||
Hard disk 2:
|
||||
size: 20
|
||||
Hard disk 3:
|
||||
size: 5
|
||||
network:
|
||||
Network adapter 1:
|
||||
name: 10.20.30-400-Test
|
||||
switch_type: standard
|
||||
ip: 10.20.30.123
|
||||
gateway: [10.20.30.110]
|
||||
subnet_mask: 255.255.255.128
|
||||
domain: mycompany.com
|
||||
Network adapter 2:
|
||||
name: 10.30.40-500-Dev-DHCP
|
||||
adapter_type: e1000
|
||||
switch_type: distributed
|
||||
Network adapter 3:
|
||||
name: 10.40.50-600-Prod
|
||||
adapter_type: vmxnet3
|
||||
switch_type: distributed
|
||||
ip: 10.40.50.123
|
||||
gateway: [10.40.50.110]
|
||||
subnet_mask: 255.255.255.128
|
||||
domain: mycompany.com
|
||||
scsi:
|
||||
SCSI controller 1:
|
||||
type: lsilogic
|
||||
SCSI controller 2:
|
||||
type: lsilogic_sas
|
||||
bus_sharing: virtual
|
||||
SCSI controller 3:
|
||||
type: paravirtual
|
||||
bus_sharing: physical
|
||||
|
||||
domain: mycompany.com
|
||||
dns_servers:
|
||||
- 123.127.255.240
|
||||
- 123.127.255.241
|
||||
- 123.127.255.242
|
||||
|
||||
# If cloning from template, either resourcepool or cluster MUST be specified!
|
||||
resourcepool: Resources
|
||||
cluster: Prod
|
||||
|
||||
datastore: HUGE-DATASTORE-Cluster
|
||||
folder: Development
|
||||
datacenter: DC1
|
||||
host: c4212n-002.domain.com
|
||||
template: False
|
||||
power_on: True
|
||||
extra_config:
|
||||
mem.hotadd: 'yes'
|
||||
guestinfo.foo: bar
|
||||
guestinfo.domain: foobar.com
|
||||
guestinfo.customVariable: customValue
|
||||
|
||||
deploy: True
|
||||
private_key: /root/.ssh/mykey.pem
|
||||
ssh_username: cloud-user
|
||||
password: veryVeryBadPassword
|
||||
minion:
|
||||
master: 123.127.193.105
|
||||
|
||||
file_map:
|
||||
/path/to/local/custom/script: /path/to/remote/script
|
||||
/path/to/local/file: /path/to/remote/file
|
||||
/srv/salt/yum/epel.repo: /etc/yum.repos.d/epel.repo
|
||||
|
||||
|
||||
``provider``
|
||||
Enter the name that was specified when the cloud provider config was created.
|
||||
|
||||
``clonefrom``
|
||||
Enter the name of the VM/template to clone from.
|
||||
|
||||
``num_cpus``
|
||||
Enter the number of vCPUS that you want the VM/template to have. If not specified,
|
||||
the current VM/template's vCPU count is used.
|
||||
|
||||
``memory``
|
||||
Enter the memory size (in MB or GB) that you want the VM/template to have. If
|
||||
not specified, the current VM/template's memory size is used. Example
|
||||
``memory: 8GB`` or ``memory: 8192MB``.
|
||||
|
||||
``devices``
|
||||
Enter the device specifications here. Currently, the following devices can be
|
||||
created or reconfigured:
|
||||
|
||||
cd
|
||||
  Enter the CD/DVD drive specification here. If the CD/DVD drive doesn't exist,
|
||||
it will be created with the specified configuration. If the CD/DVD drive
|
||||
already exists, it will be reconfigured with the specifications. The following
|
||||
options can be specified per CD/DVD drive:
|
||||
|
||||
device_type
|
||||
Specify how the CD/DVD drive should be used. Currently supported types are
|
||||
``client_device`` and ``datastore_iso_file``. Default is
|
||||
``device_type: client_device``
|
||||
iso_path
|
||||
Enter the path to the iso file present on the datastore only if
|
||||
``device_type: datastore_iso_file``. The syntax to specify this is
|
||||
``iso_path: "[datastoreName] vmimages/tools-isoimages/linux.iso"``. This
|
||||
field is ignored if ``device_type: client_device``
|
||||
mode
|
||||
Enter the mode of connection only if ``device_type: client_device``. Currently
|
||||
supported modes are ``passthrough`` and ``atapi``. This field is ignored if
|
||||
``device_type: datastore_iso_file``. Default is ``mode: passthrough``
|
||||
|
||||
disk
|
||||
  Enter the disk specification here. If the hard disk doesn't exist, it will
|
||||
be created with the provided size. If the hard disk already exists, it will
|
||||
be expanded if the provided size is greater than the current size of the disk.
|
||||
|
||||
network
|
||||
  Enter the network adapter specification here. If the network adapter doesn't
|
||||
exist, a new network adapter will be created with the specified network name,
|
||||
type and other configuration. If the network adapter already exists, it will
|
||||
be reconfigured with the specifications. The following additional options can
|
||||
be specified per network adapter (See example above):
|
||||
|
||||
name
|
||||
Enter the network name you want the network adapter to be mapped to.
|
||||
|
||||
adapter_type
|
||||
Enter the network adapter type you want to create. Currently supported
|
||||
types are ``vmxnet``, ``vmxnet2``, ``vmxnet3``, ``e1000`` and ``e1000e``.
|
||||
If no type is specified, by default ``vmxnet3`` will be used.
|
||||
|
||||
switch_type
|
||||
Enter the type of switch to use. This decides whether to use a standard
|
||||
switch network or a distributed virtual portgroup. Currently supported
|
||||
types are ``standard`` for standard portgroups and ``distributed`` for
|
||||
distributed virtual portgroups.
|
||||
|
||||
ip
|
||||
Enter the static IP you want the network adapter to be mapped to. If the
|
||||
network specified is DHCP enabled, you do not have to specify this.
|
||||
|
||||
gateway
|
||||
Enter the gateway for the network as a list. If the network specified
|
||||
is DHCP enabled, you do not have to specify this.
|
||||
|
||||
subnet_mask
|
||||
Enter the subnet mask for the network. If the network specified is DHCP
|
||||
enabled, you do not have to specify this.
|
||||
|
||||
domain
|
||||
Enter the domain to be used with the network adapter. If the network
|
||||
specified is DHCP enabled, you do not have to specify this.
|
||||
|
||||
scsi
|
||||
  Enter the SCSI adapter specification here. If the SCSI adapter doesn't exist,
|
||||
a new SCSI adapter will be created of the specified type. If the SCSI adapter
|
||||
already exists, it will be reconfigured with the specifications. The following
|
||||
additional options can be specified per SCSI adapter:
|
||||
|
||||
type
|
||||
Enter the SCSI adapter type you want to create. Currently supported
|
||||
types are ``lsilogic``, ``lsilogic_sas`` and ``paravirtual``. Type must
|
||||
be specified when creating a new SCSI adapter.
|
||||
|
||||
bus_sharing
|
||||
Specify this if sharing of virtual disks between virtual machines is desired.
|
||||
The following can be specified:
|
||||
|
||||
virtual
|
||||
Virtual disks can be shared between virtual machines on the same server.
|
||||
|
||||
physical
|
||||
Virtual disks can be shared between virtual machines on any server.
|
||||
|
||||
no
|
||||
Virtual disks cannot be shared between virtual machines.
|
||||
|
||||
``domain``
|
||||
Enter the global domain name to be used for DNS. If not specified and if the VM name
|
||||
is a FQDN, ``domain`` is set to the domain from the VM name. Default is ``local``.
|
||||
|
||||
``dns_servers``
|
||||
Enter the list of DNS servers to use in order of priority.
|
||||
|
||||
``resourcepool``
|
||||
Enter the name of the resourcepool to which the new virtual machine should be
|
||||
attached. This determines what compute resources will be available to the clone.
|
||||
|
||||
.. note::
|
||||
|
||||
- For a clone operation from a virtual machine, it will use the same
|
||||
resourcepool as the original virtual machine unless specified.
|
||||
- For a clone operation from a template to a virtual machine, specifying
|
||||
either this or cluster is required. If both are specified, the resourcepool
|
||||
value will be used.
|
||||
- For a clone operation to a template, this argument is ignored.
|
||||
|
||||
``cluster``
|
||||
Enter the name of the cluster whose resource pool the new virtual machine should
|
||||
be attached to.
|
||||
|
||||
.. note::
|
||||
|
||||
  - For a clone operation from a virtual machine, it will use the same cluster's
|
||||
resourcepool as the original virtual machine unless specified.
|
||||
- For a clone operation from a template to a virtual machine, specifying either
|
||||
this or resourcepool is required. If both are specified, the resourcepool
|
||||
value will be used.
|
||||
- For a clone operation to a template, this argument is ignored.
|
||||
|
||||
``datastore``
|
||||
Enter the name of the datastore or the datastore cluster where the virtual machine
|
||||
should be located on physical storage. If not specified, the current datastore is
|
||||
used.
|
||||
|
||||
.. note::
|
||||
|
||||
- If you specify a datastore cluster name, DRS Storage recommendation is
|
||||
automatically applied.
|
||||
- If you specify a datastore name, DRS Storage recommendation is disabled.
|
||||
|
||||
``folder``
|
||||
Enter the name of the folder that will contain the new virtual machine.
|
||||
|
||||
.. note::
|
||||
|
||||
- For a clone operation from a VM/template, the new VM/template will be added
|
||||
to the same folder that the original VM/template belongs to unless specified.
|
||||
- If both folder and datacenter are specified, the folder value will be used.
|
||||
|
||||
``datacenter``
|
||||
Enter the name of the datacenter that will contain the new virtual machine.
|
||||
|
||||
.. note::
|
||||
|
||||
- For a clone operation from a VM/template, the new VM/template will be added
|
||||
to the same folder that the original VM/template belongs to unless specified.
|
||||
- If both folder and datacenter are specified, the folder value will be used.
|
||||
|
||||
``host``
|
||||
Enter the name of the target host where the virtual machine should be registered.
|
||||
|
||||
If not specified:
|
||||
|
||||
.. note::
|
||||
|
||||
- If resource pool is not specified, current host is used.
|
||||
- If resource pool is specified, and the target pool represents a stand-alone
|
||||
host, the host is used.
|
||||
- If resource pool is specified, and the target pool represents a DRS-enabled
|
||||
cluster, a host selected by DRS is used.
|
||||
- If resource pool is specified and the target pool represents a cluster without
|
||||
    DRS enabled, an InvalidArgument exception will be thrown.
|
||||
|
||||
``template``
|
||||
Specifies whether the new virtual machine should be marked as a template or not.
|
||||
Default is ``template: False``.
|
||||
|
||||
``power_on``
|
||||
Specifies whether the new virtual machine should be powered on or not. If
|
||||
``template: True`` is set, this field is ignored. Default is ``power_on: True``.
|
||||
|
||||
``extra_config``
|
||||
Specifies the additional configuration information for the virtual machine. This
|
||||
describes a set of modifications to the additional options. If the key is already
|
||||
present, it will be reset with the new value provided. Otherwise, a new option is
|
||||
added. Keys with empty values will be removed.
|
||||
|
||||
``deploy``
|
||||
Specifies if salt should be installed on the newly created VM. Default is ``True``
|
||||
so salt will be installed using the bootstrap script. If ``template: True`` or
|
||||
``power_on: False`` is set, this field is ignored and salt will not be installed.
|
||||
|
||||
``private_key``
|
||||
Specify the path to the private key to use to be able to ssh to the VM.
|
||||
|
||||
``ssh_username``
|
||||
Specify the username to use in order to ssh to the VM. Default is ``root``
|
||||
|
||||
``password``
|
||||
Specify a password to use in order to ssh to the VM. If ``private_key`` is
|
||||
specified, you do not need to specify this.
|
||||
|
||||
``minion``
|
||||
Specify custom minion configuration you want the salt minion to have. A good example
|
||||
would be to specify the ``master`` as the IP/DNS name of the master.
|
||||
|
||||
``file_map``
|
||||
Specify file/files you want to copy to the VM before the bootstrap script is run
|
||||
and salt is installed. A good example of using this would be if you need to put
|
||||
custom repo files on the server in case your server will be in a private network
|
||||
and cannot reach external networks.
|
17
doc/topics/releases/2015.5.4.rst
Normal file
17
doc/topics/releases/2015.5.4.rst
Normal file
@ -0,0 +1,17 @@
|
||||
===========================
|
||||
Salt 2015.5.4 Release Notes
|
||||
===========================
|
||||
|
||||
:release: TBA
|
||||
|
||||
Version 2015.5.4 is a bugfix release for :doc:`2015.5.0
|
||||
</topics/releases/2015.5.0>`.
|
||||
|
||||
Changes:
|
||||
|
||||
- When querying for VMs in ``digital_ocean_v2.py``, the number of VMs to include in a page was changed from 20
|
||||
(default) to 200 to reduce the number of API calls to Digital Ocean.
|
||||
|
||||
- The ``vmware`` Salt-Cloud driver was back-ported from the develop branch in order for installations of Salt
|
||||
that are older than 2015.8.0 to be able to use the ``vmware`` driver without stack-tracing on various
|
||||
deprecation paths that were implemented in the 2015.8.0 release.
|
@ -112,7 +112,7 @@ def avail_images(call=None):
|
||||
ret = {}
|
||||
|
||||
while fetch:
|
||||
items = query(method='images', command='?page=' + str(page))
|
||||
items = query(method='images', command='?page=' + str(page) + '&per_page=200')
|
||||
|
||||
for image in items['images']:
|
||||
ret[image['id']] = {}
|
||||
@ -162,7 +162,7 @@ def list_nodes(call=None):
|
||||
ret = {}
|
||||
|
||||
while fetch:
|
||||
items = query(method='droplets', command='?page=' + str(page))
|
||||
items = query(method='droplets', command='?page=' + str(page) + '&per_page=200')
|
||||
for node in items['droplets']:
|
||||
ret[node['name']] = {
|
||||
'id': node['id'],
|
||||
@ -194,7 +194,7 @@ def list_nodes_full(call=None, forOutput=True):
|
||||
ret = {}
|
||||
|
||||
while fetch:
|
||||
items = query(method='droplets', command='?page=' + str(page))
|
||||
items = query(method='droplets', command='?page=' + str(page) + '&per_page=200')
|
||||
for node in items['droplets']:
|
||||
ret[node['name']] = {}
|
||||
for item in node.keys():
|
||||
|
@ -6,6 +6,8 @@ OpenNebula Cloud Module
|
||||
The OpenNebula cloud module is used to control access to an OpenNebula
|
||||
cloud.
|
||||
|
||||
:depends: lxml
|
||||
|
||||
Use of this module requires the ``xml_rpc``, ``user`` and
|
||||
``password`` parameter to be set. Set up the cloud configuration
|
||||
at ``/etc/salt/cloud.providers`` or
|
||||
|
3609
salt/cloud/clouds/vmware.py
Normal file
3609
salt/cloud/clouds/vmware.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -17,7 +17,7 @@
|
||||
# CREATED: 10/15/2012 09:49:37 PM WEST
|
||||
#======================================================================================================================
|
||||
set -o nounset # Treat unset variables as an error
|
||||
__ScriptVersion="2015.05.07"
|
||||
__ScriptVersion="2015.07.17"
|
||||
__ScriptName="bootstrap-salt.sh"
|
||||
|
||||
#======================================================================================================================
|
||||
@ -90,7 +90,7 @@ echoinfo() {
|
||||
|
||||
#--- FUNCTION -------------------------------------------------------------------------------------------------------
|
||||
# NAME: echowarn
|
||||
# DESCRIPTION: Echo warning information to stdout.
|
||||
# DESCRIPTION: Echo warning informations to stdout.
|
||||
#----------------------------------------------------------------------------------------------------------------------
|
||||
echowarn() {
|
||||
printf "${YC} * WARN${EC}: %s\n" "$@";
|
||||
@ -211,6 +211,7 @@ _LIBCLOUD_MIN_VERSION="0.14.0"
|
||||
_PY_REQUESTS_MIN_VERSION="2.0"
|
||||
_EXTRA_PACKAGES=""
|
||||
_HTTP_PROXY=""
|
||||
_DISABLE_SALT_CHECKS=$BS_FALSE
|
||||
__SALT_GIT_CHECKOUT_DIR=${BS_SALT_GIT_CHECKOUT_DIR:-/tmp/git/salt}
|
||||
|
||||
|
||||
@ -277,6 +278,9 @@ usage() {
|
||||
-L Install the Apache Libcloud package if possible(required for salt-cloud)
|
||||
-p Extra-package to install while installing salt dependencies. One package
|
||||
per -p flag. You're responsible for providing the proper package name.
|
||||
-d Disable check_service functions. Setting this flag disables the
|
||||
'install_<distro>_check_services' checks. You can also do this by
|
||||
touching /tmp/disable_salt_checks on the target host. Defaults \${BS_FALSE}
|
||||
-H Use the specified http proxy for the installation
|
||||
-Z Enable external software source for newer ZeroMQ(Only available for RHEL/CentOS/Fedora based distributions)
|
||||
|
||||
@ -284,7 +288,7 @@ EOT
|
||||
} # ---------- end of function usage ----------
|
||||
|
||||
|
||||
while getopts ":hvnDc:Gg:k:MSNXCPFUKIA:i:Lp:H:Z" opt
|
||||
while getopts ":hvnDc:Gg:k:MSNXCPFUKIA:i:Lp:dH:Z" opt
|
||||
do
|
||||
case "${opt}" in
|
||||
|
||||
@ -333,6 +337,7 @@ do
|
||||
i ) _SALT_MINION_ID=$OPTARG ;;
|
||||
L ) _INSTALL_CLOUD=$BS_TRUE ;;
|
||||
p ) _EXTRA_PACKAGES="$_EXTRA_PACKAGES $OPTARG" ;;
|
||||
d ) _DISABLE_SALT_CHECKS=$BS_TRUE ;;
|
||||
H ) _HTTP_PROXY="$OPTARG" ;;
|
||||
Z) _ENABLE_EXTERNAL_ZMQ_REPOS=$BS_TRUE ;;
|
||||
|
||||
@ -467,6 +472,12 @@ if [ "${CALLER}x" = "${0}x" ]; then
|
||||
CALLER="PIPED THROUGH"
|
||||
fi
|
||||
|
||||
# Work around for 'Docker + salt-bootstrap failure' https://github.com/saltstack/salt-bootstrap/issues/394
|
||||
if [ ${_DISABLE_SALT_CHECKS} -eq 0 ]; then
|
||||
[ -f /tmp/disable_salt_checks ] && _DISABLE_SALT_CHECKS=$BS_TRUE && \
|
||||
echowarn "Found file: /tmp/disable_salt_checks, setting \$_DISABLE_SALT_CHECKS=true"
|
||||
fi
|
||||
|
||||
echoinfo "${CALLER} ${0} -- Version ${__ScriptVersion}"
|
||||
#echowarn "Running the unstable version of ${__ScriptName}"
|
||||
|
||||
@ -1147,7 +1158,7 @@ fi
|
||||
if ([ "${DISTRO_NAME_L}" != "ubuntu" ] && [ "$ITYPE" = "daily" ]); then
|
||||
echoerror "${DISTRO_NAME} does not have daily packages support"
|
||||
exit 1
|
||||
elif ([ "${DISTRO_NAME_L}" != "ubuntu" ] && [ "$STABLE_REV" != "latest" ]); then
|
||||
elif ([ "${DISTRO_NAME_L}" != "ubuntu" ] && [ "$ITYPE" = "stable" ] && [ "$STABLE_REV" != "latest" ]); then
|
||||
echoerror "${DISTRO_NAME} does not have major version pegged packages support"
|
||||
exit 1
|
||||
fi
|
||||
@ -1899,7 +1910,8 @@ install_ubuntu_daily() {
|
||||
|
||||
install_ubuntu_git() {
|
||||
if [ -f "${__SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then
|
||||
python setup.py install --install-layout=deb --salt-config-dir="$_SALT_ETC_DIR" || return 1
|
||||
python setup.py install --install-layout=deb --salt-config-dir="$_SALT_ETC_DIR" || \
|
||||
python setup.py --salt-config-dir="$_SALT_ETC_DIR" install --install-layout=deb || return 1
|
||||
else
|
||||
python setup.py install --install-layout=deb || return 1
|
||||
fi
|
||||
@ -1915,7 +1927,7 @@ install_ubuntu_git_post() {
|
||||
[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
|
||||
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
|
||||
|
||||
if [ -f /bin/systemctl ]; then
|
||||
if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 15 ]; then
|
||||
copyfile "${__SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service"
|
||||
|
||||
# Skip salt-api since the service should be opt-in and not necessarily started on boot
|
||||
@ -1954,8 +1966,8 @@ install_ubuntu_git_post() {
|
||||
install_ubuntu_restart_daemons() {
|
||||
[ $_START_DAEMONS -eq $BS_FALSE ] && return
|
||||
|
||||
# Ensure upstart configs are loaded
|
||||
if [ -f /bin/systemctl ]; then
|
||||
# Ensure upstart configs / systemd units are loaded
|
||||
if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 15 ]; then
|
||||
systemctl daemon-reload
|
||||
elif [ -f /sbin/initctl ]; then
|
||||
/sbin/initctl reload-configuration
|
||||
@ -1970,7 +1982,7 @@ install_ubuntu_restart_daemons() {
|
||||
#[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
|
||||
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
|
||||
|
||||
if [ -f /bin/systemctl ]; then
|
||||
if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 15 ]; then
|
||||
echodebug "There's systemd support while checking salt-$fname"
|
||||
systemctl stop salt-$fname > /dev/null 2>&1
|
||||
systemctl start salt-$fname.service
|
||||
@ -2015,7 +2027,7 @@ install_ubuntu_check_services() {
|
||||
#[ $fname = "api" ] && ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || [ "$(which salt-${fname} 2>/dev/null)" = "" ]) && continue
|
||||
[ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
|
||||
|
||||
if [ -f /bin/systemctl ]; then
|
||||
if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 15 ]; then
|
||||
__check_services_systemd salt-$fname || return 1
|
||||
elif [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then
|
||||
__check_services_upstart salt-$fname || return 1
|
||||
@ -2137,7 +2149,7 @@ _eof
|
||||
# We NEED to install the unstable dpkg or mime-support WILL fail to install
|
||||
__apt_get_install_noinput -t unstable dpkg liblzma5 python mime-support || return 1
|
||||
__apt_get_install_noinput -t unstable libzmq3 libzmq3-dev || return 1
|
||||
__apt_get_install_noinput build-essential python-dev python-pip || return 1
|
||||
__apt_get_install_noinput build-essential python-dev python-pip python-setuptools || return 1
|
||||
|
||||
# Saltstack's Unstable Debian repository
|
||||
if [ "$(grep -R 'debian.saltstack.com' /etc/apt)" = "" ]; then
|
||||
@ -2179,6 +2191,14 @@ _eof
|
||||
|
||||
__apt_get_install_noinput python-zmq || return 1
|
||||
|
||||
if [ "$_PIP_ALLOWED" -eq $BS_TRUE ]; then
|
||||
# Building pyzmq from source to build it against libzmq3.
|
||||
# Should override current installation
|
||||
# Using easy_install instead of pip because at least on Debian 6,
|
||||
# there's no default virtualenv active.
|
||||
easy_install -U pyzmq || return 1
|
||||
fi
|
||||
|
||||
if [ "${_EXTRA_PACKAGES}" != "" ]; then
|
||||
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
|
||||
# shellcheck disable=SC2086
|
||||
@ -2210,7 +2230,7 @@ install_debian_7_deps() {
|
||||
|
||||
# Debian Backports
|
||||
if [ "$(grep -R 'wheezy-backports' /etc/apt | grep -v "^#")" = "" ]; then
|
||||
echo "deb http://http.debian.net/debian wheezy-backports main" >> \
|
||||
echo "deb http://httpredir.debian.org/debian wheezy-backports main" >> \
|
||||
/etc/apt/sources.list.d/backports.list
|
||||
fi
|
||||
|
||||
@ -2278,7 +2298,7 @@ install_debian_8_deps() {
|
||||
|
||||
# Debian Backports
|
||||
if [ "$(grep -R 'jessie-backports' /etc/apt | grep -v "^#")" = "" ]; then
|
||||
echo "deb http://http.debian.net/debian jessie-backports main" >> \
|
||||
echo "deb http://httpredir.debian.org/debian jessie-backports main" >> \
|
||||
/etc/apt/sources.list.d/backports.list
|
||||
fi
|
||||
|
||||
@ -2380,7 +2400,7 @@ install_debian_6_git_deps() {
|
||||
install_debian_6_deps || return 1
|
||||
if [ "$_PIP_ALLOWED" -eq $BS_TRUE ]; then
|
||||
__PACKAGES="build-essential lsb-release python python-dev python-pkg-resources python-crypto"
|
||||
__PACKAGES="${__PACKAGES} python-m2crypto python-yaml msgpack-python python-pip"
|
||||
__PACKAGES="${__PACKAGES} python-m2crypto python-yaml msgpack-python python-pip python-setuptools"
|
||||
|
||||
if [ "$(which git)" = "" ]; then
|
||||
__PACKAGES="${__PACKAGES} git"
|
||||
@ -2435,14 +2455,6 @@ __install_debian_stable() {
|
||||
# shellcheck disable=SC2086
|
||||
__apt_get_install_noinput ${__PACKAGES} || return 1
|
||||
|
||||
if [ "$_PIP_ALLOWED" -eq $BS_TRUE ]; then
|
||||
# Building pyzmq from source to build it against libzmq3.
|
||||
# Should override current installation
|
||||
# Using easy_install instead of pip because at least on Debian 6,
|
||||
# there's no default virtualenv active.
|
||||
easy_install -U pyzmq || return 1
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
@ -2465,7 +2477,8 @@ install_debian_8_stable() {
|
||||
install_debian_git() {
|
||||
|
||||
if [ -f "${__SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then
|
||||
python setup.py install --install-layout=deb --salt-config-dir="$_SALT_ETC_DIR" || return 1
|
||||
python setup.py install --install-layout=deb --salt-config-dir="$_SALT_ETC_DIR" || \
|
||||
python setup.py --salt-config-dir="$_SALT_ETC_DIR" install --install-layout=deb || return 1
|
||||
else
|
||||
python setup.py install --install-layout=deb || return 1
|
||||
fi
|
||||
@ -2509,6 +2522,8 @@ install_debian_git_post() {
|
||||
elif [ ! -f /etc/init.d/salt-$fname ] || ([ -f /etc/init.d/salt-$fname ] && [ $_FORCE_OVERWRITE -eq $BS_TRUE ]); then
|
||||
if [ -f "${__SALT_GIT_CHECKOUT_DIR}/debian/salt-$fname.init" ]; then
|
||||
copyfile "${__SALT_GIT_CHECKOUT_DIR}/debian/salt-$fname.init" "/etc/init.d/salt-$fname"
|
||||
else
|
||||
__fetch_url "/etc/init.d/salt-$fname" "http://anonscm.debian.org/cgit/pkg-salt/salt.git/plain/debian/salt-${fname}.init"
|
||||
fi
|
||||
if [ ! -f "/etc/init.d/salt-$fname" ]; then
|
||||
echowarn "The init script for salt-$fname was not found, skipping it..."
|
||||
@ -2665,7 +2680,8 @@ install_fedora_git_deps() {
|
||||
|
||||
install_fedora_git() {
|
||||
if [ -f "${__SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then
|
||||
python setup.py install --salt-config-dir="$_SALT_ETC_DIR" || return 1
|
||||
python setup.py install --salt-config-dir="$_SALT_ETC_DIR" || \
|
||||
python setup.py --salt-config-dir="$_SALT_ETC_DIR" install || return 1
|
||||
else
|
||||
python setup.py install || return 1
|
||||
fi
|
||||
@ -2989,7 +3005,8 @@ install_centos_git() {
|
||||
_PYEXE=python2
|
||||
fi
|
||||
if [ -f "${__SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then
|
||||
$_PYEXE setup.py install --salt-config-dir="$_SALT_ETC_DIR" || return 1
|
||||
$_PYEXE setup.py install --salt-config-dir="$_SALT_ETC_DIR" || \
|
||||
$_PYEXE setup.py --salt-config-dir="$_SALT_ETC_DIR" install || return 1
|
||||
else
|
||||
$_PYEXE setup.py install || return 1
|
||||
fi
|
||||
@ -3137,11 +3154,17 @@ install_centos_check_services() {
|
||||
__test_rhel_optionals_packages() {
|
||||
__install_epel_repository || return 1
|
||||
|
||||
# Make sure yum-utils is installed
|
||||
yum list installed yum-utils > /dev/null 2>&1 || yum -y install yum-utils --enablerepo=${_EPEL_REPO} || return 1
|
||||
|
||||
if [ "$DISTRO_MAJOR_VERSION" -ge 7 ]; then
|
||||
yum-config-manager --enable \*server-optional || return 1
|
||||
fi
|
||||
|
||||
if [ "$DISTRO_MAJOR_VERSION" -ge 6 ]; then
|
||||
#python-jinja2 is in repo server-releases-optional in EC2/RHEL6
|
||||
yum-config-manager --enable rhui-\*-server-releases-optional || return 1
|
||||
|
||||
# Let's enable package installation testing, kind of, --dry-run
|
||||
echoinfo "Testing if packages usually on the optionals repository are available:"
|
||||
__YUM_CONF_DIR="$(mktemp -d)"
|
||||
@ -3746,7 +3769,8 @@ install_arch_linux_stable() {
|
||||
|
||||
install_arch_linux_git() {
|
||||
if [ -f "${__SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then
|
||||
python2 setup.py install --salt-config-dir="$_SALT_ETC_DIR" || return 1
|
||||
python2 setup.py install --salt-config-dir="$_SALT_ETC_DIR" || \
|
||||
python2 setup.py --salt-config-dir="$_SALT_ETC_DIR" install || return 1
|
||||
else
|
||||
python2 setup.py install || return 1
|
||||
fi
|
||||
@ -4060,6 +4084,17 @@ install_freebsd_git() {
|
||||
--salt-base-master-roots-dir="${_SALT_ETC_DIR}/salt-master" \
|
||||
--salt-logs-dir=/var/log/salt \
|
||||
--salt-pidfile-dir=/var/run \
|
||||
|| /usr/local/bin/python2 setup.py \
|
||||
--salt-root-dir=/usr/local \
|
||||
--salt-config-dir="${_SALT_ETC_DIR}" \
|
||||
--salt-cache-dir=/var/cache/salt \
|
||||
--salt-sock-dir=/var/run/salt \
|
||||
--salt-srv-root-dir=/srv \
|
||||
--salt-base-file-roots-dir="${_SALT_ETC_DIR}/states" \
|
||||
--salt-base-pillar-roots-dir="${_SALT_ETC_DIR}/pillar" \
|
||||
--salt-base-master-roots-dir="${_SALT_ETC_DIR}/salt-master" \
|
||||
--salt-logs-dir=/var/log/salt \
|
||||
--salt-pidfile-dir=/var/run install \
|
||||
|| return 1
|
||||
fi
|
||||
|
||||
@ -4137,6 +4172,11 @@ install_freebsd_restart_daemons() {
|
||||
install_smartos_deps() {
|
||||
pkgin -y install zeromq py27-m2crypto py27-crypto py27-msgpack py27-yaml py27-jinja2 py27-zmq py27-requests || return 1
|
||||
|
||||
# Set _SALT_ETC_DIR to SmartOS default if they didn't specify
|
||||
_SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/opt/local/etc/salt}
|
||||
# We also need to redefine the PKI directory
|
||||
_PKI_DIR=${_SALT_ETC_DIR}/pki
|
||||
|
||||
# Let's trigger config_salt()
|
||||
if [ "$_TEMP_CONFIG_DIR" = "null" ]; then
|
||||
# Let's set the configuration directory to /tmp
|
||||
@ -4156,6 +4196,10 @@ install_smartos_deps() {
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
|
||||
pkgin -y install py27-apache-libcloud || return 1
|
||||
fi
|
||||
|
||||
if [ "${_EXTRA_PACKAGES}" != "" ]; then
|
||||
echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
|
||||
# shellcheck disable=SC2086
|
||||
@ -4169,8 +4213,9 @@ install_smartos_deps() {
|
||||
install_smartos_git_deps() {
|
||||
install_smartos_deps || return 1
|
||||
|
||||
if [ "$(which git)" = "" ]; then
|
||||
pkgin -y install scmgit || return 1
|
||||
which git > /dev/null 2>&1
|
||||
if [ $? -eq 1 ]; then
|
||||
pkgin -y install git || return 1
|
||||
fi
|
||||
|
||||
if [ -f "${__SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then
|
||||
@ -4202,7 +4247,9 @@ install_smartos_stable() {
|
||||
|
||||
install_smartos_git() {
|
||||
# Use setuptools in order to also install dependencies
|
||||
USE_SETUPTOOLS=1 /opt/local/bin/python setup.py install || return 1
|
||||
# lets force our config path on the setup for now, since salt/syspaths.py only got fixed in 2015.5.0
|
||||
USE_SETUPTOOLS=1 /opt/local/bin/python setup.py install --salt-config-dir="$_SALT_ETC_DIR" || \
|
||||
USE_SETUPTOOLS=1 /opt/local/bin/python setup.py --salt-config-dir="$_SALT_ETC_DIR" install || return 1
|
||||
return 0
|
||||
}
|
||||
|
||||
@ -4742,7 +4789,7 @@ __gentoo_config_protection() {
|
||||
# this point, manually merge the changes using etc-update/dispatch-conf/
|
||||
# cfg-update and then restart the bootstrapping script, so instead we allow
|
||||
# at this point to modify certain config files directly
|
||||
export CONFIG_PROTECT_MASK="$CONFIG_PROTECT_MASK /etc/portage/package.keywords /etc/portage/package.unmask /etc/portage/package.use /etc/portage/package.license"
|
||||
export CONFIG_PROTECT_MASK="${CONFIG_PROTECT_MASK:-} /etc/portage/package.keywords /etc/portage/package.unmask /etc/portage/package.use /etc/portage/package.license"
|
||||
}
|
||||
|
||||
__gentoo_pre_dep() {
|
||||
@ -5166,12 +5213,17 @@ done
|
||||
echodebug "DAEMONS_RUNNING_FUNC=${DAEMONS_RUNNING_FUNC}"
|
||||
|
||||
# Let's get the check services function
|
||||
CHECK_SERVICES_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_check_services"
|
||||
CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_check_services"
|
||||
CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_check_services"
|
||||
CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_check_services"
|
||||
CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_check_services"
|
||||
CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_check_services"
|
||||
if [ ${_DISABLE_SALT_CHECKS} -eq $BS_FALSE ]; then
|
||||
CHECK_SERVICES_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_check_services"
|
||||
CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_check_services"
|
||||
CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_check_services"
|
||||
CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_check_services"
|
||||
CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_check_services"
|
||||
CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_check_services"
|
||||
else
|
||||
CHECK_SERVICES_FUNC_NAMES=False
|
||||
echowarn "DISABLE_SALT_CHECKS set, not setting \$CHECK_SERVICES_FUNC_NAMES"
|
||||
fi
|
||||
|
||||
CHECK_SERVICES_FUNC="null"
|
||||
for FUNC_NAME in $(__strip_duplicates "$CHECK_SERVICES_FUNC_NAMES"); do
|
||||
|
@ -134,6 +134,8 @@ class SaltRaetRoadStackSetup(ioflo.base.deeding.Deed):
|
||||
RoadStack.Bk = raeting.BodyKind.msgpack.value
|
||||
RoadStack.JoinentTimeout = 0.0
|
||||
|
||||
_prepare = postinitio
|
||||
|
||||
def action(self):
|
||||
'''
|
||||
enter action
|
||||
@ -237,6 +239,8 @@ class SaltRaetRoadStackJoiner(ioflo.base.deeding.Deed):
|
||||
self.masters = daemons.extract_masters(self.opts.value)
|
||||
# self.mha = (self.opts.value['master'], int(self.opts.value['master_port']))
|
||||
|
||||
_prepare = postinitio
|
||||
|
||||
def action(self, **kwa):
|
||||
'''
|
||||
Join with all masters
|
||||
@ -499,6 +503,8 @@ class SaltLoadModules(ioflo.base.deeding.Deed):
|
||||
def postinitio(self):
|
||||
self._load_modules()
|
||||
|
||||
_prepare = postinitio
|
||||
|
||||
def action(self):
|
||||
self._load_modules()
|
||||
|
||||
@ -626,6 +632,8 @@ class SaltSchedule(ioflo.base.deeding.Deed):
|
||||
self.modules.value,
|
||||
self.returners.value)
|
||||
|
||||
_prepare = postinitio
|
||||
|
||||
def action(self):
|
||||
'''
|
||||
Eval the schedule
|
||||
@ -665,6 +673,8 @@ class SaltRaetManorLaneSetup(ioflo.base.deeding.Deed):
|
||||
'''
|
||||
pass
|
||||
|
||||
_prepare = postinitio
|
||||
|
||||
def action(self):
|
||||
'''
|
||||
Run once at enter
|
||||
@ -1259,6 +1269,8 @@ class SaltRaetMasterEvents(ioflo.base.deeding.Deed):
|
||||
def postinitio(self):
|
||||
self.master_events.value = deque()
|
||||
|
||||
_prepare = postinitio
|
||||
|
||||
def action(self):
|
||||
if not self.master_events.value:
|
||||
return
|
||||
@ -1308,6 +1320,8 @@ class SaltRaetThreadShellJobber(ioflo.base.deeding.Deed):
|
||||
def postinitio(self):
|
||||
self.threads.value = deque()
|
||||
|
||||
_prepare = postinitio
|
||||
|
||||
def action(self):
|
||||
'''
|
||||
Evaluate the fun options and execute them via salt-call
|
||||
@ -1383,6 +1397,8 @@ class SaltRaetNixJobber(ioflo.base.deeding.Deed):
|
||||
self.serial = salt.payload.Serial(self.opts)
|
||||
self.executors.value = {}
|
||||
|
||||
_prepare = postinitio
|
||||
|
||||
def _setup_jobber_stack(self):
|
||||
'''
|
||||
Setup and return the LaneStack and Yard used by the jobber yard
|
||||
|
@ -545,10 +545,6 @@ class Client(object):
|
||||
else:
|
||||
return ''
|
||||
elif not no_cache:
|
||||
if salt.utils.is_windows():
|
||||
netloc = salt.utils.sanitize_win_path_string(url_data.netloc)
|
||||
else:
|
||||
netloc = url_data.netloc
|
||||
dest = self._extrn_path(url, saltenv)
|
||||
destdir = os.path.dirname(dest)
|
||||
if not os.path.isdir(destdir):
|
||||
@ -695,12 +691,16 @@ class Client(object):
|
||||
Return the extn_filepath for a given url
|
||||
'''
|
||||
url_data = urlparse(url)
|
||||
if salt.utils.is_windows():
|
||||
netloc = salt.utils.sanitize_win_path_string(url_data.netloc)
|
||||
else:
|
||||
netloc = url_data.netloc
|
||||
|
||||
return salt.utils.path_join(
|
||||
self.opts['cachedir'],
|
||||
'extrn_files',
|
||||
saltenv,
|
||||
url_data.netloc,
|
||||
netloc,
|
||||
url_data.path
|
||||
)
|
||||
|
||||
|
@ -1039,32 +1039,33 @@ def os_data():
|
||||
os.stat('/run/systemd/system')
|
||||
grains['init'] = 'systemd'
|
||||
except OSError:
|
||||
with salt.utils.fopen('/proc/1/cmdline') as fhr:
|
||||
init_cmdline = fhr.read().replace('\x00', ' ').split()
|
||||
init_bin = salt.utils.which(init_cmdline[0])
|
||||
if init_bin:
|
||||
supported_inits = ('upstart', 'sysvinit', 'systemd')
|
||||
edge_len = max(len(x) for x in supported_inits) - 1
|
||||
buf_size = __opts__['file_buffer_size']
|
||||
try:
|
||||
with open(init_bin, 'rb') as fp_:
|
||||
buf = True
|
||||
edge = ''
|
||||
buf = fp_.read(buf_size).lower()
|
||||
while buf:
|
||||
buf = edge + buf
|
||||
for item in supported_inits:
|
||||
if item in buf:
|
||||
grains['init'] = item
|
||||
buf = ''
|
||||
break
|
||||
edge = buf[-edge_len:]
|
||||
if os.path.exists('/proc/1/cmdline'):
|
||||
with salt.utils.fopen('/proc/1/cmdline') as fhr:
|
||||
init_cmdline = fhr.read().replace('\x00', ' ').split()
|
||||
init_bin = salt.utils.which(init_cmdline[0])
|
||||
if init_bin:
|
||||
supported_inits = ('upstart', 'sysvinit', 'systemd')
|
||||
edge_len = max(len(x) for x in supported_inits) - 1
|
||||
buf_size = __opts__['file_buffer_size']
|
||||
try:
|
||||
with open(init_bin, 'rb') as fp_:
|
||||
buf = True
|
||||
edge = ''
|
||||
buf = fp_.read(buf_size).lower()
|
||||
except (IOError, OSError) as exc:
|
||||
log.error(
|
||||
'Unable to read from init_bin ({0}): {1}'
|
||||
.format(init_bin, exc)
|
||||
)
|
||||
while buf:
|
||||
buf = edge + buf
|
||||
for item in supported_inits:
|
||||
if item in buf:
|
||||
grains['init'] = item
|
||||
buf = ''
|
||||
break
|
||||
edge = buf[-edge_len:]
|
||||
buf = fp_.read(buf_size).lower()
|
||||
except (IOError, OSError) as exc:
|
||||
log.error(
|
||||
'Unable to read from init_bin ({0}): {1}'
|
||||
.format(init_bin, exc)
|
||||
)
|
||||
|
||||
# Add lsb grains on any distro with lsb-release
|
||||
try:
|
||||
|
@ -1,6 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Compendium of generic DNS utilities
|
||||
Compendium of generic DNS utilities.
|
||||
The 'dig' command line tool must be installed in order to use this module.
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
|
@ -3207,7 +3207,7 @@ def check_file_meta(
|
||||
if contents is not None:
|
||||
# Write a tempfile with the static contents
|
||||
tmp = salt.utils.mkstemp(text=True)
|
||||
with salt.utils.fopen(tmp, 'w') as tmp_:
|
||||
with salt.utils.fopen(tmp, 'wb') as tmp_:
|
||||
tmp_.write(str(contents))
|
||||
# Compare the static contents with the named file
|
||||
with contextlib.nested(
|
||||
|
@ -134,6 +134,9 @@ def _query(function, api_key=None, api_version=None, method='GET', data=None):
|
||||
elif api_version == 'v2':
|
||||
headers['Authorization'] = 'Bearer {0}'.format(api_key)
|
||||
data = json.dumps(data)
|
||||
|
||||
if method == 'POST':
|
||||
headers['Content-Type'] = 'application/json'
|
||||
else:
|
||||
log.error('Unsupported HipChat API version')
|
||||
return False
|
||||
|
@ -2870,7 +2870,7 @@ def set_dns(name, dnsservers=None, searchdomains=None):
|
||||
name, 'sh -c "chmod +x {0};{0}"'.format(script), python_shell=True)
|
||||
# blindly delete the setter file
|
||||
run_all(name,
|
||||
'if [ -f "{0}" ];then rm -f "{0}";fi'.format(script),
|
||||
'sh -c \'if [ -f "{0}" ];then rm -f "{0}";fi\''.format(script),
|
||||
python_shell=True)
|
||||
if result['retcode'] != 0:
|
||||
error = ('Unable to write to /etc/resolv.conf in container \'{0}\''
|
||||
@ -2907,7 +2907,7 @@ def running_systemd(name, cache=True):
|
||||
'''\
|
||||
#!/usr/bin/env bash
|
||||
set -x
|
||||
if ! which systemctl 1>/dev/nulll 2>/dev/null;then exit 2;fi
|
||||
if ! which systemctl 1>/dev/null 2>/dev/null;then exit 2;fi
|
||||
for i in \\
|
||||
/run/systemd/journal/dev-log\\
|
||||
/run/systemd/journal/flushed\\
|
||||
@ -3191,24 +3191,21 @@ def bootstrap(name,
|
||||
if install:
|
||||
rstr = __salt__['test.rand_str']()
|
||||
configdir = '/tmp/.c_{0}'.format(rstr)
|
||||
run(name,
|
||||
'install -m 0700 -d {0}'.format(configdir),
|
||||
python_shell=False)
|
||||
|
||||
cmd = 'install -m 0700 -d {0}'.format(configdir)
|
||||
if run(name, cmd, python_shell=False):
|
||||
log.error('tmpdir {0} creation failed ({1}'
|
||||
.format(configdir, cmd))
|
||||
return False
|
||||
|
||||
bs_ = __salt__['config.gather_bootstrap_script'](
|
||||
bootstrap=bootstrap_url)
|
||||
dest_dir = os.path.join('/tmp', rstr)
|
||||
for cmd in [
|
||||
'mkdir -p {0}'.format(dest_dir),
|
||||
'chmod 700 {0}'.format(dest_dir),
|
||||
]:
|
||||
if run_stdout(name, cmd):
|
||||
log.error(
|
||||
('tmpdir {0} creation'
|
||||
' failed ({1}').format(dest_dir, cmd))
|
||||
return False
|
||||
cp(name,
|
||||
bs_,
|
||||
'{0}/bootstrap.sh'.format(dest_dir))
|
||||
script = '/sbin/{0}_bootstrap.sh'.format(rstr)
|
||||
cp(name, bs_, script)
|
||||
result = run_all(name,
|
||||
'sh -c "chmod +x {0};{0}"'''.format(script),
|
||||
python_shell=True)
|
||||
|
||||
cp(name, cfg_files['config'],
|
||||
os.path.join(configdir, 'minion'))
|
||||
cp(name, cfg_files['privkey'],
|
||||
@ -3216,16 +3213,22 @@ def bootstrap(name,
|
||||
cp(name, cfg_files['pubkey'],
|
||||
os.path.join(configdir, 'minion.pub'))
|
||||
bootstrap_args = bootstrap_args.format(configdir)
|
||||
cmd = ('{0} {2}/bootstrap.sh {1}'
|
||||
cmd = ('{0} {2} {1}'
|
||||
.format(bootstrap_shell,
|
||||
bootstrap_args.replace("'", "''"),
|
||||
dest_dir))
|
||||
script))
|
||||
# log ASAP the forged bootstrap command which can be wrapped
|
||||
# out of the output in case of unexpected problem
|
||||
log.info('Running {0} in LXC container \'{1}\''
|
||||
.format(cmd, name))
|
||||
ret = retcode(name, cmd, output_loglevel='info',
|
||||
use_vt=True) == 0
|
||||
|
||||
run_all(name,
|
||||
'sh -c \'if [ -f "{0}" ];then rm -f "{0}";fi\''
|
||||
''.format(script),
|
||||
ignore_retcode=True,
|
||||
python_shell=True)
|
||||
else:
|
||||
ret = False
|
||||
else:
|
||||
|
@ -379,6 +379,7 @@ def set_fstab(
|
||||
# Try to guess right criteria for auto....
|
||||
# NOTE: missing some special fstypes here
|
||||
specialFSes = frozenset([
|
||||
'none',
|
||||
'tmpfs',
|
||||
'sysfs',
|
||||
'proc',
|
||||
|
@ -57,6 +57,27 @@ def _get_rabbitmq_plugin():
|
||||
return rabbitmq
|
||||
|
||||
|
||||
def _strip_listing_to_done(output_list):
|
||||
'''Conditionally remove non-relevant first and last line,
|
||||
"Listing ..." - "...done".
|
||||
outputlist: rabbitmq command output split by newline
|
||||
return value: list, conditionally modified, may be empty.
|
||||
'''
|
||||
|
||||
# conditionally remove non-relevant first line
|
||||
f_line = ''.join(output_list[:1])
|
||||
if f_line.startswith('Listing') and f_line.endswith('...'):
|
||||
output_list.pop(0)
|
||||
|
||||
# some versions of rabbitmq have no trailing '...done' line,
|
||||
# which some versions do not output.
|
||||
l_line = ''.join(output_list[-1:])
|
||||
if l_line == '...done':
|
||||
output_list.pop()
|
||||
|
||||
return output_list
|
||||
|
||||
|
||||
def _output_to_dict(cmdoutput, values_mapper=None):
|
||||
'''Convert rabbitmqctl output to a dict of data
|
||||
cmdoutput: string output of rabbitmqctl commands
|
||||
@ -67,11 +88,11 @@ def _output_to_dict(cmdoutput, values_mapper=None):
|
||||
values_mapper = lambda string: string.split('\t')
|
||||
|
||||
# remove first and last line: Listing ... - ...done
|
||||
data_rows = cmdoutput.splitlines()[1:-1]
|
||||
data_rows = _strip_listing_to_done(cmdoutput.splitlines())
|
||||
|
||||
for row in data_rows:
|
||||
key, values = row.split('\t', 1)
|
||||
ret[key] = values_mapper(values)
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
@ -111,7 +132,7 @@ def list_vhosts(runas=None):
|
||||
runas=runas)
|
||||
|
||||
# remove first and last line: Listing ... - ...done
|
||||
return res.splitlines()[1:-1]
|
||||
return _strip_listing_to_done(res.splitlines())
|
||||
|
||||
|
||||
def user_exists(name, runas=None):
|
||||
|
@ -1,6 +1,11 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Manage ruby installations with rbenv.
|
||||
Manage ruby installations with rbenv. Rbenv is supported on Linux and Mac OS X.
|
||||
Rbenv doesn't work on Windows (and isn't really necessary on Windows as there is
|
||||
no system Ruby on Windows). On Windows, the RubyInstaller and/or Pik are both
|
||||
good alternatives to work with multiple versions of Ruby on the same box.
|
||||
|
||||
http://misheska.com/blog/2013/06/15/using-rbenv-to-manage-multiple-versions-of-ruby/
|
||||
|
||||
.. versionadded:: 0.16.0
|
||||
'''
|
||||
@ -30,6 +35,15 @@ __opts__ = {
|
||||
}
|
||||
|
||||
|
||||
def __virtual__():
|
||||
"""
|
||||
Only work on POSIX-like systems
|
||||
"""
|
||||
if salt.utils.is_windows():
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def _shlex_split(s):
|
||||
# from python:shlex.split: passing None for s will read
|
||||
# the string to split from standard input.
|
||||
|
@ -300,7 +300,7 @@ def add(name, **kwargs):
|
||||
|
||||
salt '*' schedule.add job1 function='test.ping' seconds=3600
|
||||
# If function have some arguments, use job_args
|
||||
salt '*' schedule.add job2 function='cmd.run' job_args=['date >> /tmp/date.log'] seconds=60
|
||||
salt '*' schedule.add job2 function='cmd.run' job_args="['date >> /tmp/date.log']" seconds=60
|
||||
'''
|
||||
|
||||
ret = {'comment': [],
|
||||
|
@ -40,6 +40,12 @@ def get(**kwargs):
|
||||
|
||||
cmd = 'sysrc -v'
|
||||
|
||||
if 'file' in kwargs:
|
||||
cmd += ' -f '+kwargs['file']
|
||||
|
||||
if 'jail' in kwargs:
|
||||
cmd += ' -j '+kwargs['jail']
|
||||
|
||||
if 'name' in kwargs:
|
||||
cmd += ' '+kwargs['name']
|
||||
elif kwargs.get('includeDefaults', False):
|
||||
@ -47,12 +53,6 @@ def get(**kwargs):
|
||||
else:
|
||||
cmd += ' -a'
|
||||
|
||||
if 'file' in kwargs:
|
||||
cmd += ' -f '+kwargs['file']
|
||||
|
||||
if 'jail' in kwargs:
|
||||
cmd += ' -j '+kwargs['jail']
|
||||
|
||||
sysrcs = __salt__['cmd.run'](cmd)
|
||||
if "sysrc: unknown variable" in sysrcs:
|
||||
# raise CommandExecutionError(sysrcs)
|
||||
|
@ -4,7 +4,8 @@ A salt module for SSL/TLS.
|
||||
Can create a Certificate Authority (CA)
|
||||
or use Self-Signed certificates.
|
||||
|
||||
:depends: - PyOpenSSL Python module (0.14 or later)
|
||||
:depends: - PyOpenSSL Python module (0.10 or later, 0.14 or later for
|
||||
X509 extension support)
|
||||
:configuration: Add the following values in /etc/salt/minion for the CA module
|
||||
to function properly::
|
||||
|
||||
@ -113,6 +114,7 @@ from distutils.version import LooseVersion
|
||||
import re
|
||||
|
||||
HAS_SSL = False
|
||||
X509_EXT_ENABLED = True
|
||||
try:
|
||||
import OpenSSL
|
||||
HAS_SSL = True
|
||||
@ -133,9 +135,15 @@ def __virtual__():
|
||||
'''
|
||||
Only load this module if the ca config options are set
|
||||
'''
|
||||
if HAS_SSL and OpenSSL_version >= LooseVersion('0.14'):
|
||||
if OpenSSL_version <= LooseVersion('0.15'):
|
||||
log.warn('You should upgrade pyOpenSSL to at least 0.15.1')
|
||||
global X509_EXT_ENABLED
|
||||
if HAS_SSL and OpenSSL_version >= LooseVersion('0.10'):
|
||||
if OpenSSL_version < LooseVersion('0.14'):
|
||||
X509_EXT_ENABLED = False
|
||||
log.error('You should upgrade pyOpenSSL to at least 0.14.1 '
|
||||
'to enable the use of X509 extensions')
|
||||
elif OpenSSL_version <= LooseVersion('0.15'):
|
||||
log.warn('You should upgrade pyOpenSSL to at least 0.15.1 '
|
||||
'to enable the full use of X509 extensions')
|
||||
# never EVER reactivate this code, this has been done too many times.
|
||||
# not having configured a cert path in the configuration does not
|
||||
# mean that users cant use this module as we provide methods
|
||||
@ -147,9 +155,9 @@ def __virtual__():
|
||||
# return False
|
||||
return True
|
||||
else:
|
||||
return False, ['PyOpenSSL version 0.14 or later'
|
||||
' must be installed before '
|
||||
' this module can be used.']
|
||||
X509_EXT_ENABLED = False
|
||||
return False, ['PyOpenSSL version 0.10 or later must be installed '
|
||||
'before this module can be used.']
|
||||
|
||||
|
||||
def cert_base_path(cacert_path=None):
|
||||
@ -686,20 +694,21 @@ def create_ca(ca_name,
|
||||
ca.set_issuer(ca.get_subject())
|
||||
ca.set_pubkey(key)
|
||||
|
||||
ca.add_extensions([
|
||||
OpenSSL.crypto.X509Extension('basicConstraints', True,
|
||||
'CA:TRUE, pathlen:0'),
|
||||
OpenSSL.crypto.X509Extension('keyUsage', True,
|
||||
'keyCertSign, cRLSign'),
|
||||
OpenSSL.crypto.X509Extension('subjectKeyIdentifier', False, 'hash',
|
||||
subject=ca)])
|
||||
if X509_EXT_ENABLED:
|
||||
ca.add_extensions([
|
||||
OpenSSL.crypto.X509Extension('basicConstraints', True,
|
||||
'CA:TRUE, pathlen:0'),
|
||||
OpenSSL.crypto.X509Extension('keyUsage', True,
|
||||
'keyCertSign, cRLSign'),
|
||||
OpenSSL.crypto.X509Extension('subjectKeyIdentifier', False,
|
||||
'hash', subject=ca)])
|
||||
|
||||
ca.add_extensions([
|
||||
OpenSSL.crypto.X509Extension(
|
||||
'authorityKeyIdentifier',
|
||||
False,
|
||||
'issuer:always,keyid:always',
|
||||
issuer=ca)])
|
||||
ca.add_extensions([
|
||||
OpenSSL.crypto.X509Extension(
|
||||
'authorityKeyIdentifier',
|
||||
False,
|
||||
'issuer:always,keyid:always',
|
||||
issuer=ca)])
|
||||
ca.sign(key, digest)
|
||||
|
||||
# alway backup existing keys in case
|
||||
@ -754,6 +763,10 @@ def get_extensions(cert_type):
|
||||
|
||||
'''
|
||||
|
||||
assert X509_EXT_ENABLED, ('X509 extensions are not supported in '
|
||||
'pyOpenSSL prior to version 0.15.1. Your '
|
||||
'version: {0}'.format(OpenSSL_version))
|
||||
|
||||
ext = {}
|
||||
if cert_type == '':
|
||||
log.error('cert_type set to empty in tls_ca.get_extensions(); '
|
||||
@ -974,21 +987,36 @@ def create_csr(ca_name,
|
||||
req.get_subject().CN = CN
|
||||
req.get_subject().emailAddress = emailAddress
|
||||
|
||||
extensions = get_extensions(cert_type)['csr']
|
||||
extension_adds = []
|
||||
try:
|
||||
extensions = get_extensions(cert_type)['csr']
|
||||
|
||||
for ext, value in extensions.items():
|
||||
extension_adds.append(OpenSSL.crypto.X509Extension(ext, False, value))
|
||||
extension_adds = []
|
||||
|
||||
for ext, value in extensions.items():
|
||||
extension_adds.append(OpenSSL.crypto.X509Extension(ext, False,
|
||||
value))
|
||||
|
||||
except AssertionError as err:
|
||||
log.error(err)
|
||||
extensions = []
|
||||
|
||||
if subjectAltName:
|
||||
if isinstance(subjectAltName, str):
|
||||
subjectAltName = [subjectAltName]
|
||||
if X509_EXT_ENABLED:
|
||||
if isinstance(subjectAltName, str):
|
||||
subjectAltName = [subjectAltName]
|
||||
|
||||
extension_adds.append(
|
||||
OpenSSL.crypto.X509Extension(
|
||||
'subjectAltName', False, ", ".join(subjectAltName)))
|
||||
extension_adds.append(
|
||||
OpenSSL.crypto.X509Extension(
|
||||
'subjectAltName', False, ", ".join(subjectAltName)))
|
||||
else:
|
||||
raise ValueError('subjectAltName cannot be set as X509 '
|
||||
'extensions are not supported in pyOpenSSL '
|
||||
'prior to version 0.15.1. Your '
|
||||
'version: {0}.'.format(OpenSSL_version))
|
||||
|
||||
if X509_EXT_ENABLED:
|
||||
req.add_extensions(extension_adds)
|
||||
|
||||
req.add_extensions(extension_adds)
|
||||
req.set_pubkey(key)
|
||||
req.sign(key, digest)
|
||||
|
||||
@ -1344,8 +1372,6 @@ def create_ca_signed_cert(ca_name,
|
||||
exts = []
|
||||
try:
|
||||
exts.extend(req.get_extensions())
|
||||
log.debug('req.get_extensions() supported in pyOpenSSL {0}'.format(
|
||||
OpenSSL.__dict__.get('__version__', '')))
|
||||
except AttributeError:
|
||||
try:
|
||||
# see: http://bazaar.launchpad.net/~exarkun/pyopenssl/master/revision/189
|
||||
@ -1353,9 +1379,9 @@ def create_ca_signed_cert(ca_name,
|
||||
# so we mimic the newly get_extensions method present in ultra
|
||||
# recent pyopenssl distros
|
||||
log.info('req.get_extensions() not supported in pyOpenSSL versions '
|
||||
'prior to 0.15. Switching to Dark Magic(tm) '
|
||||
'prior to 0.15. Processing extensions internally. '
|
||||
' Your version: {0}'.format(
|
||||
OpenSSL.__dict__.get('__version__', 'pre-2014')))
|
||||
OpenSSL_version))
|
||||
|
||||
native_exts_obj = OpenSSL._util.lib.X509_REQ_get_extensions(
|
||||
req._req)
|
||||
@ -1369,10 +1395,9 @@ def create_ca_signed_cert(ca_name,
|
||||
exts.append(ext)
|
||||
except Exception:
|
||||
log.error('X509 extensions are unsupported in pyOpenSSL '
|
||||
'versions prior to 0.14. Upgrade required. Current '
|
||||
'version: {0}'.format(
|
||||
OpenSSL.__dict__.get('__version__', 'pre-2014'))
|
||||
)
|
||||
'versions prior to 0.14. Upgrade required to '
|
||||
'use extensions. Current version: {0}'.format(
|
||||
OpenSSL_version))
|
||||
|
||||
cert = OpenSSL.crypto.X509()
|
||||
cert.set_version(2)
|
||||
|
@ -872,7 +872,8 @@ def stats(path, hash_type='md5', follow_symlinks=True):
|
||||
ret['ctime'] = pstat.st_ctime
|
||||
ret['size'] = pstat.st_size
|
||||
ret['mode'] = str(oct(stat.S_IMODE(pstat.st_mode)))
|
||||
ret['sum'] = get_sum(path, hash_type)
|
||||
if hash_type:
|
||||
ret['sum'] = get_sum(path, hash_type)
|
||||
ret['type'] = 'file'
|
||||
if stat.S_ISDIR(pstat.st_mode):
|
||||
ret['type'] = 'dir'
|
||||
|
@ -643,7 +643,11 @@ def _get_name_map():
|
||||
'''
|
||||
Return a reverse map of full pkg names to the names recognized by winrepo.
|
||||
'''
|
||||
return get_repo_data().get('name_map', {})
|
||||
u_name_map = {}
|
||||
name_map = get_repo_data().get('name_map', {})
|
||||
for k in name_map.keys():
|
||||
u_name_map[salt.utils.sdecode(k)] = name_map[k]
|
||||
return u_name_map
|
||||
|
||||
|
||||
def _get_package_info(name):
|
||||
|
@ -216,6 +216,7 @@ class SaltClientsMixIn(object):
|
||||
'local_batch': local_client.cmd_batch,
|
||||
'local_async': local_client.run_job,
|
||||
'runner': salt.runner.RunnerClient(opts=self.application.opts).async,
|
||||
'runner_async': None, # empty, since we use the same client as `runner`
|
||||
}
|
||||
return SaltClientsMixIn.__saltclients
|
||||
|
||||
@ -717,7 +718,7 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): # pylint: disable=W
|
||||
Content-Type: application/json
|
||||
Content-Legnth: 83
|
||||
|
||||
{"clients": ["local", "local_batch", "local_async","runner"], "return": "Welcome"}
|
||||
{"clients": ["local", "local_batch", "local_async", "runner", "runner_async"], "return": "Welcome"}
|
||||
'''
|
||||
ret = {"clients": self.saltclients.keys(),
|
||||
"return": "Welcome"}
|
||||
@ -1031,6 +1032,15 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): # pylint: disable=W
|
||||
except TimeoutException:
|
||||
raise tornado.gen.Return('Timeout waiting for runner to execute')
|
||||
|
||||
@tornado.gen.coroutine
|
||||
def _disbatch_runner_async(self, chunk):
|
||||
'''
|
||||
Disbatch runner client_async commands
|
||||
'''
|
||||
f_call = {'args': [chunk['fun'], chunk]}
|
||||
pub_data = self.saltclients['runner'](chunk['fun'], chunk)
|
||||
raise tornado.gen.Return(pub_data)
|
||||
|
||||
|
||||
class MinionSaltAPIHandler(SaltAPIHandler): # pylint: disable=W0223
|
||||
'''
|
||||
|
@ -109,7 +109,7 @@ def ext_pillar(minion_id,
|
||||
bucket,
|
||||
key,
|
||||
keyid,
|
||||
verify_ssl,
|
||||
verify_ssl=True,
|
||||
multiple_env=False,
|
||||
environment='base',
|
||||
prefix='',
|
||||
|
@ -1011,7 +1011,7 @@ def running(name,
|
||||
image_exists = iinfos['status']
|
||||
is_running = False
|
||||
if already_exists:
|
||||
is_running = __salt__['docker.is_running'](container)
|
||||
is_running = __salt__['docker.is_running'](name)
|
||||
# if container exists but is not started, try to start it
|
||||
if already_exists and (is_running or not start):
|
||||
return _valid(comment='container {0!r} already exists'.format(name))
|
||||
|
@ -3606,11 +3606,11 @@ def copy(
|
||||
if force and os.path.isfile(name):
|
||||
hash1 = salt.utils.get_hash(name)
|
||||
hash2 = salt.utils.get_hash(source)
|
||||
if hash1 != hash2:
|
||||
if hash1 == hash2:
|
||||
changed = False
|
||||
if not force:
|
||||
changed = False
|
||||
elif not __opts__['test']:
|
||||
elif not __opts__['test'] and changed:
|
||||
# Remove the destination to prevent problems later
|
||||
try:
|
||||
if os.path.islink(name):
|
||||
|
@ -1719,7 +1719,7 @@ def get_hash(path, form='md5', chunk_size=65536):
|
||||
'''
|
||||
try:
|
||||
hash_type = getattr(hashlib, form)
|
||||
except AttributeError:
|
||||
except (AttributeError, TypeError):
|
||||
raise ValueError('Invalid hash type: {0}'.format(form))
|
||||
with salt.utils.fopen(path, 'rb') as ifile:
|
||||
hash_obj = hash_type()
|
||||
|
@ -139,8 +139,9 @@ def sig2(method, endpoint, params, provider, aws_api_version):
|
||||
return params_with_headers
|
||||
|
||||
|
||||
def sig4(method, endpoint, params, prov_dict, aws_api_version, location,
|
||||
product='ec2', uri='/', requesturl=None):
|
||||
def sig4(method, endpoint, params, prov_dict,
|
||||
aws_api_version=DEFAULT_AWS_API_VERSION, location=DEFAULT_LOCATION,
|
||||
product='ec2', uri='/', requesturl=None, data=''):
|
||||
'''
|
||||
Sign a query against AWS services using Signature Version 4 Signing
|
||||
Process. This is documented at:
|
||||
@ -155,7 +156,8 @@ def sig4(method, endpoint, params, prov_dict, aws_api_version, location,
|
||||
access_key_id, secret_access_key, token = creds(prov_dict)
|
||||
|
||||
params_with_headers = params.copy()
|
||||
params_with_headers['Version'] = aws_api_version
|
||||
if product != 's3':
|
||||
params_with_headers['Version'] = aws_api_version
|
||||
keys = sorted(params_with_headers.keys())
|
||||
values = list(map(params_with_headers.get, keys))
|
||||
querystring = urlencode(list(zip(keys, values))).replace('+', '%20')
|
||||
@ -173,7 +175,7 @@ def sig4(method, endpoint, params, prov_dict, aws_api_version, location,
|
||||
|
||||
# Create payload hash (hash of the request body content). For GET
|
||||
# requests, the payload is an empty string ('').
|
||||
payload_hash = hashlib.sha256('').hexdigest()
|
||||
payload_hash = hashlib.sha256(data).hexdigest()
|
||||
|
||||
# Combine elements to create create canonical request
|
||||
canonical_request = '\n'.join((
|
||||
@ -223,7 +225,8 @@ def sig4(method, endpoint, params, prov_dict, aws_api_version, location,
|
||||
|
||||
headers = {
|
||||
'x-amz-date': amzdate,
|
||||
'Authorization': authorization_header
|
||||
'x-amz-content-sha256': payload_hash,
|
||||
'Authorization': authorization_header,
|
||||
}
|
||||
|
||||
# Add in security token if we have one
|
||||
|
@ -423,6 +423,9 @@ def bootstrap(vm_, opts):
|
||||
'win_installer', vm_, opts
|
||||
)
|
||||
if win_installer:
|
||||
deploy_kwargs['port'] = salt.config.get_cloud_config_value(
|
||||
'smb_port', vm_, opts, default=445
|
||||
)
|
||||
deploy_kwargs['win_installer'] = win_installer
|
||||
minion = salt.utils.cloud.minion_config(opts, vm_)
|
||||
deploy_kwargs['master'] = minion['master']
|
||||
|
@ -498,7 +498,7 @@ def _interfaces_ifconfig(out):
|
||||
else:
|
||||
pip = re.compile(r'.*?(?:inet addr:|inet )(.*?)\s')
|
||||
pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)')
|
||||
pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+)).*')
|
||||
pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?')
|
||||
pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))')
|
||||
pupdown = re.compile('UP')
|
||||
pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)')
|
||||
@ -545,6 +545,9 @@ def _interfaces_ifconfig(out):
|
||||
mmask6 = pmask6.match(line)
|
||||
if mmask6:
|
||||
addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2)
|
||||
if not salt.utils.is_sunos():
|
||||
ipv6scope = mmask6.group(3) or mmask6.group(4)
|
||||
addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope
|
||||
data['inet6'].append(addr_obj)
|
||||
data['up'] = updown
|
||||
if iface in ret:
|
||||
|
@ -6,6 +6,7 @@ from __future__ import absolute_import
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
import types
|
||||
import sys
|
||||
import multiprocessing
|
||||
import signal
|
||||
@ -235,7 +236,20 @@ class ProcessManager(object):
|
||||
process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs)
|
||||
|
||||
process.start()
|
||||
log.debug("Started '{0}' with pid {1}".format(tgt.__name__, process.pid))
|
||||
|
||||
# create a nicer name for the debug log
|
||||
if isinstance(tgt, types.FunctionType):
|
||||
name = '{0}.{1}'.format(
|
||||
tgt.__module__,
|
||||
tgt.__name__,
|
||||
)
|
||||
else:
|
||||
name = '{0}.{1}.{2}'.format(
|
||||
tgt.__module__,
|
||||
tgt.__class__,
|
||||
tgt.__name__,
|
||||
)
|
||||
log.debug("Started '{0}' with pid {1}".format(name, process.pid))
|
||||
self._process_map[process.pid] = {'tgt': tgt,
|
||||
'args': args,
|
||||
'kwargs': kwargs,
|
||||
|
@ -7,10 +7,6 @@ Connection library for Amazon S3
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Import Python libs
|
||||
import binascii
|
||||
import datetime
|
||||
import hashlib
|
||||
import hmac
|
||||
import logging
|
||||
|
||||
# Import 3rd-party libs
|
||||
@ -19,21 +15,22 @@ try:
|
||||
HAS_REQUESTS = True # pylint: disable=W0612
|
||||
except ImportError:
|
||||
HAS_REQUESTS = False # pylint: disable=W0612
|
||||
from salt.ext.six.moves.urllib.parse import urlencode # pylint: disable=no-name-in-module,import-error
|
||||
|
||||
# Import Salt libs
|
||||
import salt.utils
|
||||
import salt.utils.aws
|
||||
import salt.utils.xmlutil as xml
|
||||
import salt.utils.iam as iam
|
||||
from salt._compat import ElementTree as ET
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
DEFAULT_LOCATION = 'us-east-1'
|
||||
|
||||
|
||||
def query(key, keyid, method='GET', params=None, headers=None,
|
||||
requesturl=None, return_url=False, bucket=None, service_url=None,
|
||||
path=None, return_bin=False, action=None, local_file=None,
|
||||
verify_ssl=True):
|
||||
path='', return_bin=False, action=None, local_file=None,
|
||||
verify_ssl=True, location=DEFAULT_LOCATION):
|
||||
'''
|
||||
Perform a query against an S3-like API. This function requires that a
|
||||
secret key and the id for that key are passed in. For instance:
|
||||
@ -71,9 +68,6 @@ def query(key, keyid, method='GET', params=None, headers=None,
|
||||
if not params:
|
||||
params = {}
|
||||
|
||||
if path is None:
|
||||
path = ''
|
||||
|
||||
if not service_url:
|
||||
service_url = 's3.amazonaws.com'
|
||||
|
||||
@ -90,75 +84,33 @@ def query(key, keyid, method='GET', params=None, headers=None,
|
||||
keyid = iam_creds['access_key']
|
||||
token = iam_creds['security_token']
|
||||
|
||||
if not requesturl:
|
||||
x_amz_date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
|
||||
content_type = 'text/plain'
|
||||
if method == 'GET':
|
||||
if bucket:
|
||||
can_resource = '/{0}/{1}'.format(bucket, path)
|
||||
else:
|
||||
can_resource = '/'
|
||||
elif method == 'PUT' or method == 'HEAD' or method == 'DELETE':
|
||||
if path:
|
||||
can_resource = '/{0}/{1}'.format(bucket, path)
|
||||
else:
|
||||
can_resource = '/{0}/'.format(bucket)
|
||||
|
||||
if action:
|
||||
can_resource += '?{0}'.format(action)
|
||||
|
||||
log.debug('CanonicalizedResource: {0}'.format(can_resource))
|
||||
|
||||
headers['Host'] = endpoint
|
||||
headers['Content-type'] = content_type
|
||||
headers['Date'] = x_amz_date
|
||||
if token:
|
||||
headers['x-amz-security-token'] = token
|
||||
|
||||
string_to_sign = '{0}\n'.format(method)
|
||||
|
||||
new_headers = []
|
||||
for header in sorted(headers):
|
||||
if header.lower().startswith('x-amz'):
|
||||
log.debug(header.lower())
|
||||
new_headers.append('{0}:{1}'.format(header.lower(),
|
||||
headers[header]))
|
||||
can_headers = '\n'.join(new_headers)
|
||||
log.debug('CanonicalizedAmzHeaders: {0}'.format(can_headers))
|
||||
|
||||
string_to_sign += '\n{0}'.format(content_type)
|
||||
string_to_sign += '\n{0}'.format(x_amz_date)
|
||||
if can_headers:
|
||||
string_to_sign += '\n{0}'.format(can_headers)
|
||||
string_to_sign += '\n{0}'.format(can_resource)
|
||||
log.debug('String To Sign:: \n{0}'.format(string_to_sign))
|
||||
|
||||
hashed = hmac.new(key, string_to_sign, hashlib.sha1)
|
||||
sig = binascii.b2a_base64(hashed.digest())
|
||||
headers['Authorization'] = 'AWS {0}:{1}'.format(keyid, sig.strip())
|
||||
|
||||
querystring = urlencode(params)
|
||||
if action:
|
||||
if querystring:
|
||||
querystring = '{0}&{1}'.format(action, querystring)
|
||||
else:
|
||||
querystring = action
|
||||
requesturl = 'https://{0}/'.format(endpoint)
|
||||
if path:
|
||||
requesturl += path
|
||||
if querystring:
|
||||
requesturl += '?{0}'.format(querystring)
|
||||
|
||||
data = None
|
||||
data = ''
|
||||
if method == 'PUT':
|
||||
if local_file:
|
||||
with salt.utils.fopen(local_file, 'r') as ifile:
|
||||
data = ifile.read()
|
||||
|
||||
if not requesturl:
|
||||
requesturl = 'https://{0}/{1}'.format(endpoint, path)
|
||||
headers, requesturl = salt.utils.aws.sig4(
|
||||
method,
|
||||
endpoint,
|
||||
params,
|
||||
data=data,
|
||||
uri='/{0}'.format(path),
|
||||
prov_dict={'id': keyid, 'key': key},
|
||||
location=location,
|
||||
product='s3',
|
||||
requesturl=requesturl,
|
||||
)
|
||||
|
||||
log.debug('S3 Request: {0}'.format(requesturl))
|
||||
log.debug('S3 Headers::')
|
||||
log.debug(' Authorization: {0}'.format(headers['Authorization']))
|
||||
|
||||
if not data:
|
||||
data = None
|
||||
|
||||
try:
|
||||
result = requests.request(method, requesturl, headers=headers,
|
||||
data=data,
|
||||
|
@ -203,7 +203,7 @@ def verify_env(dirs, user, permissive=False, pki_dir=''):
|
||||
err = ('Failed to prepare the Salt environment for user '
|
||||
'{0}. The user is not available.\n').format(user)
|
||||
sys.stderr.write(err)
|
||||
sys.exit(salt.defulats.exitcodes.EX_NOUSER)
|
||||
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
|
||||
for dir_ in dirs:
|
||||
if not dir_:
|
||||
continue
|
||||
|
@ -54,6 +54,7 @@ class TestSaltAPIHandler(SaltnadoTestCase):
|
||||
response_obj = json.loads(response.body)
|
||||
self.assertEqual(response_obj['clients'],
|
||||
['runner',
|
||||
'runner_async',
|
||||
'local_async',
|
||||
'local',
|
||||
'local_batch']
|
||||
@ -303,6 +304,25 @@ class TestSaltAPIHandler(SaltnadoTestCase):
|
||||
self.assertEqual(len(response_obj['return']), 1)
|
||||
self.assertEqual(set(response_obj['return'][0]), set(['minion', 'sub_minion']))
|
||||
|
||||
# runner_async tests
|
||||
def test_simple_local_runner_async_post(self):
|
||||
low = [{'client': 'runner_async',
|
||||
'fun': 'manage.up',
|
||||
}]
|
||||
response = self.fetch('/',
|
||||
method='POST',
|
||||
body=json.dumps(low),
|
||||
headers={'Content-Type': self.content_type_map['json'],
|
||||
saltnado.AUTH_TOKEN_HEADER: self.token['token']},
|
||||
connect_timeout=10,
|
||||
request_timeout=10,
|
||||
)
|
||||
response_obj = json.loads(response.body)
|
||||
self.assertIn('return', response_obj)
|
||||
self.assertEqual(1, len(response_obj['return']))
|
||||
self.assertIn('jid', response_obj['return'][0])
|
||||
self.assertIn('tag', response_obj['return'][0])
|
||||
|
||||
|
||||
@skipIf(HAS_TORNADO is False, 'Tornado must be installed to run these tests')
|
||||
@skipIf(HAS_ZMQ_IOLOOP is False, 'PyZMQ version must be >= 14.0.1 to run these tests.')
|
||||
|
@ -1056,6 +1056,17 @@ class FileTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
|
||||
'file.append', name=name, text='cheese', makedirs=True
|
||||
)
|
||||
self.assertSaltTrueReturn(ret)
|
||||
finally:
|
||||
if os.path.isfile(name):
|
||||
os.remove(name)
|
||||
|
||||
try:
|
||||
# Parent directory exists but file does not and makedirs is False
|
||||
ret = self.run_state(
|
||||
'file.append', name=name, text='cheese'
|
||||
)
|
||||
self.assertSaltTrueReturn(ret)
|
||||
self.assertTrue(os.path.isfile(name))
|
||||
finally:
|
||||
shutil.rmtree(
|
||||
os.path.join(integration.TMP, 'issue_1864'),
|
||||
|
@ -11,6 +11,7 @@ NO_PYOPENSSL = False
|
||||
import shutil
|
||||
import tempfile
|
||||
import os
|
||||
from distutils.version import LooseVersion
|
||||
try:
|
||||
# We're not going to actually use OpenSSL, we just want to check that
|
||||
# it's installed.
|
||||
@ -642,6 +643,160 @@ class TLSAddTestCase(TestCase):
|
||||
if os.path.isdir(ca_path):
|
||||
shutil.rmtree(ca_path)
|
||||
|
||||
def test_pyOpenSSL_version(self):
|
||||
'''
|
||||
Test extension logic with different pyOpenSSL versions
|
||||
'''
|
||||
pillarval = {'csr': {'extendedKeyUsage': 'serverAuth'}}
|
||||
mock_pgt = MagicMock(return_value=pillarval)
|
||||
with patch.dict(tls.__dict__, {
|
||||
'OpenSSL_version': LooseVersion('0.1.1'),
|
||||
'X509_EXT_ENABLED': False}):
|
||||
self.assertEqual(tls.__virtual__(),
|
||||
(False, ['PyOpenSSL version 0.10 or later must be installed '
|
||||
'before this module can be used.']))
|
||||
with patch.dict(tls.__salt__, {'pillar.get': mock_pgt}):
|
||||
self.assertRaises(AssertionError, tls.get_extensions, 'server')
|
||||
self.assertRaises(AssertionError, tls.get_extensions, 'client')
|
||||
with patch.dict(tls.__dict__, {
|
||||
'OpenSSL_version': LooseVersion('0.14.1'),
|
||||
'X509_EXT_ENABLED': True}):
|
||||
self.assertTrue(tls.__virtual__())
|
||||
with patch.dict(tls.__salt__, {'pillar.get': mock_pgt}):
|
||||
self.assertEqual(tls.get_extensions('server'), pillarval)
|
||||
self.assertEqual(tls.get_extensions('client'), pillarval)
|
||||
with patch.dict(tls.__dict__, {
|
||||
'OpenSSL_version': LooseVersion('0.15.1'),
|
||||
'X509_EXT_ENABLED': True}):
|
||||
self.assertTrue(tls.__virtual__())
|
||||
with patch.dict(tls.__salt__, {'pillar.get': mock_pgt}):
|
||||
self.assertEqual(tls.get_extensions('server'), pillarval)
|
||||
self.assertEqual(tls.get_extensions('client'), pillarval)
|
||||
|
||||
@destructiveTest
|
||||
def test_pyOpenSSL_version_destructive(self):
|
||||
'''
|
||||
Test extension logic with different pyOpenSSL versions
|
||||
'''
|
||||
pillarval = {'csr': {'extendedKeyUsage': 'serverAuth'}}
|
||||
mock_pgt = MagicMock(return_value=pillarval)
|
||||
ca_path = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR)
|
||||
ca_name = 'test_ca'
|
||||
certp = '{0}/{1}/{2}_ca_cert.crt'.format(
|
||||
ca_path,
|
||||
ca_name,
|
||||
ca_name)
|
||||
certk = '{0}/{1}/{2}_ca_cert.key'.format(
|
||||
ca_path,
|
||||
ca_name,
|
||||
ca_name)
|
||||
ret = 'Created Private Key: "{0}." Created CA "{1}": "{2}."'.format(
|
||||
certk, ca_name, certp)
|
||||
mock_opt = MagicMock(return_value=ca_path)
|
||||
mock_ret = MagicMock(return_value=0)
|
||||
try:
|
||||
with patch.dict(tls.__salt__, {
|
||||
'config.option': mock_opt,
|
||||
'cmd.retcode': mock_ret}):
|
||||
with patch.dict(tls.__opts__, {
|
||||
'hash_type': 'sha256',
|
||||
'cachedir': ca_path}):
|
||||
with patch.dict(_TLS_TEST_DATA['create_ca'],
|
||||
{'replace': True}):
|
||||
with patch.dict(tls.__dict__, {
|
||||
'OpenSSL_version':
|
||||
LooseVersion('0.1.1'),
|
||||
'X509_EXT_ENABLED': False}):
|
||||
self.assertEqual(
|
||||
tls.create_ca(
|
||||
ca_name,
|
||||
days=365,
|
||||
fixmode=False,
|
||||
**_TLS_TEST_DATA['create_ca']),
|
||||
ret)
|
||||
with patch.dict(tls.__dict__, {
|
||||
'OpenSSL_version':
|
||||
LooseVersion('0.14.1'),
|
||||
'X509_EXT_ENABLED': True}):
|
||||
self.assertEqual(
|
||||
tls.create_ca(
|
||||
ca_name,
|
||||
days=365,
|
||||
fixmode=False,
|
||||
**_TLS_TEST_DATA['create_ca']),
|
||||
ret)
|
||||
with patch.dict(tls.__dict__, {
|
||||
'OpenSSL_version':
|
||||
LooseVersion('0.15.1'),
|
||||
'X509_EXT_ENABLED': True}):
|
||||
self.assertEqual(
|
||||
tls.create_ca(
|
||||
ca_name,
|
||||
days=365,
|
||||
fixmode=False,
|
||||
**_TLS_TEST_DATA['create_ca']),
|
||||
ret)
|
||||
finally:
|
||||
if os.path.isdir(ca_path):
|
||||
shutil.rmtree(ca_path)
|
||||
|
||||
try:
|
||||
certp = '{0}/{1}/certs/{2}.csr'.format(
|
||||
ca_path,
|
||||
ca_name,
|
||||
_TLS_TEST_DATA['create_ca']['CN'])
|
||||
certk = '{0}/{1}/certs/{2}.key'.format(
|
||||
ca_path,
|
||||
ca_name,
|
||||
_TLS_TEST_DATA['create_ca']['CN'])
|
||||
ret = ('Created Private Key: "{0}." '
|
||||
'Created CSR for "{1}": "{2}."').format(
|
||||
certk, _TLS_TEST_DATA['create_ca']['CN'], certp)
|
||||
with patch.dict(tls.__salt__, {
|
||||
'config.option': mock_opt,
|
||||
'cmd.retcode': mock_ret,
|
||||
'pillar.get': mock_pgt}):
|
||||
with patch.dict(tls.__opts__, {'hash_type': 'sha256',
|
||||
'cachedir': ca_path}):
|
||||
with patch.dict(_TLS_TEST_DATA['create_ca'], {
|
||||
'subjectAltName': 'DNS:foo.bar',
|
||||
'replace': True}):
|
||||
with patch.dict(tls.__dict__, {
|
||||
'OpenSSL_version':
|
||||
LooseVersion('0.1.1'),
|
||||
'X509_EXT_ENABLED': False}):
|
||||
tls.create_ca(ca_name)
|
||||
tls.create_csr(ca_name)
|
||||
self.assertRaises(ValueError,
|
||||
tls.create_csr,
|
||||
ca_name,
|
||||
**_TLS_TEST_DATA['create_ca'])
|
||||
with patch.dict(tls.__dict__, {
|
||||
'OpenSSL_version':
|
||||
LooseVersion('0.14.1'),
|
||||
'X509_EXT_ENABLED': True}):
|
||||
tls.create_ca(ca_name)
|
||||
tls.create_csr(ca_name)
|
||||
self.assertEqual(
|
||||
tls.create_csr(
|
||||
ca_name,
|
||||
**_TLS_TEST_DATA['create_ca']),
|
||||
ret)
|
||||
with patch.dict(tls.__dict__, {
|
||||
'OpenSSL_version':
|
||||
LooseVersion('0.15.1'),
|
||||
'X509_EXT_ENABLED': True}):
|
||||
tls.create_ca(ca_name)
|
||||
tls.create_csr(ca_name)
|
||||
self.assertEqual(
|
||||
tls.create_csr(
|
||||
ca_name,
|
||||
**_TLS_TEST_DATA['create_ca']),
|
||||
ret)
|
||||
finally:
|
||||
if os.path.isdir(ca_path):
|
||||
shutil.rmtree(ca_path)
|
||||
|
||||
if __name__ == '__main__':
|
||||
from integration import run_tests
|
||||
run_tests(TLSAddTestCase, needs_daemon=False)
|
||||
|
@ -101,12 +101,14 @@ class NetworkTestCase(TestCase):
|
||||
'broadcast': '10.10.10.255',
|
||||
'netmask': '255.255.252.0'}],
|
||||
'inet6': [{'address': 'fe80::e23f:49ff:fe85:6aaf',
|
||||
'prefixlen': '64'}],
|
||||
'prefixlen': '64',
|
||||
'scope': 'link'}],
|
||||
'up': True},
|
||||
'lo': {'inet': [{'address': '127.0.0.1',
|
||||
'netmask': '255.0.0.0'}],
|
||||
'inet6': [{'address': '::1',
|
||||
'prefixlen': '128'}],
|
||||
'prefixlen': '128',
|
||||
'scope': 'host'}],
|
||||
'up': True}}
|
||||
)
|
||||
|
||||
@ -127,9 +129,11 @@ class NetworkTestCase(TestCase):
|
||||
'lo0': {'inet': [{'address': '127.0.0.1',
|
||||
'netmask': '255.0.0.0'}],
|
||||
'inet6': [{'address': 'fe80::1',
|
||||
'prefixlen': '64'},
|
||||
'prefixlen': '64',
|
||||
'scope': '0x8'},
|
||||
{'address': '::1',
|
||||
'prefixlen': '128'}],
|
||||
'prefixlen': '128',
|
||||
'scope': None}],
|
||||
'up': True},
|
||||
'plip0': {'up': False},
|
||||
'tun0': {'inet': [{'address': '10.12.0.1',
|
||||
|
Loading…
Reference in New Issue
Block a user