Merge branch 'carbon' into 'develop'

Conflicts:
  - salt/scripts.py
  - salt/thorium/key.py
  - tests/unit/cloud/clouds/dimensiondata_test.py
  - tests/unit/cloud/clouds/gce_test.py

Commit: 2edfef33ae
@@ -4,17 +4,16 @@
# Because the location to this file must be explicitly declared when using it,
# its actual location on disk is up to the user.

#fedora_rs:
# - fedora1
# - fedora2
# - fedora3
# - fedora4
# - fedora5

fedora_rs:
  - fedora1
  - fedora2
  - fedora3
  - fedora4
  - fedora5

ubuntu_rs:
  - ubuntu1
  - ubuntu2
  - ubuntu3
  - ubuntu4
  - ubuntu5
#ubuntu_rs:
# - ubuntu1
# - ubuntu2
# - ubuntu3
# - ubuntu4
# - ubuntu5
@@ -1,23 +1,27 @@
base_ec2:
  provider: my-ec2-config
  image: ami-e565ba8c
  size: t1.micro
  script: python-bootstrap
  minion:
    cheese: edam
# This file may be used in addition to, or instead of, the files in the
# cloud.profiles.d/ directory. The format for this file, and all files in that
# directory, is identical.

ubuntu_rs:
  provider: my-openstack-rackspace-config
  image: Ubuntu 12.04 LTS
  size: 256 server
  script: Ubuntu
  minion:
    cheese: edam
#base_ec2:
# provider: my-ec2-config
# image: ami-e565ba8c
# size: t1.micro
# script: python-bootstrap
# minion:
# cheese: edam

fedora_rs:
  provider: my-openstack-rackspace-config
  image: Fedora 17
  size: 256 server
  script: Fedora
  minion:
    cheese: edam
#ubuntu_rs:
# provider: my-openstack-rackspace-config
# image: Ubuntu 12.04 LTS
# size: 256 server
# script: Ubuntu
# minion:
# cheese: edam

#fedora_rs:
# provider: my-openstack-rackspace-config
# image: Fedora 17
# size: 256 server
# script: Fedora
# minion:
# cheese: edam
@@ -2,115 +2,114 @@

# Arch Linux
# https://wiki.archlinux.org/index.php/Arch_Linux_AMIs_for_Amazon_Web_Services
arch_ec2:
  provider: my-ec2-config
  image: ami-6ee95107
  size: t1.micro
  ssh_username: root
  location: us-east-1
  minion:
    grains:
      cloud: ec2-us-east-1
#arch_ec2:
# provider: my-ec2-config
# image: ami-6ee95107
# size: t1.micro
# ssh_username: root
# location: us-east-1
# minion:
# grains:
# cloud: ec2-us-east-1

arch_cloud-init_ec2:
  provider: my-ec2-config
  image: ami-596de730
  size: t1.micro
  ssh_username: root
  location: us-east-1
  minion:
    grains:
      cloud: ec2-us-east-1
#arch_cloud-init_ec2:
# provider: my-ec2-config
# image: ami-596de730
# size: t1.micro
# ssh_username: root
# location: us-east-1
# minion:
# grains:
# cloud: ec2-us-east-1

# Centos 6, available from ec2 marketplace for no-charge
# http://wiki.centos.org/Cloud/AWS
centos_6:
  provider: my-ec2-config
  image: ami-86e15bef
  size: t1.micro
  ssh_username: root
  location: us-east-1
  minion:
    grains:
      cloud: ec2-us-east-1
#centos_6:
# provider: my-ec2-config
# image: ami-86e15bef
# size: t1.micro
# ssh_username: root
# location: us-east-1
# minion:
# grains:
# cloud: ec2-us-east-1

# official Debian, available at no-charge from ec2 marketplace:
# http://wiki.debian.org/Cloud/AmazonEC2Image
debian_squeeze_ec2:
  provider: my-ec2-config
  image: ami-a121a6c8
  size: t1.micro
  ssh_username: admin
  location: us-east-1
  minion:
    grains:
      cloud: ec2-us-east-1
#debian_squeeze_ec2:
# provider: my-ec2-config
# image: ami-a121a6c8
# size: t1.micro
# ssh_username: admin
# location: us-east-1
# minion:
# grains:
# cloud: ec2-us-east-1

# Fedora project cloud images
# https://fedoraproject.org/wiki/Cloud_images
fedora_17_ec2:
  provider: my-ec2-config
  image: ami-2ea50247
  size: t1.micro
  ssh_username: ec2-user
  location: us-east-1
  minion:
    grains:
      cloud: ec2-us-east-1
#fedora_17_ec2:
# provider: my-ec2-config
# image: ami-2ea50247
# size: t1.micro
# ssh_username: ec2-user
# location: us-east-1
# minion:
# grains:
# cloud: ec2-us-east-1

fedora_18_ec2:
  provider: my-ec2-config
  image: ami-6145cc08
  size: t1.micro
  ssh_username: ec2-user
  location: us-east-1
  minion:
    grains:
      cloud: ec2-us-east-1
#fedora_18_ec2:
# provider: my-ec2-config
# image: ami-6145cc08
# size: t1.micro
# ssh_username: ec2-user
# location: us-east-1
# minion:
# grains:
# cloud: ec2-us-east-1

# FreeBSD 9.1
# http://www.daemonology.net/freebsd-on-ec2/

# this t1.micro instance does not auto-populate SSH keys see above link
freebsd_91_ec2:
  provider: my-ec2-config
  image: ami-5339bb3a
  size: t1.micro
  ssh_username: ec2-user
  location: us-east-1
  minion:
    grains:
      cloud: ec2-us-east-1
#freebsd_91_ec2:
# provider: my-ec2-config
# image: ami-5339bb3a
# size: t1.micro
# ssh_username: ec2-user
# location: us-east-1
# minion:
# grains:
# cloud: ec2-us-east-1

freebsd_91_4XL_ec2:
  provider: my-ec2-config
  image: ami-79088510
  size: Cluster Compute 4XL
  ssh_username: ec2-user
  location: us-east-1
  minion:
    grains:
      cloud: ec2-us-east-1
#freebsd_91_4XL_ec2:
# provider: my-ec2-config
# image: ami-79088510
# size: Cluster Compute 4XL
# ssh_username: ec2-user
# location: us-east-1
# minion:
# grains:
# cloud: ec2-us-east-1

# Canonical Ubuntu LTS images
# http://cloud-images.ubuntu.com/releases/
ubuntu_lucid_ec2:
  provider: my-ec2-config
  image: ami-21e47148
  size: t1.micro
  ssh_username: ubuntu
  location: us-east-1
  minion:
    grains:
      cloud: ec2-us-east-1

ubuntu_precise_ec2:
  provider: my-ec2-config
  image: ami-0145d268
  size: t1.micro
  ssh_username: ubuntu
  location: us-east-1
  minion:
    grains:
      cloud: ec2-us-east-1
#ubuntu_lucid_ec2:
# provider: my-ec2-config
# image: ami-21e47148
# size: t1.micro
# ssh_username: ubuntu
# location: us-east-1
# minion:
# grains:
# cloud: ec2-us-east-1

#ubuntu_precise_ec2:
# provider: my-ec2-config
# image: ami-0145d268
# size: t1.micro
# ssh_username: ubuntu
# location: us-east-1
# minion:
# grains:
# cloud: ec2-us-east-1
@@ -2,105 +2,104 @@

# Arch Linux
# https://wiki.archlinux.org/index.php/Arch_Linux_AMIs_for_Amazon_Web_Services
arch_ec2:
  provider: my-ec2-config
  image: ami-337d5b76
  size: t1.micro
  ssh_username: root
  location: us-west-1
  minion:
    grains:
      cloud: ec2-us-west-1
#arch_ec2:
# provider: my-ec2-config
# image: ami-337d5b76
# size: t1.micro
# ssh_username: root
# location: us-west-1
# minion:
# grains:
# cloud: ec2-us-west-1

arch_cloud-init_ec2:
  provider: my-ec2-config
  image: ami-6a5f7c2f
  size: t1.micro
  ssh_username: root
  location: us-west-1
  minion:
    grains:
      cloud: ec2-us-west-1
#arch_cloud-init_ec2:
# provider: my-ec2-config
# image: ami-6a5f7c2f
# size: t1.micro
# ssh_username: root
# location: us-west-1
# minion:
# grains:
# cloud: ec2-us-west-1

# Centos 6, available from ec2 marketplace for no-charge
# http://wiki.centos.org/Cloud/AWS
centos_6:
  provider: my-ec2-config
  image: ami-f61630b3
  size: t1.micro
  ssh_username: root
  location: us-west-1
  minion:
    grains:
      cloud: ec2-us-west-1
#centos_6:
# provider: my-ec2-config
# image: ami-f61630b3
# size: t1.micro
# ssh_username: root
# location: us-west-1
# minion:
# grains:
# cloud: ec2-us-west-1

# official Debian, available at no-charge from ec2 marketplace:
# http://wiki.debian.org/Cloud/AmazonEC2Image
debian_squeeze_ec2:
  provider: my-ec2-config
  image: ami-2c735269
  size: t1.micro
  ssh_username: admin
  location: us-west-1
  minion:
    grains:
      cloud: ec2-us-west-1
#debian_squeeze_ec2:
# provider: my-ec2-config
# image: ami-2c735269
# size: t1.micro
# ssh_username: admin
# location: us-west-1
# minion:
# grains:
# cloud: ec2-us-west-1

# Fedora project cloud images
# https://fedoraproject.org/wiki/Cloud_images
fedora_17_ec2:
  provider: my-ec2-config
  image: ami-877e24c2
  size: t1.micro
  ssh_username: ec2-user
  location: us-west-1
  minion:
    grains:
      cloud: ec2-us-west-1
#fedora_17_ec2:
# provider: my-ec2-config
# image: ami-877e24c2
# size: t1.micro
# ssh_username: ec2-user
# location: us-west-1
# minion:
# grains:
# cloud: ec2-us-west-1

fedora_18_ec2:
  provider: my-ec2-config
  image: ami-0899b94d
  size: t1.micro
  ssh_username: ec2-user
  location: us-west-1
  minion:
    grains:
      cloud: ec2-us-west-1
#fedora_18_ec2:
# provider: my-ec2-config
# image: ami-0899b94d
# size: t1.micro
# ssh_username: ec2-user
# location: us-west-1
# minion:
# grains:
# cloud: ec2-us-west-1

# FreeBSD 9.1
# http://www.daemonology.net/freebsd-on-ec2/

# this t1.micro instance does not auto-populate SSH keys see above link
freebsd_91_ec2:
  provider: my-ec2-config
  image: ami-4c8baa09
  size: t1.micro
  ssh_username: ec2-user
  location: us-west-1
  minion:
    grains:
      cloud: ec2-us-west-1
#freebsd_91_ec2:
# provider: my-ec2-config
# image: ami-4c8baa09
# size: t1.micro
# ssh_username: ec2-user
# location: us-west-1
# minion:
# grains:
# cloud: ec2-us-west-1

# Canonical Ubuntu LTS images
# http://cloud-images.ubuntu.com/releases/
ubuntu_lucid_ec2:
  provider: my-ec2-config
  image: ami-e63013a3
  size: t1.micro
  ssh_username: ubuntu
  location: us-west-1
  minion:
    grains:
      cloud: ec2-us-west-1

ubuntu_precise_ec2:
  provider: my-ec2-config
  image: ami-3ed8fb7b
  size: t1.micro
  ssh_username: ubuntu
  location: us-west-1
  minion:
    grains:
      cloud: ec2-us-west-1
#ubuntu_lucid_ec2:
# provider: my-ec2-config
# image: ami-e63013a3
# size: t1.micro
# ssh_username: ubuntu
# location: us-west-1
# minion:
# grains:
# cloud: ec2-us-west-1

#ubuntu_precise_ec2:
# provider: my-ec2-config
# image: ami-3ed8fb7b
# size: t1.micro
# ssh_username: ubuntu
# location: us-west-1
# minion:
# grains:
# cloud: ec2-us-west-1
@@ -2,115 +2,114 @@

# Arch Linux
# https://wiki.archlinux.org/index.php/Arch_Linux_AMIs_for_Amazon_Web_Services
arch_ec2:
  provider: my-ec2-config
  image: ami-bcf77e8c
  size: t1.micro
  ssh_username: root
  location: us-west-2
  minion:
    grains:
      cloud: ec2-us-west-2
#arch_ec2:
# provider: my-ec2-config
# image: ami-bcf77e8c
# size: t1.micro
# ssh_username: root
# location: us-west-2
# minion:
# grains:
# cloud: ec2-us-west-2

arch_cloud-init_ec2:
  provider: my-ec2-config
  image: ami-6a5f7c2f
  size: t1.micro
  ssh_username: root
  location: us-west-2
  minion:
    grains:
      cloud: ec2-us-west-2
#arch_cloud-init_ec2:
# provider: my-ec2-config
# image: ami-6a5f7c2f
# size: t1.micro
# ssh_username: root
# location: us-west-2
# minion:
# grains:
# cloud: ec2-us-west-2

# Centos 6, available from ec2 marketplace for no-charge
# http://wiki.centos.org/Cloud/AWS
centos_6:
  provider: my-ec2-config
  image: ami-de5bd2ee
  size: t1.micro
  ssh_username: root
  location: us-west-2
  minion:
    grains:
      cloud: ec2-us-west-2
#centos_6:
# provider: my-ec2-config
# image: ami-de5bd2ee
# size: t1.micro
# ssh_username: root
# location: us-west-2
# minion:
# grains:
# cloud: ec2-us-west-2

# official Debian, available at no-charge from ec2 marketplace:
# http://wiki.debian.org/Cloud/AmazonEC2Image
debian_squeeze_ec2:
  provider: my-ec2-config
  image: ami-e4da52d4
  size: t1.micro
  ssh_username: admin
  location: us-west-2
  minion:
    grains:
      cloud: ec2-us-west-2
#debian_squeeze_ec2:
# provider: my-ec2-config
# image: ami-e4da52d4
# size: t1.micro
# ssh_username: admin
# location: us-west-2
# minion:
# grains:
# cloud: ec2-us-west-2

# Fedora project cloud images
# https://fedoraproject.org/wiki/Cloud_images
fedora_17_ec2:
  provider: my-ec2-config
  image: ami-8e69e5be
  size: t1.micro
  ssh_username: ec2-user
  location: us-west-2
  minion:
    grains:
      cloud: ec2-us-west-2
#fedora_17_ec2:
# provider: my-ec2-config
# image: ami-8e69e5be
# size: t1.micro
# ssh_username: ec2-user
# location: us-west-2
# minion:
# grains:
# cloud: ec2-us-west-2

fedora_18_ec2:
  provider: my-ec2-config
  image: ami-0266ed32
  size: t1.micro
  ssh_username: ec2-user
  location: us-west-2
  minion:
    grains:
      cloud: ec2-us-west-2
#fedora_18_ec2:
# provider: my-ec2-config
# image: ami-0266ed32
# size: t1.micro
# ssh_username: ec2-user
# location: us-west-2
# minion:
# grains:
# cloud: ec2-us-west-2

# FreeBSD 9.1
# http://www.daemonology.net/freebsd-on-ec2/

# this t1.micro instance does not auto-populate SSH keys see above link
freebsd_91_ec2:
  provider: my-ec2-config
  image: ami-aa09819a
  size: t1.micro
  ssh_username: ec2-user
  location: us-west-2
  minion:
    grains:
      cloud: ec2-us-west-2
#freebsd_91_ec2:
# provider: my-ec2-config
# image: ami-aa09819a
# size: t1.micro
# ssh_username: ec2-user
# location: us-west-2
# minion:
# grains:
# cloud: ec2-us-west-2

freebsd_91_4XL_ec2:
  provider: my-ec2-config
  image: ami-66169e56
  size: Cluster Compute 4XL
  ssh_username: ec2-user
  location: us-west-2
  minion:
    grains:
      cloud: ec2-us-west-2
#freebsd_91_4XL_ec2:
# provider: my-ec2-config
# image: ami-66169e56
# size: Cluster Compute 4XL
# ssh_username: ec2-user
# location: us-west-2
# minion:
# grains:
# cloud: ec2-us-west-2

# Canonical Ubuntu LTS images
# http://cloud-images.ubuntu.com/releases/
ubuntu_lucid_ec2:
  provider: my-ec2-config
  image: ami-6ec8425e
  size: t1.micro
  ssh_username: ubuntu
  location: us-west-2
  minion:
    grains:
      cloud: ec2-us-west-2

ubuntu_precise_ec2:
  provider: my-ec2-config
  image: ami-e0941ed0
  size: t1.micro
  ssh_username: ubuntu
  location: us-west-2
  minion:
    grains:
      cloud: ec2-us-west-2
#ubuntu_lucid_ec2:
# provider: my-ec2-config
# image: ami-6ec8425e
# size: t1.micro
# ssh_username: ubuntu
# location: us-west-2
# minion:
# grains:
# cloud: ec2-us-west-2

#ubuntu_precise_ec2:
# provider: my-ec2-config
# image: ami-e0941ed0
# size: t1.micro
# ssh_username: ubuntu
# location: us-west-2
# minion:
# grains:
# cloud: ec2-us-west-2
@@ -415,7 +415,7 @@
# Note that this is a very large hammer and it can be quite difficult to keep the minion working
# the way you think it should since Salt uses many modules internally itself. At a bare minimum
# you need the following enabled or else the minion won't start.
whitelist_modules:
#whitelist_modules:
# - cmdmod
# - test
# - config
@@ -5,6 +5,11 @@ Salt 2016.3.3 Release Notes
Version 2016.3.3 is a bugfix release for :doc:`2016.3.0
</topics/releases/2016.3.0>`.

Known Issues
------------

:issue:`36055`: Salt Cloud events (``salt/cloud``) are not generated on the
master event bus when provisioning cloud systems.

Changes for v2016.3.2..2016.3.3
-------------------------------
@@ -473,7 +473,7 @@ def show_instance(name, resource_group=None, call=None):  # pylint: disable=unus
    data['network_profile']['network_interfaces'] = ifaces
    data['resource_group'] = resource_group

    salt.utils.cloud.cache_node(
    __utils__['cloud.cache_node'](
        salt.utils.simple_types_filter(data),
        __active_provider_name__,
        __opts__
@@ -1118,7 +1118,7 @@ def create(vm_):
    vm_['password'] = config.get_cloud_config_value(
        'ssh_password', vm_, __opts__
    )
    ret = salt.utils.cloud.bootstrap(vm_, __opts__)
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    data = show_instance(vm_['name'], call='action')
    log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
@@ -1179,7 +1179,7 @@ def destroy(name, conn=None, call=None, kwargs=None):  # pylint: disable=unused-
    result.wait()

    if __opts__.get('update_cachedir', False) is True:
        salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    cleanup_disks = config.get_cloud_config_value(
        'cleanup_disks',
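The file above (and the OpenNebula driver below) makes the same mechanical change: cloud helpers are now resolved through the ``__utils__`` loader dunder instead of being imported as ``salt.utils.cloud``. A minimal illustrative sketch of the pattern, not code from the commit (the function and variable names are placeholders):

.. code-block:: python

    # Illustrative only: the __utils__ pattern adopted by the hunks above.
    def show_instance_sketch(name):
        node = _get_node(name)  # assume the driver already defines _get_node()
        # old style -- call the imported module directly:
        #     salt.utils.cloud.cache_node(node, __active_provider_name__, __opts__)
        # new style -- resolve the helper through the loader:
        __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
        return node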
@@ -76,10 +76,7 @@ from salt.exceptions import (
    SaltCloudNotFound,
    SaltCloudSystemExit
)
from salt.utils import is_true

# Import Salt Cloud Libs
import salt.utils.cloud
import salt.utils

# Import Third Party Libs
try:
@@ -337,7 +334,7 @@ def list_nodes_select(call=None):
            'The list_nodes_full function must be called with -f or --function.'
        )

    return salt.utils.cloud.list_nodes_select(
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full('function'), __opts__['query.selection'], call,
    )

@@ -993,7 +990,7 @@ def create(vm_):
        return node_data

    try:
        data = salt.utils.cloud.wait_for_ip(
        data = __utils__['cloud.wait_for_ip'](
            __query_node_data,
            update_args=(vm_['name'],),
            timeout=config.get_cloud_config_value(
@@ -1037,7 +1034,7 @@ def create(vm_):
    vm_['username'] = ssh_username
    vm_['key_filename'] = key_filename

    ret = salt.utils.cloud.bootstrap(vm_, __opts__)
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret['id'] = data['id']
    ret['image'] = vm_['image']
@@ -1115,7 +1112,7 @@ def destroy(name, call=None):
        )

    if __opts__.get('update_cachedir', False) is True:
        salt.utils.cloud.delete_minion_cachedir(
        __utils__['cloud.delete_minion_cachedir'](
            name,
            __active_provider_name__.split(':')[0],
            __opts__
@@ -1460,7 +1457,7 @@ def image_persistent(call=None, kwargs=None):

    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])
    response = server.one.image.persistent(auth, int(image_id), is_true(persist))
    response = server.one.image.persistent(auth, int(image_id), salt.utils.is_true(persist))

    data = {
        'action': 'image.persistent',
@@ -1807,7 +1804,7 @@ def show_instance(name, call=None):
        )

    node = _get_node(name)
    salt.utils.cloud.cache_node(node, __active_provider_name__, __opts__)
    __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)

    return node

@@ -2673,7 +2670,7 @@ def vm_allocate(call=None, kwargs=None):

    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])
    response = server.one.vm.allocate(auth, data, is_true(hold))
    response = server.one.vm.allocate(auth, data, salt.utils.is_true(hold))

    ret = {
        'action': 'vm.allocate',
@@ -2903,7 +2900,7 @@ def vm_deploy(name, kwargs=None, call=None):
    response = server.one.vm.deploy(auth,
                                    int(vm_id),
                                    int(host_id),
                                    is_true(capacity_maintained),
                                    salt.utils.is_true(capacity_maintained),
                                    int(datastore_id))

    data = {
@@ -3372,8 +3369,8 @@ def vm_migrate(name, kwargs=None, call=None):
    response = server.one.vm.migrate(auth,
                                     vm_id,
                                     int(host_id),
                                     is_true(live_migration),
                                     is_true(capacity_maintained),
                                     salt.utils.is_true(live_migration),
                                     salt.utils.is_true(capacity_maintained),
                                     int(datastore_id))

    data = {
@@ -3491,7 +3488,7 @@ def vm_resize(name, kwargs=None, call=None):
    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])
    vm_id = int(get_vm_id(kwargs={'name': name}))
    response = server.one.vm.resize(auth, vm_id, data, is_true(capacity_maintained))
    response = server.one.vm.resize(auth, vm_id, data, salt.utils.is_true(capacity_maintained))

    ret = {
        'action': 'vm.resize',
@@ -504,10 +504,14 @@ class AsyncAuth(object):
                error = SaltClientError('Attempt to authenticate with the salt master failed')
                self._authenticate_future.set_exception(error)
            else:
                AsyncAuth.creds_map[self.__key(self.opts)] = creds
                key = self.__key(self.opts)
                AsyncAuth.creds_map[key] = creds
                self._creds = creds
                self._crypticle = Crypticle(self.opts, creds['aes'])
                self._authenticate_future.set_result(True)  # mark the sign-in as complete
                # Notify the bus about creds change
                event = salt.utils.event.get_event(self.opts.get('__role'), opts=self.opts, listen=False)
                event.fire_event({'key': key, 'creds': creds}, salt.utils.event.tagify(prefix='auth', suffix='creds'))

    @tornado.gen.coroutine
    def sign_in(self, timeout=60, safe=True, tries=1, channel=None):
@@ -8,6 +8,7 @@ from __future__ import absolute_import
import contextlib
import logging
import os
import string
import shutil
import ftplib
from tornado.httputil import parse_response_start_line, HTTPInputError
@@ -465,8 +466,8 @@ class Client(object):
        url_path = os.path.join(
                url_data.netloc, url_data.path).rstrip(os.sep)

        if url_scheme and url_scheme.lower() in 'abcdefghijklmnopqrstuvwxyz':
            url_path = ':'.join([url_scheme, url_path])
        if url_scheme and url_scheme.lower() in string.ascii_lowercase:
            url_path = ':'.join((url_scheme, url_path))
            url_scheme = 'file'

        if url_scheme in ('file', ''):
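For context on the fileclient hunk above: when a Windows path such as ``c:\some\path`` is parsed as a URL, the drive letter comes back as the scheme, so the client re-joins it onto the path and treats the result as a ``file`` URL. A standard-library sketch of that behaviour, not code from the commit:

.. code-block:: python

    import string
    from urllib.parse import urlparse  # the module itself parses the URL elsewhere

    url_data = urlparse(r'c:\salt\file.txt')
    url_scheme = url_data.scheme              # 'c' -- a bare drive letter
    if url_scheme and url_scheme.lower() in string.ascii_lowercase:
        # single letters match; multi-letter schemes such as 'http' do not
        url_path = ':'.join((url_scheme, url_data.path))   # 'c:\salt\file.txt' again
        url_scheme = 'file'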
@@ -1997,6 +1997,12 @@ class Minion(MinionBase):
            if self.connected:
                log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
                self._fire_master(data, tag)
        elif tag.startswith('salt/auth/creds'):
            mtag, data = salt.utils.event.MinionEvent.unpack(tag)
            key = tuple(data['key'])
            log.debug('Updating auth data for {0}: {1} -> {2}'.format(
                key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']))
            salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']

    def _fallback_cleanups(self):
        '''
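Taken together, the ``salt/crypt.py`` and ``salt/minion.py`` hunks broadcast refreshed credentials on the local event bus and fold them back into the shared cache in other minion processes. A rough sketch of the consumer side, reduced to its essentials (illustrative only; ``creds_map`` stands in for ``salt.crypt.AsyncAuth.creds_map``):

.. code-block:: python

    creds_map = {}  # illustrative stand-in for AsyncAuth.creds_map

    def handle_auth_creds_event(tag, data):
        # Fired by AsyncAuth with the 'salt/auth/creds' tag per the crypt.py hunk.
        if tag.startswith('salt/auth/creds'):
            key = tuple(data['key'])        # the cache key arrives as a list
            creds_map[key] = data['creds']  # replace the cached AES credentials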
@@ -344,6 +344,22 @@ def _run(cmd,
                        'Setting value to an empty string'.format(bad_env_key))
            env[bad_env_key] = ''

    if _check_loglevel(output_loglevel) is not None:
        # Always log the shell commands at INFO unless quiet logging is
        # requested. The command output is what will be controlled by the
        # 'loglevel' parameter.
        msg = (
            'Executing command {0}{1}{0} {2}in directory \'{3}\'{4}'.format(
                '\'' if not isinstance(cmd, list) else '',
                cmd,
                'as user \'{0}\' '.format(runas) if runas else '',
                cwd,
                '. Executing command in the background, no output will be '
                'logged.' if bg else ''
            )
        )
        log.info(log_callback(msg))

    if runas and salt.utils.is_windows():
        if not password:
            msg = 'password is a required argument for runas on Windows'
@@ -414,21 +430,6 @@ def _run(cmd,
            )
        )

    if _check_loglevel(output_loglevel) is not None:
        # Always log the shell commands at INFO unless quiet logging is
        # requested. The command output is what will be controlled by the
        # 'loglevel' parameter.
        msg = (
            'Executing command {0}{1}{0} {2}in directory \'{3}\'{4}'.format(
                '\'' if not isinstance(cmd, list) else '',
                cmd,
                'as user \'{0}\' '.format(runas) if runas else '',
                cwd,
                ' in the background, no output will be logged' if bg else ''
            )
        )
        log.info(log_callback(msg))

    if reset_system_locale is True:
        if not salt.utils.is_windows():
            # Default to C!
@@ -726,6 +727,7 @@ def run(cmd,
        saltenv='base',
        use_vt=False,
        bg=False,
        password=None,
        encoded_cmd=False,
        **kwargs):
    r'''
@@ -746,8 +748,8 @@ def run(cmd,
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password

    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's
    :param str password: Windows only. Required when specifying ``runas``. This
        parameter will be ignored on non-Windows platforms.

        .. versionadded:: 2016.3.0

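The reworded docstring above is the behaviour to keep in mind when calling these functions: on Windows, ``password`` must accompany ``runas``, and on other platforms it is ignored. A hedged usage sketch with placeholder credentials, not an example from the commit:

.. code-block:: python

    # From any module or template where __salt__ is available.
    # 'Administrator' and 'hunter2' are placeholders for illustration.
    output = __salt__['cmd.run'](
        'ipconfig /all',
        runas='Administrator',
        password='hunter2',  # required on Windows when runas is set; ignored elsewhere
    )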
@@ -897,6 +899,7 @@ def run(cmd,
               saltenv=saltenv,
               use_vt=use_vt,
               bg=bg,
               password=password,
               encoded_cmd=encoded_cmd,
               **kwargs)

@@ -937,6 +940,7 @@ def shell(cmd,
          saltenv='base',
          use_vt=False,
          bg=False,
          password=None,
          **kwargs):
    '''
    Execute the passed command and return the output as a string.
@@ -955,15 +959,16 @@ def shell(cmd,
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password

    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's
    :param str password: Windows only. Required when specifying ``runas``. This
        parameter will be ignored on non-Windows platforms.

        .. versionadded:: 2016.3.0

    :param int shell: Shell to execute under. Defaults to the system default
        shell.

    :param bool bg: If True, run command in background and do not await or deliver it's results
    :param bool bg: If True, run command in background and do not await or
        deliver its results

    :param list env: A list of environment variables to be set prior to
        execution.
@@ -1102,6 +1107,7 @@ def shell(cmd,
               use_vt=use_vt,
               python_shell=python_shell,
               bg=bg,
               password=password,
               **kwargs)


@@ -1123,6 +1129,7 @@ def run_stdout(cmd,
               ignore_retcode=False,
               saltenv='base',
               use_vt=False,
               password=None,
               **kwargs):
    '''
    Execute a command, and only return the standard out
@@ -1139,8 +1146,8 @@ def run_stdout(cmd,
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password

    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's
    :param str password: Windows only. Required when specifying ``runas``. This
        parameter will be ignored on non-Windows platforms.

        .. versionadded:: 2016.3.0

@@ -1259,6 +1266,7 @@ def run_stdout(cmd,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               use_vt=use_vt,
               password=password,
               **kwargs)

    log_callback = _check_cb(log_callback)
@@ -1302,6 +1310,7 @@ def run_stderr(cmd,
               ignore_retcode=False,
               saltenv='base',
               use_vt=False,
               password=None,
               **kwargs):
    '''
    Execute a command and only return the standard error
@@ -1318,8 +1327,8 @@ def run_stderr(cmd,
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password

    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's
    :param str password: Windows only. Required when specifying ``runas``. This
        parameter will be ignored on non-Windows platforms.

        .. versionadded:: 2016.3.0

@@ -1439,6 +1448,7 @@ def run_stderr(cmd,
               ignore_retcode=ignore_retcode,
               use_vt=use_vt,
               saltenv=saltenv,
               password=password,
               **kwargs)

    log_callback = _check_cb(log_callback)
@@ -1483,6 +1493,7 @@ def run_all(cmd,
            saltenv='base',
            use_vt=False,
            redirect_stderr=False,
            password=None,
            **kwargs):
    '''
    Execute the passed command and return a dict of return data
@@ -1499,8 +1510,8 @@ def run_all(cmd,
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password

    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's
    :param str password: Windows only. Required when specifying ``runas``. This
        parameter will be ignored on non-Windows platforms.

        .. versionadded:: 2016.3.0

@@ -1630,6 +1641,7 @@ def run_all(cmd,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               use_vt=use_vt,
               password=password,
               **kwargs)

    log_callback = _check_cb(log_callback)
@@ -1672,6 +1684,7 @@ def retcode(cmd,
            ignore_retcode=False,
            saltenv='base',
            use_vt=False,
            password=None,
            **kwargs):
    '''
    Execute a shell command and return the command's return code.
@@ -1688,8 +1701,8 @@ def retcode(cmd,
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password

    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's
    :param str password: Windows only. Required when specifying ``runas``. This
        parameter will be ignored on non-Windows platforms.

        .. versionadded:: 2016.3.0

@@ -1811,6 +1824,7 @@ def retcode(cmd,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               use_vt=use_vt,
               password=password,
               **kwargs)

    log_callback = _check_cb(log_callback)
@@ -1848,6 +1862,7 @@ def _retcode_quiet(cmd,
                   ignore_retcode=False,
                   saltenv='base',
                   use_vt=False,
                   password=None,
                   **kwargs):
    '''
    Helper for running commands quietly for minion startup.
@@ -1870,6 +1885,7 @@ def _retcode_quiet(cmd,
                   ignore_retcode=ignore_retcode,
                   saltenv=saltenv,
                   use_vt=use_vt,
                   password=password,
                   **kwargs)


@@ -1891,6 +1907,7 @@ def script(source,
           saltenv='base',
           use_vt=False,
           bg=False,
           password=None,
           **kwargs):
    '''
    Download a script from a remote location and execute the script locally.
@@ -1919,8 +1936,8 @@ def script(source,
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password

    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's
    :param str password: Windows only. Required when specifying ``runas``. This
        parameter will be ignored on non-Windows platforms.

        .. versionadded:: 2016.3.0

@@ -2077,6 +2094,7 @@ def script(source,
                   saltenv=saltenv,
                   use_vt=use_vt,
                   bg=bg,
                   password=password,
                   **kwargs)
    _cleanup_tempfile(path)
    return ret
@@ -2098,6 +2116,7 @@ def script_retcode(source,
                   output_loglevel='debug',
                   log_callback=None,
                   use_vt=False,
                   password=None,
                   **kwargs):
    '''
    Download a script from a remote location and execute the script locally.
@@ -2130,8 +2149,8 @@ def script_retcode(source,
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password

    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's
    :param str password: Windows only. Required when specifying ``runas``. This
        parameter will be ignored on non-Windows platforms.

        .. versionadded:: 2016.3.0

@@ -2246,6 +2265,7 @@ def script_retcode(source,
                   output_loglevel=output_loglevel,
                   log_callback=log_callback,
                   use_vt=use_vt,
                   password=password,
                   **kwargs)['retcode']

@@ -2393,10 +2413,10 @@ def run_chroot(root,
    This function runs :mod:`cmd.run_all <salt.modules.cmdmod.run_all>` wrapped
    within a chroot, with dev and proc mounted in the chroot

    root:
    root
        Path to the root of the jail to use.

    cmd:
    cmd
        The command to run. ex: 'ls -lart /home'

    cwd
@@ -2631,6 +2651,7 @@ def powershell(cmd,
               ignore_retcode=False,
               saltenv='base',
               use_vt=False,
               password=None,
               encode_cmd=False,
               **kwargs):
    '''
@@ -2659,8 +2680,8 @@ def powershell(cmd,
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password

    :param str password: Windows only. Pass a password if you specify runas.
        This parameter will be ignored for other OS's
    :param str password: Windows only. Required when specifying ``runas``. This
        parameter will be ignored on non-Windows platforms.

        .. versionadded:: 2016.3.0

@@ -2787,6 +2808,7 @@ def powershell(cmd,
               saltenv=saltenv,
               use_vt=use_vt,
               python_shell=python_shell,
               password=password,
               encoded_cmd=encoded_cmd,
               **kwargs)

@@ -2810,7 +2832,9 @@ def run_bg(cmd,
           output_loglevel='debug',
           log_callback=None,
           reset_system_locale=True,
           ignore_retcode=False,
           saltenv='base',
           password=None,
           **kwargs):
    r'''
    .. versionadded: 2016.3.0
@@ -2832,6 +2856,11 @@ def run_bg(cmd,
    :param str runas: User to run script as. If running on a Windows minion you
        must also pass a password

    :param str password: Windows only. Required when specifying ``runas``. This
        parameter will be ignored on non-Windows platforms.

        .. versionadded:: 2016.3.0

    :param str shell: Shell to execute under. Defaults to the system default
        shell.

@@ -2957,10 +2986,10 @@ def run_bg(cmd,
               log_callback=log_callback,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               # ignore_retcode=ignore_retcode,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               password=password,
               **kwargs
               # password=kwargs.get('password', None),
               )

    return {
@@ -609,11 +609,10 @@ def agent_join(consul_url=None, address=None, **kwargs):
                   query_params=query_params)
    if res['res']:
        ret['res'] = True
        ret['message'] = ('Agent maintenance mode '
                          '{0}ed.'.format(kwargs['enable']))
        ret['message'] = 'Agent joined the cluster'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to change maintenance mode for agent.'
        ret['message'] = 'Unable to join the cluster.'
    return ret

@@ -649,7 +649,7 @@ def push(path, keep_symlinks=False, upload_path=None, remove_source=False):
    load_path_normal = os.path.normpath(load_path)

    # If this is Windows and a drive letter is present, remove it
    load_path_split_drive = os.path.splitdrive(load_path_normal)[1:]
    load_path_split_drive = os.path.splitdrive(load_path_normal)[1]

    # Finally, split the remaining path into a list for delivery to the master
    load_path_list = os.path.split(load_path_split_drive)
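The one-character fix above matters because ``os.path.splitdrive`` returns a two-item tuple: slicing with ``[1:]`` keeps a one-element tuple, while indexing with ``[1]`` yields the path string that the following ``os.path.split`` expects. For example:

.. code-block:: python

    import ntpath  # ntpath gives the Windows behaviour on any platform

    ntpath.splitdrive(r'C:\minion\top.sls')       # ('C:', '\\minion\\top.sls')
    ntpath.splitdrive(r'C:\minion\top.sls')[1:]   # ('\\minion\\top.sls',)  -- a tuple
    ntpath.splitdrive(r'C:\minion\top.sls')[1]    # '\\minion\\top.sls'     -- a str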
@@ -10,9 +10,8 @@ to the correct service manager
<module-provider-override>`.
'''

from __future__ import absolute_import

# Import Python libs
from __future__ import absolute_import
import logging

# Import salt libs
File diff suppressed because it is too large
@@ -281,7 +281,7 @@ def set_wake_on_modem(enabled):
    state = salt.utils.mac_utils.validate_enabled(enabled)
    cmd = 'systemsetup -setwakeonmodem {0}'.format(state)
    salt.utils.mac_utils.execute_return_success(cmd)
    return get_wake_on_modem() == state
    return salt.utils.mac_utils.validate_enabled(get_wake_on_modem()) == state


def get_wake_on_network():
@@ -324,7 +324,8 @@ def set_wake_on_network(enabled):
    state = salt.utils.mac_utils.validate_enabled(enabled)
    cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state)
    salt.utils.mac_utils.execute_return_success(cmd)
    return get_wake_on_network() == state
    return salt.utils.mac_utils.validate_enabled(
        get_wake_on_network()) == state


def get_restart_power_failure():
@@ -367,7 +368,8 @@ def set_restart_power_failure(enabled):
    state = salt.utils.mac_utils.validate_enabled(enabled)
    cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state)
    salt.utils.mac_utils.execute_return_success(cmd)
    return get_restart_power_failure() == state
    return salt.utils.mac_utils.validate_enabled(
        get_restart_power_failure()) == state


def get_restart_freeze():
@@ -412,7 +414,7 @@ def set_restart_freeze(enabled):
    state = salt.utils.mac_utils.validate_enabled(enabled)
    cmd = 'systemsetup -setrestartfreeze {0}'.format(state)
    salt.utils.mac_utils.execute_return_success(cmd)
    return get_restart_freeze() == state
    return salt.utils.mac_utils.validate_enabled(get_restart_freeze()) == state


def get_sleep_on_power_button():
@@ -433,7 +435,7 @@ def get_sleep_on_power_button():
    ret = salt.utils.mac_utils.execute_return_result(
        'systemsetup -getallowpowerbuttontosleepcomputer')
    return salt.utils.mac_utils.validate_enabled(
        salt.utils.mac_utils.parse_return(ret)) == 'on'
        salt.utils.mac_utils.parse_return(ret)) == 'on'


def set_sleep_on_power_button(enabled):
@@ -456,4 +458,5 @@ def set_sleep_on_power_button(enabled):
    state = salt.utils.mac_utils.validate_enabled(enabled)
    cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state)
    salt.utils.mac_utils.execute_return_success(cmd)
    return get_sleep_on_power_button() == state
    return salt.utils.mac_utils.validate_enabled(
        get_sleep_on_power_button()) == state
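The returns above now pass both sides of the comparison through ``validate_enabled``. The reason is visible in the ``get_sleep_on_power_button`` hunk: the getters reduce the ``systemsetup`` output to a boolean, while ``state`` is the string ``'on'`` or ``'off'``, so an unnormalized comparison could never be true. A minimal sketch, with ``validate_enabled`` paraphrased rather than copied from ``salt.utils.mac_utils``:

.. code-block:: python

    def validate_enabled(enabled):
        # Paraphrase of the helper: accept truthy/falsey input, return 'on'/'off'.
        return 'on' if str(enabled).lower() in ('true', 'on', '1') else 'off'

    state = validate_enabled(True)      # 'on' -- what the setter passed to systemsetup
    current = True                      # what a getter such as get_restart_freeze() returns
    current == state                    # False: bool compared with 'on'
    validate_enabled(current) == state  # True: both sides normalized to 'on'/'off'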
@@ -1,8 +1,20 @@
# -*- coding: utf-8 -*-
'''
Manage Parallels Desktop VMs with prlctl
Manage Parallels Desktop VMs with ``prlctl`` and ``prlsrvctl``. Only some of
the prlctl commands implemented so far. Of those that have been implemented,
not all of the options may have been provided yet. For a complete reference,
see the `Parallels Desktop Reference Guide
<http://download.parallels.com/desktop/v9/ga/docs/en_US/Parallels%20Command%20Line%20Reference%20Guide.pdf>`_.

http://download.parallels.com/desktop/v9/ga/docs/en_US/Parallels%20Command%20Line%20Reference%20Guide.pdf
What has not been implemented yet can be accessed through ``parallels.prlctl``
and ``parallels.prlsrvctl`` (note the preceeding double dash ``--`` as
necessary):

.. code-block::

    salt '*' parallels.prlctl installtools macvm runas=macdev
    salt -- '*' parallels.prlctl capture 'macvm --file macvm.display.png' runas=macdev
    salt -- '*' parallels.prlsrvctl set '--mem-limit auto' runas=macdev

.. versionadded:: 2016.3.0
'''
@@ -36,7 +48,9 @@ def __virtual__():
    Load this module if prlctl is available
    '''
    if not salt.utils.which('prlctl'):
        return (False, 'Cannot load prlctl module: prlctl utility not available')
        return (False, 'prlctl utility not available')
    if not salt.utils.which('prlsrvctl'):
        return (False, 'prlsrvctl utility not available')
    return __virtualname__

@@ -76,6 +90,38 @@ def _find_guids(guid_string):
    return sorted(list(set(guids)))


def prlsrvctl(sub_cmd, args=None, runas=None):
    '''
    Execute a prlsrvctl command

    .. versionadded:: Carbon

    :param str sub_cmd:
        prlsrvctl subcommand to execute

    :param str args:
        The arguments supplied to ``prlsrvctl <sub_cmd>``

    :param str runas:
        The user that the prlsrvctl command will be run as

    Example:

    .. code-block:: bash

        salt '*' parallels.prlsrvctl info runas=macdev
        salt '*' parallels.prlsrvctl usb list runas=macdev
        salt -- '*' parallels.prlsrvctl set '--mem-limit auto' runas=macdev
    '''
    # Construct command
    cmd = ['prlsrvctl', sub_cmd]
    if args:
        cmd.extend(_normalize_args(args))

    # Execute command and return output
    return __salt__['cmd.run'](cmd, runas=runas)

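Because ``prlsrvctl`` above just assembles a command list and hands it to ``cmd.run``, it can also be invoked from other modules on the same minion through ``__salt__``; a small hedged sketch (the ``macdev`` user comes from the docstring examples, the call site itself is hypothetical):

.. code-block:: python

    # Hypothetical call site in another execution module on the same minion.
    info = __salt__['parallels.prlsrvctl']('info', runas='macdev')
    __salt__['parallels.prlsrvctl']('set', '--mem-limit auto', runas='macdev')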
def prlctl(sub_cmd, args=None, runas=None):
    '''
    Execute a prlctl command
@@ -95,6 +141,7 @@ def prlctl(sub_cmd, args=None, runas=None):

        salt '*' parallels.prlctl user list runas=macdev
        salt '*' parallels.prlctl exec 'macvm uname' runas=macdev
        salt -- '*' parallels.prlctl capture 'macvm --file macvm.display.png' runas=macdev
    '''
    # Construct command
    cmd = ['prlctl', sub_cmd]
@@ -105,32 +152,41 @@ def prlctl(sub_cmd, args=None, runas=None):
    return __salt__['cmd.run'](cmd, runas=runas)


def list_vms(name=None, info=False, all=False, args=None, runas=None):
def list_vms(name=None, info=False, all=False, args=None, runas=None, template=False):
    '''
    List information about the VMs

    :param str name:
        Name/ID of VM to list; implies ``info=True``
        Name/ID of VM to list

        .. versionchanged:: Carbon

            No longer implies ``info=True``

    :param str info:
        List extra information

    :param bool all:
        Also list non-running VMs
        List all non-template VMs

    :param tuple args:
        Additional arguments given to ``prctl list``. This argument is
        mutually exclusive with the other arguments
        Additional arguments given to ``prctl list``

    :param str runas:
        The user that the prlctl command will be run as

    :param bool template:
        List the available virtual machine templates. The real virtual
        machines will not be included in the output

        .. versionadded:: Carbon

    Example:

    .. code-block:: bash

        salt '*' parallels.list_vms runas=macdev
        salt '*' parallels.list_vms name=macvm runas=macdev
        salt '*' parallels.list_vms name=macvm info=True runas=macdev
        salt '*' parallels.list_vms info=True runas=macdev
        salt '*' parallels.list_vms ' -o uuid,status' all=True runas=macdev
    '''
@@ -141,17 +197,127 @@ def list_vms(name=None, info=False, all=False, args=None, runas=None):
        args = _normalize_args(args)

    if name:
        args.extend(['--info', name])
    elif info:
        args.extend([name])
    if info:
        args.append('--info')

    if all:
        args.append('--all')
    if template:
        args.append('--template')

    # Execute command and return output
    return prlctl('list', args, runas=runas)


def clone(name, new_name, linked=False, template=False, runas=None):
    '''
    Clone a VM

    .. versionadded:: Carbon

    :param str name:
        Name/ID of VM to clone

    :param str new_name:
        Name of the new VM

    :param bool linked:
        Create a linked virtual machine.

    :param bool template:
        Create a virtual machine template instead of a real virtual machine.

    :param str runas:
        The user that the prlctl command will be run as

    Example:

    .. code-block:: bash

        salt '*' parallels.clone macvm macvm_new runas=macdev
        salt '*' parallels.clone macvm macvm_templ template=True runas=macdev
    '''
    args = [_sdecode(name), '--name', _sdecode(new_name)]
    if linked:
        args.append('--linked')
    if template:
        args.append('--template')
    return prlctl('clone', args, runas=runas)


def delete(name, runas=None):
    '''
    Delete a VM

    .. versionadded:: Carbon

    :param str name:
        Name/ID of VM to clone

    :param str runas:
        The user that the prlctl command will be run as

    Example:

    .. code-block:: bash

        salt '*' parallels.exec macvm 'find /etc/paths.d' runas=macdev
    '''
    return prlctl('delete', _sdecode(name), runas=runas)


def exists(name, runas=None):
    '''
    Query whether a VM exists

    .. versionadded:: Carbon

    :param str name:
        Name/ID of VM

    :param str runas:
        The user that the prlctl command will be run as

    Example:

    .. code-block:: bash

        salt '*' parallels.exists macvm runas=macdev
    '''
    vm_info = list_vms(name, info=True, runas=runas).splitlines()
    for info_line in vm_info:
        if 'Name: {0}'.format(name) in info_line:
            return True
    return False


def state(name, runas=None):
    '''
    Return the state of the VM

    .. versionadded:: Carbon

    :param str name:
        Name/ID of VM

    :param str runas:
        The user that the prlctl command will be run as

    Example:

    .. code-block:: bash

        salt '*' parallels.state macvm runas=macdev
    '''
    vm_info = list_vms(name, info=True, runas=runas).splitlines()
    for info_line in vm_info:
        if 'State: ' in info_line:
            return info_line.split('State: ')[1]

    log.error('Cannot find state of VM named {0}'.format(name))
    return ''


def start(name, runas=None):
    '''
    Start a VM
@@ -340,7 +506,7 @@ def snapshot_id_to_name(name, snap_id, strict=False, runas=None):
        data = yaml.safe_load(info)
    except yaml.YAMLError as err:
        log.warning(
            'Could not interpret snapshot data returned from parallels deskop: '
            'Could not interpret snapshot data returned from prlctl: '
            '{0}'.format(err)
        )
        data = {}
@@ -353,7 +519,7 @@ def snapshot_id_to_name(name, snap_id, strict=False, runas=None):
            snap_name = ''
        else:
            log.warning(
                u'Could not interpret snapshot data returned from parallels deskop: '
                u'Could not interpret snapshot data returned from prlctl: '
                u'data is not formed as a dictionary: {0}'.format(data)
            )
            snap_name = ''
@@ -17,7 +17,11 @@ import logging
import os
import time
import difflib
from pwd import getpwuid
try:
    from pwd import getpwuid
    HAS_PWD = True
except ImportError:
    HAS_PWD = False

from salt.exceptions import CommandExecutionError
import salt.utils
@@ -82,6 +86,8 @@ def __virtual__():
        return False, error_msg.format(snapper_error)
    elif not bus:
        return False, error_msg.format(system_bus_error)
    elif not HAS_PWD:
        return False, error_msg.format('pwd module not available')

    return 'snapper'

@@ -1047,9 +1047,14 @@ def install(name=None, refresh=False, pkgs=None, saltenv='base', **kwargs):
                                   start_in=cache_path,
                                   trigger_type='Once',
                                   start_date='1975-01-01',
                                   start_time='01:00')
                                   start_time='01:00',
                                   ac_only=False,
                                   stop_if_on_batteries=False)
            # Run Scheduled Task
            __salt__['task.run_wait'](name='update-salt-software')
            if not __salt__['task.run_wait'](name='update-salt-software'):
                log.error('Failed to install {0}'.format(pkg_name))
                log.error('Scheduled Task failed to run')
                ret[pkg_name] = {'install status': 'failed'}
        else:
            # Build the install command
            cmd = []
@@ -1296,9 +1301,14 @@ def remove(name=None, pkgs=None, version=None, saltenv='base', **kwargs):
                                   start_in=cache_path,
                                   trigger_type='Once',
                                   start_date='1975-01-01',
                                   start_time='01:00')
                                   start_time='01:00',
                                   ac_only=False,
                                   stop_if_on_batteries=False)
            # Run Scheduled Task
            __salt__['task.run_wait'](name='update-salt-software')
            if not __salt__['task.run_wait'](name='update-salt-software'):
                log.error('Failed to remove {0}'.format(target))
                log.error('Scheduled Task failed to run')
                ret[target] = {'uninstall status': 'failed'}
        else:
            # Build the install command
            cmd = []
@ -136,7 +136,8 @@ Authentication
|
||||
Authentication is performed by passing a session token with each request.
|
||||
Tokens are generated via the :py:class:`Login` URL.
|
||||
|
||||
The token may be sent in one of two ways:
|
||||
The token may be sent in one of two ways: as a custom header or as a session
|
||||
cookie. The latter is far more convenient for clients that support cookies.
|
||||
|
||||
* Include a custom header named :mailheader:`X-Auth-Token`.
|
||||
|
||||
@ -213,54 +214,204 @@ The token may be sent in one of two ways:
|
||||
Usage
|
||||
-----
|
||||
|
||||
Commands are sent to a running Salt master via this module by sending HTTP
|
||||
requests to the URLs detailed below.
|
||||
This interface directly exposes Salt's :ref:`Python API <python-api>`.
|
||||
Everything possible at the CLI is possible through the Python API. Commands are
|
||||
executed on the Salt Master.
|
||||
|
||||
.. admonition:: Content negotiation
|
||||
The root URL (``/``) is RPC-like in that it accepts instructions in the request
|
||||
body for what Salt functions to execute, and the response contains the result
|
||||
of those function calls.
|
||||
|
||||
This REST interface is flexible in what data formats it will accept as well
|
||||
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
|
||||
For example:
|
||||
|
||||
* Specify the format of data in the request body by including the
|
||||
:mailheader:`Content-Type` header.
|
||||
* Specify the desired data format for the response body with the
|
||||
:mailheader:`Accept` header.
|
||||
.. code-block:: text
|
||||
|
||||
Data sent in :http:method:`post` and :http:method:`put` requests must be in
|
||||
the format of a list of lowstate dictionaries. This allows multiple commands to
|
||||
be executed in a single HTTP request. The order of commands in the request
|
||||
corresponds to the return for each command in the response.
|
||||
% curl -sSi https://localhost:8000 \
|
||||
-H 'Content-type: application/json' \
|
||||
-d '[{
|
||||
"client": "local",
|
||||
"tgt": "*",
|
||||
"fun": "test.ping"
|
||||
}]'
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
[...snip...]
|
||||
|
||||
Lowstate, broadly, is a dictionary of values that are mapped to a function
|
||||
call. This pattern is used pervasively throughout Salt. The functions called
|
||||
from netapi modules are described in :ref:`Client Interfaces <netapi-clients>`.
|
||||
{"return": [{"jerry": true}]}
|
||||
|
||||
The following example (in JSON format) causes Salt to execute two commands, a
|
||||
command sent to minions as well as a runner function on the master::
|
||||
The request body must be an array of commands. Use this workflow to build a
|
||||
command:
|
||||
|
||||
[{
|
||||
"client": "local",
|
||||
"tgt": "*",
|
||||
"fun": "test.fib",
|
||||
"arg": ["10"]
|
||||
},
|
||||
1. Choose a client interface.
|
||||
2. Choose a function.
|
||||
3. Fill out the remaining parameters needed for the chosen client.
|
||||
|
||||
The ``client`` field is a reference to the main Python classes used in Salt's
|
||||
Python API. Read the full :ref:`client interfaces <netapi-clients>`
|
||||
documentation, but in short:
|
||||
|
||||
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
|
||||
commands to Minions. Equivalent to the ``salt`` CLI command.
|
||||
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
|
||||
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
|
||||
command.
|
||||
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
|
||||
wheel modules on the Master. Wheel modules do not have a direct CLI
|
||||
equivalent but they typically manage Master-side resources such as state
|
||||
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
|
||||
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
|
||||
command.
|
||||
|
||||
Most clients have variants like synchronous or asyncronous execution as well as
|
||||
others like batch execution. See the :ref:`full list of client interfaces
|
||||
<netapi-clients>`.
|
||||
|
||||
Each client requires different arguments and sometimes has different syntax.
|
||||
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
|
||||
the command to Minions and the other client interfaces do not. ``LocalClient``
|
||||
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
|
||||
values are sent to the Minions and used to execute the requested function
|
||||
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
|
||||
and thus do not need or accept those arguments.
|
||||
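In Python terms the difference looks roughly like this (a sketch only; see the
linked client documentation for the exact signatures, and note that all values
are illustrative):

.. code-block:: python

    import salt.client
    import salt.config
    import salt.runner

    master_opts = salt.config.client_config('/etc/salt/master')

    # "client": "local" -- requires a target, and forwards arg/kwarg to the
    # minions that run the function.
    local = salt.client.LocalClient()
    local.cmd('*', 'test.arg',
              arg=['positional arg one', 'positional arg two'],
              kwarg={'keyword arg one': 'Hello from a minion'})

    # "client": "runner" -- runs on the Master; there is no tgt, and keyword
    # arguments are given directly rather than wrapped in arg/kwarg.
    runner = salt.runner.RunnerClient(master_opts)
    runner.cmd('jobs.list_jobs')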
|
||||
Read the method signatures in the client documentation linked above, but
|
||||
hopefully an example will help illustrate the concept. This example causes Salt
|
||||
to execute two functions -- the :py:func:`test.arg execution function
|
||||
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
|
||||
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
|
||||
different structure for each command. The results for both are combined and
|
||||
returned as one response.
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
% curl -b ~/cookies.txt -sSi localhost:8000 \
|
||||
-H 'Content-type: application/json' \
|
||||
-d '
|
||||
[
|
||||
{
|
||||
"client": "local",
|
||||
"tgt": "*",
|
||||
"fun": "test.arg",
|
||||
"arg": ["positional arg one", "positional arg two"],
|
||||
"kwarg": {
|
||||
"keyword arg one": "Hello from a minion",
|
||||
"keyword arg two": "Hello again from a minion"
|
||||
}
|
||||
},
|
||||
{
|
||||
"client": "runner",
|
||||
"fun": "test.arg",
|
||||
"keyword arg one": "Hello from a master",
|
||||
"keyword arg two": "Runners do not support positional args"
|
||||
}
|
||||
]
|
||||
'
|
||||
HTTP/1.1 200 OK
|
||||
[...snip...]
|
||||
{
|
||||
"client": "runner",
|
||||
"fun": "jobs.lookup_jid",
|
||||
"jid": "20130603122505459265"
|
||||
}]
|
||||
"return": [
|
||||
{
|
||||
"jerry": {
|
||||
"args": [
|
||||
"positional arg one",
|
||||
"positional arg two"
|
||||
],
|
||||
"kwargs": {
|
||||
"keyword arg one": "Hello from a minion",
|
||||
"keyword arg two": "Hello again from a minion",
|
||||
[...snip...]
|
||||
}
|
||||
},
|
||||
[...snip; other minion returns here...]
|
||||
},
|
||||
{
|
||||
"args": [],
|
||||
"kwargs": {
|
||||
"keyword arg two": "Runners do not support positional args",
|
||||
"keyword arg one": "Hello from a master"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
.. admonition:: x-www-form-urlencoded
|
||||
One more example, this time with more commonly used functions:
|
||||
|
||||
Sending JSON or YAML in the request body is simple and most flexible,
|
||||
however sending data in urlencoded format is also supported with the
|
||||
caveats below. It is the default format for HTML forms, many JavaScript
|
||||
libraries, and the :command:`curl` command.
|
||||
.. code-block:: text
|
||||
|
||||
For example, the equivalent of running ``salt '*' test.ping`` is sending
|
||||
``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body.
|
||||
curl -b /tmp/cookies.txt -sSi localhost:8000 \
|
||||
-H 'Content-type: application/json' \
|
||||
-d '
|
||||
[
|
||||
{
|
||||
"client": "local",
|
||||
"tgt": "*",
|
||||
"fun": "state.sls",
|
||||
"kwarg": {
|
||||
"mods": "apache",
|
||||
"pillar": {
|
||||
"lookup": {
|
||||
"wwwdir": "/srv/httpd/htdocs"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy": true
|
||||
}
|
||||
]
|
||||
'
|
||||
HTTP/1.1 200 OK
|
||||
[...snip...]
|
||||
{
|
||||
"return": [
|
||||
{
|
||||
"jerry": {
|
||||
"pkg_|-install_apache_|-httpd_|-installed": {
|
||||
[...snip full state return here...]
|
||||
}
|
||||
}
|
||||
[...snip other minion returns here...]
|
||||
},
|
||||
{
|
||||
[...snip full salt-cloud output here...]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
Caveats:
|
||||
Content negotiation
|
||||
-------------------
|
||||
|
||||
This REST interface is flexible in what data formats it will accept as well
|
||||
as what formats it will return (e.g., JSON, YAML, urlencoded).
|
||||
|
||||
* Specify the format of data in the request body by including the
|
||||
:mailheader:`Content-Type` header.
|
||||
* Specify the desired data format for the response body with the
|
||||
:mailheader:`Accept` header.
|
||||
|
||||
We recommend the JSON format for most HTTP requests. urlencoded data is simple
but cannot express complex data structures, which many Salt commands require
(for example, starting a state run that uses Pillar data). Salt's CLI tool can
reformat strings passed in at the CLI into complex data structures, and that
behavior also works via salt-api, but it can be brittle; since salt-api accepts
JSON, it is best to just send JSON.
|
||||
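As a rough illustration, the same request can be made from Python with the
third-party ``requests`` library; the URL and session token below are
placeholders:

.. code-block:: python

    import requests

    # verify=False mirrors the -k flag used with the self-signed certificate
    # in the curl examples.
    resp = requests.post(
        'https://localhost:8000',
        json=[{'client': 'local', 'tgt': '*', 'fun': 'test.ping'}],
        headers={'Accept': 'application/json',
                 'X-Auth-Token': 'd40d1e1e<...snip...>'},
        verify=False)
    print(resp.json())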
|
||||
Here is an example of sending urlencoded data:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -sSik https://localhost:8000 \\
|
||||
-b ~/cookies.txt \\
|
||||
-d client=runner \\
|
||||
-d fun='jobs.lookup_jid' \\
|
||||
-d jid='20150129182456704682'
|
||||
|
||||
.. admonition:: urlencoded data caveats
|
||||
|
||||
* Only a single command may be sent per HTTP request.
|
||||
* Repeating the ``arg`` parameter multiple times will cause those
|
||||
@ -268,9 +419,23 @@ command sent to minions as well as a runner function on the master::
|
||||
|
||||
Note, some popular frameworks and languages (notably jQuery, PHP, and
|
||||
Ruby on Rails) will automatically append empty brackets onto repeated
|
||||
parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``,
|
||||
``arg[]=two``. This is not supported; send JSON or YAML instead.
|
||||
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
|
||||
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
|
||||
or YAML.
|
||||
|
||||
A note about ``curl``
|
||||
|
||||
The ``-d`` flag to curl does *not* automatically urlencode data, which can
affect passwords and other data containing characters that must be encoded.
Use the ``--data-urlencode`` flag instead. E.g.:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -ksi http://localhost:8000/login \\
|
||||
-H "Accept: application/json" \\
|
||||
-d username='myapiuser' \\
|
||||
--data-urlencode password='1234+' \\
|
||||
-d eauth='pam'
|
||||
|
||||
.. |req_token| replace:: a session token from :py:class:`~Login`.
|
||||
.. |req_accept| replace:: the desired response format.
|
||||
@ -284,21 +449,6 @@ command sent to minions as well as a runner function on the master::
|
||||
.. |401| replace:: authentication required
|
||||
.. |406| replace:: requested Content-Type not available
|
||||
|
||||
A Note About Curl
|
||||
=================
|
||||
|
||||
When sending passwords and data that might need to be urlencoded, you must set
|
||||
the ``-d`` flag to indicate the content type, and the ``--data-urlencode`` flag
|
||||
to urlencode the input.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -ksi http://localhost:8000/login \\
|
||||
-H "Accept: application/json" \\
|
||||
-d username='myapiuser' \\
|
||||
--data-urlencode password='1234+' \\
|
||||
-d eauth='pam'
|
||||
|
||||
'''
|
||||
# We need a custom pylintrc here...
|
||||
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
|
||||
@ -915,11 +1065,10 @@ class LowDataAdapter(object):
|
||||
.. code-block:: bash
|
||||
|
||||
curl -sSik https://localhost:8000 \\
|
||||
-H "Accept: application/x-yaml" \\
|
||||
-H "X-Auth-Token: d40d1e1e<...snip...>" \\
|
||||
-d client=local \\
|
||||
-d tgt='*' \\
|
||||
-d fun='test.ping' \\
|
||||
-b ~/cookies.txt \\
|
||||
-H "Accept: application/x-yaml" \\
|
||||
-H "Content-type: application/json" \\
|
||||
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
|
||||
|
||||
.. code-block:: http
|
||||
|
||||
@ -927,10 +1076,9 @@ class LowDataAdapter(object):
|
||||
Host: localhost:8000
|
||||
Accept: application/x-yaml
|
||||
X-Auth-Token: d40d1e1e
|
||||
Content-Length: 36
|
||||
Content-Type: application/x-www-form-urlencoded
|
||||
Content-Type: application/json
|
||||
|
||||
fun=test.ping&client=local&tgt=*
|
||||
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
|
||||
|
||||
**Example response:**
|
||||
|
||||
@ -943,55 +1091,10 @@ class LowDataAdapter(object):
|
||||
|
||||
return:
|
||||
- ms-0: true
  ms-1: true
  ms-2: true
  ms-3: true
  ms-4: true
|
||||
|
||||
**Other examples**:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Sending multiple positional args with urlencoded:
|
||||
curl -sSik https://localhost:8000 \\
|
||||
-d client=local \\
|
||||
-d tgt='*' \\
|
||||
-d fun='cmd.run' \\
|
||||
-d arg='du -sh .' \\
|
||||
-d arg='/path/to/dir'
|
||||
|
||||
# Sending positional args and Keyword args with JSON:
|
||||
echo '[
|
||||
{
|
||||
"client": "local",
|
||||
"tgt": "*",
|
||||
"fun": "cmd.run",
|
||||
"arg": [
|
||||
"du -sh .",
|
||||
"/path/to/dir"
|
||||
],
|
||||
"kwarg": {
|
||||
"shell": "/bin/sh",
|
||||
"template": "jinja"
|
||||
}
|
||||
}
|
||||
]' | curl -sSik https://localhost:8000 \\
|
||||
-H 'Content-type: application/json' \\
|
||||
-d@-
|
||||
|
||||
# Calling runner functions:
|
||||
curl -sSik https://localhost:8000 \\
|
||||
-d client=runner \\
|
||||
-d fun='jobs.lookup_jid' \\
|
||||
-d jid='20150129182456704682' \\
|
||||
-d outputter=highstate
|
||||
|
||||
# Calling wheel functions:
|
||||
curl -sSik https://localhost:8000 \\
|
||||
-d client=wheel \\
|
||||
-d fun='key.gen_accept' \\
|
||||
-d id_=dave \\
|
||||
-d keysize=4096
|
||||
ms-1: true
|
||||
ms-2: true
|
||||
ms-3: true
|
||||
ms-4: true
|
||||
'''
|
||||
return {
|
||||
'return': list(self.exec_lowstate(
|
||||
@ -1081,17 +1184,16 @@ class Minions(LowDataAdapter):
|
||||
.. code-block:: bash
|
||||
|
||||
curl -sSi localhost:8000/minions \\
|
||||
-b ~/cookies.txt \\
|
||||
-H "Accept: application/x-yaml" \\
|
||||
-d tgt='*' \\
|
||||
-d fun='status.diskusage'
|
||||
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
|
||||
|
||||
.. code-block:: http
|
||||
|
||||
POST /minions HTTP/1.1
|
||||
Host: localhost:8000
|
||||
Accept: application/x-yaml
|
||||
Content-Length: 26
|
||||
Content-Type: application/x-www-form-urlencoded
|
||||
Content-Type: application/json
|
||||
|
||||
tgt=*&fun=status.diskusage
|
||||
|
||||
@ -1506,20 +1608,25 @@ class Login(LowDataAdapter):
|
||||
.. code-block:: bash
|
||||
|
||||
curl -si localhost:8000/login \\
|
||||
-H "Accept: application/json" \\
|
||||
-d username='saltuser' \\
|
||||
-d password='saltpass' \\
|
||||
-d eauth='pam'
|
||||
-c ~/cookies.txt \\
|
||||
-H "Accept: application/json" \\
|
||||
-H "Content-type: application/json" \\
|
||||
-d '{
|
||||
"username": "saltuser",
|
||||
"password": "saltuser",
|
||||
"eauth": "auto"
|
||||
}'
|
||||
|
||||
.. code-block:: http
|
||||
|
||||
POST / HTTP/1.1
|
||||
Host: localhost:8000
|
||||
Content-Length: 42
|
||||
Content-Type: application/x-www-form-urlencoded
|
||||
Content-Type: application/json
|
||||
Accept: application/json
|
||||
|
||||
username=saltuser&password=saltpass&eauth=pam
|
||||
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
|
||||
|
||||
|
||||
**Example response:**
|
||||
|
||||
@ -1660,12 +1767,15 @@ class Run(LowDataAdapter):
|
||||
|
||||
curl -sS localhost:8000/run \\
|
||||
-H 'Accept: application/x-yaml' \\
|
||||
-d client='local' \\
|
||||
-d tgt='*' \\
|
||||
-d fun='test.ping' \\
|
||||
-d username='saltdev' \\
|
||||
-d password='saltdev' \\
|
||||
-d eauth='pam'
|
||||
-H 'Content-type: application/json' \\
|
||||
-d '[{
|
||||
"client": "local",
|
||||
"tgt": "*",
|
||||
"fun": "test.ping",
|
||||
"username": "saltdev",
|
||||
"password": "saltdev",
|
||||
"eauth": "auto"
|
||||
}]'
|
||||
|
||||
.. code-block:: http
|
||||
|
||||
@ -1673,9 +1783,9 @@ class Run(LowDataAdapter):
|
||||
Host: localhost:8000
|
||||
Accept: application/x-yaml
|
||||
Content-Length: 75
|
||||
Content-Type: application/x-www-form-urlencoded
|
||||
Content-Type: application/json
|
||||
|
||||
client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam
|
||||
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
|
||||
|
||||
**Example response:**
|
||||
|
||||
@ -1687,17 +1797,19 @@ class Run(LowDataAdapter):
|
||||
|
||||
return:
|
||||
- ms-0: true
|
||||
ms-1: true
|
||||
ms-2: true
|
||||
ms-3: true
|
||||
ms-4: true
|
||||
ms-1: true
|
||||
ms-2: true
|
||||
ms-3: true
|
||||
ms-4: true
|
||||
|
||||
The /run endpoint can also be used to issue commands using the salt-ssh subsystem.
|
||||
The /run endpoint can also be used to issue commands using the salt-ssh
|
||||
subsystem.
|
||||
|
||||
When using salt-ssh, eauth credentials should not be supplied. Instead, authentication
|
||||
should be handled by the SSH layer itself. The use of the salt-ssh client does not
|
||||
require a salt master to be running. Instead, only a roster file must be present
|
||||
in the salt configuration directory.
|
||||
When using salt-ssh, eauth credentials should not be supplied. Instead,
|
||||
authentication should be handled by the SSH layer itself. The use of
|
||||
the salt-ssh client does not require a salt master to be running.
|
||||
Instead, only a roster file must be present in the salt configuration
|
||||
directory.
|
||||
|
||||
All SSH client requests are synchronous.
|
||||
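For illustration, a lowstate for the SSH client might look like the following;
the target name is a placeholder and must exist in the roster file:

.. code-block:: python

    # No eauth fields are supplied; authentication is handled by the SSH
    # layer using the roster entry for the target.
    ssh_lowstate = [{
        'client': 'ssh',
        'tgt': 'minion1',
        'fun': 'test.ping',
    }]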
|
||||
@ -2210,16 +2322,18 @@ class Webhook(object):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!'
|
||||
curl -sS localhost:8000/hook \\
|
||||
-H 'Content-type: application/json' \\
|
||||
-d '{"foo": "Foo!", "bar": "Bar!"}'
|
||||
|
||||
.. code-block:: http
|
||||
|
||||
POST /hook HTTP/1.1
|
||||
Host: localhost:8000
|
||||
Content-Length: 16
|
||||
Content-Type: application/x-www-form-urlencoded
|
||||
Content-Type: application/json
|
||||
|
||||
foo=Foo&bar=Bar!
|
||||
{"foo": "Foo!", "bar": "Bar!"}
|
||||
|
||||
**Example response**:
|
||||
|
||||
|
@ -310,7 +310,7 @@ def _format_host(host, data):
|
||||
u' {tcolor} Result: {ret[result]!s}{colors[ENDC]}',
|
||||
u' {tcolor} Comment: {comment}{colors[ENDC]}',
|
||||
]
|
||||
if __opts__.get('state_output_profile', True):
|
||||
if __opts__.get('state_output_profile', True) and 'start_time' in ret:
|
||||
state_lines.extend([
|
||||
u' {tcolor} Started: {ret[start_time]!s}{colors[ENDC]}',
|
||||
u' {tcolor}Duration: {ret[duration]!s}{colors[ENDC]}',
|
||||
@ -547,7 +547,7 @@ def _format_terse(tcolor, comps, ret, colors, tabular):
|
||||
c=colors, w='\n'.join(ret['warnings'])
|
||||
)
|
||||
fmt_string += u'{0}'
|
||||
if __opts__.get('state_output_profile', True):
|
||||
if __opts__.get('state_output_profile', True) and 'start_time' in ret:
|
||||
fmt_string += u'{6[start_time]!s} [{6[duration]!s} ms] '
|
||||
fmt_string += u'{2:>10}.{3:<10} {4:7} Name: {1}{5}'
|
||||
elif isinstance(tabular, str):
|
||||
@ -559,7 +559,7 @@ def _format_terse(tcolor, comps, ret, colors, tabular):
|
||||
c=colors, w='\n'.join(ret['warnings'])
|
||||
)
|
||||
fmt_string += u' {0} Name: {1} - Function: {2}.{3} - Result: {4}'
|
||||
if __opts__.get('state_output_profile', True):
|
||||
if __opts__.get('state_output_profile', True) and 'start_time' in ret:
|
||||
fmt_string += u' Started: - {6[start_time]!s} Duration: {6[duration]!s} ms'
|
||||
fmt_string += u'{5}'
|
||||
|
||||
|
@ -74,6 +74,10 @@ def output(data):
|
||||
|
||||
return json.dumps(data, default=repr, indent=indent, sort_keys=sort_keys)
|
||||
|
||||
except UnicodeDecodeError as exc:
|
||||
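# The data could not be decoded for JSON serialization; log it and return
# a valid JSON error object instead of raising.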
log.error('Unable to serialize output to json')
|
||||
return json.dumps({'error': 'Unable to serialize output to json', 'message': str(exc)})
|
||||
|
||||
except TypeError:
|
||||
log.debug('An error occurred while outputting JSON', exc_info=True)
|
||||
# Return valid JSON for unserializable objects
|
||||
|
136
salt/scripts.py
@ -8,7 +8,9 @@ from __future__ import absolute_import, print_function
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import signal
|
||||
import logging
|
||||
import functools
|
||||
import threading
|
||||
import traceback
|
||||
import signal
|
||||
@ -40,6 +42,29 @@ def _handle_interrupt(exc, original_exc, hardfail=False, trace=''):
|
||||
raise exc
|
||||
|
||||
|
||||
def _handle_signals(client, signum, sigframe):
|
||||
trace = traceback.format_exc()
|
||||
try:
|
||||
hardcrash = client.options.hard_crash
|
||||
except (AttributeError, KeyError):
|
||||
hardcrash = False
|
||||
_handle_interrupt(
|
||||
SystemExit('\nExiting gracefully on Ctrl-c'),
|
||||
Exception('\nExiting with hard crash Ctrl-c'),
|
||||
hardcrash, trace=trace)
|
||||
|
||||
|
||||
def _install_signal_handlers(client):
|
||||
# Install the SIGINT/SIGTERM handlers if not done so far
|
||||
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
|
||||
# No custom signal handling was added, install our own
|
||||
signal.signal(signal.SIGINT, functools.partial(_handle_signals, client))
|
||||
|
||||
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
|
||||
# No custom signal handling was added, install our own
|
||||
signal.signal(signal.SIGTERM, functools.partial(_handle_signals, client))
|
||||
|
||||
|
||||
def salt_master():
|
||||
'''
|
||||
Start the salt master.
|
||||
@ -110,6 +135,8 @@ def salt_minion():
|
||||
Start the salt minion in a subprocess.
|
||||
Auto restart minion on error.
|
||||
'''
|
||||
import signal
|
||||
|
||||
import salt.utils.process
|
||||
salt.utils.process.notify_systemd()
|
||||
|
||||
@ -314,20 +341,10 @@ def salt_key():
|
||||
Manage the authentication keys with salt-key.
|
||||
'''
|
||||
import salt.cli.key
|
||||
client = None
|
||||
try:
|
||||
client = salt.cli.key.SaltKey()
|
||||
_install_signal_handlers(client)
|
||||
client.run()
|
||||
except KeyboardInterrupt as err:
|
||||
trace = traceback.format_exc()
|
||||
try:
|
||||
hardcrash = client.options.hard_crash
|
||||
except (AttributeError, KeyError):
|
||||
hardcrash = False
|
||||
_handle_interrupt(
|
||||
SystemExit('\nExiting gracefully on Ctrl-c'),
|
||||
err,
|
||||
hardcrash, trace=trace)
|
||||
except Exception as err:
|
||||
sys.stderr.write("Error: {0}\n".format(err))
|
||||
|
||||
@ -338,20 +355,9 @@ def salt_cp():
|
||||
master.
|
||||
'''
|
||||
import salt.cli.cp
|
||||
client = None
|
||||
try:
|
||||
client = salt.cli.cp.SaltCPCli()
|
||||
client.run()
|
||||
except KeyboardInterrupt as err:
|
||||
trace = traceback.format_exc()
|
||||
try:
|
||||
hardcrash = client.options.hard_crash
|
||||
except (AttributeError, KeyError):
|
||||
hardcrash = False
|
||||
_handle_interrupt(
|
||||
SystemExit('\nExiting gracefully on Ctrl-c'),
|
||||
err,
|
||||
hardcrash, trace=trace)
|
||||
client = salt.cli.cp.SaltCPCli()
|
||||
_install_signal_handlers(client)
|
||||
client.run()
|
||||
|
||||
|
||||
def salt_call():
|
||||
@ -362,20 +368,9 @@ def salt_call():
|
||||
import salt.cli.call
|
||||
if '' in sys.path:
|
||||
sys.path.remove('')
|
||||
client = None
|
||||
try:
|
||||
client = salt.cli.call.SaltCall()
|
||||
client.run()
|
||||
except KeyboardInterrupt as err:
|
||||
trace = traceback.format_exc()
|
||||
try:
|
||||
hardcrash = client.options.hard_crash
|
||||
except (AttributeError, KeyError):
|
||||
hardcrash = False
|
||||
_handle_interrupt(
|
||||
SystemExit('\nExiting gracefully on Ctrl-c'),
|
||||
err,
|
||||
hardcrash, trace=trace)
|
||||
client = salt.cli.call.SaltCall()
|
||||
_install_signal_handlers(client)
|
||||
client.run()
|
||||
|
||||
|
||||
def salt_run():
|
||||
@ -385,20 +380,9 @@ def salt_run():
|
||||
import salt.cli.run
|
||||
if '' in sys.path:
|
||||
sys.path.remove('')
|
||||
client = None
|
||||
try:
|
||||
client = salt.cli.run.SaltRun()
|
||||
client.run()
|
||||
except KeyboardInterrupt as err:
|
||||
trace = traceback.format_exc()
|
||||
try:
|
||||
hardcrash = client.options.hard_crash
|
||||
except (AttributeError, KeyError):
|
||||
hardcrash = False
|
||||
_handle_interrupt(
|
||||
SystemExit('\nExiting gracefully on Ctrl-c'),
|
||||
err,
|
||||
hardcrash, trace=trace)
|
||||
client = salt.cli.run.SaltRun()
|
||||
_install_signal_handlers(client)
|
||||
client.run()
|
||||
|
||||
|
||||
def salt_ssh():
|
||||
@ -408,20 +392,10 @@ def salt_ssh():
|
||||
import salt.cli.ssh
|
||||
if '' in sys.path:
|
||||
sys.path.remove('')
|
||||
client = None
|
||||
try:
|
||||
client = salt.cli.ssh.SaltSSH()
|
||||
_install_signal_handlers(client)
|
||||
client.run()
|
||||
except KeyboardInterrupt as err:
|
||||
trace = traceback.format_exc()
|
||||
try:
|
||||
hardcrash = client.options.hard_crash
|
||||
except (AttributeError, KeyError):
|
||||
hardcrash = False
|
||||
_handle_interrupt(
|
||||
SystemExit('\nExiting gracefully on Ctrl-c'),
|
||||
err,
|
||||
hardcrash, trace=trace)
|
||||
except SaltClientError as err:
|
||||
trace = traceback.format_exc()
|
||||
try:
|
||||
@ -454,20 +428,9 @@ def salt_cloud():
|
||||
print('salt-cloud is not available in this system')
|
||||
sys.exit(salt.defaults.exitcodes.EX_UNAVAILABLE)
|
||||
|
||||
client = None
|
||||
try:
|
||||
client = salt.cloud.cli.SaltCloud()
|
||||
client.run()
|
||||
except KeyboardInterrupt as err:
|
||||
trace = traceback.format_exc()
|
||||
try:
|
||||
hardcrash = client.options.hard_crash
|
||||
except (AttributeError, KeyError):
|
||||
hardcrash = False
|
||||
_handle_interrupt(
|
||||
SystemExit('\nExiting gracefully on Ctrl-c'),
|
||||
err,
|
||||
hardcrash, trace=trace)
|
||||
client = salt.cloud.cli.SaltCloud()
|
||||
_install_signal_handlers(client)
|
||||
client.run()
|
||||
|
||||
|
||||
def salt_api():
|
||||
@ -490,20 +453,9 @@ def salt_main():
|
||||
import salt.cli.salt
|
||||
if '' in sys.path:
|
||||
sys.path.remove('')
|
||||
client = None
|
||||
try:
|
||||
client = salt.cli.salt.SaltCMD()
|
||||
client.run()
|
||||
except KeyboardInterrupt as err:
|
||||
trace = traceback.format_exc()
|
||||
try:
|
||||
hardcrash = client.options.hard_crash
|
||||
except (AttributeError, KeyError):
|
||||
hardcrash = False
|
||||
_handle_interrupt(
|
||||
SystemExit('\nExiting gracefully on Ctrl-c'),
|
||||
err,
|
||||
hardcrash, trace=trace)
|
||||
client = salt.cli.salt.SaltCMD()
|
||||
_install_signal_handlers(client)
|
||||
client.run()
|
||||
|
||||
|
||||
def salt_spm():
|
||||
|
@ -1820,6 +1820,8 @@ class State(object):
|
||||
tag = _gen_tag(low)
|
||||
if ((low.get('failhard', False) or self.opts['failhard'])
and tag in running):
|
||||
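# A matching requisite that has not finished yet (result is None) is not
# treated as a failure.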
if running[tag]['result'] is None:
|
||||
return False
|
||||
return not running[tag]['result']
|
||||
return False
|
||||
|
||||
|
@ -94,6 +94,7 @@ def extracted(name,
|
||||
if_missing=None,
|
||||
keep=False,
|
||||
trim_output=False,
|
||||
skip_verify=False,
|
||||
source_hash_update=None,
|
||||
use_cmd_unzip=False,
|
||||
**kwargs):
|
||||
@ -175,6 +176,13 @@ def extracted(name,
|
||||
|
||||
.. versionadded:: 2016.3.0
|
||||
|
||||
skip_verify:False
|
||||
If ``True``, hash verification of remote file sources (``http://``,
|
||||
``https://``, ``ftp://``) will be skipped, and the ``source_hash``
|
||||
argument will be ignored.
|
||||
|
||||
.. versionadded:: 2016.3.4
|
||||
|
||||
archive_format
|
||||
``tar``, ``zip`` or ``rar``
|
||||
|
||||
@ -326,6 +334,7 @@ def extracted(name,
|
||||
source=source,
|
||||
source_hash=source_hash,
|
||||
makedirs=True,
|
||||
skip_verify=skip_verify,
|
||||
saltenv=__env__)
|
||||
log.debug('file.managed: {0}'.format(file_result))
|
||||
# get value of first key
|
||||
|
@ -105,7 +105,7 @@ def _get_branch_opts(branch, local_branch, all_local_branches,
|
||||
return ret
|
||||
|
||||
|
||||
def _get_local_rev_and_branch(target, user):
|
||||
def _get_local_rev_and_branch(target, user, password):
|
||||
'''
|
||||
Return the local revision for before/after comparisons
|
||||
'''
|
||||
@ -113,6 +113,7 @@ def _get_local_rev_and_branch(target, user):
|
||||
try:
|
||||
local_rev = __salt__['git.revision'](target,
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True)
|
||||
except CommandExecutionError:
|
||||
log.info('No local revision for {0}'.format(target))
|
||||
@ -122,6 +123,7 @@ def _get_local_rev_and_branch(target, user):
|
||||
try:
|
||||
local_branch = __salt__['git.current_branch'](target,
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True)
|
||||
except CommandExecutionError:
|
||||
log.info('No local branch for {0}'.format(target))
|
||||
@ -205,6 +207,7 @@ def latest(name,
|
||||
target=None,
|
||||
branch=None,
|
||||
user=None,
|
||||
password=None,
|
||||
update_head=True,
|
||||
force_checkout=False,
|
||||
force_clone=False,
|
||||
@ -262,6 +265,12 @@ def latest(name,
|
||||
|
||||
.. versionadded:: 0.17.0
|
||||
|
||||
password
|
||||
Windows only. Required when specifying ``user``. This parameter will be
|
||||
ignored on non-Windows platforms.
|
||||
|
||||
.. versionadded:: 2016.3.4
|
||||
|
||||
update_head : True
|
||||
If set to ``False``, then the remote repository will be fetched (if
|
||||
necessary) to ensure that the commit to which ``rev`` points exists in
|
||||
@ -503,6 +512,8 @@ def latest(name,
|
||||
branch = str(branch)
|
||||
if user is not None and not isinstance(user, six.string_types):
|
||||
user = str(user)
|
||||
if password is not None and not isinstance(password, six.string_types):
|
||||
password = str(password)
|
||||
if remote is not None and not isinstance(remote, six.string_types):
|
||||
remote = str(remote)
|
||||
if identity is not None:
|
||||
@ -564,7 +575,7 @@ def latest(name,
|
||||
return _fail(ret, ('\'rev\' is not compatible with the \'mirror\' and '
|
||||
'\'bare\' arguments'))
|
||||
|
||||
run_check_cmd_kwargs = {'runas': user}
|
||||
run_check_cmd_kwargs = {'runas': user, 'password': password}
|
||||
if 'shell' in __grains__:
|
||||
run_check_cmd_kwargs['shell'] = __grains__['shell']
|
||||
|
||||
@ -588,6 +599,7 @@ def latest(name,
|
||||
heads=False,
|
||||
tags=False,
|
||||
user=user,
|
||||
password=password,
|
||||
identity=identity,
|
||||
https_user=https_user,
|
||||
https_pass=https_pass,
|
||||
@ -682,13 +694,18 @@ def latest(name,
|
||||
check = 'refs' if bare else '.git'
|
||||
gitdir = os.path.join(target, check)
|
||||
comments = []
|
||||
if os.path.isdir(gitdir) or __salt__['git.is_worktree'](target):
|
||||
if os.path.isdir(gitdir) or __salt__['git.is_worktree'](target,
|
||||
user=user,
|
||||
password=password):
|
||||
# Target directory is a git repository or git worktree
|
||||
try:
|
||||
all_local_branches = __salt__['git.list_branches'](
|
||||
target, user=user)
|
||||
all_local_tags = __salt__['git.list_tags'](target, user=user)
|
||||
local_rev, local_branch = _get_local_rev_and_branch(target, user)
|
||||
target, user=user, password=password)
|
||||
all_local_tags = __salt__['git.list_tags'](target,
|
||||
user=user,
|
||||
password=password)
|
||||
local_rev, local_branch = \
|
||||
_get_local_rev_and_branch(target, user, password)
|
||||
|
||||
if not bare and remote_rev is None and local_rev is not None:
|
||||
return _fail(
|
||||
@ -723,6 +740,7 @@ def latest(name,
|
||||
target,
|
||||
branch + '^{commit}',
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True)
|
||||
except CommandExecutionError as exc:
|
||||
return _fail(
|
||||
@ -734,12 +752,16 @@ def latest(name,
|
||||
|
||||
remotes = __salt__['git.remotes'](target,
|
||||
user=user,
|
||||
password=password,
|
||||
redact_auth=False)
|
||||
|
||||
revs_match = _revs_equal(local_rev, remote_rev, remote_rev_type)
|
||||
try:
|
||||
local_changes = bool(
|
||||
__salt__['git.diff'](target, 'HEAD', user=user)
|
||||
__salt__['git.diff'](target,
|
||||
'HEAD',
|
||||
user=user,
|
||||
password=password)
|
||||
)
|
||||
except CommandExecutionError:
|
||||
# No need to capture the error and log it, the _git_run()
|
||||
@ -767,6 +789,8 @@ def latest(name,
|
||||
__salt__['git.rev_parse'](
|
||||
target,
|
||||
remote_rev + '^{commit}',
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True)
|
||||
except CommandExecutionError:
|
||||
# Local checkout doesn't have the remote_rev
|
||||
@ -787,6 +811,7 @@ def latest(name,
|
||||
target,
|
||||
desired_upstream,
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True)
|
||||
except CommandExecutionError:
|
||||
pass
|
||||
@ -806,6 +831,7 @@ def latest(name,
|
||||
target,
|
||||
rev + '^{commit}',
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True)
|
||||
except CommandExecutionError:
|
||||
# Shouldn't happen if the tag exists
|
||||
@ -875,6 +901,7 @@ def latest(name,
|
||||
refs=[base_rev, remote_rev],
|
||||
is_ancestor=True,
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True)
|
||||
|
||||
if fast_forward is False:
|
||||
@ -903,6 +930,7 @@ def latest(name,
|
||||
base_branch + '@{upstream}',
|
||||
opts=['--abbrev-ref'],
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True)
|
||||
except CommandExecutionError:
|
||||
# There is a local branch but the rev-parse command
|
||||
@ -970,6 +998,7 @@ def latest(name,
|
||||
url=name,
|
||||
remote=remote,
|
||||
user=user,
|
||||
password=password,
|
||||
https_user=https_user,
|
||||
https_pass=https_pass)
|
||||
comments.append(
|
||||
@ -1121,6 +1150,7 @@ def latest(name,
|
||||
force=force_fetch,
|
||||
refspecs=refspecs,
|
||||
user=user,
|
||||
password=password,
|
||||
identity=identity,
|
||||
saltenv=__env__)
|
||||
except CommandExecutionError as exc:
|
||||
@ -1136,6 +1166,8 @@ def latest(name,
|
||||
__salt__['git.rev_parse'](
|
||||
target,
|
||||
remote_rev + '^{commit}',
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True)
|
||||
except CommandExecutionError as exc:
|
||||
return _fail(
|
||||
@ -1167,7 +1199,8 @@ def latest(name,
|
||||
target,
|
||||
refs=[base_rev, remote_rev],
|
||||
is_ancestor=True,
|
||||
user=user)
|
||||
user=user,
|
||||
password=password)
|
||||
|
||||
if fast_forward is False and not force_reset:
|
||||
return _not_fast_forward(
|
||||
@ -1207,7 +1240,8 @@ def latest(name,
|
||||
checkout_rev,
|
||||
force=force_checkout,
|
||||
opts=checkout_opts,
|
||||
user=user)
|
||||
user=user,
|
||||
password=password)
|
||||
if '-b' in checkout_opts:
|
||||
comments.append(
|
||||
'New branch \'{0}\' was checked out, with {1} '
|
||||
@ -1228,7 +1262,8 @@ def latest(name,
|
||||
__salt__['git.reset'](
|
||||
target,
|
||||
opts=['--hard', remote_rev],
|
||||
user=user
|
||||
user=user,
|
||||
password=password,
|
||||
)
|
||||
ret['changes']['forced update'] = True
|
||||
comments.append(
|
||||
@ -1239,7 +1274,8 @@ def latest(name,
|
||||
__salt__['git.branch'](
|
||||
target,
|
||||
opts=branch_opts,
|
||||
user=user)
|
||||
user=user,
|
||||
password=password)
|
||||
comments.append(upstream_action)
|
||||
|
||||
# Fast-forward to the desired revision
|
||||
@ -1255,6 +1291,8 @@ def latest(name,
|
||||
if __salt__['git.symbolic_ref'](target,
|
||||
'HEAD',
|
||||
opts=['--quiet'],
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True):
|
||||
merge_rev = remote_rev if rev == 'HEAD' \
|
||||
else desired_upstream
|
||||
@ -1276,8 +1314,8 @@ def latest(name,
|
||||
target,
|
||||
rev=merge_rev,
|
||||
opts=merge_opts,
|
||||
user=user
|
||||
)
|
||||
user=user,
|
||||
password=password)
|
||||
comments.append(
|
||||
'Repository was fast-forwarded to {0}'
|
||||
.format(remote_loc)
|
||||
@ -1295,8 +1333,8 @@ def latest(name,
|
||||
target,
|
||||
opts=['--hard',
|
||||
remote_rev if rev == 'HEAD' else rev],
|
||||
user=user
|
||||
)
|
||||
user=user,
|
||||
password=password)
|
||||
comments.append(
|
||||
'Repository was reset to {0} (fast-forward)'
|
||||
.format(rev)
|
||||
@ -1311,6 +1349,7 @@ def latest(name,
|
||||
'update',
|
||||
opts=['--init', '--recursive'],
|
||||
user=user,
|
||||
password=password,
|
||||
identity=identity,
|
||||
saltenv=__env__)
|
||||
except CommandExecutionError as exc:
|
||||
@ -1332,6 +1371,7 @@ def latest(name,
|
||||
force=force_fetch,
|
||||
refspecs=refspecs,
|
||||
user=user,
|
||||
password=password,
|
||||
identity=identity,
|
||||
saltenv=__env__)
|
||||
except CommandExecutionError as exc:
|
||||
@ -1349,6 +1389,7 @@ def latest(name,
|
||||
new_rev = __salt__['git.revision'](
|
||||
cwd=target,
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True)
|
||||
except CommandExecutionError:
|
||||
new_rev = None
|
||||
@ -1447,6 +1488,7 @@ def latest(name,
|
||||
__salt__['git.clone'](target,
|
||||
name,
|
||||
user=user,
|
||||
password=password,
|
||||
opts=clone_opts,
|
||||
identity=identity,
|
||||
https_user=https_user,
|
||||
@ -1483,7 +1525,7 @@ def latest(name,
|
||||
else:
|
||||
if remote_rev_type == 'tag' \
|
||||
and rev not in __salt__['git.list_tags'](
|
||||
target, user=user):
|
||||
target, user=user, password=password):
|
||||
return _fail(
|
||||
ret,
|
||||
'Revision \'{0}\' does not exist in clone'
|
||||
@ -1493,8 +1535,10 @@ def latest(name,
|
||||
|
||||
if branch is not None:
|
||||
if branch not in \
|
||||
__salt__['git.list_branches'](target,
|
||||
user=user):
|
||||
__salt__['git.list_branches'](
|
||||
target,
|
||||
user=user,
|
||||
password=password):
|
||||
if rev == 'HEAD':
|
||||
checkout_rev = remote_rev
|
||||
else:
|
||||
@ -1504,7 +1548,8 @@ def latest(name,
|
||||
__salt__['git.checkout'](target,
|
||||
checkout_rev,
|
||||
opts=['-b', branch],
|
||||
user=user)
|
||||
user=user,
|
||||
password=password)
|
||||
comments.append(
|
||||
'Branch \'{0}\' checked out, with {1} '
|
||||
'as a starting point'.format(
|
||||
@ -1514,14 +1559,14 @@ def latest(name,
|
||||
)
|
||||
|
||||
local_rev, local_branch = \
|
||||
_get_local_rev_and_branch(target, user)
|
||||
_get_local_rev_and_branch(target, user, password)
|
||||
|
||||
if not _revs_equal(local_rev, remote_rev, remote_rev_type):
|
||||
__salt__['git.reset'](
|
||||
target,
|
||||
opts=['--hard', remote_rev],
|
||||
user=user
|
||||
)
|
||||
user=user,
|
||||
password=password)
|
||||
comments.append(
|
||||
'Repository was reset to {0}'.format(remote_loc)
|
||||
)
|
||||
@ -1532,6 +1577,7 @@ def latest(name,
|
||||
local_branch + '@{upstream}',
|
||||
opts=['--abbrev-ref'],
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True)
|
||||
except CommandExecutionError:
|
||||
upstream = False
|
||||
@ -1545,7 +1591,9 @@ def latest(name,
|
||||
branch_opts = _get_branch_opts(
|
||||
branch,
|
||||
local_branch,
|
||||
__salt__['git.list_branches'](target, user=user),
|
||||
__salt__['git.list_branches'](target,
|
||||
user=user,
|
||||
password=password),
|
||||
desired_upstream,
|
||||
git_ver)
|
||||
elif upstream and desired_upstream is False:
|
||||
@ -1568,7 +1616,9 @@ def latest(name,
|
||||
branch_opts = _get_branch_opts(
|
||||
branch,
|
||||
local_branch,
|
||||
__salt__['git.list_branches'](target, user=user),
|
||||
__salt__['git.list_branches'](target,
|
||||
user=user,
|
||||
password=password),
|
||||
desired_upstream,
|
||||
git_ver)
|
||||
else:
|
||||
@ -1578,7 +1628,8 @@ def latest(name,
|
||||
__salt__['git.branch'](
|
||||
target,
|
||||
opts=branch_opts,
|
||||
user=user)
|
||||
user=user,
|
||||
password=password)
|
||||
comments.append(upstream_action)
|
||||
|
||||
if submodules and remote_rev:
|
||||
@ -1587,6 +1638,7 @@ def latest(name,
|
||||
'update',
|
||||
opts=['--init', '--recursive'],
|
||||
user=user,
|
||||
password=password,
|
||||
identity=identity)
|
||||
except CommandExecutionError as exc:
|
||||
return _failed_submodule_update(ret, exc, comments)
|
||||
@ -1595,6 +1647,7 @@ def latest(name,
|
||||
new_rev = __salt__['git.revision'](
|
||||
cwd=target,
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True)
|
||||
except CommandExecutionError:
|
||||
new_rev = None
|
||||
@ -1624,7 +1677,8 @@ def present(name,
|
||||
template=None,
|
||||
separate_git_dir=None,
|
||||
shared=None,
|
||||
user=None):
|
||||
user=None,
|
||||
password=None):
|
||||
'''
|
||||
Ensure that a repository exists in the given directory
|
||||
|
||||
@ -1678,6 +1732,12 @@ def present(name,
|
||||
|
||||
.. versionadded:: 0.17.0
|
||||
|
||||
password
|
||||
Windows only. Required when specifying ``user``. This parameter will be
|
||||
ignored on non-Windows platforms.
|
||||
|
||||
.. versionadded:: 2016.3.4
|
||||
|
||||
.. _`git-init(1)`: http://git-scm.com/docs/git-init
|
||||
.. _`worktree`: http://git-scm.com/docs/git-worktree
|
||||
'''
|
||||
@ -1689,7 +1749,7 @@ def present(name,
|
||||
return ret
|
||||
elif not bare and \
|
||||
(os.path.isdir(os.path.join(name, '.git')) or
|
||||
__salt__['git.is_worktree'](name)):
|
||||
__salt__['git.is_worktree'](name, user=user, password=password)):
|
||||
return ret
|
||||
# Directory exists and is not a git repo, if force is set destroy the
|
||||
# directory and recreate, otherwise throw an error
|
||||
@ -1747,7 +1807,8 @@ def present(name,
|
||||
template=template,
|
||||
separate_git_dir=separate_git_dir,
|
||||
shared=shared,
|
||||
user=user)
|
||||
user=user,
|
||||
password=password)
|
||||
|
||||
actions = [
|
||||
'Initialized {0}repository in {1}'.format(
|
||||
@ -1773,6 +1834,7 @@ def detached(name,
|
||||
target=None,
|
||||
remote='origin',
|
||||
user=None,
|
||||
password=None,
|
||||
force_clone=False,
|
||||
force_checkout=False,
|
||||
fetch_remote=True,
|
||||
@ -1811,6 +1873,12 @@ def detached(name,
|
||||
User under which to run git commands. By default, commands are run by
|
||||
the user under which the minion is running.
|
||||
|
||||
password
|
||||
Windows only. Required when specifying ``user``. This parameter will be
|
||||
ignored on non-Windows platforms.
|
||||
|
||||
.. versionadded:: 2016.3.4
|
||||
|
||||
force_clone : False
|
||||
If the ``target`` directory exists and is not a git repository, then
|
||||
this state will fail. Set this argument to ``True`` to remove the
|
||||
@ -1966,18 +2034,24 @@ def detached(name,
|
||||
local_commit_id = None
|
||||
|
||||
gitdir = os.path.join(target, '.git')
|
||||
if os.path.isdir(gitdir) or __salt__['git.is_worktree'](target):
|
||||
if os.path.isdir(gitdir) \
|
||||
or __salt__['git.is_worktree'](target, user=user, password=password):
|
||||
# Target directory is a git repository or git worktree
|
||||
|
||||
local_commit_id = _get_local_rev_and_branch(target, user)[0]
|
||||
local_commit_id = _get_local_rev_and_branch(target, user, password)[0]
|
||||
|
||||
if remote_ref_type is 'hash' and __salt__['git.describe'](target, ref):
|
||||
if remote_ref_type is 'hash' \
|
||||
and __salt__['git.describe'](target,
|
||||
ref,
|
||||
user=user,
|
||||
password=password):
|
||||
# The ref is a hash and it exists locally so skip to checkout
|
||||
hash_exists_locally = True
|
||||
else:
|
||||
# Check that remote is present and set to correct url
|
||||
remotes = __salt__['git.remotes'](target,
|
||||
user=user,
|
||||
password=password,
|
||||
redact_auth=False)
|
||||
|
||||
if remote in remotes and name in remotes[remote]['fetch']:
|
||||
@ -2002,6 +2076,7 @@ def detached(name,
|
||||
url=name,
|
||||
remote=remote,
|
||||
user=user,
|
||||
password=password,
|
||||
https_user=https_user,
|
||||
https_pass=https_pass)
|
||||
comments.append(
|
||||
@ -2073,6 +2148,7 @@ def detached(name,
|
||||
__salt__['git.clone'](target,
|
||||
name,
|
||||
user=user,
|
||||
password=password,
|
||||
opts=clone_opts,
|
||||
identity=identity,
|
||||
https_user=https_user,
|
||||
@ -2119,6 +2195,7 @@ def detached(name,
|
||||
force=True,
|
||||
refspecs=refspecs,
|
||||
user=user,
|
||||
password=password,
|
||||
identity=identity,
|
||||
saltenv=__env__)
|
||||
except CommandExecutionError as exc:
|
||||
@ -2135,7 +2212,7 @@ def detached(name,
|
||||
#get refs and checkout
|
||||
checkout_commit_id = ''
|
||||
if remote_ref_type is 'hash':
|
||||
if __salt__['git.describe'](target, ref):
|
||||
if __salt__['git.describe'](target, ref, user=user, password=password):
|
||||
checkout_commit_id = ref
|
||||
else:
|
||||
return _fail(
|
||||
@ -2147,6 +2224,7 @@ def detached(name,
|
||||
all_remote_refs = __salt__['git.remote_refs'](
|
||||
target,
|
||||
user=user,
|
||||
password=password,
|
||||
identity=identity,
|
||||
https_user=https_user,
|
||||
https_pass=https_pass,
|
||||
@ -2179,8 +2257,8 @@ def detached(name,
|
||||
__salt__['git.reset'](
|
||||
target,
|
||||
opts=['--hard', 'HEAD'],
|
||||
user=user
|
||||
)
|
||||
user=user,
|
||||
password=password)
|
||||
comments.append(
|
||||
'Repository was reset to HEAD before checking out ref'
|
||||
)
|
||||
@ -2202,7 +2280,8 @@ def detached(name,
|
||||
__salt__['git.checkout'](target,
|
||||
checkout_commit_id,
|
||||
force=force_checkout,
|
||||
user=user)
|
||||
user=user,
|
||||
password=password)
|
||||
comments.append(
|
||||
'Commit ID {0} was checked out at {1}'.format(
|
||||
checkout_commit_id,
|
||||
@ -2214,6 +2293,7 @@ def detached(name,
|
||||
new_rev = __salt__['git.revision'](
|
||||
cwd=target,
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True)
|
||||
except CommandExecutionError:
|
||||
new_rev = None
|
||||
@ -2223,6 +2303,7 @@ def detached(name,
|
||||
'update',
|
||||
opts=['--init', '--recursive'],
|
||||
user=user,
|
||||
password=password,
|
||||
identity=identity)
|
||||
comments.append(
|
||||
'Submodules were updated'
|
||||
@ -2244,6 +2325,7 @@ def config_unset(name,
|
||||
value_regex=None,
|
||||
repo=None,
|
||||
user=None,
|
||||
password=None,
|
||||
**kwargs):
|
||||
r'''
|
||||
.. versionadded:: 2015.8.0
|
||||
@ -2274,7 +2356,14 @@ def config_unset(name,
|
||||
set. Required unless ``global`` is set to ``True``.
|
||||
|
||||
user
|
||||
Optional name of a user as whom `git config` will be run
|
||||
User under which to run git commands. By default, commands are run by
|
||||
the user under which the minion is running.
|
||||
|
||||
password
|
||||
Windows only. Required when specifying ``user``. This parameter will be
|
||||
ignored on non-Windows platforms.
|
||||
|
||||
.. versionadded:: 2016.3.4
|
||||
|
||||
global : False
|
||||
If ``True``, this will set a global git config option
|
||||
@ -2349,6 +2438,7 @@ def config_unset(name,
|
||||
key=key,
|
||||
value_regex=value_regex,
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True,
|
||||
**{'global': global_}
|
||||
)
|
||||
@ -2397,6 +2487,7 @@ def config_unset(name,
|
||||
key=key,
|
||||
value_regex=None,
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True,
|
||||
**{'global': global_}
|
||||
)
|
||||
@ -2412,6 +2503,7 @@ def config_unset(name,
|
||||
value_regex=value_regex,
|
||||
all=all_,
|
||||
user=user,
|
||||
password=password,
|
||||
**{'global': global_}
|
||||
)
|
||||
except CommandExecutionError as exc:
|
||||
@ -2434,6 +2526,7 @@ def config_unset(name,
|
||||
key=key,
|
||||
value_regex=None,
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True,
|
||||
**{'global': global_}
|
||||
)
|
||||
@ -2453,6 +2546,7 @@ def config_unset(name,
|
||||
key=key,
|
||||
value_regex=value_regex,
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True,
|
||||
**{'global': global_}
|
||||
)
|
||||
@ -2474,6 +2568,7 @@ def config_set(name,
|
||||
multivar=None,
|
||||
repo=None,
|
||||
user=None,
|
||||
password=None,
|
||||
**kwargs):
|
||||
'''
|
||||
.. versionadded:: 2014.7.0
|
||||
@ -2504,7 +2599,14 @@ def config_set(name,
|
||||
set. Required unless ``global`` is set to ``True``.
|
||||
|
||||
user
|
||||
Optional name of a user as whom `git config` will be run
|
||||
User under which to run git commands. By default, the commands are run
|
||||
by the user under which the minion is running.
|
||||
|
||||
password
|
||||
Windows only. Required when specifying ``user``. This parameter will be
|
||||
ignored on non-Windows platforms.
|
||||
|
||||
.. versionadded:: 2016.3.4
|
||||
|
||||
global : False
|
||||
If ``True``, this will set a global git config option
|
||||
@ -2614,6 +2716,7 @@ def config_set(name,
|
||||
cwd=repo,
|
||||
key=name,
|
||||
user=user,
|
||||
password=password,
|
||||
ignore_retcode=True,
|
||||
**{'all': True, 'global': global_}
|
||||
)
|
||||
@ -2644,6 +2747,7 @@ def config_set(name,
|
||||
value=value,
|
||||
multivar=multivar,
|
||||
user=user,
|
||||
password=password,
|
||||
**{'global': global_}
|
||||
)
|
||||
except CommandExecutionError as exc:
|
||||
@ -2679,7 +2783,13 @@ def config_set(name,
|
||||
return ret
|
||||
|
||||
|
||||
def config(name, value=None, multivar=None, repo=None, user=None, **kwargs):
|
||||
def config(name,
|
||||
value=None,
|
||||
multivar=None,
|
||||
repo=None,
|
||||
user=None,
|
||||
password=None,
|
||||
**kwargs):
|
||||
'''
|
||||
Pass through to git.config_set and display a deprecation warning
|
||||
'''
|
||||
@ -2693,6 +2803,7 @@ def config(name, value=None, multivar=None, repo=None, user=None, **kwargs):
|
||||
multivar=multivar,
|
||||
repo=repo,
|
||||
user=user,
|
||||
password=password,
|
||||
**kwargs)
|
||||
|
||||
|
||||
|
@ -129,28 +129,38 @@ def mounted(name,
|
||||
.. versionadded:: Carbon
|
||||
|
||||
extra_mount_invisible_options
|
||||
A list of extra options that are not visible through the /proc/self/mountinfo
|
||||
interface. If a option is not visible through this interface it will always
|
||||
remount the device. This Option extends the builtin mount_invisible_options list.
|
||||
A list of extra options that are not visible through the
|
||||
``/proc/self/mountinfo`` interface.
|
||||
|
||||
If an option is not visible through this interface it will always remount
|
||||
the device. This option extends the builtin ``mount_invisible_options``
|
||||
list.
|
||||
|
||||
extra_mount_invisible_keys
|
||||
A list of extra key options that are not visible through the /proc/self/mountinfo
|
||||
interface. If a key option is not visible through this interface it will always
|
||||
remount the device. This Option extends the builtin mount_invisible_keys list.
|
||||
A good example for a key Option is the password Option:
|
||||
A list of extra key options that are not visible through the
|
||||
``/proc/self/mountinfo`` interface.
|
||||
|
||||
If a key option is not visible through this interface it will always
|
||||
remount the device. This option extends the builtin
|
||||
``mount_invisible_keys`` list.
|
||||
|
||||
A good example of a key option is the password option::
|
||||
|
||||
password=badsecret
|
||||
|
||||
extra_ignore_fs_keys
|
||||
A dict of filesystem options which should not force a remount. This will update
|
||||
the internal dictionary. The dict should look like this:
|
||||
the internal dictionary. The dict should look like this::
|
||||
|
||||
{
|
||||
'ramfs': ['size']
|
||||
}
|
||||
|
||||
extra_mount_translate_options
|
||||
A dict of mount options that gets translated when mounted. To prevent a remount
|
||||
add additional Options to the default dictionary. This will update the internal
|
||||
dictionary. The dictionary should look like this:
|
||||
add additional options to the default dictionary. This will update the internal
|
||||
dictionary. The dictionary should look like this::
|
||||
|
||||
{
|
||||
'tcp': 'proto=tcp',
|
||||
'udp': 'proto=udp'
|
||||
|
@ -96,12 +96,19 @@ import salt.ext.six as six
|
||||
_repack_pkgs = _namespaced_function(_repack_pkgs, globals())
|
||||
|
||||
if salt.utils.is_windows():
|
||||
# pylint: disable=W0611
|
||||
# pylint: disable=import-error,no-name-in-module
|
||||
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
|
||||
# pylint: disable=import-error
|
||||
# pylint: enable=W0611
|
||||
from salt.modules.win_pkg import _get_package_info
|
||||
from salt.modules.win_pkg import get_repo_data
|
||||
from salt.modules.win_pkg import _get_repo_src_dest
|
||||
from salt.modules.win_pkg import _get_latest_pkg_version
|
||||
from salt.modules.win_pkg import _reverse_cmp_pkg_versions
|
||||
_get_package_info = _namespaced_function(_get_package_info, globals())
|
||||
get_repo_data = _namespaced_function(get_repo_data, globals())
|
||||
_get_repo_src_dest = _namespaced_function(_get_repo_src_dest, globals())
|
||||
_get_latest_pkg_version = \
|
||||
_namespaced_function(_get_latest_pkg_version, globals())
|
||||
_reverse_cmp_pkg_versions = \
|
||||
|
@ -39,6 +39,9 @@ def present(dbname, name,
|
||||
name
|
||||
The name of the schema to manage
|
||||
|
||||
owner
|
||||
The database user that will be the owner of the schema
|
||||
|
||||
db_user
|
||||
database username if different from config or default
|
||||
|
||||
@ -99,7 +102,7 @@ def absent(dbname, name,
|
||||
db_user=None, db_password=None,
|
||||
db_host=None, db_port=None):
|
||||
'''
|
||||
Ensure that the named schema is absent
|
||||
Ensure that the named schema is absent.
|
||||
|
||||
dbname
|
||||
The name of the database to work on
|
||||
|
@ -42,7 +42,7 @@ def calc(name, num, oper, minimum=0, maximum=0, ref=None):
|
||||
|
||||
USAGE:
|
||||
|
||||
code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
foo:
|
||||
reg.calc:
|
||||
@ -115,7 +115,7 @@ def add(name, num, minimum=0, maximum=0, ref=None):
|
||||
|
||||
USAGE:
|
||||
|
||||
code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
foo:
|
||||
reg.add:
|
||||
@ -131,7 +131,7 @@ def mul(name, num, minimum=0, maximum=0, ref=None):
|
||||
|
||||
USAGE:
|
||||
|
||||
code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
foo:
|
||||
reg.mul:
|
||||
@ -147,7 +147,7 @@ def mean(name, num, minimum=0, maximum=0, ref=None):
|
||||
|
||||
USAGE:
|
||||
|
||||
code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
foo:
|
||||
reg.mean:
|
||||
@ -163,7 +163,7 @@ def median(name, num, minimum=0, maximum=0, ref=None):
|
||||
|
||||
USAGE:
|
||||
|
||||
code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
foo:
|
||||
reg.median:
|
||||
@ -179,7 +179,7 @@ def median_low(name, num, minimum=0, maximum=0, ref=None):
|
||||
|
||||
USAGE:
|
||||
|
||||
code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
foo:
|
||||
reg.median_low:
|
||||
@ -195,7 +195,7 @@ def median_high(name, num, minimum=0, maximum=0, ref=None):
|
||||
|
||||
USAGE:
|
||||
|
||||
code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
foo:
|
||||
reg.median_high:
|
||||
@ -212,7 +212,7 @@ def median_grouped(name, num, minimum=0, maximum=0, ref=None):
|
||||
|
||||
USAGE:
|
||||
|
||||
code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
foo:
|
||||
reg.median_grouped:
|
||||
@ -228,7 +228,7 @@ def mode(name, num, minimum=0, maximum=0, ref=None):
|
||||
|
||||
USAGE:
|
||||
|
||||
code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
foo:
|
||||
reg.mode:
|
||||
|
@ -18,6 +18,8 @@ Then the file will be saved to:
|
||||
|
||||
You may also provide an absolute path for the file to be saved to:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
/tmp/foo.save:
|
||||
file.save
|
||||
|
||||
@ -26,6 +28,8 @@ If you are saving a register entry that contains a ``set()``, then it will fail
|
||||
to save to JSON format. However, you may pass data through a filter which makes
|
||||
it JSON compliant:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
foo:
|
||||
file.save:
|
||||
filter: True
|
||||
|
@ -28,7 +28,8 @@ def timeout(name, delete=0, reject=0):
|
||||
minions that have not checked in for 300 seconds (5 minutes)
|
||||
|
||||
USAGE:
|
||||
code-block:: yaml
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
statreg:
|
||||
status.reg
|
||||
|
@ -57,7 +57,7 @@ def list_(name, add, match, stamp=False, prune=0):
|
||||
|
||||
USAGE:
|
||||
|
||||
code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
foo:
|
||||
reg.list:
|
||||
@ -139,7 +139,7 @@ def clear(name):
|
||||
|
||||
USAGE:
|
||||
|
||||
code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
clearns:
|
||||
reg.clear:
|
||||
@ -160,7 +160,7 @@ def delete(name):
|
||||
|
||||
USAGE:
|
||||
|
||||
code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
deletens:
|
||||
reg.delete:
|
||||
|
@ -2607,13 +2607,16 @@ def delete_minion_cachedir(minion_id, provider, opts, base=None):
|
||||
all cachedirs to find the minion's cache file.
|
||||
Needs `update_cachedir` set to True.
|
||||
'''
|
||||
if opts.get('update_cachedir', False) is False:
|
||||
if isinstance(opts, dict):
|
||||
__opts__.update(opts)
|
||||
|
||||
if __opts__.get('update_cachedir', False) is False:
|
||||
return
|
||||
|
||||
if base is None:
|
||||
base = __opts__['cachedir']
|
||||
|
||||
driver = next(six.iterkeys(opts['providers'][provider]))
|
||||
driver = next(six.iterkeys(__opts__['providers'][provider]))
|
||||
fname = '{0}.p'.format(minion_id)
|
||||
for cachedir in 'requested', 'active':
|
||||
path = os.path.join(base, cachedir, driver, provider, fname)
|
||||
@ -2839,7 +2842,10 @@ def cache_node(node, provider, opts):
|
||||
|
||||
.. versionadded:: 2014.7.0
|
||||
'''
|
||||
if 'update_cachedir' not in opts or not opts['update_cachedir']:
|
||||
if isinstance(opts, dict):
|
||||
__opts__.update(opts)
|
||||
|
||||
if 'update_cachedir' not in __opts__ or not __opts__['update_cachedir']:
|
||||
return
|
||||
|
||||
if not os.path.exists(os.path.join(__opts__['cachedir'], 'active')):
|
||||
|
@ -687,7 +687,7 @@ class SaltEvent(object):
|
||||
is_msgpacked=True,
|
||||
use_bin_type=six.PY3
|
||||
)
|
||||
log.debug('Sending event - data = {0}'.format(data))
|
||||
log.debug('Sending event: tag = {0}; data = {1}'.format(tag, data))
|
||||
if six.PY2:
|
||||
event = '{0}{1}{2}'.format(tag, tagend, serialized_data)
|
||||
else:
|
||||
|
@ -1317,7 +1317,7 @@ class Pygit2(GitProvider):
|
||||
self.repo.config.set_multivar(
|
||||
'http.sslVerify',
|
||||
'',
|
||||
self.ssl_verify
|
||||
str(self.ssl_verify).lower()
|
||||
)
|
||||
except os.error:
|
||||
# This exception occurs when two processes are trying to write
|
||||
|
@ -1,6 +1,8 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Implements the ability to run processes as another user in Windows for salt
|
||||
Run processes as a different user in Windows
|
||||
|
||||
Based on a solution from http://stackoverflow.com/questions/29566330
|
||||
'''
|
||||
from __future__ import absolute_import
|
||||
|
||||
|
@ -114,13 +114,25 @@ def accept(match, include_rejected=False, include_denied=False):
|
||||
return skey.accept(match, include_rejected=include_rejected, include_denied=include_denied)
|
||||
|
||||
|
||||
def accept_dict(match):
|
||||
def accept_dict(match, include_rejected=False, include_denied=False):
|
||||
'''
|
||||
Accept keys based on a dict of keys. Returns a dictionary.
|
||||
|
||||
match
|
||||
The dictionary of keys to accept.
|
||||
|
||||
include_rejected
|
||||
To include rejected keys in the match along with pending keys, set this
|
||||
to ``True``. Defaults to ``False``.
|
||||
|
||||
.. versionadded:: 2016.3.4
|
||||
|
||||
include_denied
|
||||
To include denied keys in the match along with pending keys, set this
|
||||
to ``True``. Defaults to ``False``.
|
||||
|
||||
.. versionadded:: 2016.3.4
|
||||
|
||||
Example to move a list of keys from the ``minions_pre`` (pending) directory
|
||||
to the ``minions`` (accepted) directory:
|
||||
|
||||
@ -137,7 +149,9 @@ def accept_dict(match):
|
||||
{'minions': ['jerry', 'stuart', 'bob']}
|
||||
'''
|
||||
skey = get_key(__opts__)
|
||||
return skey.accept(match_dict=match)
|
||||
return skey.accept(match_dict=match,
|
||||
include_rejected=include_rejected,
|
||||
include_denied=include_denied)
|
||||
|
||||
|
||||
def delete(match):
|
||||
@@ -203,13 +217,25 @@ def reject(match, include_accepted=False, include_denied=False):
return skey.reject(match, include_accepted=include_accepted, include_denied=include_denied)


def reject_dict(match):
def reject_dict(match, include_accepted=False, include_denied=False):
'''
Reject keys based on a dict of keys. Returns a dictionary.

match
The dictionary of keys to reject.

include_accepted
To include accepted keys in the match along with pending keys, set this
to ``True``. Defaults to ``False``.

.. versionadded:: 2016.3.4

include_denied
To include denied keys in the match along with pending keys, set this
to ``True``. Defaults to ``False``.

.. versionadded:: 2016.3.4

.. code-block:: python

>>> wheel.cmd_async({'fun': 'key.reject_dict',
@@ -223,7 +249,9 @@ def reject_dict(match):
{'jid': '20160826201244808521', 'tag': 'salt/wheel/20160826201244808521'}
'''
skey = get_key(__opts__)
return skey.reject(match_dict=match)
return skey.reject(match_dict=match,
include_accepted=include_accepted,
include_denied=include_denied)


def key_str(match):
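A hedged asynchronous variant mirroring the wheel.cmd_async docstring example above. It assumes a configured master and the WheelClient API; a real call may additionally require eauth credentials in the low data.

import salt.config
import salt.wheel

opts = salt.config.client_config('/etc/salt/master')
wheel = salt.wheel.WheelClient(opts)
low = {'fun': 'key.reject_dict',
       'match': {'minions_pre': ['minion3', 'minion5']},
       'include_accepted': True}
print(wheel.cmd_async(low))   # e.g. {'jid': '...', 'tag': 'salt/wheel/...'}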
BIN
tests/integration/files/file/base/custom.tar.gz
Normal file
Binary file not shown.
42
tests/integration/grains/core.py
Normal file
@@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
'''
Test the core grains
'''

# Import python libs
from __future__ import absolute_import

# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath

ensure_in_syspath('../../')

# Import salt libs
import integration
import salt.utils
if salt.utils.is_windows():
    try:
        import salt.modules.reg
    except ImportError:
        pass


class TestGrainsCore(integration.ModuleCase):
    '''
    Test the core grains
    '''
    @skipIf(not salt.utils.is_windows(), 'Only run on Windows')
    def test_win_cpu_model(self):
        '''
        test grains['cpu_model']
        '''
        opts = self.minion_opts
        cpu_model_text = salt.modules.reg.read_value(
            "HKEY_LOCAL_MACHINE",
            "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
            "ProcessorNameString").get('vdata')
        self.assertEqual(
            self.run_function('grains.items')['cpu_model'],
            cpu_model_text
        )
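For comparison, the same registry value can be read on Windows with the standard library alone. This is not part of the test, just a cross-check for grains['cpu_model'].

import winreg

with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
                    r'HARDWARE\DESCRIPTION\System\CentralProcessor\0') as key:
    cpu_model, _ = winreg.QueryValueEx(key, 'ProcessorNameString')
print(cpu_model)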
@ -9,41 +9,29 @@ from __future__ import absolute_import, print_function
|
||||
# Import Salt Testing libs
|
||||
from salttesting import skipIf
|
||||
from salttesting.helpers import ensure_in_syspath, destructiveTest
|
||||
ensure_in_syspath('../../')
|
||||
|
||||
# Import salt libs
|
||||
import integration
|
||||
import salt.utils
|
||||
|
||||
ensure_in_syspath('../../')
|
||||
|
||||
|
||||
@skipIf(not salt.utils.is_darwin()
|
||||
or not salt.utils.which('systemsetup'
|
||||
or salt.utils.get_uid(salt.utils.get_user() != 0)), 'Test requirements not met')
|
||||
or not salt.utils.which('systemsetup')
|
||||
or salt.utils.get_uid(salt.utils.get_user()) != 0, 'Test requirements not met')
|
||||
class MacPowerModuleTest(integration.ModuleCase):
|
||||
'''
|
||||
Validate the mac_power module
|
||||
'''
|
||||
COMPUTER_SLEEP = 0
|
||||
DISPLAY_SLEEP = 0
|
||||
HARD_DISK_SLEEP = 0
|
||||
WAKE_ON_MODEM = False
|
||||
WAKE_ON_NET = False
|
||||
RESTART_POWER = False
|
||||
RESTART_FREEZE = False
|
||||
SLEEP_ON_BUTTON = False
|
||||
|
||||
def setUp(self):
|
||||
'''
|
||||
Get current settings
|
||||
'''
|
||||
# Get current settings
|
||||
self.COMPUTER_SLEEP = self.run_function('power.get_computer_sleep')
|
||||
self.DISPLAY_SLEEP = self.run_function('power.get_display_sleep')
|
||||
self.HARD_DISK_SLEEP = self.run_function('power.get_harddisk_sleep')
|
||||
self.WAKE_ON_MODEM = self.run_function('power.get_wake_on_modem')
|
||||
self.WAKE_ON_NET = self.run_function('power.get_wake_on_network')
|
||||
self.RESTART_POWER = self.run_function('power.get_restart_power_failure')
|
||||
self.RESTART_FREEZE = self.run_function('power.get_restart_freeze')
|
||||
self.SLEEP_ON_BUTTON = self.run_function('power.get_sleep_on_power_button')
|
||||
|
||||
def tearDown(self):
|
||||
'''
|
||||
@ -52,13 +40,6 @@ class MacPowerModuleTest(integration.ModuleCase):
|
||||
self.run_function('power.set_computer_sleep', [self.COMPUTER_SLEEP])
|
||||
self.run_function('power.set_display_sleep', [self.DISPLAY_SLEEP])
|
||||
self.run_function('power.set_harddisk_sleep', [self.HARD_DISK_SLEEP])
|
||||
self.run_function('power.set_wake_on_modem', [self.WAKE_ON_MODEM])
|
||||
self.run_function('power.set_wake_on_network', [self.WAKE_ON_NET])
|
||||
self.run_function('power.set_restart_power_failure',
|
||||
[self.RESTART_POWER])
|
||||
self.run_function('power.set_restart_freeze', [self.RESTART_FREEZE])
|
||||
self.run_function('power.set_sleep_on_power_button',
|
||||
[self.SLEEP_ON_BUTTON])
|
||||
|
||||
@destructiveTest
|
||||
def test_computer_sleep(self):
|
||||
@ -144,78 +125,217 @@ class MacPowerModuleTest(integration.ModuleCase):
|
||||
'Invalid Boolean Value for Minutes',
|
||||
self.run_function('power.set_harddisk_sleep', [True]))
|
||||
|
||||
def test_wake_on_modem(self):
|
||||
'''
|
||||
Test power.get_wake_on_modem
|
||||
Test power.set_wake_on_modem
|
||||
|
||||
Commands don't seem to be supported on el capitan. Perhaps it works on
|
||||
OS X Server or older versions
|
||||
'''
|
||||
self.assertTrue(self.run_function('power.set_wake_on_modem', ['on']))
|
||||
self.assertTrue(self.run_function('power.get_wake_on_modem'))
|
||||
self.assertTrue(self.run_function('power.set_wake_on_modem', ['off']))
|
||||
self.assertFalse(self.run_function('power.get_wake_on_modem'))
|
||||
|
||||
def test_wake_on_network(self):
|
||||
'''
|
||||
Test power.get_wake_on_network
|
||||
Test power.set_wake_on_network
|
||||
|
||||
Commands don't seem to be supported on el capitan. Perhaps it works on
|
||||
OS X Server or older versions
|
||||
'''
|
||||
self.assertTrue(self.run_function('power.set_wake_on_network', ['on']))
|
||||
self.assertTrue(self.run_function('power.get_wake_on_network'))
|
||||
self.assertTrue(self.run_function('power.set_wake_on_network', ['off']))
|
||||
self.assertFalse(self.run_function('power.get_wake_on_network'))
|
||||
|
||||
def test_restart_power_failure(self):
|
||||
'''
|
||||
Test power.get_restart_power_failure
|
||||
Test power.set_restart_power_failure
|
||||
|
||||
Commands don't seem to be supported on el capitan. Perhaps it works on
|
||||
OS X Server or older versions
|
||||
'''
|
||||
self.assertTrue(
|
||||
self.run_function('power.set_restart_power_failure', ['on']))
|
||||
self.assertTrue(self.run_function('power.get_restart_power_failure'))
|
||||
self.assertTrue(
|
||||
self.run_function('power.set_restart_power_failure', ['off']))
|
||||
self.assertFalse(self.run_function('power.get_restart_power_failure'))
|
||||
|
||||
def test_restart_freeze(self):
|
||||
'''
|
||||
Test power.get_restart_freeze
|
||||
Test power.set_restart_freeze
|
||||
|
||||
Though the set command completes successfully, the setting isn't
|
||||
actually changed
|
||||
'''
|
||||
# Normal Functionality
|
||||
self.assertTrue(self.run_function('power.set_restart_freeze', ['on']))
|
||||
self.assertTrue(self.run_function('power.get_restart_freeze'))
|
||||
self.assertTrue(self.run_function('power.set_restart_freeze', ['off']))
|
||||
self.assertFalse(self.run_function('power.get_restart_freeze'))
|
||||
# This will return False because mac fails to actually make the change
|
||||
self.assertFalse(
|
||||
self.run_function('power.set_restart_freeze', ['off']))
|
||||
# Even setting to off returns true, it actually is never set
|
||||
# This is an apple bug
|
||||
self.assertTrue(self.run_function('power.get_restart_freeze'))
|
||||
|
||||
|
||||
@skipIf(not salt.utils.is_darwin()
|
||||
or not salt.utils.which('systemsetup')
|
||||
or salt.utils.get_uid(salt.utils.get_user()) != 0, 'Test requirements not met')
|
||||
class MacPowerModuleTestSleepOnPowerButton(integration.ModuleCase):
|
||||
'''
|
||||
Test power.get_sleep_on_power_button
|
||||
Test power.set_sleep_on_power_button
|
||||
'''
|
||||
SLEEP_ON_BUTTON = None
|
||||
|
||||
def setUp(self):
|
||||
'''
|
||||
Check if function is available
|
||||
Get existing value
|
||||
'''
|
||||
# Is the function available
|
||||
ret = self.run_function('power.get_sleep_on_power_button')
|
||||
if isinstance(ret, bool):
|
||||
self.SLEEP_ON_BUTTON = self.run_function(
|
||||
'power.get_sleep_on_power_button')
|
||||
|
||||
def tearDown(self):
|
||||
'''
|
||||
Reset to original value
|
||||
'''
|
||||
if self.SLEEP_ON_BUTTON is not None:
|
||||
self.run_function(
|
||||
'power.set_sleep_on_power_button', [self.SLEEP_ON_BUTTON])
|
||||
|
||||
def test_sleep_on_power_button(self):
|
||||
'''
|
||||
Test power.get_sleep_on_power_button
|
||||
Test power.set_sleep_on_power_button
|
||||
|
||||
Commands don't seem to be supported on el capitan. Perhaps it works on
|
||||
OS X Server or older versions
|
||||
'''
|
||||
# Normal Functionality
|
||||
self.assertTrue(
|
||||
self.run_function('power.set_sleep_on_power_button', ['on']))
|
||||
self.assertTrue(self.run_function('power.get_sleep_on_power_button'))
|
||||
self.assertTrue(
|
||||
self.run_function('power.set_sleep_on_power_button', ['off']))
|
||||
self.assertFalse(self.run_function('power.get_sleep_on_power_button'))
|
||||
# If available on this system, test it
|
||||
if self.SLEEP_ON_BUTTON is None:
|
||||
# Check for not available
|
||||
ret = self.run_function('power.get_sleep_on_power_button')
|
||||
self.assertIn('Error', ret)
|
||||
else:
|
||||
self.assertTrue(
|
||||
self.run_function('power.set_sleep_on_power_button', ['on']))
|
||||
self.assertTrue(
|
||||
self.run_function('power.get_sleep_on_power_button'))
|
||||
self.assertTrue(
|
||||
self.run_function('power.set_sleep_on_power_button', ['off']))
|
||||
self.assertFalse(
|
||||
self.run_function('power.get_sleep_on_power_button'))
|
||||
|
||||
|
||||
@skipIf(not salt.utils.is_darwin()
|
||||
or not salt.utils.which('systemsetup')
|
||||
or salt.utils.get_uid(salt.utils.get_user()) != 0, 'Test requirements not met')
|
||||
class MacPowerModuleTestRestartPowerFailure(integration.ModuleCase):
|
||||
'''
|
||||
Test power.get_restart_power_failure
|
||||
Test power.set_restart_power_failure
|
||||
'''
|
||||
RESTART_POWER = None
|
||||
|
||||
def setUp(self):
|
||||
'''
|
||||
Check if function is available
|
||||
Get existing value
|
||||
'''
|
||||
# Is the function available
|
||||
ret = self.run_function('power.get_restart_power_failure')
|
||||
if isinstance(ret, bool):
|
||||
self.RESTART_POWER = ret
|
||||
|
||||
def tearDown(self):
|
||||
'''
|
||||
Reset to original value
|
||||
'''
|
||||
if self.RESTART_POWER is not None:
|
||||
self.run_function(
|
||||
'power.set_sleep_on_power_button', [self.SLEEP_ON_BUTTON])
|
||||
|
||||
def test_restart_power_failure(self):
|
||||
'''
|
||||
Test power.get_restart_power_failure
|
||||
Test power.set_restart_power_failure
|
||||
'''
|
||||
# If available on this system, test it
|
||||
if self.RESTART_POWER is None:
|
||||
# Check for not available
|
||||
ret = self.run_function('power.get_restart_power_failure')
|
||||
self.assertIn('Error', ret)
|
||||
else:
|
||||
self.assertTrue(
|
||||
self.run_function('power.set_restart_power_failure', ['on']))
|
||||
self.assertTrue(
|
||||
self.run_function('power.get_restart_power_failure'))
|
||||
self.assertTrue(
|
||||
self.run_function('power.set_restart_power_failure', ['off']))
|
||||
self.assertFalse(
|
||||
self.run_function('power.get_restart_power_failure'))
|
||||
|
||||
|
||||
@skipIf(not salt.utils.is_darwin()
|
||||
or not salt.utils.which('systemsetup')
|
||||
or salt.utils.get_uid(salt.utils.get_user()) != 0, 'Test requirements not met')
|
||||
class MacPowerModuleTestWakeOnNet(integration.ModuleCase):
|
||||
'''
|
||||
Test power.get_wake_on_network
|
||||
Test power.set_wake_on_network
|
||||
'''
|
||||
WAKE_ON_NET = None
|
||||
|
||||
def setUp(self):
|
||||
'''
|
||||
Check if function is available
|
||||
Get existing value
|
||||
'''
|
||||
# Is the function available
|
||||
ret = self.run_function('power.get_wake_on_network')
|
||||
if isinstance(ret, bool):
|
||||
self.WAKE_ON_NET = ret
|
||||
|
||||
def tearDown(self):
|
||||
'''
|
||||
Reset to original value
|
||||
'''
|
||||
if self.WAKE_ON_NET is not None:
|
||||
self.run_function('power.set_wake_on_network', [self.WAKE_ON_NET])
|
||||
|
||||
def test_wake_on_network(self):
|
||||
'''
|
||||
Test power.get_wake_on_network
|
||||
Test power.set_wake_on_network
|
||||
'''
|
||||
# If available on this system, test it
|
||||
if self.WAKE_ON_NET is None:
|
||||
# Check for not available
|
||||
ret = self.run_function('power.get_wake_on_network')
|
||||
self.assertIn('Error', ret)
|
||||
else:
|
||||
self.assertTrue(
|
||||
self.run_function('power.set_wake_on_network', ['on']))
|
||||
self.assertTrue(self.run_function('power.get_wake_on_network'))
|
||||
self.assertTrue(
|
||||
self.run_function('power.set_wake_on_network', ['off']))
|
||||
self.assertFalse(self.run_function('power.get_wake_on_network'))
|
||||
|
||||
|
||||
@skipIf(not salt.utils.is_darwin()
|
||||
or not salt.utils.which('systemsetup')
|
||||
or salt.utils.get_uid(salt.utils.get_user()) != 0, 'Test requirements not met')
|
||||
class MacPowerModuleTestWakeOnModem(integration.ModuleCase):
|
||||
'''
|
||||
Test power.get_wake_on_modem
|
||||
Test power.set_wake_on_modem
|
||||
'''
|
||||
WAKE_ON_MODEM = None
|
||||
|
||||
def setUp(self):
|
||||
'''
|
||||
Check if function is available
|
||||
Get existing value
|
||||
'''
|
||||
# Is the function available
|
||||
ret = self.run_function('power.get_wake_on_modem')
|
||||
if isinstance(ret, bool):
|
||||
self.WAKE_ON_MODEM = ret
|
||||
|
||||
def tearDown(self):
|
||||
'''
|
||||
Reset to original value
|
||||
'''
|
||||
if self.WAKE_ON_MODEM is not None:
|
||||
self.run_function('power.set_wake_on_modem', [self.WAKE_ON_MODEM])
|
||||
|
||||
def test_wake_on_modem(self):
|
||||
'''
|
||||
Test power.get_wake_on_modem
|
||||
Test power.set_wake_on_modem
|
||||
'''
|
||||
# If available on this system, test it
|
||||
if self.WAKE_ON_MODEM is None:
|
||||
# Check for not available
|
||||
ret = self.run_function('power.get_wake_on_modem')
|
||||
self.assertIn('Error', ret)
|
||||
else:
|
||||
self.assertTrue(
|
||||
self.run_function('power.set_wake_on_modem', ['on']))
|
||||
self.assertTrue(self.run_function('power.get_wake_on_modem'))
|
||||
self.assertTrue(
|
||||
self.run_function('power.set_wake_on_modem', ['off']))
|
||||
self.assertFalse(self.run_function('power.get_wake_on_modem'))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
from integration import run_tests
|
||||
run_tests(MacPowerModuleTest)
|
||||
run_tests(MacPowerModuleTest,
|
||||
MacPowerModuleTestSleepOnPowerButton,
|
||||
MacPowerModuleTestRestartPowerFailure,
|
||||
MacPowerModuleTestWakeOnNet,
|
||||
MacPowerModuleTestWakeOnModem)
|
||||
|
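The Mac power tests above all follow the same save-in-setUp, restore-in-tearDown pattern. Below is a minimal generic sketch of that fixture pattern using a stand-in get/set pair rather than the real power.* functions, which require macOS, systemsetup, and root.

import unittest

_SETTING = {'computer_sleep': 10}  # pretend system state for the sketch

def get_computer_sleep():
    return _SETTING['computer_sleep']

def set_computer_sleep(minutes):
    _SETTING['computer_sleep'] = minutes
    return True

class SaveRestoreExample(unittest.TestCase):
    def setUp(self):
        self.saved = get_computer_sleep()      # capture the current value

    def tearDown(self):
        set_computer_sleep(self.saved)         # always restore it

    def test_set_computer_sleep(self):
        self.assertTrue(set_computer_sleep(30))
        self.assertEqual(get_computer_sleep(), 30)

if __name__ == '__main__':
    unittest.main()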
@ -35,7 +35,7 @@ SET_SUBNET_NAME = __random_string()
|
||||
|
||||
@skipIf(not salt.utils.is_darwin()
|
||||
or not salt.utils.which('systemsetup')
|
||||
or salt.utils.get_uid(salt.utils.get_user() != 0), 'Test requirements not met')
|
||||
or salt.utils.get_uid(salt.utils.get_user()) != 0, 'Test requirements not met')
|
||||
class MacSystemModuleTest(integration.ModuleCase):
|
||||
'''
|
||||
Validate the mac_system module
|
||||
@ -168,6 +168,7 @@ class MacSystemModuleTest(integration.ModuleCase):
|
||||
'Invalid value passed for path.',
|
||||
self.run_function('system.set_startup_disk', ['spongebob']))
|
||||
|
||||
@skipIf(True, 'Skip this test until mac fixes it.')
|
||||
def test_get_set_restart_delay(self):
|
||||
'''
|
||||
Test system.get_restart_delay
|
||||
@ -184,7 +185,7 @@ class MacSystemModuleTest(integration.ModuleCase):
|
||||
# Pass set bad value for seconds
|
||||
self.assertIn(
|
||||
'Invalid value passed for seconds.',
|
||||
self.run_funcdtion('system.set_restart_delay', [70]))
|
||||
self.run_function('system.set_restart_delay', [70]))
|
||||
|
||||
def test_get_set_disable_keyboard_on_lock(self):
|
||||
'''
|
||||
@ -226,6 +227,7 @@ class MacSystemModuleTest(integration.ModuleCase):
|
||||
self.run_function('system.set_disable_keyboard_on_lock',
|
||||
['spongebob']))
|
||||
|
||||
@skipIf(True, 'Skip this test until mac fixes it.')
|
||||
def test_get_set_boot_arch(self):
|
||||
'''
|
||||
Test system.get_boot_arch
|
||||
|
@ -12,6 +12,8 @@ import signal
|
||||
import tempfile
|
||||
import textwrap
|
||||
import yaml
|
||||
import threading
|
||||
from salt.ext.six.moves import queue
|
||||
|
||||
# Import Salt Testing Libs
|
||||
from salttesting import skipIf
|
||||
@ -29,6 +31,14 @@ class StateRunnerTest(integration.ShellCase):
|
||||
'''
|
||||
Test the state runner.
|
||||
'''
|
||||
def add_to_queue(self, q, cmd):
|
||||
'''
|
||||
helper method to add salt-run
|
||||
return data to a queue
|
||||
'''
|
||||
ret = self.run_run(cmd)
|
||||
q.put(ret)
|
||||
q.task_done()
|
||||
|
||||
def test_orchestrate_output(self):
|
||||
'''
|
||||
@ -58,6 +68,26 @@ class StateRunnerTest(integration.ShellCase):
|
||||
for item in good_out:
|
||||
self.assertIn(item, ret_output)
|
||||
|
||||
def test_state_event(self):
|
||||
'''
|
||||
test to ensure state.event
|
||||
runner returns correct data
|
||||
'''
|
||||
q = queue.Queue(maxsize=0)
|
||||
|
||||
cmd = 'state.event salt/job/*/new count=1'
|
||||
expect = '"minions": ["minion"]'
|
||||
server_thread = threading.Thread(target=self.add_to_queue, args=(q, cmd))
|
||||
server_thread.setDaemon(True)
|
||||
server_thread.start()
|
||||
|
||||
while q.empty():
|
||||
self.run_salt('minion test.ping --static')
|
||||
out = q.get()
|
||||
self.assertIn(expect, str(out))
|
||||
|
||||
server_thread.join()
|
||||
|
||||
|
||||
@skipIf(salt.utils.is_windows(), '*NIX-only test')
|
||||
class OrchEventTest(integration.ShellCase):
|
||||
|
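test_state_event above captures runner output by pushing it onto a queue from a daemon thread while the main thread keeps the master busy. A generic, Salt-free sketch of that thread-plus-queue pattern:

import threading
import queue

def slow_call():
    return 'event payload'

def add_to_queue(q, fn):
    # run the blocking call and hand its result back via the queue
    q.put(fn())
    q.task_done()

q = queue.Queue()
worker = threading.Thread(target=add_to_queue, args=(q, slow_call))
worker.daemon = True
worker.start()

result = q.get()       # blocks until the worker publishes its result
print(result)
worker.join()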
110
tests/integration/states/archive.py
Normal file
110
tests/integration/states/archive.py
Normal file
@ -0,0 +1,110 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Tests for the archive state
|
||||
'''
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
import shutil
|
||||
import threading
|
||||
import tornado.ioloop
|
||||
import tornado.web
|
||||
|
||||
# Import Salt Testing libs
|
||||
from salttesting import TestCase
|
||||
from salttesting.helpers import ensure_in_syspath
|
||||
ensure_in_syspath('../../')
|
||||
|
||||
# Import salt libs
|
||||
import integration
|
||||
import salt.utils
|
||||
|
||||
STATE_DIR = os.path.join(integration.FILES, 'file', 'base')
|
||||
if salt.utils.is_windows():
|
||||
ARCHIVE_DIR = os.path.join("c:/", "tmp")
|
||||
else:
|
||||
ARCHIVE_DIR = '/tmp/archive/'
|
||||
|
||||
PORT = 9999
|
||||
ARCHIVE_TAR_SOURCE = 'http://localhost:{0}/custom.tar.gz'.format(PORT)
|
||||
UNTAR_FILE = ARCHIVE_DIR + 'custom/README'
|
||||
ARCHIVE_TAR_HASH = 'md5=7643861ac07c30fe7d2310e9f25ca514'
|
||||
STATE_DIR = os.path.join(integration.FILES, 'file', 'base')
|
||||
|
||||
|
||||
class SetupWebServer(TestCase):
|
||||
'''
|
||||
Setup and Teardown of Web Server
|
||||
Only need to set this up once not
|
||||
before all tests
|
||||
'''
|
||||
@classmethod
|
||||
def webserver(cls):
|
||||
'''
|
||||
method to start tornado
|
||||
static web app
|
||||
'''
|
||||
application = tornado.web.Application([(r"/(.*)", tornado.web.StaticFileHandler,
|
||||
{"path": STATE_DIR})])
|
||||
application.listen(PORT)
|
||||
tornado.ioloop.IOLoop.instance().start()
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.server_thread = threading.Thread(target=cls.webserver)
|
||||
cls.server_thread.daemon = True
|
||||
cls.server_thread.start()
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
tornado.ioloop.IOLoop.instance().stop()
|
||||
cls.server_thread.join()
|
||||
|
||||
|
||||
class ArchiveTest(SetupWebServer,
|
||||
integration.ModuleCase,
|
||||
integration.SaltReturnAssertsMixIn):
|
||||
'''
|
||||
Validate the archive state
|
||||
'''
|
||||
def _check_ext_remove(self, dir, file):
|
||||
'''
|
||||
function to check if file was extracted
|
||||
and remove the directory.
|
||||
'''
|
||||
# check to see if it extracted
|
||||
check_dir = os.path.isfile(file)
|
||||
self.assertTrue(check_dir)
|
||||
|
||||
# wipe away dir. Can't do this in teardown
|
||||
# because it needs to be wiped before each test
|
||||
shutil.rmtree(dir)
|
||||
|
||||
def test_archive_extracted_skip_verify(self):
|
||||
'''
|
||||
test archive.extracted with skip_verify
|
||||
'''
|
||||
ret = self.run_state('archive.extracted', name=ARCHIVE_DIR,
|
||||
source=ARCHIVE_TAR_SOURCE, archive_format='tar',
|
||||
skip_verify=True)
|
||||
self.assertSaltTrueReturn(ret)
|
||||
|
||||
self._check_ext_remove(ARCHIVE_DIR, UNTAR_FILE)
|
||||
|
||||
def test_archive_extracted_with_source_hash(self):
|
||||
'''
|
||||
test archive.extracted without skip_verify
|
||||
only external resources work to check to
|
||||
ensure source_hash is verified correctly
|
||||
'''
|
||||
ret = self.run_state('archive.extracted', name=ARCHIVE_DIR,
|
||||
source=ARCHIVE_TAR_SOURCE, archive_format='tar',
|
||||
source_hash=ARCHIVE_TAR_HASH)
|
||||
self.assertSaltTrueReturn(ret)
|
||||
|
||||
self._check_ext_remove(ARCHIVE_DIR, UNTAR_FILE)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
from integration import run_tests
|
||||
run_tests(ArchiveTest)
|
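The SetupWebServer fixture above serves test files over HTTP with Tornado. A standalone sketch of the same kind of static server, assuming the tornado package and serving the current directory on an arbitrary port:

import tornado.ioloop
import tornado.web

PORT = 9999

def main():
    app = tornado.web.Application(
        [(r'/(.*)', tornado.web.StaticFileHandler, {'path': '.'})])
    app.listen(PORT)
    tornado.ioloop.IOLoop.current().start()   # blocks; Ctrl-C to stop

if __name__ == '__main__':
    main()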
@@ -8,6 +8,7 @@

# Import Python libs
from __future__ import absolute_import

try:
import libcloud.security
HAS_LIBCLOUD = True
@@ -8,6 +8,7 @@

# Import Python libs
from __future__ import absolute_import

try:
import libcloud.security
HAS_LIBCLOUD = True
@@ -20,6 +20,8 @@ from salt.exceptions import SaltCloudSystemExit, SaltCloudNotFound
# Global Variables
opennebula.__active_provider_name__ = ''
opennebula.__opts__ = {}
opennebula.__utils__ = {}
opennebula.__utils__['cloud.cache_node'] = MagicMock()
VM_NAME = 'my-vm'

@@ -761,7 +763,6 @@ class OpenNebulaTestCase(TestCase):

@patch('salt.cloud.clouds.opennebula._get_node',
MagicMock(return_value={'my-vm': {'name': 'my-vm', 'id': 0}}))
@patch('salt.utils.cloud.cache_node', MagicMock())
def test_show_instance_success(self):
'''
Tests that the node was found successfully.
194
tests/unit/conf_test.py
Normal file
@ -0,0 +1,194 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Unit tests for the files in the salt/conf directory.
|
||||
'''
|
||||
|
||||
# Import Python libs
|
||||
from __future__ import absolute_import
|
||||
import os
|
||||
|
||||
# Import Salt Testing libs
|
||||
from salttesting import skipIf, TestCase
|
||||
from salttesting.helpers import ensure_in_syspath
|
||||
from salttesting.mock import (
|
||||
NO_MOCK,
|
||||
NO_MOCK_REASON,
|
||||
)
|
||||
|
||||
ensure_in_syspath('../')
|
||||
|
||||
# Import Salt libs
|
||||
import salt.config
|
||||
|
||||
SAMPLE_CONF_DIR = os.path.dirname(os.path.realpath(__file__)).split('tests')[0] + 'conf/'
|
||||
|
||||
|
||||
@skipIf(NO_MOCK, NO_MOCK_REASON)
|
||||
class ConfTest(TestCase):
|
||||
'''
|
||||
Validate files in the salt/conf directory.
|
||||
'''
|
||||
|
||||
def test_conf_master_sample_is_commented(self):
|
||||
'''
|
||||
The sample config file located in salt/conf/master must be completely
|
||||
commented out. This test checks for any lines that are not commented or blank.
|
||||
'''
|
||||
master_config = SAMPLE_CONF_DIR + 'master'
|
||||
ret = salt.config._read_conf_file(master_config)
|
||||
self.assertEqual(
|
||||
ret,
|
||||
{},
|
||||
'Sample config file \'{0}\' must be commented out.'.format(
|
||||
master_config
|
||||
)
|
||||
)
|
||||
|
||||
def test_conf_minion_sample_is_commented(self):
|
||||
'''
|
||||
The sample config file located in salt/conf/minion must be completely
|
||||
commented out. This test checks for any lines that are not commented or blank.
|
||||
'''
|
||||
minion_config = SAMPLE_CONF_DIR + 'minion'
|
||||
ret = salt.config._read_conf_file(minion_config)
|
||||
self.assertEqual(
|
||||
ret,
|
||||
{},
|
||||
'Sample config file \'{0}\' must be commented out.'.format(
|
||||
minion_config
|
||||
)
|
||||
)
|
||||
|
||||
def test_conf_cloud_sample_is_commented(self):
|
||||
'''
|
||||
The sample config file located in salt/conf/cloud must be completely
|
||||
commented out. This test checks for any lines that are not commented or blank.
|
||||
'''
|
||||
cloud_config = SAMPLE_CONF_DIR + 'cloud'
|
||||
ret = salt.config._read_conf_file(cloud_config)
|
||||
self.assertEqual(
|
||||
ret,
|
||||
{},
|
||||
'Sample config file \'{0}\' must be commented out.'.format(
|
||||
cloud_config
|
||||
)
|
||||
)
|
||||
|
||||
def test_conf_cloud_profiles_sample_is_commented(self):
|
||||
'''
|
||||
The sample config file located in salt/conf/cloud.profiles must be completely
|
||||
commented out. This test checks for any lines that are not commented or blank.
|
||||
'''
|
||||
cloud_profiles_config = SAMPLE_CONF_DIR + 'cloud.profiles'
|
||||
ret = salt.config._read_conf_file(cloud_profiles_config)
|
||||
self.assertEqual(
|
||||
ret,
|
||||
{},
|
||||
'Sample config file \'{0}\' must be commented out.'.format(
|
||||
cloud_profiles_config
|
||||
)
|
||||
)
|
||||
|
||||
def test_conf_cloud_providers_sample_is_commented(self):
|
||||
'''
|
||||
The sample config file located in salt/conf/cloud.providers must be completely
|
||||
commented out. This test checks for any lines that are not commented or blank.
|
||||
'''
|
||||
cloud_providers_config = SAMPLE_CONF_DIR + 'cloud.providers'
|
||||
ret = salt.config._read_conf_file(cloud_providers_config)
|
||||
self.assertEqual(
|
||||
ret,
|
||||
{},
|
||||
'Sample config file \'{0}\' must be commented out.'.format(
|
||||
cloud_providers_config
|
||||
)
|
||||
)
|
||||
|
||||
def test_conf_proxy_sample_is_commented(self):
|
||||
'''
|
||||
The sample config file located in salt/conf/proxy must be completely
|
||||
commented out. This test checks for any lines that are not commented or blank.
|
||||
'''
|
||||
proxy_config = SAMPLE_CONF_DIR + 'proxy'
|
||||
ret = salt.config._read_conf_file(proxy_config)
|
||||
self.assertEqual(
|
||||
ret,
|
||||
{},
|
||||
'Sample config file \'{0}\' must be commented out.'.format(
|
||||
proxy_config
|
||||
)
|
||||
)
|
||||
|
||||
def test_conf_roster_sample_is_commented(self):
|
||||
'''
|
||||
The sample config file located in salt/conf/roster must be completely
|
||||
commented out. This test checks for any lines that are not commented or blank.
|
||||
'''
|
||||
roster_config = SAMPLE_CONF_DIR + 'roster'
|
||||
ret = salt.config._read_conf_file(roster_config)
|
||||
self.assertEqual(
|
||||
ret,
|
||||
{},
|
||||
'Sample config file \'{0}\' must be commented out.'.format(
|
||||
roster_config
|
||||
)
|
||||
)
|
||||
|
||||
def test_conf_cloud_profiles_d_files_are_commented(self):
|
||||
'''
|
||||
All cloud profile sample configs in salt/conf/cloud.profiles.d/* must be completely
|
||||
commented out. This test loops through all of the files in that directory to check
|
||||
for any lines that are not commented or blank.
|
||||
'''
|
||||
cloud_sample_files = os.listdir(SAMPLE_CONF_DIR + 'cloud.profiles.d/')
|
||||
for conf_file in cloud_sample_files:
|
||||
profile_conf = SAMPLE_CONF_DIR + 'cloud.profiles.d/' + conf_file
|
||||
ret = salt.config._read_conf_file(profile_conf)
|
||||
self.assertEqual(
|
||||
ret,
|
||||
{},
|
||||
'Sample config file \'{0}\' must be commented out.'.format(
|
||||
conf_file
|
||||
)
|
||||
)
|
||||
|
||||
def test_conf_cloud_providers_d_files_are_commented(self):
|
||||
'''
|
||||
All cloud profile sample configs in salt/conf/cloud.providers.d/* must be completely
|
||||
commented out. This test loops through all of the files in that directory to check
|
||||
for any lines that are not commented or blank.
|
||||
'''
|
||||
cloud_sample_files = os.listdir(SAMPLE_CONF_DIR + 'cloud.providers.d/')
|
||||
for conf_file in cloud_sample_files:
|
||||
provider_conf = SAMPLE_CONF_DIR + 'cloud.providers.d/' + conf_file
|
||||
ret = salt.config._read_conf_file(provider_conf)
|
||||
self.assertEqual(
|
||||
ret,
|
||||
{},
|
||||
'Sample config file \'{0}\' must be commented out.'.format(
|
||||
conf_file
|
||||
)
|
||||
)
|
||||
|
||||
def test_conf_cloud_maps_d_files_are_commented(self):
|
||||
'''
|
||||
All cloud profile sample configs in salt/conf/cloud.maps.d/* must be completely
|
||||
commented out. This test loops through all of the files in that directory to check
|
||||
for any lines that are not commented or blank.
|
||||
'''
|
||||
cloud_sample_files = os.listdir(SAMPLE_CONF_DIR + 'cloud.maps.d/')
|
||||
for conf_file in cloud_sample_files:
|
||||
map_conf = SAMPLE_CONF_DIR + 'cloud.maps.d/' + conf_file
|
||||
ret = salt.config._read_conf_file(map_conf)
|
||||
self.assertEqual(
|
||||
ret,
|
||||
{},
|
||||
'Sample config file \'{0}\' must be commented out.'.format(
|
||||
conf_file
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
from integration import run_tests
|
||||
run_tests(ConfTest, needs_daemon=False)
|
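The checks above rely on salt.config._read_conf_file returning an empty dict for a fully commented-out sample file. A rough stdlib-plus-PyYAML equivalent of the same check; the conf/master path here is only an assumption.

import yaml

# Hypothetical path to a sample config file; adjust to your checkout layout
with open('conf/master') as fh:
    data = yaml.safe_load(fh)

# A fully commented file parses to nothing at all
assert data in (None, {}), 'Sample config file must be commented out'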
@ -5,12 +5,13 @@
|
||||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
import copy
|
||||
import os
|
||||
|
||||
# Import Salt Testing libs
|
||||
from salttesting import TestCase, skipIf
|
||||
from salttesting.helpers import ensure_in_syspath
|
||||
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, patch
|
||||
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock
|
||||
|
||||
# Import salt libs
|
||||
from salt import minion
|
||||
@ -52,6 +53,82 @@ class MinionTestCase(TestCase):
|
||||
result = False
|
||||
self.assertTrue(result)
|
||||
|
||||
# Tests for _handle_decoded_payload in the salt.minion.Minion() class: 3
|
||||
|
||||
def test_handle_decoded_payload_jid_match_in_jid_queue(self):
|
||||
'''
|
||||
Tests that the _handle_decoded_payload function returns when a jid is given that is already present
|
||||
in the jid_queue.
|
||||
|
||||
Note: This test doesn't contain all of the patch decorators above the function like the other tests
|
||||
for _handle_decoded_payload below. This is essential to this test as the call to the function must
|
||||
return None BEFORE any of the processes are spun up because we should be avoiding firing duplicate
|
||||
jobs.
|
||||
'''
|
||||
mock_opts = {'cachedir': '',
|
||||
'extension_modules': ''}
|
||||
mock_data = {'fun': 'foo.bar',
|
||||
'jid': 123}
|
||||
mock_jid_queue = [123]
|
||||
minion = salt.minion.Minion(mock_opts, jid_queue=copy.copy(mock_jid_queue))
|
||||
ret = minion._handle_decoded_payload(mock_data)
|
||||
self.assertEqual(minion.jid_queue, mock_jid_queue)
|
||||
self.assertIsNone(ret)
|
||||
|
||||
@patch('salt.minion.Minion.ctx', MagicMock(return_value={}))
|
||||
@patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True))
|
||||
@patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True))
|
||||
def test_handle_decoded_payload_jid_queue_addition(self):
|
||||
'''
|
||||
Tests that the _handle_decoded_payload function adds a jid to the minion's jid_queue when the new
|
||||
jid isn't already present in the jid_queue.
|
||||
'''
|
||||
mock_jid = 11111
|
||||
mock_opts = {'cachedir': '',
|
||||
'extension_modules': '',
|
||||
'minion_jid_queue_hwm': 100}
|
||||
mock_data = {'fun': 'foo.bar',
|
||||
'jid': mock_jid}
|
||||
mock_jid_queue = [123, 456]
|
||||
minion = salt.minion.Minion(mock_opts, jid_queue=copy.copy(mock_jid_queue))
|
||||
|
||||
# Assert that the minion's jid_queue attribute matches the mock_jid_queue as a baseline
|
||||
# This can help debug any test failures if the _handle_decoded_payload call fails.
|
||||
self.assertEqual(minion.jid_queue, mock_jid_queue)
|
||||
|
||||
# Call the _handle_decoded_payload function and update the mock_jid_queue to include the new
|
||||
# mock_jid. The mock_jid should have been added to the jid_queue since the mock_jid wasn't
|
||||
# previously included. The minion's jid_queue attribute and the mock_jid_queue should be equal.
|
||||
minion._handle_decoded_payload(mock_data)
|
||||
mock_jid_queue.append(mock_jid)
|
||||
self.assertEqual(minion.jid_queue, mock_jid_queue)
|
||||
|
||||
@patch('salt.minion.Minion.ctx', MagicMock(return_value={}))
|
||||
@patch('salt.utils.process.SignalHandlingMultiprocessingProcess.start', MagicMock(return_value=True))
|
||||
@patch('salt.utils.process.SignalHandlingMultiprocessingProcess.join', MagicMock(return_value=True))
|
||||
def test_handle_decoded_payload_jid_queue_reduced_minion_jid_queue_hwm(self):
|
||||
'''
|
||||
Tests that the _handle_decoded_payload function removes a jid from the minion's jid_queue when the
|
||||
minion's jid_queue high water mark (minion_jid_queue_hwm) is hit.
|
||||
'''
|
||||
mock_opts = {'cachedir': '',
|
||||
'extension_modules': '',
|
||||
'minion_jid_queue_hwm': 2}
|
||||
mock_data = {'fun': 'foo.bar',
|
||||
'jid': 789}
|
||||
mock_jid_queue = [123, 456]
|
||||
minion = salt.minion.Minion(mock_opts, jid_queue=copy.copy(mock_jid_queue))
|
||||
|
||||
# Assert that the minion's jid_queue attribute matches the mock_jid_queue as a baseline
|
||||
# This can help debug any test failures if the _handle_decoded_payload call fails.
|
||||
self.assertEqual(minion.jid_queue, mock_jid_queue)
|
||||
|
||||
# Call the _handle_decoded_payload function and check that the queue is smaller by one item
|
||||
# and contains the new jid
|
||||
minion._handle_decoded_payload(mock_data)
|
||||
self.assertEqual(len(minion.jid_queue), 2)
|
||||
self.assertEqual(minion.jid_queue, [456, 789])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
from integration import run_tests
|
||||
|
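An illustration (not Salt's code) of the jid_queue behaviour these minion tests pin down: a duplicate jid short-circuits, and the oldest jid is dropped once minion_jid_queue_hwm is exceeded.

def handle_jid(jid_queue, jid, hwm):
    if jid in jid_queue:
        return None                 # duplicate job: do nothing
    jid_queue.append(jid)
    if len(jid_queue) > hwm:
        jid_queue.pop(0)            # trim the oldest entry
    return jid

jid_queue = [123, 456]
handle_jid(jid_queue, 789, hwm=2)
print(jid_queue)                    # [456, 789], matching the test expectation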
@ -88,6 +88,33 @@ class ParallelsTestCase(TestCase):
|
||||
|
||||
self.assertEqual(parallels._find_guids(guid_str), guids)
|
||||
|
||||
def test_prlsrvctl(self):
|
||||
'''
|
||||
Test parallels.prlsrvctl
|
||||
'''
|
||||
runas = 'macdev'
|
||||
|
||||
# Validate 'prlsrvctl info'
|
||||
info_cmd = ['prlsrvctl', 'info']
|
||||
info_fcn = MagicMock()
|
||||
with patch.dict(parallels.__salt__, {'cmd.run': info_fcn}):
|
||||
parallels.prlsrvctl('info', runas=runas)
|
||||
info_fcn.assert_called_once_with(info_cmd, runas=runas)
|
||||
|
||||
# Validate 'prlsrvctl usb list'
|
||||
usb_cmd = ['prlsrvctl', 'usb', 'list']
|
||||
usb_fcn = MagicMock()
|
||||
with patch.dict(parallels.__salt__, {'cmd.run': usb_fcn}):
|
||||
parallels.prlsrvctl('usb', 'list', runas=runas)
|
||||
usb_fcn.assert_called_once_with(usb_cmd, runas=runas)
|
||||
|
||||
# Validate 'prlsrvctl set "--mem-limit auto"'
|
||||
set_cmd = ['prlsrvctl', 'set', '--mem-limit', 'auto']
|
||||
set_fcn = MagicMock()
|
||||
with patch.dict(parallels.__salt__, {'cmd.run': set_fcn}):
|
||||
parallels.prlsrvctl('set', '--mem-limit auto', runas=runas)
|
||||
set_fcn.assert_called_once_with(set_cmd, runas=runas)
|
||||
|
||||
def test_prlctl(self):
|
||||
'''
|
||||
Test parallels.prlctl
|
||||
@ -101,13 +128,20 @@ class ParallelsTestCase(TestCase):
|
||||
parallels.prlctl('user', 'list', runas=runas)
|
||||
user_fcn.assert_called_once_with(user_cmd, runas=runas)
|
||||
|
||||
# Validate 'prlctl exec macvm uname'
|
||||
# Validate 'prlctl exec "macvm uname"'
|
||||
exec_cmd = ['prlctl', 'exec', 'macvm', 'uname']
|
||||
exec_fcn = MagicMock()
|
||||
with patch.dict(parallels.__salt__, {'cmd.run': exec_fcn}):
|
||||
parallels.prlctl('exec', 'macvm uname', runas=runas)
|
||||
exec_fcn.assert_called_once_with(exec_cmd, runas=runas)
|
||||
|
||||
# Validate 'prlctl capture "macvm --file macvm.display.png"'
|
||||
cap_cmd = ['prlctl', 'capture', 'macvm', '--file', 'macvm.display.png']
|
||||
cap_fcn = MagicMock()
|
||||
with patch.dict(parallels.__salt__, {'cmd.run': cap_fcn}):
|
||||
parallels.prlctl('capture', 'macvm --file macvm.display.png', runas=runas)
|
||||
cap_fcn.assert_called_once_with(cap_cmd, runas=runas)
|
||||
|
||||
def test_list_vms(self):
|
||||
'''
|
||||
Test parallels.list_vms
|
||||
@ -125,9 +159,17 @@ class ParallelsTestCase(TestCase):
|
||||
with patch.object(parallels, 'prlctl', mock_name):
|
||||
parallels.list_vms(name='macvm', runas=runas)
|
||||
mock_name.assert_called_once_with('list',
|
||||
['--info', 'macvm'],
|
||||
['macvm'],
|
||||
runas=runas)
|
||||
|
||||
# Validate listing templates
|
||||
mock_templ = MagicMock()
|
||||
with patch.object(parallels, 'prlctl', mock_templ):
|
||||
parallels.list_vms(template=True, runas=runas)
|
||||
mock_templ.assert_called_once_with('list',
|
||||
['--template'],
|
||||
runas=runas)
|
||||
|
||||
# Validate listing extra info
|
||||
mock_info = MagicMock()
|
||||
with patch.object(parallels, 'prlctl', mock_info):
|
||||
@ -144,6 +186,87 @@ class ParallelsTestCase(TestCase):
|
||||
['-o', 'uuid,status', '--all'],
|
||||
runas=runas)
|
||||
|
||||
def test_clone(self):
|
||||
'''
|
||||
Test parallels.clone
|
||||
'''
|
||||
name = 'macvm'
|
||||
runas = 'macdev'
|
||||
|
||||
# Validate clone
|
||||
mock_clone = MagicMock()
|
||||
with patch.object(parallels, 'prlctl', mock_clone):
|
||||
parallels.clone(name, 'macvm_new', runas=runas)
|
||||
mock_clone.assert_called_once_with('clone',
|
||||
[name, '--name', 'macvm_new'],
|
||||
runas=runas)
|
||||
|
||||
# Validate linked clone
|
||||
mock_linked = MagicMock()
|
||||
with patch.object(parallels, 'prlctl', mock_linked):
|
||||
parallels.clone(name, 'macvm_link', linked=True, runas=runas)
|
||||
mock_linked.assert_called_once_with('clone',
|
||||
[name, '--name', 'macvm_link', '--linked'],
|
||||
runas=runas)
|
||||
|
||||
# Validate template clone
|
||||
mock_template = MagicMock()
|
||||
with patch.object(parallels, 'prlctl', mock_template):
|
||||
parallels.clone(name, 'macvm_templ', template=True, runas=runas)
|
||||
mock_template.assert_called_once_with('clone',
|
||||
[name, '--name', 'macvm_templ', '--template'],
|
||||
runas=runas)
|
||||
|
||||
def test_delete(self):
|
||||
'''
|
||||
Test parallels.delete
|
||||
'''
|
||||
name = 'macvm'
|
||||
runas = 'macdev'
|
||||
|
||||
# Validate delete
|
||||
mock_delete = MagicMock()
|
||||
with patch.object(parallels, 'prlctl', mock_delete):
|
||||
parallels.delete(name, runas=runas)
|
||||
mock_delete.assert_called_once_with('delete', name, runas=runas)
|
||||
|
||||
def test_exists(self):
|
||||
'''
|
||||
Test parallels.exists
|
||||
'''
|
||||
name = 'macvm'
|
||||
runas = 'macdev'
|
||||
|
||||
# Validate exists
|
||||
mock_list = MagicMock(return_value='Name: {0}\nState: running'.format(name))
|
||||
with patch.object(parallels, 'list_vms', mock_list):
|
||||
self.assertTrue(parallels.exists(name, runas=runas))
|
||||
|
||||
# Validate not exists
|
||||
mock_list = MagicMock(return_value='Name: {0}\nState: running'.format(name))
|
||||
with patch.object(parallels, 'list_vms', mock_list):
|
||||
self.assertFalse(parallels.exists('winvm', runas=runas))
|
||||
|
||||
def test_state(self):
|
||||
'''
|
||||
Test parallels.state
|
||||
'''
|
||||
name = 'macvm'
|
||||
runas = 'macdev'
|
||||
|
||||
# Validate state
|
||||
mock_list = MagicMock(return_value='Name: {0}\nState: cantering'.format(name))
|
||||
with patch.object(parallels, 'list_vms', mock_list):
|
||||
self.assertEqual(parallels.state(name, runas=runas), 'cantering')
|
||||
|
||||
# Validate cannot find state
|
||||
mock_list = MagicMock(return_value='Name: {0}\nFavorite Color: unknown'.format(name))
|
||||
mock_log_error = MagicMock()
|
||||
with patch.object(parallels, 'list_vms', mock_list):
|
||||
with patch.object(parallels.log, 'error', mock_log_error):
|
||||
self.assertEqual(parallels.state(name, runas=runas), '')
|
||||
mock_log_error.assert_called_once_with('Cannot find state of VM named {0}'.format(name))
|
||||
|
||||
def test_start(self):
|
||||
'''
|
||||
Test parallels.start
|
||||
|
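The expected call lists in the parallels tests above suggest the free-form argument string is shell-split onto the command list. A hedged sketch of that assembly, not the module's actual implementation:

import shlex

def build_cmd(binary, sub_cmd, args=''):
    # split the extra arguments the way a shell would and append them
    return [binary, sub_cmd] + shlex.split(args)

print(build_cmd('prlctl', 'exec', 'macvm uname'))
# ['prlctl', 'exec', 'macvm', 'uname']
print(build_cmd('prlsrvctl', 'set', '--mem-limit auto'))
# ['prlsrvctl', 'set', '--mem-limit', 'auto']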
67
tests/unit/output/json_out_test.py
Normal file
@@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
'''
unittests for json outputter
'''

# Import Python Libs
from __future__ import absolute_import

# Import Salt Testing Libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath

ensure_in_syspath('../../')

# Import Salt Libs
from salt.output import json_out as json


class JsonTestCase(TestCase):
    '''
    Test cases for salt.output.json_out
    '''
    def setUp(self):
        json.__opts__ = {}
        self.data = {'test': 'two', 'example': 'one'}

    def test_default_output(self):
        ret = json.output(self.data)
        expect = '{\n "test": "two", \n "example": "one"\n}'
        self.assertEqual(expect, ret)

    def test_pretty_output(self):
        json.__opts__['output_indent'] = 'pretty'
        ret = json.output(self.data)
        expect = '{\n "example": "one", \n "test": "two"\n}'
        self.assertEqual(expect, ret)

    def test_indent_output(self):
        json.__opts__['output_indent'] = 2
        expect = '{\n "test": "two", \n "example": "one"\n}'
        ret = json.output(self.data)
        self.assertEqual(expect, ret)

    def test_negative_zero_output(self):
        json.__opts__['output_indent'] = 0
        expect = '{\n"test": "two", \n"example": "one"\n}'
        ret = json.output(self.data)
        self.assertEqual(expect, ret)

    def test_negative_int_output(self):
        json.__opts__['output_indent'] = -1
        expect = '{"test": "two", "example": "one"}'
        ret = json.output(self.data)
        self.assertEqual(expect, ret)

    def test_unicode_output(self):
        json.__opts__['output_indent'] = 'pretty'
        data = {'test': '\xe1', 'example': 'one'}
        expect = ('{"message": "\'utf8\' codec can\'t decode byte 0xe1 in position 0: unexpected end of data", '
                  '"error": "Unable to serialize output to json"}')
        ret = json.output(data)
        self.assertEqual(expect, ret)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(JsonTestCase, needs_daemon=False)
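A rough standard-library illustration of the output_indent semantics these tests exercise; the outputter's own option handling and key ordering may differ.

import json

data = {'test': 'two', 'example': 'one'}
print(json.dumps(data, indent=4))                  # indented, like the default output above
print(json.dumps(data, indent=4, sort_keys=True))  # 'pretty' additionally sorts the keys
print(json.dumps(data, indent=0))                  # indent 0: newlines, no leading spaces
print(json.dumps(data))                            # single line, what a negative indent collapses to above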