Mirror of https://github.com/valitydev/salt.git (synced 2024-11-08 17:33:54 +00:00)

Merge branch 'develop' into 2747

commit f47686bedb
.ci/docs (5 changes)
@@ -1,8 +1,11 @@
 pipeline {
-    agent { label 'docs' }
+    agent {
+        label 'docs'
+    }
     options {
         timestamps()
         ansiColor('xterm')
+        timeout(time: 2, unit: 'HOURS')
     }
     environment {
         PYENV_ROOT = "/usr/local/pyenv"
@@ -1,73 +1,82 @@
-pipeline {
-    agent { label 'kitchen-slave' }
-    options {
-        timestamps()
-        ansiColor('xterm')
-    }
-    environment {
-        SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
-        SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
-        PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
-        RBENV_VERSION = "2.4.2"
-        TEST_SUITE = "py2"
-        TEST_PLATFORM = "centos-7"
-        PY_COLORS = 1
-    }
-    stages {
-        stage('github-pending') {
-            steps {
-                githubNotify credentialsId: 'test-jenkins-credentials',
-                             description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
-                             status: 'PENDING',
-                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-            }
-        }
-        stage('setup') {
-            steps {
-                sh 'bundle install --with ec2 windows --without opennebula docker'
-            }
-        }
-        stage('run kitchen') {
-            steps {
-                script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
-                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
-                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
-                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
-                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
-                    }
-                }}
-            }
-            post {
-                always {
-                    script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
-                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
-                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
-                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
-                        }
-                    }}
-                    archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
-                    archiveArtifacts artifacts: 'artifacts/logs/minion'
-                    archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
-                }
-            }
-        }
-    }
-    post {
-        always {
-            junit 'artifacts/xml-unittests-output/*.xml'
-            cleanWs()
-        }
-        success {
-            githubNotify credentialsId: 'test-jenkins-credentials',
-                         description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
-                         status: 'SUCCESS',
-                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-        }
-        failure {
-            githubNotify credentialsId: 'test-jenkins-credentials',
-                         description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
-                         status: 'FAILURE',
-                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-        }
-    }
-}
+timeout(time: 6, unit: 'HOURS') {
+    node('kitchen-slave') {
+        timestamps {
+            ansiColor('xterm') {
+                withEnv([
+                    'SALT_KITCHEN_PLATFORMS=/var/jenkins/workspace/platforms.yml',
+                    'SALT_KITCHEN_DRIVER=/var/jenkins/workspace/driver.yml',
+                    'PATH=/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin',
+                    'RBENV_VERSION=2.4.2',
+                    'TEST_SUITE=py2',
+                    'TEST_PLATFORM=centos-7',
+                    'PY_COLORS=1',
+                ]) {
+                    stage('checkout-scm') {
+                        cleanWs notFailBuild: true
+                        checkout scm
+                    }
+                    try {
+                        stage('github-pending') {
+                            githubNotify credentialsId: 'test-jenkins-credentials',
+                                         description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
+                                         status: 'PENDING',
+                                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                        }
+                        stage('setup-bundle') {
+                            sh 'bundle install --with ec2 windows --without opennebula docker'
+                        }
+                        try {
+                            stage('run kitchen') {
+                                withCredentials([
+                                    [$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
+                                ]) {
+                                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
+                                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
+                                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
+                                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
+                                    }
+                                }
+                            }
+                        } finally {
+                            stage('cleanup kitchen') {
+                                script {
+                                    withCredentials([
+                                        [$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
+                                    ]) {
+                                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
+                                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
+                                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
+                                        }
+                                    }
+                                }
+                            }
+                            archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
+                            archiveArtifacts artifacts: 'artifacts/logs/minion'
+                            archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
+                        }
+                    } catch (Exception e) {
+                        currentBuild.result = 'FAILURE'
+                    } finally {
+                        try {
+                            junit 'artifacts/xml-unittests-output/*.xml'
+                        } finally {
+                            cleanWs notFailBuild: true
+                            def currentResult = currentBuild.result ?: 'SUCCESS'
+                            if (currentResult == 'SUCCESS') {
+                                githubNotify credentialsId: 'test-jenkins-credentials',
+                                             description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
+                                             status: 'SUCCESS',
+                                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                            } else {
+                                githubNotify credentialsId: 'test-jenkins-credentials',
+                                             description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
+                                             status: 'FAILURE',
+                                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
@@ -1,73 +1,82 @@
-pipeline {
-    agent { label 'kitchen-slave' }
-    options {
-        timestamps()
-        ansiColor('xterm')
-    }
-    environment {
-        SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
-        SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
-        PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
-        RBENV_VERSION = "2.4.2"
-        TEST_SUITE = "py3"
-        TEST_PLATFORM = "centos-7"
-        PY_COLORS = 1
-    }
-    stages {
-        stage('github-pending') {
-            steps {
-                githubNotify credentialsId: 'test-jenkins-credentials',
-                             description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
-                             status: 'PENDING',
-                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-            }
-        }
-        stage('setup') {
-            steps {
-                sh 'bundle install --with ec2 windows --without opennebula docker'
-            }
-        }
-        stage('run kitchen') {
-            steps {
-                script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
-                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
-                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
-                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
-                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
-                    }
-                }}
-            }
-            post {
-                always {
-                    script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
-                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
-                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
-                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
-                        }
-                    }}
-                    archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
-                    archiveArtifacts artifacts: 'artifacts/logs/minion'
-                    archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
-                }
-            }
-        }
-    }
-    post {
-        always {
-            junit 'artifacts/xml-unittests-output/*.xml'
-            cleanWs()
-        }
-        success {
-            githubNotify credentialsId: 'test-jenkins-credentials',
-                         description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
-                         status: 'SUCCESS',
-                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-        }
-        failure {
-            githubNotify credentialsId: 'test-jenkins-credentials',
-                         description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
-                         status: 'FAILURE',
-                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-        }
-    }
-}
+timeout(time: 6, unit: 'HOURS') {
+    node('kitchen-slave') {
+        timestamps {
+            ansiColor('xterm') {
+                withEnv([
+                    'SALT_KITCHEN_PLATFORMS=/var/jenkins/workspace/platforms.yml',
+                    'SALT_KITCHEN_DRIVER=/var/jenkins/workspace/driver.yml',
+                    'PATH=/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin',
+                    'RBENV_VERSION=2.4.2',
+                    'TEST_SUITE=py3',
+                    'TEST_PLATFORM=centos-7',
+                    'PY_COLORS=1',
+                ]) {
+                    stage('checkout-scm') {
+                        cleanWs notFailBuild: true
+                        checkout scm
+                    }
+                    try {
+                        stage('github-pending') {
+                            githubNotify credentialsId: 'test-jenkins-credentials',
+                                         description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
+                                         status: 'PENDING',
+                                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                        }
+                        stage('setup-bundle') {
+                            sh 'bundle install --with ec2 windows --without opennebula docker'
+                        }
+                        try {
+                            stage('run kitchen') {
+                                withCredentials([
+                                    [$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
+                                ]) {
+                                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
+                                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
+                                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
+                                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
+                                    }
+                                }
+                            }
+                        } finally {
+                            stage('cleanup kitchen') {
+                                script {
+                                    withCredentials([
+                                        [$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
+                                    ]) {
+                                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
+                                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
+                                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
+                                        }
+                                    }
+                                }
+                            }
+                            archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
+                            archiveArtifacts artifacts: 'artifacts/logs/minion'
+                            archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
+                        }
+                    } catch (Exception e) {
+                        currentBuild.result = 'FAILURE'
+                    } finally {
+                        try {
+                            junit 'artifacts/xml-unittests-output/*.xml'
+                        } finally {
+                            cleanWs notFailBuild: true
+                            def currentResult = currentBuild.result ?: 'SUCCESS'
+                            if (currentResult == 'SUCCESS') {
+                                githubNotify credentialsId: 'test-jenkins-credentials',
+                                             description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
+                                             status: 'SUCCESS',
+                                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                            } else {
+                                githubNotify credentialsId: 'test-jenkins-credentials',
+                                             description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
+                                             status: 'FAILURE',
+                                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
@@ -1,73 +1,82 @@
-pipeline {
-    agent { label 'kitchen-slave' }
-    options {
-        timestamps()
-        ansiColor('xterm')
-    }
-    environment {
-        SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
-        SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
-        PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
-        RBENV_VERSION = "2.4.2"
-        TEST_SUITE = "py2"
-        TEST_PLATFORM = "ubuntu-1604"
-        PY_COLORS = 1
-    }
-    stages {
-        stage('github-pending') {
-            steps {
-                githubNotify credentialsId: 'test-jenkins-credentials',
-                             description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
-                             status: 'PENDING',
-                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-            }
-        }
-        stage('setup') {
-            steps {
-                sh 'bundle install --with ec2 windows --without opennebula docker'
-            }
-        }
-        stage('run kitchen') {
-            steps {
-                script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
-                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
-                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
-                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
-                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
-                    }
-                }}
-            }
-            post {
-                always {
-                    script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
-                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
-                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
-                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
-                        }
-                    }}
-                    archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
-                    archiveArtifacts artifacts: 'artifacts/logs/minion'
-                    archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
-                }
-            }
-        }
-    }
-    post {
-        always {
-            junit 'artifacts/xml-unittests-output/*.xml'
-            cleanWs()
-        }
-        success {
-            githubNotify credentialsId: 'test-jenkins-credentials',
-                         description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
-                         status: 'SUCCESS',
-                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-        }
-        failure {
-            githubNotify credentialsId: 'test-jenkins-credentials',
-                         description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
-                         status: 'FAILURE',
-                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-        }
-    }
-}
+timeout(time: 6, unit: 'HOURS') {
+    node('kitchen-slave') {
+        timestamps {
+            ansiColor('xterm') {
+                withEnv([
+                    'SALT_KITCHEN_PLATFORMS=/var/jenkins/workspace/platforms.yml',
+                    'SALT_KITCHEN_DRIVER=/var/jenkins/workspace/driver.yml',
+                    'PATH=/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin',
+                    'RBENV_VERSION=2.4.2',
+                    'TEST_SUITE=py2',
+                    'TEST_PLATFORM=ubuntu-1604',
+                    'PY_COLORS=1',
+                ]) {
+                    stage('checkout-scm') {
+                        cleanWs notFailBuild: true
+                        checkout scm
+                    }
+                    try {
+                        stage('github-pending') {
+                            githubNotify credentialsId: 'test-jenkins-credentials',
+                                         description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
+                                         status: 'PENDING',
+                                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                        }
+                        stage('setup-bundle') {
+                            sh 'bundle install --with ec2 windows --without opennebula docker'
+                        }
+                        try {
+                            stage('run kitchen') {
+                                withCredentials([
+                                    [$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
+                                ]) {
+                                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
+                                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
+                                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
+                                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
+                                    }
+                                }
+                            }
+                        } finally {
+                            stage('cleanup kitchen') {
+                                script {
+                                    withCredentials([
+                                        [$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
+                                    ]) {
+                                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
+                                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
+                                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
+                                        }
+                                    }
+                                }
+                            }
+                            archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
+                            archiveArtifacts artifacts: 'artifacts/logs/minion'
+                            archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
+                        }
+                    } catch (Exception e) {
+                        currentBuild.result = 'FAILURE'
+                    } finally {
+                        try {
+                            junit 'artifacts/xml-unittests-output/*.xml'
+                        } finally {
+                            cleanWs notFailBuild: true
+                            def currentResult = currentBuild.result ?: 'SUCCESS'
+                            if (currentResult == 'SUCCESS') {
+                                githubNotify credentialsId: 'test-jenkins-credentials',
+                                             description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
+                                             status: 'SUCCESS',
+                                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                            } else {
+                                githubNotify credentialsId: 'test-jenkins-credentials',
+                                             description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
+                                             status: 'FAILURE',
+                                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
@@ -1,73 +1,82 @@
-pipeline {
-    agent { label 'kitchen-slave' }
-    options {
-        timestamps()
-        ansiColor('xterm')
-    }
-    environment {
-        SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
-        SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
-        PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
-        RBENV_VERSION = "2.4.2"
-        TEST_SUITE = "py3"
-        TEST_PLATFORM = "ubuntu-1604"
-        PY_COLORS = 1
-    }
-    stages {
-        stage('github-pending') {
-            steps {
-                githubNotify credentialsId: 'test-jenkins-credentials',
-                             description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
-                             status: 'PENDING',
-                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-            }
-        }
-        stage('setup') {
-            steps {
-                sh 'bundle install --with ec2 windows --without opennebula docker'
-            }
-        }
-        stage('run kitchen') {
-            steps {
-                script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
-                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
-                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
-                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
-                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
-                    }
-                }}
-            }
-            post {
-                always {
-                    script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
-                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
-                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
-                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
-                        }
-                    }}
-                    archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
-                    archiveArtifacts artifacts: 'artifacts/logs/minion'
-                    archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
-                }
-            }
-        }
-    }
-    post {
-        always {
-            junit 'artifacts/xml-unittests-output/*.xml'
-            cleanWs()
-        }
-        success {
-            githubNotify credentialsId: 'test-jenkins-credentials',
-                         description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
-                         status: 'SUCCESS',
-                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-        }
-        failure {
-            githubNotify credentialsId: 'test-jenkins-credentials',
-                         description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
-                         status: 'FAILURE',
-                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-        }
-    }
-}
+timeout(time: 6, unit: 'HOURS') {
+    node('kitchen-slave') {
+        timestamps {
+            ansiColor('xterm') {
+                withEnv([
+                    'SALT_KITCHEN_PLATFORMS=/var/jenkins/workspace/platforms.yml',
+                    'SALT_KITCHEN_DRIVER=/var/jenkins/workspace/driver.yml',
+                    'PATH=/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin',
+                    'RBENV_VERSION=2.4.2',
+                    'TEST_SUITE=py3',
+                    'TEST_PLATFORM=ubuntu-1604',
+                    'PY_COLORS=1',
+                ]) {
+                    stage('checkout-scm') {
+                        cleanWs notFailBuild: true
+                        checkout scm
+                    }
+                    try {
+                        stage('github-pending') {
+                            githubNotify credentialsId: 'test-jenkins-credentials',
+                                         description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
+                                         status: 'PENDING',
+                                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                        }
+                        stage('setup-bundle') {
+                            sh 'bundle install --with ec2 windows --without opennebula docker'
+                        }
+                        try {
+                            stage('run kitchen') {
+                                withCredentials([
+                                    [$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
+                                ]) {
+                                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
+                                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
+                                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
+                                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
+                                    }
+                                }
+                            }
+                        } finally {
+                            stage('cleanup kitchen') {
+                                script {
+                                    withCredentials([
+                                        [$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
+                                    ]) {
+                                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
+                                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
+                                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
+                                        }
+                                    }
+                                }
+                            }
+                            archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
+                            archiveArtifacts artifacts: 'artifacts/logs/minion'
+                            archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
+                        }
+                    } catch (Exception e) {
+                        currentBuild.result = 'FAILURE'
+                    } finally {
+                        try {
+                            junit 'artifacts/xml-unittests-output/*.xml'
+                        } finally {
+                            cleanWs notFailBuild: true
+                            def currentResult = currentBuild.result ?: 'SUCCESS'
+                            if (currentResult == 'SUCCESS') {
+                                githubNotify credentialsId: 'test-jenkins-credentials',
+                                             description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
+                                             status: 'SUCCESS',
+                                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                            } else {
+                                githubNotify credentialsId: 'test-jenkins-credentials',
+                                             description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
+                                             status: 'FAILURE',
+                                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
@@ -1,73 +1,82 @@
-pipeline {
-    agent { label 'kitchen-slave' }
-    options {
-        timestamps()
-        ansiColor('xterm')
-    }
-    environment {
-        SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
-        SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
-        PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
-        RBENV_VERSION = "2.4.2"
-        TEST_SUITE = "py2"
-        TEST_PLATFORM = "windows-2016"
-        PY_COLORS = 1
-    }
-    stages {
-        stage('github-pending') {
-            steps {
-                githubNotify credentialsId: 'test-jenkins-credentials',
-                             description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
-                             status: 'PENDING',
-                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-            }
-        }
-        stage('setup') {
-            steps {
-                sh 'bundle install --with ec2 windows --without opennebula docker'
-            }
-        }
-        stage('run kitchen') {
-            steps {
-                script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
-                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
-                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
-                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
-                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
-                    }
-                }}
-            }
-            post {
-                always {
-                    script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
-                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
-                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
-                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
-                        }
-                    }}
-                    archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
-                    archiveArtifacts artifacts: 'artifacts/logs/minion'
-                    archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
-                }
-            }
-        }
-    }
-    post {
-        always {
-            junit 'artifacts/xml-unittests-output/*.xml'
-            cleanWs()
-        }
-        success {
-            githubNotify credentialsId: 'test-jenkins-credentials',
-                         description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
-                         status: 'SUCCESS',
-                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-        }
-        failure {
-            githubNotify credentialsId: 'test-jenkins-credentials',
-                         description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
-                         status: 'FAILURE',
-                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-        }
-    }
-}
+timeout(time: 6, unit: 'HOURS') {
+    node('kitchen-slave') {
+        timestamps {
+            ansiColor('xterm') {
+                withEnv([
+                    'SALT_KITCHEN_PLATFORMS=/var/jenkins/workspace/platforms.yml',
+                    'SALT_KITCHEN_DRIVER=/var/jenkins/workspace/driver.yml',
+                    'PATH=/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin',
+                    'RBENV_VERSION=2.4.2',
+                    'TEST_SUITE=py2',
+                    'TEST_PLATFORM=windows-2016',
+                    'PY_COLORS=1',
+                ]) {
+                    stage('checkout-scm') {
+                        cleanWs notFailBuild: true
+                        checkout scm
+                    }
+                    try {
+                        stage('github-pending') {
+                            githubNotify credentialsId: 'test-jenkins-credentials',
+                                         description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
+                                         status: 'PENDING',
+                                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                        }
+                        stage('setup-bundle') {
+                            sh 'bundle install --with ec2 windows --without opennebula docker'
+                        }
+                        try {
+                            stage('run kitchen') {
+                                withCredentials([
+                                    [$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
+                                ]) {
+                                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
+                                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
+                                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
+                                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
+                                    }
+                                }
+                            }
+                        } finally {
+                            stage('cleanup kitchen') {
+                                script {
+                                    withCredentials([
+                                        [$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
+                                    ]) {
+                                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
+                                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
+                                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
+                                        }
+                                    }
+                                }
+                            }
+                            archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
+                            archiveArtifacts artifacts: 'artifacts/logs/minion'
+                            archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
+                        }
+                    } catch (Exception e) {
+                        currentBuild.result = 'FAILURE'
+                    } finally {
+                        try {
+                            junit 'artifacts/xml-unittests-output/*.xml'
+                        } finally {
+                            cleanWs notFailBuild: true
+                            def currentResult = currentBuild.result ?: 'SUCCESS'
+                            if (currentResult == 'SUCCESS') {
+                                githubNotify credentialsId: 'test-jenkins-credentials',
+                                             description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
+                                             status: 'SUCCESS',
+                                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                            } else {
+                                githubNotify credentialsId: 'test-jenkins-credentials',
+                                             description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
+                                             status: 'FAILURE',
+                                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
@@ -1,73 +1,82 @@
-pipeline {
-    agent { label 'kitchen-slave' }
-    options {
-        timestamps()
-        ansiColor('xterm')
-    }
-    environment {
-        SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
-        SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
-        PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
-        RBENV_VERSION = "2.4.2"
-        TEST_SUITE = "py3"
-        TEST_PLATFORM = "windows-2016"
-        PY_COLORS = 1
-    }
-    stages {
-        stage('github-pending') {
-            steps {
-                githubNotify credentialsId: 'test-jenkins-credentials',
-                             description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
-                             status: 'PENDING',
-                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-            }
-        }
-        stage('setup') {
-            steps {
-                sh 'bundle install --with ec2 windows --without opennebula docker'
-            }
-        }
-        stage('run kitchen') {
-            steps {
-                script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
-                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
-                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
-                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
-                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
-                    }
-                }}
-            }
-            post {
-                always {
-                    script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
-                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
-                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
-                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
-                        }
-                    }}
-                    archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
-                    archiveArtifacts artifacts: 'artifacts/logs/minion'
-                    archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
-                }
-            }
-        }
-    }
-    post {
-        always {
-            junit 'artifacts/xml-unittests-output/*.xml'
-            cleanWs()
-        }
-        success {
-            githubNotify credentialsId: 'test-jenkins-credentials',
-                         description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
-                         status: 'SUCCESS',
-                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-        }
-        failure {
-            githubNotify credentialsId: 'test-jenkins-credentials',
-                         description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
-                         status: 'FAILURE',
-                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
-        }
-    }
-}
+timeout(time: 6, unit: 'HOURS') {
+    node('kitchen-slave') {
+        timestamps {
+            ansiColor('xterm') {
+                withEnv([
+                    'SALT_KITCHEN_PLATFORMS=/var/jenkins/workspace/platforms.yml',
+                    'SALT_KITCHEN_DRIVER=/var/jenkins/workspace/driver.yml',
+                    'PATH=/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin',
+                    'RBENV_VERSION=2.4.2',
+                    'TEST_SUITE=py3',
+                    'TEST_PLATFORM=windows-2016',
+                    'PY_COLORS=1',
+                ]) {
+                    stage('checkout-scm') {
+                        cleanWs notFailBuild: true
+                        checkout scm
+                    }
+                    try {
+                        stage('github-pending') {
+                            githubNotify credentialsId: 'test-jenkins-credentials',
+                                         description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
+                                         status: 'PENDING',
+                                         context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                        }
+                        stage('setup-bundle') {
+                            sh 'bundle install --with ec2 windows --without opennebula docker'
+                        }
+                        try {
+                            stage('run kitchen') {
+                                withCredentials([
+                                    [$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
+                                ]) {
+                                    sshagent(credentials: ['jenkins-testing-ssh-key']) {
+                                        sh 'ssh-add ~/.ssh/jenkins-testing.pem'
+                                        sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
+                                        sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
+                                    }
+                                }
+                            }
+                        } finally {
+                            stage('cleanup kitchen') {
+                                script {
+                                    withCredentials([
+                                        [$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']
+                                    ]) {
+                                        sshagent(credentials: ['jenkins-testing-ssh-key']) {
+                                            sh 'ssh-add ~/.ssh/jenkins-testing.pem'
+                                            sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
+                                        }
+                                    }
+                                }
+                            }
+                            archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
+                            archiveArtifacts artifacts: 'artifacts/logs/minion'
+                            archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
+                        }
+                    } catch (Exception e) {
+                        currentBuild.result = 'FAILURE'
+                    } finally {
+                        try {
+                            junit 'artifacts/xml-unittests-output/*.xml'
+                        } finally {
+                            cleanWs notFailBuild: true
+                            def currentResult = currentBuild.result ?: 'SUCCESS'
+                            if (currentResult == 'SUCCESS') {
+                                githubNotify credentialsId: 'test-jenkins-credentials',
+                                             description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
+                                             status: 'SUCCESS',
+                                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                            } else {
+                                githubNotify credentialsId: 'test-jenkins-credentials',
+                                             description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
+                                             status: 'FAILURE',
+                                             context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
.ci/lint (1 change)
@@ -3,6 +3,7 @@ pipeline {
     options {
         timestamps()
         ansiColor('xterm')
+        timeout(time: 1, unit: 'HOURS')
     }
     environment {
         PYENV_ROOT = "/usr/local/pyenv"
.github/stale.yml (vendored, 4 changes)
@@ -1,8 +1,8 @@
 # Probot Stale configuration file

 # Number of days of inactivity before an issue becomes stale
-# 550 is approximately 1 year and 6 months
-daysUntilStale: 550
+# 540 is approximately 1 year and 6 months
+daysUntilStale: 540

 # Number of days of inactivity before a stale issue is closed
 daysUntilClose: 7
@@ -5743,8 +5743,8 @@ Default: False
 .sp
 Turning on the master stats enables runtime throughput and statistics events
 to be fired from the master event bus. These events will report on what
-functions have been run on the master and how long these runs have, on
-average, taken over a given period of time.
+functions have been run on the master along with their average latency and
+duration, taken over a given period of time.
 .SS \fBmaster_stats_event_iter\fP
 .sp
 Default: 60
@@ -887,8 +887,8 @@ Default: False

 Turning on the master stats enables runtime throughput and statistics events
 to be fired from the master event bus. These events will report on what
-functions have been run on the master and how long these runs have, on
-average, taken over a given period of time.
+functions have been run on the master along with their average latency and
+duration, taken over a given period of time.

 .. conf_master:: master_stats_event_iter

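For context, the options described in the two hunks above would be enabled in the master configuration roughly like this (a minimal sketch; the option names and defaults are taken from the surrounding text):

    master_stats: True
    # Interval, in seconds, between master stats events (default: 60)
    master_stats_event_iter: 60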
@@ -752,6 +752,30 @@ Statically assigns grains to the minion.
       cabinet: 13
       cab_u: 14-15

+.. conf_minion:: grains_blacklist
+
+``grains_blacklist``
+--------------------
+
+Default: ``[]``
+
+Each grains key will be compared against each of the expressions in this list.
+Any keys which match will be filtered from the grains. Exact matches, glob
+matches, and regular expressions are supported.
+
+.. note::
+    Some states and execution modules depend on grains. Filtering may cause
+    them to be unavailable or run unreliably.
+
+.. versionadded:: Neon
+
+.. code-block:: yaml
+
+    grains_blacklist:
+      - cpu_flags
+      - zmq*
+      - ipv[46]
+
 .. conf_minion:: grains_cache

 ``grains_cache``
@@ -259,10 +259,6 @@ A State Module must return a dict containing the following keys/values:
   Prefer to keep line lengths short (use multiple lines as needed),
   and end with punctuation (e.g. a period) to delimit multiple comments.

-The return data can also, include the **pchanges** key, this stands for
-`predictive changes`. The **pchanges** key informs the State system what
-changes are predicted to occur.
-
 .. note::

     States should not return data which cannot be serialized such as frozensets.
@@ -448,7 +444,6 @@ Example state module
            'changes': {},
            'result': False,
            'comment': '',
-           'pchanges': {},
        }

        # Start with basic error-checking. Do all the passed parameters make sense
@@ -469,7 +464,7 @@ Example state module
        # in ``test=true`` mode.
        if __opts__['test'] == True:
            ret['comment'] = 'The state of "{0}" will be changed.'.format(name)
-           ret['pchanges'] = {
+           ret['changes'] = {
                'old': current_state,
                'new': 'Description, diff, whatever of the new state',
            }
@@ -4,7 +4,7 @@
 Matchers
 ========

-.. versionadded:: Flourine
+.. versionadded:: Neon

 Matchers are modules that provide Salt's targeting abilities. As of the
 Flourine release, matchers can be dynamically loaded. Currently new matchers
@@ -165,6 +165,31 @@ New output:
       0


+State Changes
+=============
+
+- The :py:func:`file.rename <salt.states.file.rename>` state will now return a
+  ``True`` result (and make no changes) when the destination file already
+  exists, and ``Force`` is not set to ``True``. In previous releases, a
+  ``False`` result would be returned, but this meant that subsequent runs of
+  the state would fail due to the destination file being present.
+
+- The ``onchanges`` and ``prereq`` :ref:`requisites <requisites>` now behave
+  properly in test mode.
+
+Module Changes
+==============
+
+- The :py:func:`debian_ip <salt.modules.debian_ip>` module used by the
+  :py:func:`network.managed <salt.states.network.managed>` state has been
+  heavily refactored. The order that options appear in inet/inet6 blocks may
+  produce cosmetic changes. Many options without an 'ipvX' prefix will now be
+  shared between inet and inet6 blocks. The options ``enable_ipv4`` and
+  ``enabled_ipv6`` will now fully remove relevant inet/inet6 blocks. Overriding
+  options by prefixing them with 'ipvX' will now work with most options (i.e.
+  ``dns`` can be overriden by ``ipv4dns`` or ``ipv6dns``). The ``proto`` option
+  is now required.
+
 Salt Cloud Features
 ===================

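For illustration, a minimal hypothetical state exercising the file.rename behavior change described in the hunk above (the state id and paths are invented):

    rename_old_config:
      file.rename:
        - name: /etc/myapp/config.bak
        - source: /etc/myapp/config

If /etc/myapp/config.bak already exists and force is not set to True, the state now returns a True result and makes no changes, rather than failing on every subsequent run.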
@@ -212,3 +237,13 @@ Module Deprecations
 - Support for the ``ssh.recv_known_host`` function has been removed. Please use the
   :py:func:`ssh.recv_known_host_entries <salt.modules.ssh.recv_known_host_entries>`
   function instead.
+
+State Deprecations
+------------------
+
+- The :py:mod:`win_servermanager <salt.states.win_servermanager>` state has been
+  changed as follows:
+
+  - Support for the ``force`` kwarg has been removed from the
+    :py:func:`win_servermanager.installed <salt.statues.win_servermanager.installed>`
+    function. Please use ``recurse`` instead.
@@ -23,7 +23,7 @@ Code Names
 To distinguish future releases from the current release, code names are used.
 The periodic table is used to derive the next codename. The first release in
 the date based system was code named ``Hydrogen``, each subsequent release will
-go to the next `atomic number <https://en.wikipedia.org/wiki/List_of_elements>`.
+go to the next `atomic number <https://en.wikipedia.org/wiki/List_of_elements>`_.

 Assigned codenames:
@@ -36,6 +36,8 @@ Assigned codenames:
 - Nitrogen: ``2017.7.0``
 - Oxygen: ``2018.3.0``
 - Fluorine: ``TBD``
+- Neon: ``TBD``
+- Sodium: ``TBD``

 Example
 -------
salt/cli/support/__init__.py (new file, 65 lines)
@@ -0,0 +1,65 @@
# coding=utf-8
'''
Get default scenario of the support.
'''
from __future__ import print_function, unicode_literals, absolute_import
import yaml
import os
import salt.exceptions
import jinja2
import logging

log = logging.getLogger(__name__)


def _render_profile(path, caller, runner):
    '''
    Render profile as Jinja2.
    :param path:
    :return:
    '''
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(path)), trim_blocks=False)
    return env.get_template(os.path.basename(path)).render(salt=caller, runners=runner).strip()


def get_profile(profile, caller, runner):
    '''
    Get profile.

    :param profile:
    :return:
    '''
    profiles = profile.split(',')
    data = {}
    for profile in profiles:
        if os.path.basename(profile) == profile:
            profile = profile.split('.')[0]  # Trim extension if someone added it
            profile_path = os.path.join(os.path.dirname(__file__), 'profiles', profile + '.yml')
        else:
            profile_path = profile
        if os.path.exists(profile_path):
            try:
                rendered_template = _render_profile(profile_path, caller, runner)
                log.trace('\n{d}\n{t}\n{d}\n'.format(d='-' * 80, t=rendered_template))
                data.update(yaml.load(rendered_template))
            except Exception as ex:
                log.debug(ex, exc_info=True)
                raise salt.exceptions.SaltException('Rendering profile failed: {}'.format(ex))
        else:
            raise salt.exceptions.SaltException('Profile "{}" is not found.'.format(profile))

    return data


def get_profiles(config):
    '''
    Get available profiles.

    :return:
    '''
    profiles = []
    for profile_name in os.listdir(os.path.join(os.path.dirname(__file__), 'profiles')):
        if profile_name.endswith('.yml'):
            profiles.append(profile_name.split('.')[0])

    return sorted(profiles)
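For orientation, a sketch of what a support profile consumed by get_profile() might look like, inferred from the action-parsing code in collector.py below (the section/info/output keys and the 'run:' prefix for runner-type actions are taken from that code; the section and function names here are purely illustrative):

    network:
      - network.netstat:
          info: Network connections
          output: table
      - run:jobs.list_jobs:
          info: Recent jobs

Each top-level key becomes a section in the resulting archive; the whole profile file is first rendered as a Jinja template (_render_profile) and then loaded as YAML.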
salt/cli/support/collector.py (new file, 495 lines; listing ends where the page is truncated)
@@ -0,0 +1,495 @@
# coding=utf-8
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import copy
import yaml
import json
import logging
import tarfile
import time
import salt.ext.six as six

if six.PY2:
    import exceptions
else:
    import builtins as exceptions
    from io import IOBase as file

from io import BytesIO

import salt.utils.stringutils
import salt.utils.parsers
import salt.utils.verify
import salt.utils.platform
import salt.utils.process
import salt.exceptions
import salt.defaults.exitcodes
import salt.cli.caller
import salt.cli.support
import salt.cli.support.console
import salt.cli.support.intfunc
import salt.cli.support.localrunner
import salt.output.table_out
import salt.runner
import salt.utils.files


salt.output.table_out.__opts__ = {}
log = logging.getLogger(__name__)


class SupportDataCollector(object):
    '''
    Data collector. It behaves just like another outputter,
    except it grabs the data to the archive files.
    '''
    def __init__(self, name, output):
        '''
        constructor of the data collector
        :param name:
        :param path:
        :param format:
        '''
        self.archive_path = name
        self.__default_outputter = output
        self.__format = format
        self.__arch = None
        self.__current_section = None
        self.__current_section_name = None
        self.__default_root = time.strftime('%Y.%m.%d-%H.%M.%S-snapshot')
        self.out = salt.cli.support.console.MessagesOutput()

    def open(self):
        '''
        Opens archive.
        :return:
        '''
        if self.__arch is not None:
            raise salt.exceptions.SaltException('Archive already opened.')
        self.__arch = tarfile.TarFile.bz2open(self.archive_path, 'w')

    def close(self):
        '''
        Closes the archive.
        :return:
        '''
        if self.__arch is None:
            raise salt.exceptions.SaltException('Archive already closed')
        self._flush_content()
        self.__arch.close()
        self.__arch = None

    def _flush_content(self):
        '''
        Flush content to the archive
        :return:
        '''
        if self.__current_section is not None:
            buff = BytesIO()
            buff._dirty = False
            for action_return in self.__current_section:
                for title, ret_data in action_return.items():
                    if isinstance(ret_data, file):
                        self.out.put(ret_data.name, indent=4)
                        self.__arch.add(ret_data.name, arcname=ret_data.name)
                    else:
                        buff.write(salt.utils.stringutils.to_bytes(title + '\n'))
                        buff.write(salt.utils.stringutils.to_bytes(('-' * len(title)) + '\n\n'))
                        buff.write(salt.utils.stringutils.to_bytes(ret_data))
                        buff.write(salt.utils.stringutils.to_bytes('\n\n\n'))
                        buff._dirty = True
            if buff._dirty:
                buff.seek(0)
                tar_info = tarfile.TarInfo(name="{}/{}".format(self.__default_root, self.__current_section_name))
                if not hasattr(buff, 'getbuffer'):  # Py2's BytesIO is older
                    buff.getbuffer = buff.getvalue
                tar_info.size = len(buff.getbuffer())
                self.__arch.addfile(tarinfo=tar_info, fileobj=buff)

    def add(self, name):
        '''
        Start a new section.
        :param name:
        :return:
        '''
        if self.__current_section:
            self._flush_content()
        self.discard_current(name)

    def discard_current(self, name=None):
        '''
        Discard current section
        :return:
        '''
        self.__current_section = []
        self.__current_section_name = name

    def write(self, title, data, output=None):
        '''
        Add a data to the current opened section.
        :return:
        '''
        if not isinstance(data, (dict, list, tuple)):
            data = {'raw-content': str(data)}
        output = output or self.__default_outputter

        if output != 'null':
            try:
                if isinstance(data, dict) and 'return' in data:
                    data = data['return']
                content = salt.output.try_printout(data, output, {'extension_modules': '', 'color': False})
            except Exception:  # Fall-back to just raw YAML
                content = None
        else:
            content = None

        if content is None:
            data = json.loads(json.dumps(data))
            if isinstance(data, dict) and data.get('return'):
                data = data.get('return')
            content = yaml.safe_dump(data, default_flow_style=False, indent=4)

        self.__current_section.append({title: content})

    def link(self, title, path):
        '''
        Add a static file on the file system.

        :param title:
        :param path:
        :return:
        '''
        # The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
        # pylint: disable=W8470
        if not isinstance(path, file):
            path = salt.utils.files.fopen(path)
        self.__current_section.append({title: path})
        # pylint: enable=W8470


class SaltSupport(salt.utils.parsers.SaltSupportOptionParser):
    '''
    Class to run Salt Support subsystem.
    '''
    RUNNER_TYPE = 'run'
    CALL_TYPE = 'call'

    def _setup_fun_config(self, fun_conf):
        '''
        Setup function configuration.

        :param conf:
        :return:
        '''
        conf = copy.deepcopy(self.config)
        conf['file_client'] = 'local'
        conf['fun'] = ''
        conf['arg'] = []
        conf['kwarg'] = {}
        conf['cache_jobs'] = False
        conf['print_metadata'] = False
        conf.update(fun_conf)
        conf['fun'] = conf['fun'].split(':')[-1]  # Discard typing prefix

        return conf

    def _get_runner(self, conf):
        '''
        Get & setup runner.

        :param conf:
        :return:
        '''
        conf = self._setup_fun_config(copy.deepcopy(conf))
        if not getattr(self, '_runner', None):
            self._runner = salt.cli.support.localrunner.LocalRunner(conf)
        else:
            self._runner.opts = conf
        return self._runner

    def _get_caller(self, conf):
        '''
        Get & setup caller from the factory.

        :param conf:
        :return:
        '''
        conf = self._setup_fun_config(copy.deepcopy(conf))
        if not getattr(self, '_caller', None):
            self._caller = salt.cli.caller.Caller.factory(conf)
        else:
            self._caller.opts = conf
        return self._caller

    def _local_call(self, call_conf):
        '''
        Execute local call
        '''
        try:
            ret = self._get_caller(call_conf).call()
        except SystemExit:
            ret = 'Data is not available at this moment'
            self.out.error(ret)
        except Exception as ex:
            ret = 'Unhandled exception occurred: {}'.format(ex)
            log.debug(ex, exc_info=True)
            self.out.error(ret)

        return ret

    def _local_run(self, run_conf):
        '''
        Execute local runner

        :param run_conf:
        :return:
        '''
        try:
            ret = self._get_runner(run_conf).run()
        except SystemExit:
            ret = 'Runner is not available at this moment'
            self.out.error(ret)
        except Exception as ex:
            ret = 'Unhandled exception occurred: {}'.format(ex)
            log.debug(ex, exc_info=True)

        return ret

    def _internal_function_call(self, call_conf):
        '''
        Call internal function.

        :param call_conf:
        :return:
        '''
        def stub(*args, **kwargs):
            message = 'Function {} is not available'.format(call_conf['fun'])
            self.out.error(message)
            log.debug('Attempt to run "{fun}" with {arg} arguments and {kwargs} parameters.'.format(**call_conf))
            return message

        return getattr(salt.cli.support.intfunc,
                       call_conf['fun'], stub)(self.collector,
                                               *call_conf['arg'],
                                               **call_conf['kwargs'])

    def _get_action(self, action_meta):
        '''
        Parse action and turn into a calling point.
        :param action_meta:
        :return:
        '''
        conf = {
            'fun': list(action_meta.keys())[0],
            'arg': [],
            'kwargs': {},
        }
        if not len(conf['fun'].split('.')) - 1:
            conf['salt.int.intfunc'] = True

        action_meta = action_meta[conf['fun']]
        info = action_meta.get('info', 'Action for {}'.format(conf['fun']))
        for arg in action_meta.get('args') or []:
            if not isinstance(arg, dict):
                conf['arg'].append(arg)
            else:
                conf['kwargs'].update(arg)

        return info, action_meta.get('output'), conf

    def collect_internal_data(self):
        '''
        Dumps current running pillars, configuration etc.
        :return:
        '''
        section = 'configuration'
        self.out.put(section)
        self.collector.add(section)
        self.out.put('Saving config', indent=2)
        self.collector.write('General Configuration', self.config)
        self.out.put('Saving pillars', indent=2)
        self.collector.write('Active Pillars', self._local_call({'fun': 'pillar.items'}))

        section = 'highstate'
        self.out.put(section)
        self.collector.add(section)
        self.out.put('Saving highstate', indent=2)
        self.collector.write('Rendered highstate', self._local_call({'fun': 'state.show_highstate'}))

    def _extract_return(self, data):
        '''
        Extracts return data from the results.

        :param data:
        :return:
        '''
        if isinstance(data, dict):
            data = data.get('return', data)

        return data

    def collect_local_data(self):
        '''
        Collects master system data.
        :return:
        '''
        def call(func, *args, **kwargs):
            '''
            Call wrapper for templates
            :param func:
            :return:
            '''
            return self._extract_return(self._local_call({'fun': func, 'arg': args, 'kwarg': kwargs}))

        def run(func, *args, **kwargs):
            '''
            Runner wrapper for templates
            :param func:
            :return:
            '''
            return self._extract_return(self._local_run({'fun': func, 'arg': args, 'kwarg': kwargs}))

        scenario = salt.cli.support.get_profile(self.config['support_profile'], call, run)
        for category_name in scenario:
            self.out.put(category_name)
            self.collector.add(category_name)
            for action in scenario[category_name]:
                if not action:
                    continue
                action_name = next(iter(action))
                if not isinstance(action[action_name], six.string_types):
                    info, output, conf = self._get_action(action)
                    action_type = self._get_action_type(action)  # run:<something> for runners
                    if action_type == self.RUNNER_TYPE:
                        self.out.put('Running {}'.format(info.lower()), indent=2)
                        self.collector.write(info, self._local_run(conf), output=output)
                    elif action_type == self.CALL_TYPE:
                        if not conf.get('salt.int.intfunc'):
                            self.out.put('Collecting {}'.format(info.lower()), indent=2)
                            self.collector.write(info, self._local_call(conf), output=output)
                        else:
                            self.collector.discard_current()
                            self._internal_function_call(conf)
                    else:
                        self.out.error('Unknown action type "{}" for action: {}'.format(action_type, action))
                else:
                    # TODO: This needs to be moved then to the utils.
                    # But the code is not yet there (other PRs)
                    self.out.msg('\n'.join(salt.cli.support.console.wrap(action[action_name])), ident=2)

    def _get_action_type(self, action):
        '''
        Get action type.
        :param action:
        :return:
        '''
        action_name = next(iter(action or {'': None}))
        if ':' not in action_name:
            action_name = '{}:{}'.format(self.CALL_TYPE, action_name)

        return action_name.split(':')[0] or None

    def collect_targets_data(self):
        '''
        Collects minion targets data
        :return:
        '''
        # TODO: remote collector?

    def _cleanup(self):
        '''
        Cleanup if crash/exception
        :return:
        '''
        if (hasattr(self, 'config')
                and self.config.get('support_archive')
                and os.path.exists(self.config['support_archive'])):
            self.out.warning('Terminated earlier, cleaning up')
            os.unlink(self.config['support_archive'])

    def _check_existing_archive(self):
        '''
        Check if archive exists or not. If exists and --force was not specified,
        bail out. Otherwise remove it and move on.

        :return:
        '''
        if os.path.exists(self.config['support_archive']):
            if self.config['support_archive_force_overwrite']:
                self.out.warning('Overwriting existing archive: {}'.format(self.config['support_archive']))
                os.unlink(self.config['support_archive'])
                ret = True
            else:
                self.out.warning('File {} already exists.'.format(self.config['support_archive']))
                ret = False
        else:
            ret = True

        return ret

    def run(self):
        exit_code = salt.defaults.exitcodes.EX_OK
        self.out = salt.cli.support.console.MessagesOutput()
        try:
            self.parse_args()
        except (Exception, SystemExit) as ex:
            if not isinstance(ex, exceptions.SystemExit):
                exit_code = salt.defaults.exitcodes.EX_GENERIC
                self.out.error(ex)
            elif isinstance(ex, exceptions.SystemExit):
                exit_code = ex.code
            else:
                exit_code = salt.defaults.exitcodes.EX_GENERIC
                self.out.error(ex)
        else:
            if self.config['log_level'] not in ('quiet', ):
                self.setup_logfile_logger()
                salt.utils.verify.verify_log(self.config)
                salt.cli.support.log = log  # Pass update logger so trace is available

            if self.config['support_profile_list']:
                self.out.put('List of available profiles:')
                for idx, profile in enumerate(salt.cli.support.get_profiles(self.config)):
                    msg_template = '  {}. '.format(idx + 1) + '{}'
                    self.out.highlight(msg_template, profile)
                exit_code = salt.defaults.exitcodes.EX_OK
            elif self.config['support_show_units']:
                self.out.put('List of available units:')
                for idx, unit in enumerate(self.find_existing_configs(None)):
                    msg_template = '  {}. '.format(idx + 1) + '{}'
                    self.out.highlight(msg_template, unit)
                exit_code = salt.defaults.exitcodes.EX_OK
            else:
                if not self.config['support_profile']:
                    self.print_help()
                    raise SystemExit()

                if self._check_existing_archive():
                    try:
                        self.collector = SupportDataCollector(self.config['support_archive'],
                                                              output=self.config['support_output_format'])
                    except Exception as ex:
                        self.out.error(ex)
                        exit_code = salt.defaults.exitcodes.EX_GENERIC
                        log.debug(ex, exc_info=True)
                    else:
                        try:
                            self.collector.open()
                            self.collect_local_data()
                            self.collect_internal_data()
                            self.collect_targets_data()
                            self.collector.close()

                            archive_path = self.collector.archive_path
|
||||
self.out.highlight('\nSupport data has been written to "{}" file.\n',
|
||||
archive_path, _main='YELLOW')
|
||||
except Exception as ex:
|
||||
self.out.error(ex)
|
||||
log.debug(ex, exc_info=True)
|
||||
exit_code = salt.defaults.exitcodes.EX_SOFTWARE
|
||||
|
||||
if exit_code:
|
||||
self._cleanup()
|
||||
|
||||
sys.exit(exit_code)
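
The action parsing above is easiest to see on a concrete profile entry. A minimal standalone sketch of the same transformation (the parse_action helper and the sample entry are illustrative only, not part of the module):

# Sketch: how a profile action dict becomes a call configuration.
# Mirrors the _get_action logic above; names here are illustrative only.
def parse_action(action_meta):
    fun = list(action_meta.keys())[0]
    conf = {'fun': fun, 'arg': [], 'kwargs': {}}
    if '.' not in fun:                      # no module prefix: internal function
        conf['salt.int.intfunc'] = True
    meta = action_meta[fun]
    info = meta.get('info', 'Action for {}'.format(fun))
    for arg in meta.get('args') or []:
        if isinstance(arg, dict):
            conf['kwargs'].update(arg)      # dict args become keyword arguments
        else:
            conf['arg'].append(arg)         # scalar args become positional
    return info, meta.get('output'), conf

# A YAML entry like "- network.netstat: {info: ..., output: table}" arrives as:
action = {'network.netstat': {'info': 'Open ports', 'output': 'table'}}
print(parse_action(action))
# ('Open ports', 'table', {'fun': 'network.netstat', 'arg': [], 'kwargs': {}})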
165  salt/cli/support/console.py  (new file)
@@ -0,0 +1,165 @@
# coding=utf-8
'''
Collection of tools to report messages to the console.

NOTE: This is subject to incorporate other formatting bits
from all around everywhere and then to be moved to utils.
'''

from __future__ import absolute_import, print_function, unicode_literals

import sys
import os
import salt.utils.color
import textwrap


class IndentOutput(object):
    '''
    Paint different indents in different colors.
    '''
    def __init__(self, conf=None, device=sys.stdout):
        if conf is None:
            conf = {0: 'CYAN', 2: 'GREEN', 4: 'LIGHT_BLUE', 6: 'BLUE'}
        self._colors_conf = conf
        self._device = device
        self._colors = salt.utils.color.get_colors()
        self._default_color = 'GREEN'
        self._default_hl_color = 'LIGHT_GREEN'

    def put(self, message, indent=0):
        '''
        Print a message with an indent.

        :param message:
        :param indent:
        :return:
        '''
        color = self._colors_conf.get(indent + indent % 2, self._colors_conf.get(0, self._default_color))

        for chunk in [' ' * indent, self._colors[color], message, self._colors['ENDC']]:
            self._device.write(str(chunk))
        self._device.write(os.linesep)
        self._device.flush()


class MessagesOutput(IndentOutput):
    '''
    Messages output to the CLI.
    '''
    def msg(self, message, title=None, title_color=None, color='BLUE', ident=0):
        '''
        Hint message.

        :param message:
        :param title:
        :param title_color:
        :param color:
        :param ident:
        :return:
        '''
        if title and not title_color:
            title_color = color
        if title_color and not title:
            title_color = None

        self.__colored_output(title, message, title_color, color, ident=ident)

    def info(self, message, ident=0):
        '''
        Write an info message to the CLI.

        :param message:
        :param ident:
        :return:
        '''
        self.__colored_output('Info', message, 'GREEN', 'LIGHT_GREEN', ident=ident)

    def warning(self, message, ident=0):
        '''
        Write a warning message to the CLI.

        :param message:
        :param ident:
        :return:
        '''
        self.__colored_output('Warning', message, 'YELLOW', 'LIGHT_YELLOW', ident=ident)

    def error(self, message, ident=0):
        '''
        Write an error message to the CLI.

        :param message:
        :param ident:
        :return:
        '''
        self.__colored_output('Error', message, 'RED', 'LIGHT_RED', ident=ident)

    def __colored_output(self, title, message, title_color, message_color, ident=0):
        if title and not title.endswith(':'):
            _linesep = title.endswith(os.linesep)
            title = '{}:{}'.format(title.strip(), _linesep and os.linesep or ' ')

        for chunk in [title_color and self._colors[title_color] or None, ' ' * ident,
                      title, self._colors[message_color], message, self._colors['ENDC']]:
            if chunk:
                self._device.write(str(chunk))
        self._device.write(os.linesep)
        self._device.flush()

    def highlight(self, message, *values, **colors):
        '''
        The highlighter treats the message parameter as a template and the
        "values" as the arguments substituted into it, one after another.
        The "colors" keyword arguments designate the highlight color, or an
        alternative color for any particular value.

        Example:

            highlight('Hello {}, there! It is {}.', 'user', 'daytime', _main='GREEN', _highlight='RED')
            highlight('Hello {}, there! It is {}.', 'user', 'daytime', _main='GREEN', _highlight='RED', **{'daytime': 'YELLOW'})

        The first example highlights all the values in the template in red.
        The second example highlights the second value in yellow.

        Usage:

            colors:
              _main: Sets the main color (or the default is used)
              _highlight: Sets the alternative color for everything else
              'any phrase' that matches one of the "values" can override its color.

        :param message:
        :param values:
        :param colors:
        :return:
        '''

        m_color = colors.get('_main', self._default_color)
        h_color = colors.get('_highlight', self._default_hl_color)

        _values = []
        for value in values:
            _values.append('{p}{c}{r}'.format(p=self._colors[colors.get(value, h_color)],
                                              c=value, r=self._colors[m_color]))
        self._device.write('{s}{m}{e}'.format(s=self._colors[m_color],
                                              m=message.format(*_values), e=self._colors['ENDC']))
        self._device.write(os.linesep)
        self._device.flush()


def wrap(txt, width=80, ident=0):
    '''
    Wrap text to the required dimensions and clean it up, prepare for display.

    :param txt:
    :param width:
    :return:
    '''
    ident = ' ' * ident
    txt = (txt or '').replace(os.linesep, ' ').strip()

    wrapper = textwrap.TextWrapper()
    wrapper.fix_sentence_endings = False
    wrapper.initial_indent = wrapper.subsequent_indent = ident

    return wrapper.wrap(txt)
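
A quick interactive check of the classes above — the messages land on sys.stdout wrapped in ANSI color codes (the archive path used in the last call is just sample data):

out = MessagesOutput()
out.put('configuration')              # top-level section, CYAN by default
out.put('Saving config', indent=2)    # indented step, GREEN by default
out.warning('File exists, skipping')  # "Warning: ..." in yellow
out.highlight('Wrote archive to "{}".', '/tmp/support.bz2', _main='YELLOW')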
42  salt/cli/support/intfunc.py  (new file)
@@ -0,0 +1,42 @@
# coding=utf-8
'''
Internal functions.
'''
# Maybe this needs to be a module in the future?

from __future__ import absolute_import, print_function, unicode_literals
import os
from salt.cli.support.console import MessagesOutput
import salt.utils.files


out = MessagesOutput()


def filetree(collector, path):
    '''
    Add all files in the tree. If the "path" is a file,
    only that file will be added.

    :param path: File or directory
    :return:
    '''
    if not path:
        out.error('Path not defined', ident=2)
    else:
        # The filehandler needs to be explicitly passed here, so PyLint needs to accept that.
        # pylint: disable=W8470
        if os.path.isfile(path):
            filename = os.path.basename(path)
            try:
                file_ref = salt.utils.files.fopen(path)  # pylint: disable=W8470
                out.put('Add {}'.format(filename), indent=2)
                collector.add(filename)
                collector.link(title=path, path=file_ref)
            except Exception as err:
                out.error(err, ident=4)
            # pylint: enable=W8470
        else:
            for fname in os.listdir(path):
                fname = os.path.join(path, fname)
                filetree(collector, fname)
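
For illustration, filetree only needs an object with add and link methods; a minimal stand-in (hypothetical, for demonstration only — not the real SupportDataCollector) shows the traversal:

# Hypothetical stand-in collector, just to show how filetree walks a tree.
class EchoCollector(object):
    def add(self, name):
        print('section:', name)

    def link(self, title, path):
        print('  linked:', title)
        path.close()  # filetree hands over an open file object

filetree(EchoCollector(), '/etc/hosts')   # single file: added directly
filetree(EchoCollector(), '/var/log')     # directory: recursed file by file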
34  salt/cli/support/localrunner.py  (new file)
@@ -0,0 +1,34 @@
# coding=utf-8
'''
Local Runner
'''

from __future__ import print_function, absolute_import, unicode_literals
import salt.runner
import salt.utils.platform
import salt.utils.process
import logging

log = logging.getLogger(__name__)


class LocalRunner(salt.runner.Runner):
    '''
    Runner class that changes its default behaviour.
    '''

    def _proc_function(self, fun, low, user, tag, jid, daemonize=True):
        '''
        Same as original _proc_function in AsyncClientMixin,
        except it calls "low" without firing a print event.
        '''
        if daemonize and not salt.utils.platform.is_windows():
            salt.log.setup.shutdown_multiprocessing_logging()
            salt.utils.process.daemonize()
            salt.log.setup.setup_multiprocessing_logging()

        low['__jid__'] = jid
        low['__user__'] = user
        low['__tag__'] = tag

        return self.low(fun, low, print_event=False, full_return=False)
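
A brief sketch of the chunk this class consumes (illustrative only: in the collector the chunk is built by _local_run from profile actions such as run:jobs.active):

# Illustrative "low" chunk shape handed to LocalRunner in the collector.
low = {
    'fun': 'jobs.active',   # runner function to execute
    'arg': [],              # positional arguments
    'kwarg': {},            # keyword arguments
}
# LocalRunner(opts).low(low['fun'], low, print_event=False, full_return=False)
# executes the runner synchronously without emitting a print event, which
# keeps collected support output off the master event bus.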
71  salt/cli/support/profiles/default.yml  (new file)
@@ -0,0 +1,71 @@
sysinfo:
  - description: |
      Get the Salt grains of the current system.
  - grains.items:
      info: System grains

packages:
  - description: |
      Fetch list of all the installed packages.
  - pkg.list_pkgs:
      info: Installed packages

repositories:
  - pkg.list_repos:
      info: Available repositories

upgrades:
  - pkg.list_upgrades:
      info: Possible upgrades

## TODO: Some data here belongs elsewhere and also is duplicated
status:
  - status.version:
      info: Status version
  - status.cpuinfo:
      info: CPU information
  - status.cpustats:
      info: CPU stats
  - status.diskstats:
      info: Disk stats
  - status.loadavg:
      info: Average load of the current system
  - status.uptime:
      info: Uptime of the machine
  - status.meminfo:
      info: Information about memory
  - status.vmstats:
      info: Virtual memory stats
  - status.netdev:
      info: Network device stats
  - status.nproc:
      info: Number of processing units available on this system
  - status.procs:
      info: Process data

general-health:
  - ps.boot_time:
      info: System Boot Time
  - ps.swap_memory:
      info: Swap Memory
      output: txt
  - ps.cpu_times:
      info: CPU times
  - ps.disk_io_counters:
      info: Disk IO counters
  - ps.disk_partition_usage:
      info: Disk partition usage
      output: table
  - ps.disk_partitions:
      info: Disk partitions
      output: table
  - ps.top:
      info: Top CPU consuming processes

system.log:
  # This works on any file system object.
  - filetree:
      info: Add system log
      args:
        - /var/log/syslog
3  salt/cli/support/profiles/jobs-active.yml  (new file)
@@ -0,0 +1,3 @@
jobs-active:
  - run:jobs.active:
      info: List of all actively running jobs
3  salt/cli/support/profiles/jobs-last.yml  (new file)
@@ -0,0 +1,3 @@
jobs-last:
  - run:jobs.last_run:
      info: List all detectable jobs and associated functions
7  salt/cli/support/profiles/jobs-trace.yml  (new file)
@@ -0,0 +1,7 @@
jobs-details:
  {% for job in runners('jobs.list_jobs') %}
  - run:jobs.list_job:
      info: Details on JID {{job}}
      args:
        - {{job}}
  {% endfor %}
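
The runners('jobs.list_jobs') call above is resolved while the profile is rendered: the collector passes its call and run wrappers into the template context before the YAML is parsed. A minimal sketch of that rendering step, using plain jinja2 and PyYAML directly (the real code goes through salt.cli.support.get_profile, and the fake_runners stub is illustrative):

import jinja2
import yaml

template = '''
jobs-details:
  {% for job in runners('jobs.list_jobs') %}
  - run:jobs.list_job:
      info: Details on JID {{job}}
      args:
        - {{job}}
  {% endfor %}
'''

def fake_runners(fun):                 # stand-in for the real run() wrapper
    return ['20181101010203', '20181101050607']

rendered = jinja2.Environment().from_string(template).render(runners=fake_runners)
print(yaml.safe_load(rendered))
# {'jobs-details': [{'run:jobs.list_job': {'info': 'Details on JID 20181101010203', ...}}, ...]}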
27  salt/cli/support/profiles/network.yml  (new file)
@@ -0,0 +1,27 @@
network:
  - network.get_hostname:
      info: Hostname
      output: txt
  - network.get_fqdn:
      info: FQDN
      output: txt
  - network.default_route:
      info: Default route
      output: table
  - network.interfaces:
      info: All the available interfaces
      output: table
  - network.subnets:
      info: List of IPv4 subnets
  - network.subnets6:
      info: List of IPv6 subnets
  - network.routes:
      info: Network configured routes from routing tables
      output: table
  - network.netstat:
      info: Information on open ports and states
      output: table
  - network.active_tcp:
      info: All running TCP connections
  - network.arp:
      info: ARP table
11  salt/cli/support/profiles/postgres.yml  (new file)
@@ -0,0 +1,11 @@
system.log:
  - filetree:
      info: Add system log
      args:
        - /var/log/syslog

etc/postgres:
  - filetree:
      info: Pick entire /etc/postgresql
      args:
        - /etc/postgresql
9  salt/cli/support/profiles/salt.yml  (new file)
@@ -0,0 +1,9 @@
sysinfo:
  - grains.items:
      info: System grains

logfile:
  - filetree:
      info: Add current logfile
      args:
        - {{salt('config.get', 'log_file')}}
22  salt/cli/support/profiles/users.yml  (new file)
@@ -0,0 +1,22 @@
all-users:
  {%for uname in salt('user.list_users') %}
  - user.info:
      info: Information about "{{uname}}"
      args:
        - {{uname}}
  - user.list_groups:
      info: List groups for user "{{uname}}"
      args:
        - {{uname}}
  - shadow.info:
      info: Shadow information about user "{{uname}}"
      args:
        - {{uname}}
  - cron.raw_cron:
      info: Cron for user "{{uname}}"
      args:
        - {{uname}}
  {%endfor%}
  - group.getent:
      info: List of all available groups
      output: table
@@ -167,6 +167,16 @@ def _cleanup_slsmod_high_data(high_data):
                 stateconf_data['slsmod'] = None


+def _parse_mods(mods):
+    '''
+    Parse modules from a comma-separated string or a list.
+    '''
+    if isinstance(mods, six.string_types):
+        mods = [item.strip() for item in mods.split(',') if item.strip()]
+
+    return mods
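
The helper both splits and strips, so stray whitespace and empty items disappear; for instance:

# Behaviour of the new helper for typical comma-separated input.
print(_parse_mods('web, db,  ,cache'))   # ['web', 'db', 'cache']
print(_parse_mods(['web', 'db']))        # non-string input passes through unchanged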
 def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
     '''
     Create the seed file for a state.sls run
@@ -181,8 +191,7 @@ def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
                        __salt__,
                        __context__['fileclient'])
     st_.push_active()
-    if isinstance(mods, six.string_types):
-        mods = mods.split(',')
+    mods = _parse_mods(mods)
     high_data, errors = st_.render_highstate({saltenv: mods})
     if exclude:
         if isinstance(exclude, six.string_types):
@@ -922,8 +931,7 @@ def sls_id(id_, mods, test=None, queue=False, **kwargs):
             err += __pillar__['_errors']
             return err

-    if isinstance(mods, six.string_types):
-        split_mods = mods.split(',')
+    split_mods = _parse_mods(mods)
     st_.push_active()
     high_, errors = st_.render_highstate({opts['saltenv']: split_mods})
     errors += st_.state.verify_high(high_)
@@ -980,8 +988,7 @@ def show_sls(mods, saltenv='base', test=None, **kwargs):
                        __salt__,
                        __context__['fileclient'])
     st_.push_active()
-    if isinstance(mods, six.string_types):
-        mods = mods.split(',')
+    mods = _parse_mods(mods)
     high_data, errors = st_.render_highstate({saltenv: mods})
     high_data, ext_errors = st_.state.reconcile_extend(high_data)
     errors += ext_errors
@@ -1025,8 +1032,7 @@ def show_low_sls(mods, saltenv='base', test=None, **kwargs):
                        __salt__,
                        __context__['fileclient'])
     st_.push_active()
-    if isinstance(mods, six.string_types):
-        mods = mods.split(',')
+    mods = _parse_mods(mods)
     high_data, errors = st_.render_highstate({saltenv: mods})
     high_data, ext_errors = st_.state.reconcile_extend(high_data)
     errors += ext_errors
@@ -50,6 +50,7 @@ from salt.exceptions import (
     SaltCloudExecutionFailure,
     SaltCloudExecutionTimeout
 )
+from salt.utils.stringutils import to_bytes

 # Import 3rd-party libs
 from salt.ext import six
@@ -770,7 +771,7 @@ def _compute_signature(parameters, access_key_secret):
     # All aliyun API only support GET method
     stringToSign = 'GET&%2F&' + percent_encode(canonicalizedQueryString[1:])

-    h = hmac.new(access_key_secret + "&", stringToSign, sha1)
+    h = hmac.new(to_bytes(access_key_secret + "&"), stringToSign, sha1)
     signature = base64.encodestring(h.digest()).strip()
     return signature
@@ -918,6 +918,9 @@ VALID_OPTS = {
     # Set a hard limit for the amount of memory modules can consume on a minion.
     'modules_max_memory': int,

+    # Blacklist specific core grains to be filtered
+    'grains_blacklist': list,
+
     # The number of minutes between the minion refreshing its cache of grains
     'grains_refresh_every': int,

@@ -1248,6 +1251,7 @@ DEFAULT_MINION_OPTS = {
     'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'minion'),
     'append_minionid_config_dirs': [],
     'cache_jobs': False,
+    'grains_blacklist': [],
     'grains_cache': False,
     'grains_cache_expiration': 300,
     'grains_deep_merge': False,
@@ -5,9 +5,11 @@ Generate chronos proxy minion grains.

 .. versionadded:: 2015.8.2

 '''
+# Import Python libs
 from __future__ import absolute_import, print_function, unicode_literals

+# Import Salt libs
 import salt.utils.http
 import salt.utils.platform
 __proxyenabled__ = ['chronos']
@@ -49,6 +49,7 @@ except ImportError:

 # Import salt libs
 import salt.exceptions
 import salt.log
+import salt.utils.args
 import salt.utils.dns
 import salt.utils.files
 import salt.utils.network
@@ -2775,3 +2776,24 @@ def default_gateway():
         except Exception:
             continue
     return grains
+
+
+def kernelparams():
+    '''
+    Return the kernel boot parameters
+    '''
+    try:
+        with salt.utils.files.fopen('/proc/cmdline', 'r') as fhr:
+            cmdline = fhr.read()
+            grains = {'kernelparams': []}
+            for data in [item.split('=') for item in salt.utils.args.shlex_split(cmdline)]:
+                value = None
+                if len(data) == 2:
+                    value = data[1].strip('"')
+
+                grains['kernelparams'] += [(data[0], value)]
+    except IOError as exc:
+        grains = {}
+        log.debug('Failed to read /proc/cmdline: %s', exc)
+
+    return grains
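
To make the shape of the new grain concrete, here is the same parse applied to a sample command line — a standalone sketch using the standard library's shlex in place of salt.utils.args.shlex_split (a thin wrapper around it); the cmdline value is sample data:

import shlex

cmdline = 'BOOT_IMAGE=/vmlinuz root=/dev/sda1 ro quiet console="ttyS0,115200"'
params = []
for data in [item.split('=') for item in shlex.split(cmdline)]:
    value = data[1].strip('"') if len(data) == 2 else None
    params.append((data[0], value))
print(params)
# [('BOOT_IMAGE', '/vmlinuz'), ('root', '/dev/sda1'), ('ro', None),
#  ('quiet', None), ('console', 'ttyS0,115200')]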
@@ -34,6 +34,7 @@ import salt.utils.lazy
 import salt.utils.odict
 import salt.utils.platform
 import salt.utils.versions
+import salt.utils.stringutils
 from salt.exceptions import LoaderError
 from salt.template import check_render_pipe_str
 from salt.utils.decorators import Depends
@@ -747,6 +748,7 @@ def grains(opts, force_refresh=False, proxy=None):
             opts['grains'] = {}

     grains_data = {}
+    blist = opts.get('grains_blacklist', [])
     funcs = grain_funcs(opts, proxy=proxy)
     if force_refresh:  # if we refresh, lets reload grain modules
         funcs.clear()
@@ -758,6 +760,14 @@ def grains(opts, force_refresh=False, proxy=None):
         ret = funcs[key]()
         if not isinstance(ret, dict):
             continue
+        if blist:
+            for key in list(ret):
+                for block in blist:
+                    if salt.utils.stringutils.expr_match(key, block):
+                        del ret[key]
+                        log.trace('Filtering %s grain', key)
+            if not ret:
+                continue
         if grains_deep_merge:
             salt.utils.dictupdate.update(grains_data, ret)
         else:
@@ -793,6 +803,14 @@ def grains(opts, force_refresh=False, proxy=None):
             continue
         if not isinstance(ret, dict):
             continue
+        if blist:
+            for key in list(ret):
+                for block in blist:
+                    if salt.utils.stringutils.expr_match(key, block):
+                        del ret[key]
+                        log.trace('Filtering %s grain', key)
+            if not ret:
+                continue
         if grains_deep_merge:
             salt.utils.dictupdate.update(grains_data, ret)
         else:
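
The blacklist entries go through salt.utils.stringutils.expr_match, so both glob patterns and regular expressions work. A standalone sketch of the filtering, with fnmatch standing in for expr_match's glob branch (the grains and patterns are sample data):

import fnmatch

grains = {'os': 'Ubuntu', 'osrelease': '18.04', 'ipv4': ['10.0.0.5'], 'id': 'web1'}
blist = ['os*', 'ipv4']

for key in list(grains):
    if any(fnmatch.fnmatch(key, block) for block in blist):
        del grains[key]   # 'os', 'osrelease' and 'ipv4' are dropped
print(grains)             # {'id': 'web1'}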
@@ -113,13 +113,16 @@ __virtualname__ = 'sentry'

 def __virtual__():
     if HAS_RAVEN is True:
+        __grains__ = salt.loader.grains(__opts__)
+        __salt__ = salt.loader.minion_mods(__opts__)
         return __virtualname__
     return False


 def setup_handlers():
     '''
     sets up the sentry handler
     '''
-    __grains__ = salt.loader.grains(__opts__)
-    __salt__ = salt.loader.minion_mods(__opts__)
     if 'sentry_handler' not in __opts__:
         log.debug('No \'sentry_handler\' key was found in the configuration')
         return False
@@ -133,7 +136,9 @@ def setup_handlers():
         transport_registry = TransportRegistry(default_transports)
         url = urlparse(dsn)
         if not transport_registry.supported_scheme(url.scheme):
-            raise ValueError('Unsupported Sentry DSN scheme: {0}'.format(url.scheme))
+            raise ValueError(
+                'Unsupported Sentry DSN scheme: %s', url.scheme
+            )
     except ValueError as exc:
         log.info(
             'Raven failed to parse the configuration provided DSN: %s', exc
@@ -202,7 +207,11 @@ def setup_handlers():
     context_dict = {}
     if context is not None:
         for tag in context:
-            tag_value = __salt__['grains.get'](tag)
+            try:
+                tag_value = __grains__[tag]
+            except KeyError:
+                log.debug('Sentry tag \'%s\' not found in grains.', tag)
+                continue
             if len(tag_value) > 0:
                 context_dict[tag] = tag_value
         if len(context_dict) > 0:
@@ -229,4 +238,7 @@ def setup_handlers():


 def get_config_value(name, default=None):
     '''
     returns a configuration option for the sentry_handler
     '''
     return __opts__['sentry_handler'].get(name, default)
@@ -977,7 +977,7 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
         self.mkey = mkey
         self.key = key
         self.k_mtime = 0
-        self.stats = collections.defaultdict(lambda: {'mean': 0, 'runs': 0})
+        self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
         self.stat_clock = time.time()

     # We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
@@ -1059,18 +1059,16 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
                'clear': self._handle_clear}[key](load)
         raise tornado.gen.Return(ret)

-    def _post_stats(self, start, cmd):
+    def _post_stats(self, stats):
         '''
-        Calculate the master stats and fire events with stat info
+        Fire events with stat info if it's time
         '''
-        end = time.time()
-        duration = end - start
-        self.stats[cmd]['mean'] = (self.stats[cmd]['mean'] * (self.stats[cmd]['runs'] - 1) + duration) / self.stats[cmd]['runs']
-        if end - self.stat_clock > self.opts['master_stats_event_iter']:
+        end_time = time.time()
+        if end_time - self.stat_clock > self.opts['master_stats_event_iter']:
             # Fire the event with the stats and wipe the tracker
-            self.aes_funcs.event.fire_event({'time': end - self.stat_clock, 'worker': self.name, 'stats': self.stats}, tagify(self.name, 'stats'))
-            self.stats = collections.defaultdict(lambda: {'mean': 0, 'runs': 0})
-            self.stat_clock = end
+            self.aes_funcs.event.fire_event({'time': end_time - self.stat_clock, 'worker': self.name, 'stats': stats}, tagify(self.name, 'stats'))
+            self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
+            self.stat_clock = end_time

     def _handle_clear(self, load):
         '''
@@ -1086,10 +1084,10 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
             return False
         if self.opts['master_stats']:
             start = time.time()
-            self.stats[cmd]['runs'] += 1
         ret = getattr(self.clear_funcs, cmd)(load), {'fun': 'send_clear'}
         if self.opts['master_stats']:
-            self._post_stats(start, cmd)
+            stats = salt.utils.event.update_stats(self.stats, start, load)
+            self._post_stats(stats)
         return ret

     def _handle_aes(self, data):
@@ -1109,7 +1107,6 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
             return False
         if self.opts['master_stats']:
             start = time.time()
-            self.stats[cmd]['runs'] += 1

         def run_func(data):
             return self.aes_funcs.run_func(data['cmd'], data)
@@ -1120,7 +1117,8 @@ class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
         ret = run_func(data)

         if self.opts['master_stats']:
-            self._post_stats(start, cmd)
+            stats = salt.utils.event.update_stats(self.stats, start, data)
+            self._post_stats(stats)
         return ret

     def run(self):
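
The removed inline computation above is an incremental (running) mean, which the new code delegates to salt.utils.event.update_stats: each new duration folds into the previous mean without storing every sample. A standalone sketch of that update rule:

# Sketch: incremental mean, the rule the old inline code applied per command.
stats = {'mean': 0.0, 'runs': 0}

def fold(duration, stats):
    stats['runs'] += 1
    n = stats['runs']
    # new_mean = (old_mean * (n - 1) + duration) / n
    stats['mean'] = (stats['mean'] * (n - 1) + duration) / n
    return stats

for d in (0.2, 0.4, 0.6):
    fold(d, stats)
print(stats)   # {'mean': 0.4, 'runs': 3} -- up to float rounding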
@@ -2393,7 +2393,8 @@ class Minion(MinionBase):
                 else:
                     # delete the scheduled job so it does not interfere with the failover process
                     if self.opts['transport'] != 'tcp':
-                        self.schedule.delete_job(name=master_event(type='alive'))
+                        self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']),
+                                                 persist=True)

                     log.info('Trying to tune in to next master from master-list')
@@ -4511,7 +4511,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False):
         try:
             lattrs = lsattr(name)
         except SaltInvocationError:
-            lsattrs = None
+            lattrs = None
         if lattrs is not None:
             # List attributes on file
             perms['lattrs'] = ''.join(lattrs.get(name, ''))
@@ -402,18 +402,14 @@ def add(connect_spec, dn, attributes):
     # convert the "iterable of values" to lists in case that's what
     # addModlist() expects (also to ensure that the caller's objects
     # are not modified)
-    attributes = dict(((attr, list(vals))
+    attributes = dict(((attr, salt.utils.data.encode(list(vals)))
                        for attr, vals in six.iteritems(attributes)))
     log.info('adding entry: dn: %s attributes: %s', repr(dn), repr(attributes))

     if 'unicodePwd' in attributes:
         attributes['unicodePwd'] = [_format_unicode_password(x) for x in attributes['unicodePwd']]

-    modlist = salt.utils.data.decode(
-        ldap.modlist.addModlist(attributes),
-        to_str=True,
-        preserve_tuples=True
-    )
+    modlist = ldap.modlist.addModlist(attributes)
     try:
         l.c.add_s(dn, modlist)
     except ldap.LDAPError as e:
@@ -572,19 +568,16 @@ def change(connect_spec, dn, before, after):
     # convert the "iterable of values" to lists in case that's what
     # modifyModlist() expects (also to ensure that the caller's dicts
     # are not modified)
-    before = dict(((attr, list(vals))
+    before = dict(((attr, salt.utils.data.encode(list(vals)))
                    for attr, vals in six.iteritems(before)))
-    after = dict(((attr, list(vals))
+    after = dict(((attr, salt.utils.data.encode(list(vals)))
                   for attr, vals in six.iteritems(after)))

     if 'unicodePwd' in after:
         after['unicodePwd'] = [_format_unicode_password(x) for x in after['unicodePwd']]

-    modlist = salt.utils.data.decode(
-        ldap.modlist.modifyModlist(before, after),
-        to_str=True,
-        preserve_tuples=True
-    )
+    modlist = ldap.modlist.modifyModlist(before, after)

     try:
         l.c.modify_s(dn, modlist)
     except ldap.LDAPError as e:
174  salt/modules/salt_version.py  (new file)
@@ -0,0 +1,174 @@
# -*- coding: utf-8 -*-
'''
Access Salt's elemental release code-names.

.. versionadded:: Neon

Salt's feature release schedule is based on the Periodic Table, as described
in the :ref:`Version Numbers <version-numbers>` documentation.

Since deprecation notices often use the elemental release code-name when warning
users about deprecated changes, it can be difficult to build out future-proof
functionality that is dependent on a naming scheme that moves.

For example, a state syntax needs to change to support an option that will be
removed in the future, but there are many Minion versions in use across an
infrastructure. It would be handy to use some Jinja syntax to check for these
instances to perform one state syntax over another.

A simple example might be something like the following:

.. code-block:: jinja

    {# a boolean check #}
    {% set option_deprecated = salt['salt_version.is_older']("Sodium") %}

    {% if option_deprecated %}
      <use old syntax>
    {% else %}
      <use new syntax>
    {% endif %}

'''

# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging

# Import Salt libs
from salt.ext import six
import salt.version
import salt.utils.versions


log = logging.getLogger(__name__)

__virtualname__ = 'salt_version'


def __virtual__():
    '''
    Only work on POSIX-like systems
    '''
    return __virtualname__


def get_release_number(name):
    '''
    Returns the release number of a given release code name in a
    ``<year>.<month>`` context.

    If the release name has not yet been given an assigned release number,
    the function returns the string ``'No version assigned.'``. If the
    release cannot be found, it returns ``None``.

    name
        The release code name for which to find a release number.

    CLI Example:

    .. code-block:: bash

        salt '*' salt_version.get_release_number 'Oxygen'
    '''
    name = name.lower()
    version_map = salt.version.SaltStackVersion.LNAMES
    version = version_map.get(name)
    if version is None:
        log.info('Version {} not found.'.format(name))
        return None

    if version[1] == 0:
        log.info('Version {} found, but no release number has been assigned '
                 'yet.'.format(name))
        return 'No version assigned.'

    return '.'.join(str(item) for item in version)


def is_equal(name):
    '''
    Returns a boolean indicating whether the named version matches the
    minion's current Salt version.

    name
        The release code name to check the version against.

    CLI Example:

    .. code-block:: bash

        salt '*' salt_version.is_equal 'Oxygen'
    '''
    if _check_release_cmp(name) == 0:
        log.info('Release codename \'{}\' equals the minion\'s '
                 'version.'.format(name))
        return True

    return False


def is_newer(name):
    '''
    Returns a boolean indicating whether the named version is newer than the
    minion's current Salt version.

    name
        The release code name to check the version against.

    CLI Example:

    .. code-block:: bash

        salt '*' salt_version.is_newer 'Sodium'
    '''
    if _check_release_cmp(name) == 1:
        log.info('Release codename \'{}\' is newer than the minion\'s '
                 'version.'.format(name))
        return True

    return False


def is_older(name):
    '''
    Returns a boolean indicating whether the named version is older than the
    minion's current Salt version.

    name
        The release code name to check the version against.

    CLI Example:

    .. code-block:: bash

        salt '*' salt_version.is_older 'Sodium'
    '''
    if _check_release_cmp(name) == -1:
        log.info('Release codename \'{}\' is older than the minion\'s '
                 'version.'.format(name))
        return True

    return False


def _check_release_cmp(name):
    '''
    Helper function to compare a release code name version to the minion's
    current Salt version.

    If the release code name isn't found, the function returns None.
    Otherwise, it returns the result of the version comparison as documented
    by the ``version_cmp`` function in ``salt.utils.versions``.
    '''
    map_version = get_release_number(name)
    if map_version is None:
        log.info('Release codename {} was not found.'.format(name))
        return None

    current_version = six.text_type(salt.version.SaltStackVersion(
        *salt.version.__version_info__))
    current_version = current_version.rsplit('.', 1)[0]
    version_cmp = salt.utils.versions.version_cmp(map_version, current_version)

    return version_cmp
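
The comparison helper reduces to an ordinary three-way version compare against the two-component current version; for example (the minion version here is illustrative):

# Illustrative: what _check_release_cmp computes for a minion on 2018.3.
# salt.utils.versions.version_cmp returns -1, 0 or 1, like cmp().
import salt.utils.versions

current = '2018.3'                       # minion version with the trailing part dropped
print(salt.utils.versions.version_cmp('2017.7', current))   # -1: Nitrogen is older
print(salt.utils.versions.version_cmp('2018.3', current))   #  0: Oxygen is equal
print(salt.utils.versions.version_cmp('2019.2', current))   #  1: Fluorine is newer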
@@ -570,7 +570,7 @@ class SaltCheck(object):
         else:
             assertion = test_dict['assertion']
         expected_return = test_dict.get('expected-return', None)
-        assert_print_result = test_dict.get('print_result', None)
+        assert_print_result = test_dict.get('print_result', True)
         actual_return = self._call_salt_command(mod_and_func, args, kwargs, assertion_section)
         if assertion not in ["assertIn", "assertNotIn", "assertEmpty", "assertNotEmpty",
                              "assertTrue", "assertFalse"]:
@@ -1100,13 +1100,15 @@ class SaltAPIHandler(BaseSaltAPIHandler):  # pylint: disable=W0223
         '''
         Dispatch runner client commands
         '''
+        full_return = chunk.pop('full_return', False)
         pub_data = self.saltclients['runner'](chunk)
         tag = pub_data['tag'] + '/ret'
         try:
             event = yield self.application.event_listener.get_event(self, tag=tag)

-            # only return the return data
-            raise tornado.gen.Return(event['data']['return'])
+            ret = event if full_return else event['data']['return']
+            raise tornado.gen.Return(ret)
         except TimeoutException:
             raise tornado.gen.Return('Timeout waiting for runner to execute')
@@ -518,3 +518,17 @@ def salt_extend(extension, name, description, salt_dir, merge):
                            description=description,
                            salt_dir=salt_dir,
                            merge=merge)
+
+
+def salt_support():
+    '''
+    Run salt-support, which collects system data, logs etc. for debug and support purposes.
+    :return:
+    '''
+
+    import salt.cli.support.collector
+    if '' in sys.path:
+        sys.path.remove('')
+    client = salt.cli.support.collector.SaltSupport()
+    _install_signal_handlers(client)
+    client.run()
@@ -1431,25 +1431,19 @@ def extracted(name,
                 dir_result = __states__['file.directory'](full_path,
                                                           user=user,
                                                           group=group,
-                                                          recurse=recurse,
-                                                          test=__opts__['test'])
+                                                          recurse=recurse)
                 log.debug('file.directory: %s', dir_result)

-                if __opts__['test']:
-                    if dir_result.get('pchanges'):
-                        ret['changes']['updated ownership'] = True
-                else:
-                    try:
-                        if dir_result['result']:
-                            if dir_result['changes']:
-                                ret['changes']['updated ownership'] = True
-                        else:
-                            enforce_failed.append(full_path)
-                    except (KeyError, TypeError):
-                        log.warning(
-                            'Bad state return %s for file.directory state on %s',
-                            dir_result, dirname
-                        )
+                if dir_result.get('changes'):
+                    ret['changes']['updated ownership'] = True
+                try:
+                    if not dir_result['result']:
+                        enforce_failed.append(full_path)
+                except (KeyError, TypeError):
+                    log.warning(
+                        'Bad state return %s for file.directory state on %s',
+                        dir_result, dirname
+                    )

         for filename in enforce_files + enforce_links:
             full_path = os.path.join(name, filename)
@@ -135,7 +135,7 @@ def present(
     if __opts__['test']:
         ret['result'] = None
         ret['comment'] = 'Distribution {0} set for creation.'.format(name)
-        ret['pchanges'] = {'old': None, 'new': name}
+        ret['changes'] = {'old': None, 'new': name}
         return ret

     res = __salt__['boto_cloudfront.create_distribution'](
@@ -203,7 +203,7 @@ def present(
             'Distribution {0} set for new config:'.format(name),
             changes_diff,
         ])
-        ret['pchanges'] = {'diff': changes_diff}
+        ret['changes'] = {'diff': changes_diff}
        return ret

     res = __salt__['boto_cloudfront.update_distribution'](
@@ -282,7 +282,7 @@ def object_present(
         ret['result'] = None
         ret['comment'] = 'S3 object {0} set to be {1}d.'.format(name, action)
         ret['comment'] += '\nChanges:\n{0}'.format(changes_diff)
-        ret['pchanges'] = {'diff': changes_diff}
+        ret['changes'] = {'diff': changes_diff}
         return ret

     r = __salt__['boto_s3.upload_file'](
@@ -136,7 +136,7 @@ def present(
         ret['comment'].append(
             'SQS queue {0} is set to be created.'.format(name),
         )
-        ret['pchanges'] = {'old': None, 'new': name}
+        ret['changes'] = {'old': None, 'new': name}
         return ret

     r = __salt__['boto_sqs.create'](
@@ -225,7 +225,7 @@ def present(
                 attributes_diff,
             )
         )
-        ret['pchanges'] = {'attributes': {'diff': attributes_diff}}
+        ret['changes'] = {'attributes': {'diff': attributes_diff}}
         return ret

     r = __salt__['boto_sqs.set_attributes'](
@@ -300,7 +300,7 @@ def absent(
     if __opts__['test']:
         ret['result'] = None
         ret['comment'] = 'SQS queue {0} is set to be removed.'.format(name)
-        ret['pchanges'] = {'old': name, 'new': None}
+        ret['changes'] = {'old': name, 'new': None}
         return ret

     r = __salt__['boto_sqs.delete'](
@@ -336,7 +336,6 @@ def upgraded(name,
     ret = {'name': name,
            'result': True,
            'changes': {},
-           'pchanges': {},
            'comment': ''}

     # Get list of currently installed packages
@@ -346,12 +345,10 @@ def upgraded(name,
     # Package not installed
     if name.lower() not in [package.lower() for package in pre_install.keys()]:
         if version:
-            ret['pchanges'] = {
-                name: 'Version {0} will be installed'.format(version)
-            }
+            ret['changes'][name] = 'Version {0} will be installed'.format(version)
             ret['comment'] = 'Install version {0}'.format(version)
         else:
-            ret['pchanges'] = {name: 'Latest version will be installed'}
+            ret['changes'][name] = 'Latest version will be installed'
             ret['comment'] = 'Install latest version'

     # Package installed
@@ -378,8 +375,7 @@ def upgraded(name,
                     oper="==",
                     ver2=version):
                 if force:
-                    ret['pchanges'] = {
-                        name: 'Version {0} will be reinstalled'.format(version)}
+                    ret['changes'][name] = 'Version {0} will be reinstalled'.format(version)
                     ret['comment'] = 'Reinstall {0} {1}'.format(full_name, version)
                 else:
                     ret['comment'] = '{0} {1} is already installed'.format(
@@ -389,11 +385,9 @@ def upgraded(name,
             # If installed version is older than new version
             if salt.utils.versions.compare(
                     ver1=installed_version, oper="<", ver2=version):
-                ret['pchanges'] = {
-                    name: 'Version {0} will be upgraded to Version {1}'.format(
-                        installed_version, version
-                    )
-                }
+                ret['changes'][name] = 'Version {0} will be upgraded to Version {1}'.format(
+                    installed_version, version
+                )
                 ret['comment'] = 'Upgrade {0} {1} to {2}'.format(
                     full_name, installed_version, version
                 )
@@ -409,13 +403,13 @@ def upgraded(name,
     else:
         ret['comment'] = 'No version found to install'

-    # Return if `test=True`
-    if __opts__['test']:
-        ret['result'] = None
+    # Return if there are no changes to be made
+    if not ret['changes']:
         return ret

-    # Return if there are no changes to be made
-    if not ret['pchanges']:
+    # Return if running in test mode
+    if __opts__['test']:
+        ret['result'] = None
         return ret

     # Install the package
@@ -439,6 +433,9 @@ def upgraded(name,
     # Get list of installed packages after 'chocolatey.install'
     post_install = __salt__['chocolatey.list'](local_only=True)

+    # Prior to this, ret['changes'] would have contained expected changes;
+    # replace them with the actual changes now that the installation has
+    # completed.
     ret['changes'] = salt.utils.data.compare_dicts(pre_install, post_install)

     return ret
@@ -401,13 +401,11 @@ def dvs_configured(name, dvs):
                             ''.format(dvs_name, datacenter_name)),
                 'result': True})
     else:
-        ret.update({'comment': '\n'.join(comments)})
-        if __opts__['test']:
-            ret.update({'pchanges': changes,
-                        'result': None})
-        else:
-            ret.update({'changes': changes,
-                        'result': True})
+        ret.update({
+            'comment': '\n'.join(comments),
+            'changes': changes,
+            'result': None if __opts__['test'] else True,
+        })
     return ret


@@ -512,8 +510,10 @@ def portgroups_configured(name, dvs, portgroups):
     log.info('Running state {0} on DVS \'{1}\', datacenter '
              '\'{2}\''.format(name, dvs, datacenter))
     changes_required = False
-    ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
-           'pchanges': {}}
+    ret = {'name': name,
+           'changes': {},
+           'result': None,
+           'comment': None}
     comments = []
     changes = {}
     changes_required = False
@@ -623,13 +623,11 @@ def portgroups_configured(name, dvs, portgroups):
                             'Nothing to be done.'.format(dvs, datacenter)),
                 'result': True})
     else:
-        ret.update({'comment': '\n'.join(comments)})
-        if __opts__['test']:
-            ret.update({'pchanges': changes,
-                        'result': None})
-        else:
-            ret.update({'changes': changes,
-                        'result': True})
+        ret.update({
+            'comment': '\n'.join(comments),
+            'changes': changes,
+            'result': None if __opts__['test'] else True,
+        })
     return ret


@@ -649,8 +647,10 @@ def uplink_portgroup_configured(name, dvs, uplink_portgroup):
     log.info('Running {0} on DVS \'{1}\', datacenter \'{2}\''
             ''.format(name, dvs, datacenter))
     changes_required = False
-    ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
-           'pchanges': {}}
+    ret = {'name': name,
+           'changes': {},
+           'result': None,
+           'comment': None}
     comments = []
     changes = {}
     changes_required = False
@@ -708,11 +708,9 @@ def uplink_portgroup_configured(name, dvs, uplink_portgroup):
                             'Nothing to be done.'.format(dvs, datacenter)),
                 'result': True})
     else:
-        ret.update({'comment': '\n'.join(comments)})
-        if __opts__['test']:
-            ret.update({'pchanges': changes,
-                        'result': None})
-        else:
-            ret.update({'changes': changes,
-                        'result': True})
+        ret.update({
+            'comment': '\n'.join(comments),
+            'changes': changes,
+            'result': None if __opts__['test'] else True,
+        })
     return ret
@@ -89,11 +89,11 @@ def datacenter_configured(name):
     dc_name = name
     log.info('Running datacenter_configured for datacenter \'{0}\''
              ''.format(dc_name))
-    ret = {'name': name, 'changes': {}, 'pchanges': {},
-           'result': None, 'comment': 'Default'}
+    ret = {'name': name,
+           'changes': {},
+           'result': None,
+           'comment': 'Default'}
     comments = []
-    changes = {}
-    pchanges = {}
     si = None
     try:
         si = __salt__['vsphere.get_service_instance_via_proxy']()
@@ -103,27 +103,19 @@ def datacenter_configured(name):
             if __opts__['test']:
                 comments.append('State will create '
                                 'datacenter \'{0}\'.'.format(dc_name))
-                log.info(comments[-1])
-                pchanges.update({'new': {'name': dc_name}})
             else:
                 log.debug('Creating datacenter \'{0}\'. '.format(dc_name))
                 __salt__['vsphere.create_datacenter'](dc_name, si)
                 comments.append('Created datacenter \'{0}\'.'.format(dc_name))
-                log.info(comments[-1])
-                changes.update({'new': {'name': dc_name}})
+            log.info(comments[-1])
+            ret['changes'].update({'new': {'name': dc_name}})
         else:
             comments.append('Datacenter \'{0}\' already exists. Nothing to be '
                             'done.'.format(dc_name))
             log.info(comments[-1])
         __salt__['vsphere.disconnect'](si)
-        if __opts__['test'] and pchanges:
-            ret_status = None
-        else:
-            ret_status = True
-        ret.update({'result': ret_status,
-                    'comment': '\n'.join(comments),
-                    'changes': changes,
-                    'pchanges': pchanges})
+        ret['comment'] = '\n'.join(comments)
+        ret['result'] = None if __opts__['test'] and ret['changes'] else True
         return ret
     except salt.exceptions.CommandExecutionError as exc:
         log.error('Error: {}'.format(exc))
@@ -1070,8 +1070,10 @@ def diskgroups_configured(name, diskgroups, erase_disks=False):
         else proxy_details['esxi_host']
     log.info('Running state {0} for host \'{1}\''.format(name, hostname))
     # Variable used to return the result of the invocation
-    ret = {'name': name, 'result': None, 'changes': {},
-           'pchanges': {}, 'comments': None}
+    ret = {'name': name,
+           'result': None,
+           'changes': {},
+           'comments': None}
     # Signals if errors have been encountered
     errors = False
     # Signals if changes are required
@@ -1294,12 +1296,8 @@ def diskgroups_configured(name, diskgroups, erase_disks=False):
               None if __opts__['test'] else  # running in test mode
               False if errors else True)    # found errors; defaults to True
     ret.update({'result': result,
-                'comment': '\n'.join(comments)})
-    if changes:
-        if __opts__['test']:
-            ret['pchanges'] = diskgroup_changes
-        elif changes:
-            ret['changes'] = diskgroup_changes
+                'comment': '\n'.join(comments),
+                'changes': diskgroup_changes})
     return ret


@@ -1387,8 +1385,10 @@ def host_cache_configured(name, enabled, datastore, swap_size='100%',
         else proxy_details['esxi_host']
     log.trace('hostname = %s', hostname)
     log.info('Running host_cache_swap_configured for host \'%s\'', hostname)
-    ret = {'name': hostname, 'comment': 'Default comments',
-           'result': None, 'changes': {}, 'pchanges': {}}
+    ret = {'name': hostname,
+           'comment': 'Default comments',
+           'result': None,
+           'changes': {}}
     result = None if __opts__['test'] else True  # We assume success
     needs_setting = False
     comments = []
@@ -1582,11 +1582,8 @@ def host_cache_configured(name, enabled, datastore, swap_size='100%',
         __salt__['vsphere.disconnect'](si)
         log.info(comments[-1])
         ret.update({'comment': '\n'.join(comments),
-                    'result': result})
-        if __opts__['test']:
-            ret['pchanges'] = changes
-        else:
-            ret['changes'] = changes
+                    'result': result,
+                    'changes': changes})
         return ret
     except CommandExecutionError as err:
         log.error('Error: %s.', err)
@@ -950,16 +950,25 @@ def _check_touch(name, atime, mtime):
     '''
     Check to see if a file needs to be updated or created
     '''
+    ret = {
+        'result': None,
+        'comment': '',
+        'changes': {'new': name},
+    }
     if not os.path.exists(name):
-        return None, 'File {0} is set to be created'.format(name)
-    stats = __salt__['file.stats'](name, follow_symlinks=False)
-    if atime is not None:
-        if six.text_type(atime) != six.text_type(stats['atime']):
-            return None, 'Times set to be updated on file {0}'.format(name)
-    if mtime is not None:
-        if six.text_type(mtime) != six.text_type(stats['mtime']):
-            return None, 'Times set to be updated on file {0}'.format(name)
-    return True, 'File {0} exists and has the correct times'.format(name)
+        ret['comment'] = 'File {0} is set to be created'.format(name)
+    else:
+        stats = __salt__['file.stats'](name, follow_symlinks=False)
+        if ((atime is not None
+             and six.text_type(atime) != six.text_type(stats['atime'])) or
+            (mtime is not None
+             and six.text_type(mtime) != six.text_type(stats['mtime']))):
+            ret['comment'] = 'Times set to be updated on file {0}'.format(name)
+            ret['changes'] = {'touched': name}
+        else:
+            ret['result'] = True
+            ret['comment'] = 'File {0} exists and has the correct times'.format(name)
+    return ret
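
The helper now returns one state-style dict instead of a (result, comment) tuple, so callers can merge it straight into their own ret. Illustrative values (the path is sample data; note that the default {'new': name} changes entry carries through in the up-to-date branch):

# Missing file: creation pending; result None means "would change".
{'result': None, 'comment': 'File /tmp/x is set to be created', 'changes': {'new': '/tmp/x'}}
# Existing file with correct times: nothing to do.
{'result': True, 'comment': 'File /tmp/x exists and has the correct times', 'changes': {'new': '/tmp/x'}}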
@@ -1006,36 +1015,36 @@ def _symlink_check(name, target, force, user, group, win_owner):
     '''
     Check the symlink function
     '''
-    pchanges = {}
+    changes = {}
     if not os.path.exists(name) and not __salt__['file.is_link'](name):
-        pchanges['new'] = name
+        changes['new'] = name
         return None, 'Symlink {0} to {1} is set for creation'.format(
             name, target
-        ), pchanges
+        ), changes
     if __salt__['file.is_link'](name):
         if __salt__['file.readlink'](name) != target:
-            pchanges['change'] = name
+            changes['change'] = name
             return None, 'Link {0} target is set to be changed to {1}'.format(
                 name, target
-            ), pchanges
+            ), changes
         else:
             result = True
             msg = 'The symlink {0} is present'.format(name)
             if not _check_symlink_ownership(name, user, group, win_owner):
                 result = None
-                pchanges['ownership'] = '{0}:{1}'.format(*_get_symlink_ownership(name))
+                changes['ownership'] = '{0}:{1}'.format(*_get_symlink_ownership(name))
                 msg += (
                     ', but the ownership of the symlink would be changed '
                     'from {2}:{3} to {0}:{1}'
                 ).format(user, group, *_get_symlink_ownership(name))
-            return result, msg, pchanges
+            return result, msg, changes
     else:
         if force:
             return None, ('The file or directory {0} is set for removal to '
                           'make way for a new symlink targeting {1}'
-                          .format(name, target)), pchanges
+                          .format(name, target)), changes
         return False, ('File or directory exists where the symlink {0} '
-                       'should be. Did you mean to use force?'.format(name)), pchanges
+                       'should be. Did you mean to use force?'.format(name)), changes


 def _test_owner(kwargs, user=None):
@@ -1197,12 +1206,12 @@ def _shortcut_check(name,
     '''
     Check the shortcut function
     '''
-    pchanges = {}
+    changes = {}
     if not os.path.exists(name):
-        pchanges['new'] = name
+        changes['new'] = name
         return None, 'Shortcut "{0}" to "{1}" is set for creation'.format(
             name, target
-        ), pchanges
+        ), changes

     if os.path.isfile(name):
         shell = win32com.client.Dispatch("WScript.Shell")
@@ -1222,28 +1231,28 @@ def _shortcut_check(name,
         )

         if not all(state_checks):
-            pchanges['change'] = name
+            changes['change'] = name
             return None, 'Shortcut "{0}" target is set to be changed to "{1}"'.format(
                 name, target
-            ), pchanges
+            ), changes
         else:
             result = True
             msg = 'The shortcut "{0}" is present'.format(name)
             if not _check_shortcut_ownership(name, user):
                 result = None
-                pchanges['ownership'] = '{0}'.format(_get_shortcut_ownership(name))
+                changes['ownership'] = '{0}'.format(_get_shortcut_ownership(name))
                 msg += (
                     ', but the ownership of the shortcut would be changed '
                     'from {1} to {0}'
                 ).format(user, _get_shortcut_ownership(name))
-            return result, msg, pchanges
+            return result, msg, changes
     else:
         if force:
             return None, ('The link or directory "{0}" is set for removal to '
                           'make way for a new shortcut targeting "{1}"'
                           .format(name, target)), changes
         return False, ('Link or directory exists where the shortcut "{0}" '
-                       'should be. Did you mean to use force?'.format(name)), pchanges
+                       'should be. Did you mean to use force?'.format(name)), changes


 def _makedirs(name,
@@ -1473,12 +1482,12 @@ def symlink(
             msg += '.'
         return _error(ret, msg)

-    presult, pcomment, ret['pchanges'] = _symlink_check(name,
-                                                        target,
-                                                        force,
-                                                        user,
-                                                        group,
-                                                        win_owner)
+    presult, pcomment, pchanges = _symlink_check(name,
+                                                 target,
+                                                 force,
+                                                 user,
+                                                 group,
+                                                 win_owner)

     if not os.path.isdir(os.path.dirname(name)):
         if makedirs:
@@ -1511,6 +1520,7 @@ def symlink(
     if __opts__['test']:
         ret['result'] = presult
         ret['comment'] = pcomment
+        ret['changes'] = pchanges
         return ret

     if __salt__['file.is_link'](name):
@@ -1633,7 +1643,6 @@ def absent(name,

     ret = {'name': name,
            'changes': {},
-           'pchanges': {},
            'result': True,
            'comment': ''}
     if not name:
@@ -1645,9 +1654,9 @@ def absent(name,
     if name == '/':
         return _error(ret, 'Refusing to make "/" absent')
     if os.path.isfile(name) or os.path.islink(name):
-        ret['pchanges']['removed'] = name
         if __opts__['test']:
             ret['result'] = None
+            ret['changes']['removed'] = name
             ret['comment'] = 'File {0} is set for removal'.format(name)
             return ret
         try:
@@ -1662,9 +1671,9 @@ def absent(name,
             return _error(ret, '{0}'.format(exc))

     elif os.path.isdir(name):
-        ret['pchanges']['removed'] = name
         if __opts__['test']:
             ret['result'] = None
+            ret['changes']['removed'] = name
             ret['comment'] = 'Directory {0} is set for removal'.format(name)
             return ret
         try:
@@ -1726,7 +1735,6 @@ def tidied(name,

     ret = {'name': name,
            'changes': {},
-           'pchanges': {},
            'result': True,
            'comment': ''}

@@ -1823,7 +1831,6 @@ def exists(name,

     ret = {'name': name,
            'changes': {},
-           'pchanges': {},
            'result': True,
            'comment': ''}
     if not name:
@@ -1848,7 +1855,6 @@ def missing(name,

     ret = {'name': name,
            'changes': {},
-           'pchanges': {},
            'result': True,
            'comment': ''}
     if not name:
@@ -2457,7 +2463,6 @@ def managed(name,
     name = os.path.expanduser(name)

     ret = {'changes': {},
-           'pchanges': {},
            'comment': '',
            'name': name,
            'result': True}
@@ -2700,7 +2705,7 @@ def managed(name,
         try:
             if __opts__['test']:
                 if 'file.check_managed_changes' in __salt__:
-                    ret['pchanges'] = __salt__['file.check_managed_changes'](
+                    ret['changes'] = __salt__['file.check_managed_changes'](
                         name,
                         source,
                         source_hash,
@@ -2731,15 +2736,15 @@ def managed(name,
                             reset=win_perms_reset)
                 except CommandExecutionError as exc:
                     if exc.strerror.startswith('Path not found'):
-                        ret['pchanges'] = '{0} will be created'.format(name)
+                        ret['changes'] = '{0} will be created'.format(name)

-            if isinstance(ret['pchanges'], tuple):
-                ret['result'], ret['comment'] = ret['pchanges']
-            elif ret['pchanges']:
+            if isinstance(ret['changes'], tuple):
+                ret['result'], ret['comment'] = ret['changes']
+            elif ret['changes']:
                 ret['result'] = None
                 ret['comment'] = 'The file {0} is set to be changed'.format(name)
-                if 'diff' in ret['pchanges'] and not show_changes:
-                    ret['pchanges']['diff'] = '<show_changes=False>'
+                if 'diff' in ret['changes'] and not show_changes:
+                    ret['changes']['diff'] = '<show_changes=False>'
             else:
                 ret['result'] = True
                 ret['comment'] = 'The file {0} is in the correct state'.format(name)
@@ -3181,7 +3186,6 @@ def directory(name,
     name = os.path.normcase(os.path.expanduser(name))
     ret = {'name': name,
            'changes': {},
-           'pchanges': {},
            'result': True,
            'comment': ''}
     if not name:
@@ -3255,19 +3259,19 @@ def directory(name,
             # Remove whatever is in the way
             if os.path.isfile(name):
                 if __opts__['test']:
-                    ret['pchanges']['forced'] = 'File was forcibly replaced'
+                    ret['changes']['forced'] = 'File would be forcibly replaced'
                 else:
                     os.remove(name)
                     ret['changes']['forced'] = 'File was forcibly replaced'
|
||||
elif __salt__['file.is_link'](name):
|
||||
if __opts__['test']:
|
||||
ret['pchanges']['forced'] = 'Symlink was forcibly replaced'
|
||||
ret['changes']['forced'] = 'Symlink would be forcibly replaced'
|
||||
else:
|
||||
__salt__['file.remove'](name)
|
||||
ret['changes']['forced'] = 'Symlink was forcibly replaced'
|
||||
else:
|
||||
if __opts__['test']:
|
||||
ret['pchanges']['forced'] = 'Directory was forcibly replaced'
|
||||
ret['changes']['forced'] = 'Directory would be forcibly replaced'
|
||||
else:
|
||||
__salt__['file.remove'](name)
|
||||
ret['changes']['forced'] = 'Directory was forcibly replaced'
|
||||
@ -3296,11 +3300,11 @@ def directory(name,
|
||||
exclude_pat, max_depth, follow_symlinks)
|
||||
|
||||
if pchanges:
|
||||
ret['pchanges'].update(pchanges)
|
||||
ret['changes'].update(pchanges)
|
||||
|
||||
    # Don't run through the rest of the function if there are no changes to be
    # made
    if not ret['pchanges'] or __opts__['test']:
    if __opts__['test'] or not ret['changes']:
        ret['result'] = presult
        ret['comment'] = pcomment
        return ret
@@ -3415,7 +3419,7 @@ def directory(name,
            dir_mode = None

        if 'silent' in recurse_set:
            ret['pchanges'] = 'Changes silenced'
            ret['changes'] = 'Changes silenced'

        check_files = 'ignore_files' not in recurse_set
        check_dirs = 'ignore_dirs' not in recurse_set
@@ -3743,7 +3747,6 @@ def recurse(name,
    ret = {
        'name': name,
        'changes': {},
        'pchanges': {},
        'result': True,
        'comment': {}  # { path: [comment, ...] }
    }
@@ -4042,7 +4045,6 @@ def retention_schedule(name, retain, strptime_format=None, timezone=None):
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {'retained': [], 'deleted': [], 'ignored': []},
           'pchanges': {'retained': [], 'deleted': [], 'ignored': []},
           'result': True,
           'comment': ''}
    if not name:
@@ -4152,7 +4154,7 @@ def retention_schedule(name, retain, strptime_format=None, timezone=None):
               'deleted': deletable_files,
               'ignored': sorted(list(ignored_files), reverse=True),
               }
    ret['pchanges'] = changes
    ret['changes'] = changes

    # TODO: track and report how much space was / would be reclaimed
    if __opts__['test']:
@@ -4293,7 +4295,6 @@ def line(name, content=None, match=None, mode=None, location=None,
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': True,
           'comment': ''}
    if not name:
@@ -4327,14 +4328,13 @@ def line(name, content=None, match=None, mode=None, location=None,
                                       before=before, after=after, show_changes=show_changes,
                                       backup=backup, quiet=quiet, indent=indent)
    if changes:
        ret['pchanges']['diff'] = changes
        ret['changes']['diff'] = changes
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Changes would be made:\ndiff:\n{0}'.format(changes)
            ret['comment'] = 'Changes would be made'
        else:
            ret['result'] = True
            ret['comment'] = 'Changes were made'
            ret['changes'] = {'diff': changes}
    else:
        ret['result'] = True
        ret['comment'] = 'No changes needed to be made'
@@ -4484,7 +4484,6 @@ def replace(name,

    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': True,
           'comment': ''}
    if not name:
@@ -4514,14 +4513,13 @@ def replace(name,
                                       backslash_literal=backslash_literal)

    if changes:
        ret['pchanges']['diff'] = changes
        ret['changes']['diff'] = changes
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Changes would have been made:\ndiff:\n{0}'.format(changes)
            ret['comment'] = 'Changes would have been made'
        else:
            ret['result'] = True
            ret['comment'] = 'Changes were made'
            ret['changes'] = {'diff': changes}
    else:
        ret['result'] = True
        ret['comment'] = 'No changes needed to be made'
@@ -4757,7 +4755,6 @@ def blockreplace(

    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': False,
           'comment': ''}
    if not name:
@@ -4832,13 +4829,11 @@ def blockreplace(
        return ret

    if changes:
        ret['pchanges'] = {'diff': changes}
        ret['changes']['diff'] = changes
        if __opts__['test']:
            ret['changes']['diff'] = ret['pchanges']['diff']
            ret['result'] = None
            ret['comment'] = 'Changes would be made'
        else:
            ret['changes']['diff'] = ret['pchanges']['diff']
            ret['result'] = True
            ret['comment'] = 'Changes were made'
    else:
@@ -4889,7 +4884,6 @@ def comment(name, regex, char='#', backup='.bak'):

    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': False,
           'comment': ''}
    if not name:
@@ -4919,8 +4913,8 @@ def comment(name, regex, char='#', backup='.bak'):
    else:
        return _error(ret, '{0}: Pattern not found'.format(unanchor_regex))

    ret['pchanges'][name] = 'updated'
    if __opts__['test']:
        ret['changes'][name] = 'updated'
        ret['comment'] = 'File {0} is set to be updated'.format(name)
        ret['result'] = None
        return ret
@@ -4999,7 +4993,6 @@ def uncomment(name, regex, char='#', backup='.bak'):

    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': False,
           'comment': ''}
    if not name:
@@ -5026,26 +5019,20 @@ def uncomment(name, regex, char='#', backup='.bak'):
    else:
        return _error(ret, '{0}: Pattern not found'.format(regex))

    ret['pchanges'][name] = 'updated'
    if __opts__['test']:
        ret['changes'][name] = 'updated'
        ret['comment'] = 'File {0} is set to be updated'.format(name)
        ret['result'] = None
        return ret

    with salt.utils.files.fopen(name, 'rb') as fp_:
        slines = fp_.read()
        if six.PY3:
            slines = slines.decode(__salt_system_encoding__)
        slines = slines.splitlines(True)
        slines = salt.utils.data.decode(fp_.readlines())

    # Perform the edit
    __salt__['file.comment_line'](name, regex, char, False, backup)

    with salt.utils.files.fopen(name, 'rb') as fp_:
        nlines = fp_.read()
        if six.PY3:
            nlines = nlines.decode(__salt_system_encoding__)
        nlines = nlines.splitlines(True)
        nlines = salt.utils.data.decode(fp_.readlines())

    # Check the result
    ret['result'] = __salt__['file.search'](
@@ -5209,10 +5196,9 @@ def append(name,
        .. versionadded:: 0.9.5
    '''
    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': False,
           'comment': ''}
           'changes': {},
           'result': False,
           'comment': ''}

    if not name:
        return _error(ret, 'Must provide name to file.append')
@@ -5243,18 +5229,20 @@ def append(name,
    except CommandExecutionError as exc:
        return _error(ret, 'Drive {0} is not mapped'.format(exc.message))

    if salt.utils.platform.is_windows():
        check_res, check_msg, ret['pchanges'] = _check_directory_win(dirname)
    else:
        check_res, check_msg, ret['pchanges'] = _check_directory(dirname)
    check_res, check_msg, check_changes = _check_directory_win(dirname) \
        if salt.utils.platform.is_windows() \
        else _check_directory(dirname)

    if not check_res:
        ret['changes'] = check_changes
        return _error(ret, check_msg)

    check_res, check_msg = _check_file(name)
    if not check_res:
        # Try to create the file
        touch(name, makedirs=makedirs)
        touch_ret = touch(name, makedirs=makedirs)
        if __opts__['test']:
            return touch_ret
        retry_res, retry_msg = _check_file(name)
        if not retry_res:
            return _error(ret, check_msg)
@@ -5495,7 +5483,6 @@ def prepend(name,

    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': False,
           'comment': ''}
    if not name:
@@ -5525,17 +5512,20 @@ def prepend(name,
    except CommandExecutionError as exc:
        return _error(ret, 'Drive {0} is not mapped'.format(exc.message))

    if salt.utils.platform.is_windows():
        check_res, check_msg, ret['pchanges'] = _check_directory_win(dirname)
    else:
        check_res, check_msg, ret['pchanges'] = _check_directory(dirname)
    check_res, check_msg, check_changes = _check_directory_win(dirname) \
        if salt.utils.platform.is_windows() \
        else _check_directory(dirname)

    if not check_res:
        ret['changes'] = check_changes
        return _error(ret, check_msg)

    check_res, check_msg = _check_file(name)
    if not check_res:
        # Try to create the file
        touch(name, makedirs=makedirs)
        touch_ret = touch(name, makedirs=makedirs)
        if __opts__['test']:
            return touch_ret
        retry_res, retry_msg = _check_file(name)
        if not retry_res:
            return _error(ret, check_msg)
@@ -6116,7 +6106,7 @@ def touch(name, atime=None, mtime=None, makedirs=False):
        )

    if __opts__['test']:
        ret['result'], ret['comment'] = _check_touch(name, atime, mtime)
        ret.update(_check_touch(name, atime, mtime))
        return ret

    if makedirs:
@@ -6394,7 +6384,6 @@ def rename(name, source, force=False, makedirs=False):
        if not force:
            ret['comment'] = ('The target file "{0}" exists and will not be '
                              'overwritten'.format(name))
            ret['result'] = False
            return ret
        elif not __opts__['test']:
            # Remove the destination to prevent problems later
@@ -7386,17 +7375,18 @@ def shortcut(
            msg += '.'
        return _error(ret, msg)

    presult, pcomment, ret['pchanges'] = _shortcut_check(name,
                                                         target,
                                                         arguments,
                                                         working_dir,
                                                         description,
                                                         icon_location,
                                                         force,
                                                         user)
    presult, pcomment, pchanges = _shortcut_check(name,
                                                  target,
                                                  arguments,
                                                  working_dir,
                                                  description,
                                                  icon_location,
                                                  force,
                                                  user)
    if __opts__['test']:
        ret['result'] = presult
        ret['comment'] = pcomment
        ret['changes'] = pchanges
        return ret

    if not os.path.isdir(os.path.dirname(name)):

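A hedged sketch of the net effect of the file.py hunks above (illustration only, not part of the commit): with test=True, the predicted changes now land under the ordinary 'changes' key instead of the removed 'pchanges' key. For a hypothetical file.absent dry run on /tmp/f:

    # Before: {'result': None, 'changes': {},
    #          'pchanges': {'removed': '/tmp/f'}, 'comment': 'File /tmp/f is set for removal', ...}
    # After:  {'result': None,
    #          'changes': {'removed': '/tmp/f'}, 'comment': 'File /tmp/f is set for removal', ...}
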
@@ -52,15 +52,16 @@ def present(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['glanceng.setup_clouds'](auth)

    image = __salt__['glanceng.image_get'](name=name)

    if not image:
        if __opts__['test'] is True:
        if __opts__['test']:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Image {} will be created.'.format(name)
            return ret

@@ -91,10 +92,9 @@ def absent(name, auth=None):
    image = __salt__['glanceng.image_get'](name=name)

    if image:
        if __opts__['test'] is True:
        if __opts__['test']:
            ret['result'] = None
            ret['changes'] = {'name': name}
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Image {} will be deleted.'.format(name)
            return ret

@@ -83,8 +83,11 @@ def _changes(name,
        ret['comment'] = 'Invalid gid'
        return ret

    if members:
        # -- if new member list is different than the current
    if members is not None and not members:
        if set(lgrp['members']).symmetric_difference(members):
            change['delusers'] = set(lgrp['members'])
    elif members:
        # if new member list is different than the current
        if set(lgrp['members']).symmetric_difference(members):
            change['members'] = members

@@ -165,7 +168,7 @@ def present(name,
           'result': True,
           'comment': 'Group {0} is present and up to date'.format(name)}

    if members and (addusers or delusers):
    if members is not None and (addusers is not None or delusers is not None):
        ret['result'] = None
        ret['comment'] = (
            'Error: Conflicting options "members" with "addusers" and/or'

@@ -144,8 +144,7 @@ def latest_active(name, at_time=None, **kwargs):  # pylint: disable=unused-argum

    if __opts__['test']:
        ret['result'] = None
        ret['changes'] = {}
        ret['pchanges'] = {'kernel': {
        ret['changes'] = {'kernel': {
            'old': active,
            'new': latest
        }}

@@ -56,15 +56,16 @@ def present(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['keystoneng.setup_clouds'](auth)

    domain = __salt__['keystoneng.domain_get'](name=name)

    if not domain:
        if __opts__['test'] is True:
        if __opts__['test']:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Domain {} will be created.'.format(name)
            return ret

@@ -76,10 +77,9 @@ def present(name, auth=None, **kwargs):

    changes = __salt__['keystoneng.compare_changes'](domain, **kwargs)
    if changes:
        if __opts__['test'] is True:
        if __opts__['test']:
            ret['result'] = None
            ret['changes'] = changes
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Domain {} will be updated.'.format(name)
            return ret

@@ -111,7 +111,6 @@ def absent(name, auth=None):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = {'name': name}
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Domain {} will be deleted.'.format(name)
            return ret

@@ -101,6 +101,8 @@ def present(name, service_name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['keystoneng.setup_clouds'](auth)

    success, val = _, endpoint = _common(ret, name, service_name, kwargs)
@@ -111,7 +113,6 @@ def present(name, service_name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Endpoint will be created.'
            return ret

@@ -131,7 +132,6 @@ def present(name, service_name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = changes
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Endpoint will be updated.'
            return ret

@@ -174,7 +174,6 @@ def absent(name, service_name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = {'id': endpoint.id}
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Endpoint will be deleted.'
            return ret

@@ -73,6 +73,8 @@ def present(name, auth=None, **kwargs):

    __salt__['keystoneng.setup_cloud'](auth)

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    kwargs['name'] = name
    group = _common(kwargs)

@@ -80,7 +82,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Group will be created.'
            return ret

@@ -94,7 +95,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = changes
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Group will be updated.'
            return ret

@@ -120,6 +120,8 @@ def absent(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['keystoneng.setup_cloud'](auth)

    kwargs['name'] = name
@@ -129,7 +131,6 @@ def absent(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = {'id': group.id}
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Group will be deleted.'
            return ret

@@ -72,6 +72,8 @@ def present(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['keystoneng.setup_clouds'](auth)

    kwargs['name'] = name
@@ -81,7 +83,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Project will be created.'
            return ret

@@ -95,7 +96,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = changes
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Project will be updated.'
            return ret

@@ -121,6 +121,8 @@ def absent(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['keystoneng.setup_clouds'](auth)

    kwargs['name'] = name
@@ -130,7 +132,6 @@ def absent(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = {'id': project.id}
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Project will be deleted.'
            return ret

@@ -52,6 +52,8 @@ def present(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['keystoneng.setup_clouds'](auth)

    kwargs['name'] = name
@@ -61,7 +63,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Role will be created.'
            return ret

@@ -95,7 +96,6 @@ def absent(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = {'id': role.id}
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Role will be deleted.'
            return ret

@@ -61,6 +61,8 @@ def present(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['keystoneng.setup_clouds'](auth)

    service = __salt__['keystoneng.service_get'](name=name)
@@ -69,7 +71,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Service will be created.'
            return ret

@@ -84,7 +85,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = changes
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Service will be updated.'
            return ret

@@ -117,7 +117,6 @@ def absent(name, auth=None):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = {'id': service.id}
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Service will be deleted.'
            return ret

@@ -83,6 +83,8 @@ def present(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['keystoneng.setup_clouds'](auth)

    kwargs['name'] = name
@@ -92,7 +94,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'User will be created.'
            return ret

@@ -106,7 +107,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = changes
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'User will be updated.'
            return ret

@@ -133,6 +133,8 @@ def absent(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['keystoneng.setup_clouds'](auth)

    kwargs['name'] = name
@@ -142,7 +144,6 @@ def absent(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = {'id': user.id}
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'User will be deleted.'
            return ret

@@ -104,7 +104,6 @@ def present(name, acl_type, acl_name='', perms='', recurse=False, force=False):
    ret = {'name': name,
           'result': True,
           'changes': {},
           'pchanges': {},
           'comment': ''}

    _octal = {'r': 4, 'w': 2, 'x': 1, '-': 0}
@@ -172,7 +171,7 @@ def present(name, acl_type, acl_name='', perms='', recurse=False, force=False):
                                       acl_name,
                                       six.text_type(user[_search_name]['octal']),
                                       perms),
                            'result': None, 'pchanges': changes})
                            'result': None, 'changes': changes})
                return ret
            try:
                if force:
@@ -195,7 +194,7 @@ def present(name, acl_type, acl_name='', perms='', recurse=False, force=False):
            if __opts__['test']:
                ret.update({'comment': 'New permissions will be applied for '
                                       '{0}: {1}'.format(acl_name, perms),
                            'result': None, 'pchanges': changes})
                            'result': None, 'changes': changes})
                ret['result'] = None
                return ret

@@ -337,7 +336,6 @@ def list_present(name, acl_type, acl_names=None, perms='', recurse=False, force=
    ret = {'name': name,
           'result': True,
           'changes': {},
           'pchanges': {},
           'comment': ''}

    _octal = {'r': 4, 'w': 2, 'x': 1, '-': 0}
@@ -381,7 +379,6 @@ def list_present(name, acl_type, acl_names=None, perms='', recurse=False, force=
        ret = {'name': name,
               'result': True,
               'changes': {},
               'pchanges': {},
               'comment': 'Permissions and {}s are in the desired state'.format(acl_type)}
        return ret
    # The getfacl execution module lists default with empty names as being
@@ -425,7 +422,7 @@ def list_present(name, acl_type, acl_names=None, perms='', recurse=False, force=
                                       acl_names,
                                       six.text_type(users[search_name]['octal']),
                                       perms),
                            'result': None, 'pchanges': changes})
                            'result': None, 'changes': changes})
                return ret
            try:
                if force:
@@ -449,7 +446,7 @@ def list_present(name, acl_type, acl_names=None, perms='', recurse=False, force=
            if __opts__['test']:
                ret.update({'comment': 'New permissions will be applied for '
                                       '{0}: {1}'.format(acl_names, perms),
                            'result': None, 'pchanges': changes})
                            'result': None, 'changes': changes})
                ret['result'] = None
                return ret

@@ -476,7 +473,7 @@ def list_present(name, acl_type, acl_names=None, perms='', recurse=False, force=
            if __opts__['test']:
                ret.update({'comment': 'New permissions will be applied for '
                                       '{0}: {1}'.format(acl_names, perms),
                            'result': None, 'pchanges': changes})
                            'result': None, 'changes': changes})
                ret['result'] = None
                return ret

@@ -95,8 +95,6 @@ def managed(name,

    compliance_report: ``False``
        Return the compliance report in the comment.
        The compliance report structured object can be found however
        in the ``pchanges`` field of the output (not displayed on the CLI).

        .. versionadded:: 2017.7.3

@@ -72,6 +72,8 @@ def present(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['neutronng.setup_clouds'](auth)

    kwargs['name'] = name
@@ -81,7 +83,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Network will be created.'
            return ret

@@ -115,7 +116,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = changes
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Project will be updated.'
            return ret

@@ -140,6 +140,8 @@ def absent(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['neutronng.setup_clouds'](auth)

    kwargs['name'] = name
@@ -149,7 +151,6 @@ def absent(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = {'id': network.id}
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Network will be deleted.'
            return ret

@@ -74,6 +74,8 @@ def present(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['neutronng.setup_clouds'](auth)

    if 'project_name' in kwargs:
@@ -95,7 +97,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Security Group will be created.'
            return ret

@@ -109,7 +110,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = changes
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Security Group will be updated.'
            return ret

@@ -133,6 +133,8 @@ def absent(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['neutronng.setup_clouds'](auth)

    kwargs['project_id'] = __salt__['keystoneng.project_get'](
@@ -147,7 +149,6 @@ def absent(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = {'id': secgroup.id}
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Security group will be deleted.'
            return ret

@@ -77,6 +77,8 @@ def present(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['neutronng.setup_clouds'](auth)

    if 'project_name' in kwargs:
@@ -112,7 +114,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Security Group rule will be created.'
            return ret

@@ -166,10 +167,9 @@ def absent(name, auth=None, **kwargs):
            rule_exists = True

    if rule_exists:
        if __opts__['test'] is True:
        if __opts__['test']:
            ret['result'] = None
            ret['changes'] = {'id': kwargs['rule_id']}
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Security group rule will be deleted.'
            return ret

@@ -96,16 +96,17 @@ def present(name, auth=None, **kwargs):
           'result': True,
           'comment': ''}

    kwargs = __utils__['args.clean_kwargs'](**kwargs)

    __salt__['neutronng.setup_clouds'](auth)

    kwargs['subnet_name'] = name
    subnet = __salt__['neutronng.subnet_get'](name=name)

    if subnet is None:
        if __opts__['test'] is True:
        if __opts__['test']:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Subnet will be created.'
            return ret

@@ -119,7 +120,6 @@ def present(name, auth=None, **kwargs):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = changes
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Project will be updated.'
            return ret

@@ -160,7 +160,6 @@ def absent(name, auth=None):
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = {'id': subnet.id}
            ret['pchanges'] = ret['changes']
            ret['comment'] = 'Project will be deleted.'
            return ret

@@ -156,8 +156,10 @@ def default_vsan_policy_configured(name, policy):
                           '\'{1}\''.format(name, vcenter))
    log.trace('policy = {0}'.format(policy))
    changes_required = False
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
           'pchanges': {}}
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': None}
    comments = []
    changes = {}
    changes_required = False
@@ -266,13 +268,11 @@ def default_vsan_policy_configured(name, policy):
                               'Nothing to be done.'.format(vcenter)),
                    'result': True})
    else:
        ret.update({'comment': '\n'.join(comments)})
        if __opts__['test']:
            ret.update({'pchanges': changes,
                        'result': None})
        else:
            ret.update({'changes': changes,
                        'result': True})
        ret.update({
            'comment': '\n'.join(comments),
            'changes': changes,
            'result': None if __opts__['test'] else True,
        })
    return ret


@@ -286,8 +286,10 @@ def storage_policies_configured(name, policies):
    comments = []
    changes = []
    changes_required = False
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
           'pchanges': {}}
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': None}
    log.trace('policies = {0}'.format(policies))
    si = None
    try:
@@ -430,13 +432,11 @@ def storage_policies_configured(name, policies):
                               'Nothing to be done.'.format(vcenter)),
                    'result': True})
    else:
        ret.update({'comment': '\n'.join(comments)})
        if __opts__['test']:
            ret.update({'pchanges': {'storage_policies': changes},
                        'result': None})
        else:
            ret.update({'changes': {'storage_policies': changes},
                        'result': True})
        ret.update({
            'comment': '\n'.join(comments),
            'changes': {'storage_policies': changes},
            'result': None if __opts__['test'] else True,
        })
    return ret


@@ -454,8 +454,10 @@ def default_storage_policy_assigned(name, policy, datastore):
                ''.format(name, policy, datastore))
    changes = {}
    changes_required = False
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': None,
           'pchanges': {}}
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': None}
    si = None
    try:
        si = __salt__['vsphere.get_service_instance_via_proxy']()
@@ -488,14 +490,13 @@ def default_storage_policy_assigned(name, policy, datastore):
        ret.update({'comment': exc.strerror,
                    'result': False if not __opts__['test'] else None})
        return ret

    ret['comment'] = comment
    if changes_required:
        if __opts__['test']:
            ret.update({'result': None,
                        'pchanges': changes})
        else:
            ret.update({'result': True,
                        'changes': changes})
        ret.update({
            'changes': changes,
            'result': None if __opts__['test'] else True,
        })
    else:
        ret['result'] = True
    return ret

@@ -385,7 +385,6 @@ def present(name,
    ret = {'name': name,
           'result': True,
           'changes': {},
           'pchanges': {},
           'comment': ''}

    hive, key = _parse_key(name)

@@ -444,7 +444,8 @@ def function(
        kwarg=None,
        timeout=None,
        batch=None,
        subset=None):
        subset=None,
        **kwargs):  # pylint: disable=unused-argument
    '''
    Execute a single module function on a remote minion via salt or salt-ssh

@@ -495,15 +496,15 @@ def function(

    '''
    func_ret = {'name': name,
                       'changes': {},
                       'comment': '',
                       'result': True}
                'changes': {},
                'comment': '',
                'result': True}
    if kwarg is None:
        kwarg = {}
    if isinstance(arg, six.string_types):
        func_ret['warnings'] = ['Please specify \'arg\' as a list, not a string. '
                                'Modifying in place, but please update SLS file '
                                'to remove this warning.']
        func_ret['warnings'] = [
            'Please specify \'arg\' as a list of arguments.'
        ]
        arg = arg.split()

    cmd_kw = {'arg': arg or [], 'kwarg': kwarg, 'ret': ret, 'timeout': timeout}
@@ -526,9 +527,8 @@ def function(

    fun = name
    if __opts__['test'] is True:
        func_ret['comment'] = (
            'Function {0} will be executed on target {1} as test={2}'
        ).format(fun, tgt, six.text_type(False))
        func_ret['comment'] = \
            'Function {0} would be executed on target {1}'.format(fun, tgt)
        func_ret['result'] = None
        return func_ret
    try:
@@ -768,7 +768,7 @@ def runner(name, **kwargs):
    return ret


def parallel_runners(name, runners):
def parallel_runners(name, runners, **kwargs):  # pylint: disable=unused-argument
    '''
    Executes multiple runner modules on the master in parallel.

@@ -199,8 +199,7 @@ def baseline_snapshot(name, number=None, tag=None, include_diff=True, config='ro
                                                  filename=file).get(file, {}))

    if __opts__['test'] and status:
        ret['pchanges'] = status
        ret['changes'] = ret['pchanges']
        ret['changes'] = status
        ret['comment'] = "{0} files changes are set to be undone".format(len(status.keys()))
        ret['result'] = None
    elif __opts__['test'] and not status:

@@ -34,10 +34,9 @@ def alias(name, collections, **kwargs):
        'changes': {},
        'result': False,
        'comment': '',
        'pchanges': {},
    }

    if __salt__["solrcloud.alias_exists"](name, **kwargs):
    if __salt__['solrcloud.alias_exists'](name, **kwargs):
        alias_content = __salt__['solrcloud.alias_get_collections'](name, **kwargs)
        diff = set(alias_content).difference(set(collections))

@@ -48,38 +47,31 @@ def alias(name, collections, **kwargs):

        if __opts__['test']:
            ret['comment'] = 'The alias "{0}" will be updated.'.format(name)
            ret['pchanges'] = {
                'old': ",".join(alias_content),
                'new': ",".join(collections)
            }
            ret['result'] = None
        else:
            __salt__["solrcloud.alias_set_collections"](name, collections, **kwargs)
            __salt__['solrcloud.alias_set_collections'](name, collections, **kwargs)
            ret['comment'] = 'The alias "{0}" has been updated.'.format(name)
            ret['changes'] = {
                'old': ",".join(alias_content),
                'new': ",".join(collections)
            }

            ret['result'] = True

        ret['changes'] = {
            'old': ','.join(alias_content),
            'new': ','.join(collections),
        }

    else:
        if __opts__['test']:
            ret['comment'] = 'The alias "{0}" will be created.'.format(name)
            ret['pchanges'] = {
                'old': None,
                'new': ",".join(collections)
            }
            ret['result'] = None
        else:
            __salt__["solrcloud.alias_set_collections"](name, collections, **kwargs)
            __salt__['solrcloud.alias_set_collections'](name, collections, **kwargs)
            ret['comment'] = 'The alias "{0}" has been created.'.format(name)
            ret['changes'] = {
                'old': None,
                'new': ",".join(collections)
            }

            ret['result'] = True

        ret['changes'] = {
            'old': None,
            'new': ','.join(collections),
        }

    return ret


@@ -101,7 +93,6 @@ def collection(name, options=None, **kwargs):
        'changes': {},
        'result': False,
        'comment': '',
        'pchanges': {},
    }

    if options is None:
@@ -137,42 +128,32 @@ def collection(name, options=None, **kwargs):

        if __opts__['test']:
            ret['comment'] = 'Collection options "{0}" will be changed.'.format(name)
            ret['pchanges'] = {
                'old': salt.utils.json.dumps(current_options, sort_keys=True, indent=4, separators=(',', ': ')),
                'new': salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))
            }
            ret['result'] = None

            return ret
        else:
            __salt__["solrcloud.collection_set_options"](name, diff, **kwargs)

            __salt__['solrcloud.collection_set_options'](name, diff, **kwargs)
            ret['comment'] = 'Parameters were updated for collection "{0}".'.format(name)
            ret['result'] = True
            ret['changes'] = {
                'old': salt.utils.json.dumps(current_options, sort_keys=True, indent=4, separators=(',', ': ')),
                'new': salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))
            }

            return ret
        ret['changes'] = {
            'old': salt.utils.json.dumps(current_options, sort_keys=True, indent=4, separators=(',', ': ')),
            'new': salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))
        }
        return ret

    else:

        new_changes = salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))
        if __opts__['test']:
            ret['comment'] = 'The collection "{0}" will be created.'.format(name)
            ret['pchanges'] = {
                'old': None,
                'new': str('options=') + new_changes  # future lint: disable=blacklisted-function
            }
            ret['result'] = None
        else:
            __salt__["solrcloud.collection_create"](name, options, **kwargs)
            ret['comment'] = 'The collection "{0}" has been created.'.format(name)
            ret['changes'] = {
                'old': None,
                'new': str('options=') + new_changes  # future lint: disable=blacklisted-function
            }

            ret['result'] = True

        ret['changes'] = {
            'old': None,
            'new': str('options=') + new_changes  # future lint: disable=blacklisted-function
        }

    return ret

@@ -67,7 +67,7 @@ def nop(name, **kwargs):
    return succeed_without_changes(name)


def succeed_without_changes(name):
def succeed_without_changes(name, **kwargs):  # pylint: disable=unused-argument
    '''
    Returns successful.

@@ -85,7 +85,7 @@ def succeed_without_changes(name):
    return ret


def fail_without_changes(name):
def fail_without_changes(name, **kwargs):  # pylint: disable=unused-argument
    '''
    Returns failure.

@@ -108,7 +108,7 @@ def fail_without_changes(name):
    return ret


def succeed_with_changes(name):
def succeed_with_changes(name, **kwargs):  # pylint: disable=unused-argument
    '''
    Returns successful and changes is not empty

@@ -141,7 +141,7 @@ def succeed_with_changes(name):
    return ret


def fail_with_changes(name):
def fail_with_changes(name, **kwargs):  # pylint: disable=unused-argument
    '''
    Returns failure and changes is not empty.

@@ -27,8 +27,7 @@ def installed(name,
              recurse=False,
              restart=False,
              source=None,
              exclude=None,
              **kwargs):
              exclude=None):
    '''
    Install the windows feature. To install a single feature, use the ``name``
    parameter. To install multiple features, use the ``features`` parameter.
@@ -113,15 +112,6 @@ def installed(name,
            - exclude:
              - Web-Server
    '''
    if 'force' in kwargs:
        salt.utils.versions.warn_until(
            'Neon',
            'Parameter \'force\' has been detected in the argument list. This'
            'parameter is no longer used and has been replaced by \'recurse\''
            'as of Salt 2018.3.0. This warning will be removed in Salt Neon.'
        )
        kwargs.pop('force')

    ret = {'name': name,
           'result': True,
           'changes': {},

@@ -455,13 +455,14 @@ def traverse_dict(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
    data['foo']['bar']['baz'] if this value exists, and will otherwise return
    the dict in the default argument.
    '''
    ptr = data
    try:
        for each in key.split(delimiter):
            data = data[each]
            ptr = ptr[each]
    except (KeyError, IndexError, TypeError):
        # Encountered a non-indexable value in the middle of traversing
        return default
    return data
    return ptr


@jinja_filter('traverse')
@@ -476,16 +477,17 @@ def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DEL
    {'foo':{'bar':['baz']}} , if data like {'foo':{'bar':{'0':'baz'}}}
    then return data['foo']['bar']['0']
    '''
    ptr = data
    for each in key.split(delimiter):
        if isinstance(data, list):
        if isinstance(ptr, list):
            try:
                idx = int(each)
            except ValueError:
                embed_match = False
                # Index was not numeric, lets look at any embedded dicts
                for embedded in (x for x in data if isinstance(x, dict)):
                for embedded in (x for x in ptr if isinstance(x, dict)):
                    try:
                        data = embedded[each]
                        ptr = embedded[each]
                        embed_match = True
                        break
                    except KeyError:
@@ -495,15 +497,15 @@ def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DEL
                    return default
            else:
                try:
                    data = data[idx]
                    ptr = ptr[idx]
                except IndexError:
                    return default
        else:
            try:
                data = data[each]
                ptr = ptr[each]
            except (KeyError, TypeError):
                return default
    return data
    return ptr


def subdict_match(data,
@@ -519,16 +521,33 @@ def subdict_match(data,
                  former, as more deeply-nested matches are tried first.
    '''
    def _match(target, pattern, regex_match=False, exact_match=False):
        # The reason for using six.text_type first and _then_ using
        # to_unicode as a fallback is because we want to eventually have
        # unicode types for comparison below. If either value is numeric then
        # six.text_type will turn it into a unicode string. However, if the
        # value is a PY2 str type with non-ascii chars, then the result will be
        # a UnicodeDecodeError. In those cases, we simply use to_unicode to
        # decode it to unicode. The reason we can't simply use to_unicode to
        # begin with is that (by design) to_unicode will raise a TypeError if a
        # non-string/bytestring/bytearray value is passed.
        try:
            target = six.text_type(target).lower()
        except UnicodeDecodeError:
            target = salt.utils.stringutils.to_unicode(target).lower()
        try:
            pattern = six.text_type(pattern).lower()
        except UnicodeDecodeError:
            pattern = salt.utils.stringutils.to_unicode(pattern).lower()

        if regex_match:
            try:
                return re.match(pattern.lower(), six.text_type(target).lower())
                return re.match(pattern, target)
            except Exception:
                log.error('Invalid regex \'%s\' in match', pattern)
                return False
        elif exact_match:
            return six.text_type(target).lower() == pattern.lower()
        else:
            return fnmatch.fnmatch(six.text_type(target).lower(), pattern.lower())
        return target == pattern if exact_match \
            else fnmatch.fnmatch(target, pattern)

    def _dict_match(target, pattern, regex_match=False, exact_match=False):
        wildcard = pattern.startswith('*:')
@@ -548,11 +567,6 @@ def subdict_match(data,
            return True
        if wildcard:
            for key in target:
                if _match(key,
                          pattern,
                          regex_match=regex_match,
                          exact_match=exact_match):
                    return True
                if isinstance(target[key], dict):
                    if _dict_match(target[key],
                                   pattern,
@@ -566,6 +580,17 @@ def subdict_match(data,
                                   regex_match=regex_match,
                                   exact_match=exact_match):
                        return True
                elif _match(target[key],
                            pattern,
                            regex_match=regex_match,
                            exact_match=exact_match):
                    return True
        return False

    splits = expr.split(delimiter)
    num_splits = len(splits)
    if num_splits == 1:
        # Delimiter not present, this can't possibly be a match
        return False

    splits = expr.split(delimiter)
@@ -578,10 +603,16 @@ def subdict_match(data,
    # want to use are 3, 2, and 1, in that order.
    for idx in range(num_splits - 1, 0, -1):
        key = delimiter.join(splits[:idx])
        matchstr = delimiter.join(splits[idx:])
        if key == '*':
            # We are matching on everything under the top level, so we need to
            # treat the match as the entire data being passed in
            matchstr = expr
            match = data
        else:
            matchstr = delimiter.join(splits[idx:])
            match = traverse_dict_and_list(data, key, {}, delimiter=delimiter)
        log.debug("Attempting to match '%s' in '%s' using delimiter '%s'",
                  matchstr, key, delimiter)
        match = traverse_dict_and_list(data, key, {}, delimiter=delimiter)
        if match == {}:
            continue
        if isinstance(match, dict):

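A brief usage sketch of the ptr-based traversal rewritten above (hypothetical data; Salt's default ':' delimiter is assumed):

    data = {'foo': {'bar': ['baz', {'qux': 1}]}}
    traverse_dict_and_list(data, 'foo:bar:0')                # -> 'baz' (numeric list index)
    traverse_dict_and_list(data, 'foo:bar:qux')              # -> 1 (embedded dict lookup)
    traverse_dict_and_list(data, 'foo:missing', 'fallback')  # -> 'fallback' (default)
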
@@ -212,6 +212,32 @@ def tagify(suffix='', prefix='', base=SALT):
    return TAGPARTER.join([part for part in parts if part])


def update_stats(stats, start_time, data):
    '''
    Calculate the master stats and return the updated stat info
    '''
    end_time = time.time()
    cmd = data['cmd']
    # the jid is used as the create time
    try:
        jid = data['jid']
    except KeyError:
        try:
            jid = data['data']['__pub_jid']
        except KeyError:
            log.info('jid not found in data, stats not updated')
            return stats
    create_time = int(time.mktime(time.strptime(jid, '%Y%m%d%H%M%S%f')))
    latency = start_time - create_time
    duration = end_time - start_time

    stats[cmd]['runs'] += 1
    stats[cmd]['latency'] = (stats[cmd]['latency'] * (stats[cmd]['runs'] - 1) + latency) / stats[cmd]['runs']
    stats[cmd]['mean'] = (stats[cmd]['mean'] * (stats[cmd]['runs'] - 1) + duration) / stats[cmd]['runs']

    return stats


class SaltEvent(object):
    '''
    Warning! Use the get_event function or the code will not be

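The update_stats addition keeps an incremental mean rather than storing every sample; a worked example of the formula with two assumed durations:

    # mean = (mean_prev * (runs - 1) + duration) / runs
    # run 1, duration 2.0s: (0.0 * 0 + 2.0) / 1 = 2.0
    # run 2, duration 4.0s: (2.0 * 1 + 4.0) / 2 = 3.0
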
@@ -350,7 +350,11 @@ def _available_services(refresh=False):
        try:
            # This assumes most of the plist files
            # will be already in XML format
            plist = plistlib.readPlist(true_path)
            if six.PY2:
                plist = plistlib.readPlist(true_path)
            else:
                with salt.utils.files.fopen(true_path, 'rb') as plist_handle:
                    plist = plistlib.load(plist_handle)

        except Exception:
            # If plistlib is unable to read the file we'll need to use

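A note on the version gate above: plistlib.readPlist was deprecated in Python 3.4 and later removed in 3.9, while plistlib.load expects a binary file handle, which is why the PY3 branch opens the plist with fopen(true_path, 'rb').
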
@@ -492,7 +492,6 @@ def default_ret(name):
    '''
    ret = {
        'name': name,
        'pchanges': {},
        'changes': {},
        'result': False,
        'comment': ''
@@ -510,22 +509,16 @@ def loaded_ret(ret, loaded, test, debug, compliance_report=False, opts=None):
    '''
    # Always get the comment
    changes = {}
    pchanges = {}
    ret['comment'] = loaded['comment']
    if 'diff' in loaded:
        changes['diff'] = loaded['diff']
        pchanges['diff'] = loaded['diff']
    if 'commit_id' in loaded:
        changes['commit_id'] = loaded['commit_id']
        pchanges['commit_id'] = loaded['commit_id']
    if 'compliance_report' in loaded:
        if compliance_report:
            changes['compliance_report'] = loaded['compliance_report']
        pchanges['compliance_report'] = loaded['compliance_report']
    if debug and 'loaded_config' in loaded:
        changes['loaded_config'] = loaded['loaded_config']
        pchanges['loaded_config'] = loaded['loaded_config']
    ret['pchanges'] = pchanges
    if changes.get('diff'):
        ret['comment'] = '{comment_base}\n\nConfiguration diff:\n\n{diff}'.format(comment_base=ret['comment'],
                                                                                  diff=changes['diff'])

@@ -20,6 +20,7 @@ import getpass
import logging
import optparse
import traceback
import tempfile
from functools import partial


@@ -34,6 +35,7 @@ import salt.utils.data
import salt.utils.files
import salt.utils.jid
import salt.utils.kinds as kinds
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
@@ -1902,6 +1904,69 @@ class SyndicOptionParser(six.with_metaclass(OptionParserMeta,
            self.get_config_file_path('minion'))


class SaltSupportOptionParser(six.with_metaclass(OptionParserMeta, OptionParser, ConfigDirMixIn,
                                                 MergeConfigMixIn, LogLevelMixIn, TimeoutMixIn)):
    default_timeout = 5
    description = 'Salt Support is a program to collect all support data: logs, system configuration etc.'
    usage = '%prog [options] \'<target>\' <function> [arguments]'
    # ConfigDirMixIn config filename attribute
    _config_filename_ = 'master'

    # LogLevelMixIn attributes
    _default_logging_level_ = config.DEFAULT_MASTER_OPTS['log_level']
    _default_logging_logfile_ = config.DEFAULT_MASTER_OPTS['log_file']

    def _mixin_setup(self):
        self.add_option('-P', '--show-profiles', default=False, action='store_true',
                        dest='support_profile_list', help='Show available profiles')
        self.add_option('-p', '--profile', default='', dest='support_profile',
                        help='Specify support profile or comma-separated profiles, e.g.: "salt,network"')
        support_archive = '{t}/{h}-support.tar.bz2'.format(t=tempfile.gettempdir(),
                                                           h=salt.utils.network.get_fqhostname())
        self.add_option('-a', '--archive', default=support_archive, dest='support_archive',
                        help=('Specify name of the resulting support archive. '
                              'Default is "{f}".'.format(f=support_archive)))
        self.add_option('-u', '--unit', default='', dest='support_unit',
                        help='Specify examined unit (default "master").')
        self.add_option('-U', '--show-units', default=False, action='store_true', dest='support_show_units',
                        help='Show available units')
        self.add_option('-f', '--force', default=False, action='store_true', dest='support_archive_force_overwrite',
                        help='Force overwrite existing archive, if exists')
        self.add_option('-o', '--out', default='null', dest='support_output_format',
                        help=('Set the default output using the specified outputter, '
                              'unless the profile overrides this. Default: "yaml".'))

    def find_existing_configs(self, default):
        '''
        Find configuration files on the system.
        :return:
        '''
        configs = []
        for cfg in [default, self._config_filename_, 'minion', 'proxy', 'cloud', 'spm']:
            if not cfg:
                continue
            config_path = self.get_config_file_path(cfg)
            if os.path.exists(config_path):
                configs.append(cfg)

        if default and default not in configs:
            raise SystemExit('Unknown configuration unit: {}'.format(default))

        return configs

    def setup_config(self, cfg=None):
        '''
        Open suitable config file.
        :return:
        '''
        _opts, _args = optparse.OptionParser.parse_args(self)
        configs = self.find_existing_configs(_opts.support_unit)
        if cfg not in configs:
            cfg = configs[0]

        return config.master_config(self.get_config_file_path(cfg))


class SaltCMDOptionParser(six.with_metaclass(OptionParserMeta,
                                             OptionParser,
                                             ConfigDirMixIn,

@@ -6,9 +6,11 @@ Functions which implement running reactor jobs

# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import collections
import fnmatch
import glob
import logging
import time

# Import salt libs
import salt.client

@@ -23,6 +25,7 @@ import salt.utils.process
import salt.utils.yaml
import salt.wheel
import salt.defaults.exitcodes
from salt.utils.event import tagify

# Import 3rd-party libs
from salt.ext import six
@@ -56,6 +59,9 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
        local_minion_opts['file_client'] = 'local'
        self.minion = salt.minion.MasterMinion(local_minion_opts)
        salt.state.Compiler.__init__(self, opts, self.minion.rend)
        self.event = salt.utils.event.get_master_event(opts, opts['sock_dir'], listen=False)
        self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
        self.stat_clock = time.time()

    # We need __setstate__ and __getstate__ to avoid pickling errors since
    # 'self.rend' (from salt.state.Compiler) contains a function reference
@@ -77,6 +83,17 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
            'log_queue_level': self.log_queue_level
        }

    def _post_stats(self, stats):
        '''
        Fire events with stat info if it's time
        '''
        end_time = time.time()
        if end_time - self.stat_clock > self.opts['master_stats_event_iter']:
            # Fire the event with the stats and wipe the tracker
            self.event.fire_event({'time': end_time - self.stat_clock, 'worker': self.name, 'stats': stats}, tagify(self.name, 'stats'))
            self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
            self.stat_clock = end_time

    def render_reaction(self, glob_ref, tag, data):
        '''
        Execute the render system against a single reaction file and return
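_post_stats is a rate limiter: stats accumulate between events, at most one stats event fires per master_stats_event_iter interval, and the counters are wiped afterwards. A standalone sketch of that gating, with the event bus replaced by a plain callable (names mirror the diff; this is an illustration, not Salt's code):

import collections
import time

class StatsGate(object):
    '''Sketch of the rate-limited stat events used by Reactor._post_stats.'''

    def __init__(self, interval):
        # interval plays the role of opts['master_stats_event_iter']
        self.interval = interval
        self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
        self.stat_clock = time.time()

    def post(self, stats, fire_event):
        end_time = time.time()
        if end_time - self.stat_clock > self.interval:
            # Fire one event with the accumulated stats, then wipe the tracker
            fire_event({'time': end_time - self.stat_clock, 'stats': stats})
            self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
            self.stat_clock = end_time

gate = StatsGate(interval=60)
gate.post({'reactor': {'runs': 3}}, fire_event=print)  # silent until 60s have elapsed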
@@ -246,6 +263,7 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
                # skip all events fired by ourselves
                if data['data'].get('user') == self.wrap.event_user:
                    continue

                if data['tag'].endswith('salt/reactors/manage/add'):
                    _data = data['data']
                    res = self.add_reactor(_data['event'], _data['reactors'])
@@ -267,11 +285,18 @@ class Reactor(salt.utils.process.SignalHandlingMultiprocessingProcess, salt.stat
                    continue
                chunks = self.reactions(data['tag'], data['data'], reactors)
                if chunks:
+                   if self.opts['master_stats']:
+                       _data = data['data']
+                       start = time.time()
                    try:
                        self.call_reactions(chunks)
                    except SystemExit:
                        log.warning('Exit ignored by reactor')

+                   if self.opts['master_stats']:
+                       stats = salt.utils.event.update_stats(self.stats, start, _data)
+                       self._post_stats(stats)


class ReactWrap(object):
    '''
@@ -212,10 +212,6 @@ def merge_subreturn(original_return, sub_return, subkey=None):
        original_return.setdefault('changes', {})
        original_return['changes'][subkey] = sub_return['changes']

-   if sub_return.get('pchanges'):  # pchanges may or may not exist
-       original_return.setdefault('pchanges', {})
-       original_return['pchanges'][subkey] = sub_return['pchanges']
-
    return original_return
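With the pchanges block gone, merge_subreturn folds only the sub-return's changes under the given subkey. A hypothetical before/after illustration (values invented):

original = {'name': 'parent', 'result': True, 'changes': {}, 'comment': ''}
sub = {'name': 'child', 'result': True,
       'changes': {'diff': 'New file'},
       'pchanges': {'diff': 'New file'}}  # now ignored by the merge

# merge_subreturn(original, sub, subkey='child') now yields:
#   original['changes'] == {'child': {'diff': 'New file'}}
# and no 'pchanges' key is created on the parent return.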
@@ -1957,17 +1957,15 @@ def _check_perms(obj_name, obj_type, new_perms, cur_perms, access_mode, ret):
                changes[user]['applies_to'] = applies_to

    if changes:
-       if 'perms' not in ret['pchanges']:
-           ret['pchanges']['perms'] = {}
+       if 'perms' not in ret['changes']:
+           ret['changes']['perms'] = {}
        for user in changes:
            user_name = get_name(principal=user)

            if __opts__['test'] is True:
-               if user not in ret['pchanges']['perms']:
-                   ret['pchanges']['perms'][user] = {}
-               ret['pchanges']['perms'][user][access_mode] = changes[user][access_mode]
+               if user not in ret['changes']['perms']:
+                   ret['changes']['perms'][user] = {}
+               ret['changes']['perms'][user][access_mode] = changes[user][access_mode]
            else:
                # Get applies_to
                applies_to = None
@@ -2123,7 +2121,6 @@ def check_perms(obj_name,
    if not ret:
        ret = {'name': obj_name,
               'changes': {},
-              'pchanges': {},
               'comment': [],
               'result': True}
        orig_comment = ''
@@ -2137,7 +2134,7 @@ def check_perms(obj_name,
        current_owner = get_owner(obj_name=obj_name, obj_type=obj_type)
        if owner != current_owner:
            if __opts__['test'] is True:
-               ret['pchanges']['owner'] = owner
+               ret['changes']['owner'] = owner
            else:
                try:
                    set_owner(obj_name=obj_name,
@@ -2155,7 +2152,7 @@ def check_perms(obj_name,
        if not inheritance == get_inheritance(obj_name=obj_name,
                                              obj_type=obj_type):
            if __opts__['test'] is True:
-               ret['pchanges']['inheritance'] = inheritance
+               ret['changes']['inheritance'] = inheritance
            else:
                try:
                    set_inheritance(
@@ -2202,9 +2199,9 @@ def check_perms(obj_name,
            if user_name.lower() not in set(k.lower() for k in grant_perms):
                if 'grant' in cur_perms['Not Inherited'][user_name]:
                    if __opts__['test'] is True:
-                       if 'remove_perms' not in ret['pchanges']:
-                           ret['pchanges']['remove_perms'] = {}
-                       ret['pchanges']['remove_perms'].update(
+                       if 'remove_perms' not in ret['changes']:
+                           ret['changes']['remove_perms'] = {}
+                       ret['changes']['remove_perms'].update(
                            {user_name: cur_perms['Not Inherited'][user_name]})
                    else:
                        if 'remove_perms' not in ret['changes']:
@@ -2220,9 +2217,9 @@ def check_perms(obj_name,
            if user_name.lower() not in set(k.lower() for k in deny_perms):
                if 'deny' in cur_perms['Not Inherited'][user_name]:
                    if __opts__['test'] is True:
-                       if 'remove_perms' not in ret['pchanges']:
-                           ret['pchanges']['remove_perms'] = {}
-                       ret['pchanges']['remove_perms'].update(
+                       if 'remove_perms' not in ret['changes']:
+                           ret['changes']['remove_perms'] = {}
+                       ret['changes']['remove_perms'].update(
                            {user_name: cur_perms['Not Inherited'][user_name]})
                    else:
                        if 'remove_perms' not in ret['changes']:
@@ -2246,7 +2243,7 @@ def check_perms(obj_name,
        ret['comment'] = '\n'.join(ret['comment'])

    # Set result for test = True
-   if __opts__['test'] and (ret['changes'] or ret['pchanges']):
+   if __opts__['test'] and ret['changes']:
        ret['result'] = None

    return ret
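Net effect of the check_perms hunks above: test-mode predictions now land in changes rather than a parallel pchanges dict, and the None result is keyed off changes alone. A sketch of the return shape with test=True (field values invented):

ret = {
    'name': 'C:\\Temp',
    'changes': {                       # previously split across 'pchanges'
        'owner': 'Administrators',
        'perms': {'joe': {'grant': 'full_control'}},
    },
    'comment': [],
    'result': None,                    # None because 'changes' is non-empty under test
}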
11
scripts/salt-support
Executable file
@@ -0,0 +1,11 @@
#!/usr/bin/env python
'''
Salt support collects logs, debug data,
and system information for support purposes.
'''

from salt.scripts import salt_support


if __name__ == '__main__':
    salt_support()
@@ -25,6 +25,7 @@ integration.test: True

# Grains addons
grains:
  test_grain: cheese
+ grain_path: /tmp/salt-tests-tmpdir/file-grain-test
  script: grail
  alot: many
  planets:
22
tests/integration/files/file/base/onchanges_prereq.sls
Normal file
@@ -0,0 +1,22 @@
one:
  file.managed:
    - name: {{ pillar['file1'] }}
    - source: {{ pillar['source'] }}

# This should run because there were changes
two:
  test.succeed_without_changes:
    - {{ pillar['req'] }}:
      - file: one

# Run the same state as "one" again; this should not cause changes
three:
  file.managed:
    - name: {{ pillar['file2'] }}
    - source: {{ pillar['source'] }}

# This should not run because there should be no changes
four:
  test.succeed_without_changes:
    - {{ pillar['req'] }}:
      - file: three
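The {{ pillar['req'] }} indirection lets this one SLS file exercise either requisite; the integration tests pass req: onchanges or req: prereq through pillar. A quick sketch of the two renderings, using jinja2 directly on the "two" state fragment above:

import jinja2

template = '''
two:
  test.succeed_without_changes:
    - {{ pillar['req'] }}:
      - file: one
'''
for req in ('onchanges', 'prereq'):
    print(jinja2.Template(template).render(pillar={'req': req}))
    # first pass renders '- onchanges:', second pass '- prereq:'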
3
tests/integration/files/file/base/orch/req_test.sls
Normal file
@@ -0,0 +1,3 @@
{{ salt['runtests_helpers.get_salt_temp_dir_for_path']('orch.req_test') }}:
  file.managed:
    - contents: 'Hello world!'
@@ -11,7 +11,3 @@ base:
  'localhost':
    - generic
    - blackout
- 'N@mins not L@minion':
-   - ng1
- 'N@missing_minion':
-   - ng2
@@ -194,6 +194,35 @@ class BasePillarTest(ModuleCase):
    '''
    Tests for pillar decryption
    '''
    @classmethod
    def setUpClass(cls):
        os.makedirs(PILLAR_BASE)
        with salt.utils.files.fopen(TOP_SLS, 'w') as fp_:
            fp_.write(textwrap.dedent('''\
                base:
                  'N@mins not L@minion':
                    - ng1
                  'N@missing_minion':
                    - ng2
                '''))

        with salt.utils.files.fopen(os.path.join(PILLAR_BASE, 'ng1.sls'), 'w') as fp_:
            fp_.write('pillar_from_nodegroup: True')

        with salt.utils.files.fopen(os.path.join(PILLAR_BASE, 'ng2.sls'), 'w') as fp_:
            fp_.write('pillar_from_nodegroup_with_ghost: True')

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(PILLAR_BASE)

    def _build_opts(self, opts):
        ret = copy.deepcopy(DEFAULT_OPTS)
        for item in ADDITIONAL_OPTS:
            ret[item] = self.master_opts[item]
        ret.update(opts)
        return ret

    def test_pillar_top_compound_match(self, grains=None):
        '''
        Test that a compound match topfile that refers to a nodegroup via N@ works

@@ -202,12 +231,21 @@ class BasePillarTest(ModuleCase):
        if not grains:
            grains = {}
        grains['os'] = 'Fedora'
-       pillar_obj = pillar.Pillar(self.get_config('master', from_scratch=True), grains, 'minion', 'base')
+       nodegroup_opts = salt.utils.yaml.safe_load(textwrap.dedent('''\
+           nodegroups:
+             min: minion
+             sub_min: sub_minion
+             mins: N@min or N@sub_min
+             missing_minion: L@minion,ghostminion
+           '''))
+
+       opts = self._build_opts(nodegroup_opts)
+       pillar_obj = pillar.Pillar(opts, grains, 'minion', 'base')
        ret = pillar_obj.compile_pillar()
        self.assertEqual(ret.get('pillar_from_nodegroup_with_ghost'), True)
        self.assertEqual(ret.get('pillar_from_nodegroup'), None)

-       sub_pillar_obj = pillar.Pillar(self.get_config('master', from_scratch=True), grains, 'sub_minion', 'base')
+       sub_pillar_obj = pillar.Pillar(opts, grains, 'sub_minion', 'base')
        sub_ret = sub_pillar_obj.compile_pillar()
        self.assertEqual(sub_ret.get('pillar_from_nodegroup_with_ghost'), None)
        self.assertEqual(sub_ret.get('pillar_from_nodegroup'), True)
@@ -71,9 +71,7 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
    def setUp(self):
        super(StateModuleTest, self).setUp()
        destpath = os.path.join(FILES, 'file', 'base', 'testappend', 'firstif')
        reline(destpath, destpath, force=True)
        destpath = os.path.join(FILES, 'file', 'base', 'testappend', 'secondif')
        reline(destpath, destpath, force=True)
-       sls = self.run_function('saltutil.sync_modules')
-       assert isinstance(sls, list)
@@ -1874,7 +1872,7 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):

        for key, val in ret.items():
            self.assertEqual(val['comment'], comment)
-           self.assertEqual(val['changes'], {})
+           self.assertEqual(val['changes'], {'newfile': testfile})

    def test_state_sls_id_test_state_test_post_run(self):
        '''
@@ -1907,7 +1905,7 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
            self.assertEqual(
                val['comment'],
                'The file {0} is set to be changed'.format(file_name))
-           self.assertEqual(val['changes'], {})
+           self.assertEqual(val['changes'], {'newfile': file_name})

    def test_state_sls_id_test_true_post_run(self):
        '''
@@ -1965,7 +1963,6 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
             'result': True},
        'file_|-unless_false_onlyif_true_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
            {'comment': 'Empty file',
-            'pchanges': {},
             'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
             'start_time': '18:10:20.341753',
             'result': True,
100
tests/integration/modules/test_vault.py
Normal file
@@ -0,0 +1,100 @@
# -*- coding: utf-8 -*-
'''
Integration tests for the vault execution module
'''

# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import inspect
import time

# Import Salt Testing Libs
from tests.support.unit import skipIf
from tests.support.case import ModuleCase, ShellCase
from tests.support.helpers import destructiveTest, flaky
from tests.support.paths import FILES

# Import Salt Libs
import salt.utils.path

import logging
log = logging.getLogger(__name__)


@destructiveTest
@skipIf(not salt.utils.path.which('dockerd'), 'Docker not installed')
@skipIf(not salt.utils.path.which('vault'), 'Vault not installed')
class VaultTestCase(ModuleCase, ShellCase):
    '''
    Test vault module
    '''
    count = 0

    def setUp(self):
        '''
        Set up the vault container
        '''
        if self.count == 0:
            config = '{"backend": {"file": {"path": "/vault/file"}}, "default_lease_ttl": "168h", "max_lease_ttl": "720h"}'
            self.run_state('docker_image.present', name='vault', tag='0.9.6')
            self.run_state(
                'docker_container.running',
                name='vault',
                image='vault:0.9.6',
                port_bindings='8200:8200',
                environment={
                    'VAULT_DEV_ROOT_TOKEN_ID': 'testsecret',
                    'VAULT_LOCAL_CONFIG': config,
                },
                cap_add='IPC_LOCK',
            )
            time.sleep(5)
            ret = self.run_function(
                'cmd.retcode',
                cmd='/usr/local/bin/vault login token=testsecret',
                env={'VAULT_ADDR': 'http://127.0.0.1:8200'},
            )
            if ret != 0:
                self.skipTest('unable to log in to vault')
            ret = self.run_function(
                'cmd.retcode',
                cmd='/usr/local/bin/vault policy write testpolicy {0}/vault.hcl'.format(FILES),
                env={'VAULT_ADDR': 'http://127.0.0.1:8200'},
            )
            if ret != 0:
                self.skipTest('unable to assign policy to vault')
        self.count += 1

    def tearDown(self):
        '''
        Tear down the vault container
        '''
        def count_tests(funcobj):
            return inspect.ismethod(funcobj) and funcobj.__name__.startswith('test_')
        numtests = len(inspect.getmembers(VaultTestCase, predicate=count_tests))
        if self.count >= numtests:
            self.run_state('docker_container.stopped', name='vault')
            self.run_state('docker_container.absent', name='vault')
            self.run_state('docker_image.absent', name='vault', force=True)

    @flaky
    def test_write_read_secret(self):
        assert self.run_function('vault.write_secret', path='secret/my/secret', user='foo', password='bar') is True
        assert self.run_function('vault.read_secret', arg=['secret/my/secret']) == {'password': 'bar', 'user': 'foo'}

    @flaky
    def test_write_raw_read_secret(self):
        assert self.run_function('vault.write_raw',
                                 path='secret/my/secret',
                                 raw={"user": "foo", "password": "bar"}) is True
        assert self.run_function('vault.read_secret', arg=['secret/my/secret']) == {'password': 'bar', 'user': 'foo'}

    @flaky
    def test_delete_secret(self):
        assert self.run_function('vault.write_secret', path='secret/my/secret', user='foo', password='bar') is True
        assert self.run_function('vault.delete_secret', arg=['secret/my/secret']) is True

    @flaky
    def test_list_secrets(self):
        assert self.run_function('vault.write_secret', path='secret/my/secret', user='foo', password='bar') is True
        assert self.run_function('vault.list_secrets', arg=['secret/my/']) == {'keys': ['secret']}
@@ -643,3 +643,119 @@ class OrchEventTest(ShellCase):
        self.assertTrue(received)
        del listener
        signal.alarm(0)

    def test_orchestration_onchanges_and_prereq(self):
        '''
        Test that the onchanges and prereq requisites work in orchestration.
        We run the orch in test mode before and after creating the target
        file, and confirm that the requisite-bound states fire only while
        the file.managed state reports changes.
        '''
        self.write_conf({
            'fileserver_backend': ['roots'],
            'file_roots': {
                'base': [self.base_env],
            },
        })

        orch_sls = os.path.join(self.base_env, 'orch.sls')
        with salt.utils.files.fopen(orch_sls, 'w') as fp_:
            fp_.write(textwrap.dedent('''
                manage_a_file:
                  salt.state:
                    - tgt: minion
                    - sls:
                      - orch.req_test

                do_onchanges:
                  salt.function:
                    - tgt: minion
                    - name: test.ping
                    - onchanges:
                      - salt: manage_a_file

                do_prereq:
                  salt.function:
                    - tgt: minion
                    - name: test.ping
                    - prereq:
                      - salt: manage_a_file
                '''))

        listener = salt.utils.event.get_event(
            'master',
            sock_dir=self.master_opts['sock_dir'],
            transport=self.master_opts['transport'],
            opts=self.master_opts)

        try:
            jid1 = self.run_run_plus(
                'state.orchestrate',
                'orch',
                test=True,
                __reload_config=True).get('jid')

            # Run for real to create the file
            self.run_run_plus(
                'state.orchestrate',
                'orch',
                __reload_config=True).get('jid')

            # Run again in test mode. Since there were no changes, the
            # requisites should not fire.
            jid2 = self.run_run_plus(
                'state.orchestrate',
                'orch',
                test=True,
                __reload_config=True).get('jid')
        finally:
            try:
                os.remove(os.path.join(TMP, 'orch.req_test'))
            except OSError:
                pass

        assert jid1 is not None
        assert jid2 is not None

        tags = {'salt/run/{0}/ret'.format(x): x for x in (jid1, jid2)}
        ret = {}

        signal.signal(signal.SIGALRM, self.alarm_handler)
        signal.alarm(self.timeout)
        try:
            while True:
                event = listener.get_event(full=True)
                if event is None:
                    continue

                if event['tag'] in tags:
                    ret[tags.pop(event['tag'])] = self.repack_state_returns(
                        event['data']['return']['data']['master']
                    )
                    if not tags:
                        # If tags is empty, we've grabbed all the returns we
                        # wanted, so let's stop listening to the event bus.
                        break
        finally:
            del listener
            signal.alarm(0)

        for sls_id in ('manage_a_file', 'do_onchanges', 'do_prereq'):
            # The first time through, all three states should have a None
            # result, while the second time through, they should all have a
            # True result.
            assert ret[jid1][sls_id]['result'] is None, \
                'result of {0} ({1}) is not None'.format(
                    sls_id,
                    ret[jid1][sls_id]['result'])
            assert ret[jid2][sls_id]['result'] is True, \
                'result of {0} ({1}) is not True'.format(
                    sls_id,
                    ret[jid2][sls_id]['result'])

        # The file.managed state should have shown changes in the test mode
        # return data.
        assert ret[jid1]['manage_a_file']['changes']

        # After the file was created, running again in test mode should have
        # shown no changes.
        assert not ret[jid2]['manage_a_file']['changes'], \
            ret[jid2]['manage_a_file']['changes']
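The listener loop above collects runner returns by matching salt/run/<jid>/ret event tags and stops once every expected jid has reported. The bookkeeping, isolated from the event bus (jids and data invented):

tags = {'salt/run/{0}/ret'.format(x): x
        for x in ('20180101000000000000', '20180101000000000001')}
ret = {}

def handle(event):
    # Pop each expected tag exactly once; report True when all returns arrived.
    if event['tag'] in tags:
        ret[tags.pop(event['tag'])] = event['data']
    return not tags

done = handle({'tag': 'salt/run/20180101000000000000/ret', 'data': {'ok': True}})
print(done, sorted(ret))  # False ['20180101000000000000']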
@@ -77,6 +77,7 @@ class CallTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMixin
        self.assertIn('hello', ''.join(out))
        self.assertIn('Succeeded: 1', ''.join(out))

+   @skipIf(True, 'This test causes the test suite to hang. Skipping until further investigation can occur.')
    @destructiveTest
    @skip_if_not_root
    @skipIf(salt.utils.platform.is_windows(), 'This test does not apply on Windows')
@@ -114,11 +115,14 @@ class CallTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMixin
        if target in cur_pkgs:
            self.fail('Target package \'{0}\' already installed'.format(target))

-       out = ''.join(self.run_call('--local pkg.install {0}'.format(target)))
-       self.assertIn('local: ----------', out)
-       self.assertIn('{0}: ----------'.format(target), out)
-       self.assertIn('new:', out)
-       self.assertIn('old:', out)
+       try:
+           out = ''.join(self.run_call('--local pkg.install {0}'.format(target)))
+           self.assertIn('local: ----------', out)
+           self.assertIn('{0}: ----------'.format(target), out)
+           self.assertIn('new:', out)
+           self.assertIn('old:', out)
+       finally:
+           self.run_call('--local pkg.remove {0}'.format(target))

    @skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
    @flaky
@@ -357,7 +357,6 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
        file.
        '''
        grain_path = os.path.join(TMP, 'file-grain-test')
-       self.run_function('grains.set', ['grain_path', grain_path])
        state_file = 'file-grainget'

        self.run_function('state.sls', [state_file])
@@ -744,7 +743,6 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
                source_hash=uppercase_hash
            )
            assert ret[state_name]['result'] is True
-           assert ret[state_name]['pchanges'] == {}
            assert ret[state_name]['changes'] == {}

            # Test uppercase source_hash using test=true

@@ -757,7 +755,6 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
                test=True
            )
            assert ret[state_name]['result'] is True
-           assert ret[state_name]['pchanges'] == {}
            assert ret[state_name]['changes'] == {}

        finally:
@@ -811,6 +808,87 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
        result = self.run_function('cp.is_cached', [source, saltenv])
        assert result == '', 'File is still cached at {0}'.format(result)

    @with_tempfile(create=False)
    @with_tempfile(create=False)
    def test_file_managed_onchanges(self, file1, file2):
        '''
        Test file.managed state with onchanges
        '''
        pillar = {'file1': file1,
                  'file2': file2,
                  'source': 'salt://testfile',
                  'req': 'onchanges'}

        # Lay down the file used in the below SLS to ensure that when it is
        # run, there are no changes.
        self.run_state(
            'file.managed',
            name=pillar['file2'],
            source=pillar['source'])

        ret = self.repack_state_returns(
            self.run_function(
                'state.apply',
                mods='onchanges_prereq',
                pillar=pillar,
                test=True,
            )
        )
        # 'one' should exit with None (pending changes) and 'three' with True
        assert ret['one']['result'] is None, ret['one']['result']
        assert ret['three']['result'] is True, ret['three']['result']
        # The first file state should have changes, since a new file was
        # created. The other one should not, since we already created that file
        # before applying the SLS file.
        assert ret['one']['changes']
        assert not ret['three']['changes'], ret['three']['changes']
        # The state watching 'one' should have been run due to changes
        assert ret['two']['comment'] == 'Success!', ret['two']['comment']
        # The state watching 'three' should not have been run
        assert ret['four']['comment'] == \
            'State was not run because none of the onchanges reqs changed', \
            ret['four']['comment']

    @with_tempfile(create=False)
    @with_tempfile(create=False)
    def test_file_managed_prereq(self, file1, file2):
        '''
        Test file.managed state with prereq
        '''
        pillar = {'file1': file1,
                  'file2': file2,
                  'source': 'salt://testfile',
                  'req': 'prereq'}

        # Lay down the file used in the below SLS to ensure that when it is
        # run, there are no changes.
        self.run_state(
            'file.managed',
            name=pillar['file2'],
            source=pillar['source'])

        ret = self.repack_state_returns(
            self.run_function(
                'state.apply',
                mods='onchanges_prereq',
                pillar=pillar,
                test=True,
            )
        )
        # 'one' should exit with None (pending changes) and 'three' with True
        assert ret['one']['result'] is None, ret['one']['result']
        assert ret['three']['result'] is True, ret['three']['result']
        # The first file state should have changes, since a new file was
        # created. The other one should not, since we already created that file
        # before applying the SLS file.
        assert ret['one']['changes']
        assert not ret['three']['changes'], ret['three']['changes']
        # The state watching 'one' should have been run due to changes
        assert ret['two']['comment'] == 'Success!', ret['two']['comment']
        # The state watching 'three' should not have been run
        assert ret['four']['comment'] == 'No changes detected', \
            ret['four']['comment']

    def test_directory(self):
        '''
        file.directory
@@ -211,7 +211,10 @@ def flaky(caller=None, condition=True):
                if attempt >= 3:
                    raise exc
                backoff_time = attempt ** 2
-               log.info('Found Exception. Waiting %s seconds to retry.', backoff_time)
+               log.info(
+                   'Found Exception. Waiting %s seconds to retry.',
+                   backoff_time
+               )
                time.sleep(backoff_time)
        return cls
    return wrap
@@ -268,6 +268,19 @@ class TestCase(_TestCase):
        )
        # return _TestCase.assertNotAlmostEquals(self, *args, **kwargs)

    def repack_state_returns(self, state_ret):
        '''
        Accepts a state return dict and returns it back with the top level key
        names rewritten such that the ID declaration is the key instead of the
        State's unique tag. For example: 'foo' instead of
        'file_|-foo_|-/etc/foo.conf_|-managed'

        This makes it easier to work with state returns when crafting asserts
        after running states.
        '''
        assert isinstance(state_ret, dict), state_ret
        return {x.split('_|-')[1]: y for x, y in six.iteritems(state_ret)}

    def failUnlessEqual(self, *args, **kwargs):
        raise DeprecationWarning(
            'The {0}() function is deprecated. Please start using {1}() '
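What repack_state_returns buys in practice (tag and values invented for the example):

state_ret = {
    'file_|-foo_|-/etc/foo.conf_|-managed': {'result': True, 'changes': {}},
}
# After repacking, the ID declaration becomes the key:
repacked = {x.split('_|-')[1]: y for x, y in state_ret.items()}
assert repacked == {'foo': {'result': True, 'changes': {}}}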
477
tests/unit/cli/test_support.py
Normal file
@@ -0,0 +1,477 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: Bo Maryniuk <bo@suse.de>
'''

from __future__ import absolute_import, print_function, unicode_literals

from tests.support.unit import skipIf, TestCase
from tests.support.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON

from salt.cli.support.console import IndentOutput
from salt.cli.support.collector import SupportDataCollector, SaltSupport
from salt.utils.color import get_colors
from salt.utils.stringutils import to_bytes
import salt.exceptions
import salt.cli.support.collector
import salt.utils.files
import os
import yaml
import jinja2

try:
    import pytest
except ImportError:
    pytest = None


@skipIf(not bool(pytest), 'Pytest needs to be installed')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class SaltSupportIndentOutputTestCase(TestCase):
    '''
    Unit Tests for the salt-support indent output.
    '''

    def setUp(self):
        '''
        Set up the test
        :return:
        '''
        self.message = 'Stubborn processes on dumb terminal'
        self.device = MagicMock()
        self.iout = IndentOutput(device=self.device)
        self.colors = get_colors()

    def tearDown(self):
        '''
        Remove instances after the test run
        :return:
        '''
        del self.message
        del self.device
        del self.iout
        del self.colors

    def test_standard_output(self):
        '''
        Test console standard output.
        '''
        self.iout.put(self.message)
        assert self.device.write.called
        assert self.device.write.call_count == 5
        for idx, data in enumerate(['', str(self.colors['CYAN']), self.message, str(self.colors['ENDC']), '\n']):
            assert self.device.write.call_args_list[idx][0][0] == data

    def test_indent_output(self):
        '''
        Test indent distance.
        :return:
        '''
        self.iout.put(self.message, indent=10)
        for idx, data in enumerate([' ' * 10, str(self.colors['CYAN']), self.message, str(self.colors['ENDC']), '\n']):
            assert self.device.write.call_args_list[idx][0][0] == data

    def test_color_config(self):
        '''
        Test that the color config changes on each indent level.
        :return:
        '''
        conf = {0: 'MAGENTA', 2: 'RED', 4: 'WHITE', 6: 'YELLOW'}
        self.iout = IndentOutput(conf=conf, device=self.device)
        for indent in sorted(list(conf)):
            self.iout.put(self.message, indent=indent)

        step = 1
        for indent_key in sorted(list(conf)):
            assert str(self.device.write.call_args_list[step][0][0]) == str(self.colors[conf[indent_key]])
            step += 5
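The tests above pin down IndentOutput's write sequence: indent prefix, color code, message, color reset, newline. A minimal usage sketch, assuming the import path this PR adds:

import sys
from salt.cli.support.console import IndentOutput  # module introduced by this PR

out = IndentOutput(device=sys.stdout)
out.put('collecting master logs', indent=2)
# Five writes, in order: '  ', a color code, the message, the reset code, '\n'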
@skipIf(not bool(pytest), 'Pytest needs to be installed')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class SaltSupportCollectorTestCase(TestCase):
    '''
    Collector tests.
    '''
    def setUp(self):
        '''
        Set up the test case
        :return:
        '''
        self.archive_path = '/highway/to/hell'
        self.output_device = MagicMock()
        self.collector = SupportDataCollector(self.archive_path, self.output_device)

    def tearDown(self):
        '''
        Tear down the test case elements
        :return:
        '''
        del self.collector
        del self.archive_path
        del self.output_device

    @patch('salt.cli.support.collector.tarfile.TarFile', MagicMock())
    def test_archive_open(self):
        '''
        Test that the archive is opened.

        :return:
        '''
        self.collector.open()
        assert self.collector.archive_path == self.archive_path
        with pytest.raises(salt.exceptions.SaltException) as err:
            self.collector.open()
        assert 'Archive already opened' in str(err)

    @patch('salt.cli.support.collector.tarfile.TarFile', MagicMock())
    def test_archive_close(self):
        '''
        Test that the archive is closed.

        :return:
        '''
        self.collector.open()
        self.collector._flush_content = lambda: None
        self.collector.close()
        assert self.collector.archive_path == self.archive_path
        with pytest.raises(salt.exceptions.SaltException) as err:
            self.collector.close()
        assert 'Archive already closed' in str(err)

    def test_archive_addwrite(self):
        '''
        Test adding a section to the archive and writing to it.

        :return:
        '''
        archive = MagicMock()
        with patch('salt.cli.support.collector.tarfile.TarFile', archive):
            self.collector.open()
            self.collector.add('foo')
            self.collector.write(title='title', data='data', output='null')
            self.collector._flush_content()

            assert (archive.bz2open().addfile.call_args[1]['fileobj'].read()
                    == to_bytes('title\n-----\n\nraw-content: data\n\n\n\n'))

    @patch('salt.utils.files.fopen', MagicMock(return_value='path=/dev/null'))
    def test_archive_addlink(self):
        '''
        Test adding a section to the archive and linking an external file or directory to it.

        :return:
        '''
        archive = MagicMock()
        with patch('salt.cli.support.collector.tarfile.TarFile', archive):
            self.collector.open()
            self.collector.add('foo')
            self.collector.link(title='Backup Path', path='/path/to/backup.config')
            self.collector._flush_content()

            assert archive.bz2open().addfile.call_count == 1
            assert (archive.bz2open().addfile.call_args[1]['fileobj'].read()
                    == to_bytes('Backup Path\n-----------\n\npath=/dev/null\n\n\n'))

    @patch('salt.utils.files.fopen', MagicMock(return_value='path=/dev/null'))
    def test_archive_discard_section(self):
        '''
        Test discarding a section from the archive.

        :return:
        '''
        archive = MagicMock()
        with patch('salt.cli.support.collector.tarfile.TarFile', archive):
            self.collector.open()
            self.collector.add('solar-interference')
            self.collector.link(title='Thermal anomaly', path='/path/to/another/great.config')
            self.collector.add('foo')
            self.collector.link(title='Backup Path', path='/path/to/backup.config')
            self.collector._flush_content()
            assert archive.bz2open().addfile.call_count == 2
            assert (archive.bz2open().addfile.mock_calls[0][2]['fileobj'].read()
                    == to_bytes('Thermal anomaly\n---------------\n\npath=/dev/null\n\n\n'))
            self.collector.close()

        archive = MagicMock()
        with patch('salt.cli.support.collector.tarfile.TarFile', archive):
            self.collector.open()
            self.collector.add('solar-interference')
            self.collector.link(title='Thermal anomaly', path='/path/to/another/great.config')
            self.collector.discard_current()
            self.collector.add('foo')
            self.collector.link(title='Backup Path', path='/path/to/backup.config')
            self.collector._flush_content()
            assert archive.bz2open().addfile.call_count == 2
            assert (archive.bz2open().addfile.mock_calls[0][2]['fileobj'].read()
                    == to_bytes('Backup Path\n-----------\n\npath=/dev/null\n\n\n'))
            self.collector.close()
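The collector protocol these tests exercise is: open(), add(section), buffered write()/link() calls, then a flush into the tar archive on close(). A hedged usage sketch (method names are the ones the tests call; the output device is stubbed to keep the snippet self-contained):

from unittest.mock import MagicMock  # stand-in for a real console output device
from salt.cli.support.collector import SupportDataCollector  # added by this PR

collector = SupportDataCollector('/tmp/myhost-support.tar.bz2', MagicMock())
collector.open()                  # raises SaltException if already opened
collector.add('logs')             # start a named section
collector.write(title='Master log', data='raw-content', output='null')
collector.link(title='Config', path='/etc/salt/master')
collector.close()                 # flushes buffered sections into the archive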
@skipIf(not bool(pytest), 'Pytest needs to be installed')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class SaltSupportRunnerTestCase(TestCase):
    '''
    Test the runner class.
    '''

    def setUp(self):
        '''
        Set up the test suite.
        :return:
        '''
        self.archive_path = '/dev/null'
        self.output_device = MagicMock()
        self.runner = SaltSupport()
        self.runner.collector = SupportDataCollector(self.archive_path, self.output_device)

    def tearDown(self):
        '''
        Tear down.

        :return:
        '''
        del self.archive_path
        del self.output_device
        del self.runner

    def test_function_config(self):
        '''
        Test function config formation.

        :return:
        '''
        self.runner.config = {}
        msg = 'Electromagnetic energy loss'
        assert self.runner._setup_fun_config({'description': msg}) == {'print_metadata': False,
                                                                       'file_client': 'local',
                                                                       'fun': '', 'kwarg': {},
                                                                       'description': msg,
                                                                       'cache_jobs': False, 'arg': []}

    def test_local_caller(self):
        '''
        Test the local caller.

        :return:
        '''
        msg = 'Because of network lag due to too many people playing deathmatch'
        caller = MagicMock()
        caller().call = MagicMock(return_value=msg)

        self.runner._get_caller = caller
        self.runner.out = MagicMock()
        assert self.runner._local_call({}) == msg

        caller().call = MagicMock(side_effect=SystemExit)
        assert self.runner._local_call({}) == 'Data is not available at this moment'

        err_msg = "The UPS doesn't have a battery backup."
        caller().call = MagicMock(side_effect=Exception(err_msg))
        assert self.runner._local_call({}) == "Unhandled exception occurred: The UPS doesn't have a battery backup."

    def test_local_runner(self):
        '''
        Test the local runner.

        :return:
        '''
        msg = 'Big to little endian conversion error'
        runner = MagicMock()
        runner().run = MagicMock(return_value=msg)

        self.runner._get_runner = runner
        self.runner.out = MagicMock()
        assert self.runner._local_run({}) == msg

        runner().run = MagicMock(side_effect=SystemExit)
        assert self.runner._local_run({}) == 'Runner is not available at this moment'

        err_msg = 'Trojan horse ran out of hay'
        runner().run = MagicMock(side_effect=Exception(err_msg))
        assert self.runner._local_run({}) == 'Unhandled exception occurred: Trojan horse ran out of hay'

    @patch('salt.cli.support.intfunc', MagicMock(spec=[]))
    def test_internal_function_call_stub(self):
        '''
        Test that a missing internal function call is handled accordingly.

        :return:
        '''
        self.runner.out = MagicMock()
        out = self.runner._internal_function_call({'fun': 'everythingisawesome',
                                                   'arg': [], 'kwargs': {}})
        assert out == 'Function everythingisawesome is not available'

    def test_internal_function_call(self):
        '''
        Test that an existing internal function call is dispatched.

        :return:
        '''
        msg = 'Internet outage'
        intfunc = MagicMock()
        intfunc.everythingisawesome = MagicMock(return_value=msg)
        self.runner.out = MagicMock()
        with patch('salt.cli.support.intfunc', intfunc):
            out = self.runner._internal_function_call({'fun': 'everythingisawesome',
                                                       'arg': [], 'kwargs': {}})
            assert out == msg

    def test_get_action(self):
        '''
        Test that action meta gets parsed.

        :return:
        '''
        action_meta = {'run:jobs.list_jobs_filter': {'info': 'List jobs filter', 'args': [1]}}
        assert self.runner._get_action(action_meta) == ('List jobs filter', None,
                                                        {'fun': 'run:jobs.list_jobs_filter', 'kwargs': {}, 'arg': [1]})
        action_meta = {'user.info': {'info': 'Information about "usbmux"', 'args': ['usbmux']}}
        assert self.runner._get_action(action_meta) == ('Information about "usbmux"', None,
                                                        {'fun': 'user.info', 'kwargs': {}, 'arg': ['usbmux']})

    def test_extract_return(self):
        '''
        Test extracting the return from the output.

        :return:
        '''
        out = {'key': 'value'}
        assert self.runner._extract_return(out) == out
        assert self.runner._extract_return({'return': out}) == out

    def test_get_action_type(self):
        '''
        Test that action meta determines the action type.

        :return:
        '''
        action_meta = {'run:jobs.list_jobs_filter': {'info': 'List jobs filter', 'args': [1]}}
        assert self.runner._get_action_type(action_meta) == 'run'

        action_meta = {'user.info': {'info': 'Information about "usbmux"', 'args': ['usbmux']}}
        assert self.runner._get_action_type(action_meta) == 'call'

    @patch('os.path.exists', MagicMock(return_value=True))
    def test_cleanup(self):
        '''
        Test the cleanup routine.

        :return:
        '''
        arch = '/tmp/killme.zip'
        unlink = MagicMock()
        with patch('os.unlink', unlink):
            self.runner.config = {'support_archive': arch}
            self.runner.out = MagicMock()
            self.runner._cleanup()

        assert self.runner.out.warning.call_args[0][0] == 'Terminated earlier, cleaning up'
        unlink.assert_called_once_with(arch)

    @patch('os.path.exists', MagicMock(return_value=True))
    def test_check_existing_archive(self):
        '''
        Test checking for an existing archive.

        :return:
        '''
        arch = '/tmp/endothermal-recalibration.zip'
        unlink = MagicMock()
        with patch('os.unlink', unlink), patch('os.path.exists', MagicMock(return_value=False)):
            self.runner.config = {'support_archive': '',
                                  'support_archive_force_overwrite': True}
            self.runner.out = MagicMock()
            assert self.runner._check_existing_archive()
            assert self.runner.out.warning.call_count == 0

        with patch('os.unlink', unlink):
            self.runner.config = {'support_archive': arch,
                                  'support_archive_force_overwrite': False}
            self.runner.out = MagicMock()
            assert not self.runner._check_existing_archive()
            assert self.runner.out.warning.call_args[0][0] == 'File {} already exists.'.format(arch)

        with patch('os.unlink', unlink):
            self.runner.config = {'support_archive': arch,
                                  'support_archive_force_overwrite': True}
            self.runner.out = MagicMock()
            assert self.runner._check_existing_archive()
            assert self.runner.out.warning.call_args[0][0] == 'Overwriting existing archive: {}'.format(arch)
@skipIf(not bool(pytest), 'Pytest needs to be installed')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ProfileIntegrityTestCase(TestCase):
    '''
    Default profile integrity
    '''
    def setUp(self):
        '''
        Set up the test suite.

        :return:
        '''
        self.profiles = {}
        profiles = os.path.join(os.path.dirname(salt.cli.support.collector.__file__), 'profiles')
        for profile in os.listdir(profiles):
            self.profiles[profile.split('.')[0]] = os.path.join(profiles, profile)

    def tearDown(self):
        '''
        Tear down the test suite.

        :return:
        '''
        del self.profiles

    def _render_template_to_yaml(self, name, *args, **kwargs):
        '''
        Get a template reference for rendering.
        :return:
        '''
        with salt.utils.files.fopen(self.profiles[name]) as t_fh:
            template = t_fh.read()
        return yaml.load(jinja2.Environment().from_string(template).render(*args, **kwargs))

    def test_non_template_profiles_parseable(self):
        '''
        Test that the shipped default profiles are YAML parse-able.

        :return:
        '''
        for t_name in ['default', 'jobs-active', 'jobs-last', 'network', 'postgres']:
            with salt.utils.files.fopen(self.profiles[t_name]) as ref:
                try:
                    yaml.load(ref)
                    parsed = True
                except Exception:
                    parsed = False
            assert parsed

    def test_users_template_profile(self):
        '''
        Test the users template profile.

        :return:
        '''
        users_data = self._render_template_to_yaml('users', salt=MagicMock(return_value=['pokemon']))
        assert len(users_data['all-users']) == 5
        for user_data in users_data['all-users']:
            for tgt in ['user.list_groups', 'shadow.info', 'cron.raw_cron']:
                if tgt in user_data:
                    assert user_data[tgt]['args'] == ['pokemon']

    def test_jobs_trace_template_profile(self):
        '''
        Test the jobs-trace template profile.

        :return:
        '''
        jobs_trace = self._render_template_to_yaml('jobs-trace', runners=MagicMock(return_value=['0000']))
        assert len(jobs_trace['jobs-details']) == 1
        assert jobs_trace['jobs-details'][0]['run:jobs.list_job']['info'] == 'Details on JID 0000'
        assert jobs_trace['jobs-details'][0]['run:jobs.list_job']['args'] == [0]
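The profile tests rely on a render-then-parse round trip: each profile is a Jinja template that must yield valid YAML once rendered. A self-contained sketch of that pattern (the template string is invented; only the mechanism matches):

import jinja2
import yaml

template = '''
all-users:
{% for user in salt('user.getent') %}
  - user.list_groups:
      info: Groups of {{ user }}
      args: [{{ user }}]
{% endfor %}
'''
rendered = jinja2.Environment().from_string(template).render(salt=lambda _: ['pokemon'])
print(yaml.safe_load(rendered))
# {'all-users': [{'user.list_groups': {'info': 'Groups of pokemon', 'args': ['pokemon']}}]}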
@@ -1001,3 +1001,32 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin):
            assert ret[count]['model'] == device[2]
            assert ret[count]['vendor'] == device[3]
            count += 1

    @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
    def test_kernelparams_return(self):
        expectations = [
            ('BOOT_IMAGE=/vmlinuz-3.10.0-693.2.2.el7.x86_64',
             {'kernelparams': [('BOOT_IMAGE', '/vmlinuz-3.10.0-693.2.2.el7.x86_64')]}),
            ('root=/dev/mapper/centos_daemon-root',
             {'kernelparams': [('root', '/dev/mapper/centos_daemon-root')]}),
            ('rhgb quiet ro',
             {'kernelparams': [('rhgb', None), ('quiet', None), ('ro', None)]}),
            ('param="value1"',
             {'kernelparams': [('param', 'value1')]}),
            ('param="value1 value2 value3"',
             {'kernelparams': [('param', 'value1 value2 value3')]}),
            ('param="value1 value2 value3" LANG="pl" ro',
             {'kernelparams': [('param', 'value1 value2 value3'), ('LANG', 'pl'), ('ro', None)]}),
            ('ipv6.disable=1',
             {'kernelparams': [('ipv6.disable', '1')]}),
            ('param="value1:value2:value3"',
             {'kernelparams': [('param', 'value1:value2:value3')]}),
            ('param="value1,value2,value3"',
             {'kernelparams': [('param', 'value1,value2,value3')]}),
            ('param="value1" param="value2" param="value3"',
             {'kernelparams': [('param', 'value1'), ('param', 'value2'), ('param', 'value3')]}),
        ]

        for cmdline, expectation in expectations:
            with patch('salt.utils.files.fopen', mock_open(read_data=cmdline)):
                self.assertEqual(core.kernelparams(), expectation)
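The expectations above fully specify the grain's parsing rules: bare flags map to None, key=value splits on the first '=', and double-quoted values may contain spaces. A standalone sketch that satisfies them (an illustration, not the actual core.kernelparams implementation, which reads /proc/cmdline):

import shlex

def kernelparams(cmdline):
    '''Parse a kernel command line into (key, value) pairs per the rules above.'''
    params = []
    for token in shlex.split(cmdline):    # shlex honors double-quoted values
        if '=' in token:
            key, value = token.split('=', 1)
            params.append((key, value))
        else:
            params.append((token, None))  # bare flag, no value
    return {'kernelparams': params}

assert kernelparams('rhgb quiet ro') == {'kernelparams': [('rhgb', None), ('quiet', None), ('ro', None)]}
assert kernelparams('param="v1 v2" LANG="pl"') == {'kernelparams': [('param', 'v1 v2'), ('LANG', 'pl')]}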
@@ -267,7 +267,7 @@ class NetworkTestCase(TestCase, LoaderModuleMockMixin):
        self.assertDictEqual(network.connect('host', 'port'),
                             {'comment': ret, 'result': True})

-   @skipIf(bool(ipaddress) is False, 'unable to import \'ipaddress\'')
+   @skipIf(not bool(ipaddress), 'unable to import \'ipaddress\'')
    def test_is_private(self):
        '''
        Test that the given IP address is recognized as a private address

@@ -279,7 +279,7 @@ class NetworkTestCase(TestCase, LoaderModuleMockMixin):
                             return_value=True):
            self.assertTrue(network.is_private('::1'))

-   @skipIf(bool(ipaddress) is False, 'unable to import \'ipaddress\'')
+   @skipIf(not bool(ipaddress), 'unable to import \'ipaddress\'')
    def test_is_loopback(self):
        '''
        Test that the given IP address is recognized as a loopback address
Some files were not shown because too many files have changed in this diff.