Merge pull request #45397 from rallytime/merge-develop

[develop] Merge forward from oxygen to develop
This commit is contained in:
Nicole Thomas 2018-01-14 07:45:04 -05:00 committed by GitHub
commit 2c0d274e4f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
224 changed files with 3173 additions and 2555 deletions

View File

@ -51,6 +51,9 @@ provisioner:
- .travis.yml
state_top:
base:
"os:Windows":
- match: grain
- prep_windows
"*":
- git.salt
<% if File.exists?(platformsfile) %>
@ -119,39 +122,49 @@ platforms:
- name: windows-2012r2
driver:
box: mwrock/Windows2012R2
communicator: winrm
name: vagrant
gui: true
username: administrator
password: Pass@word1
customize:
cpus: 4
memory: 8192
transport:
name: winrm
username: Vagrant
password: vagrant
provisioner:
init_environment: |
Clear-Host
$AddedLocation ="c:\salt"
$Reg = "Registry::HKLM\System\CurrentControlSet\Control\Session Manager\Environment"
$OldPath = (Get-ItemProperty -Path "$Reg" -Name PATH).Path
$NewPath = $OldPath + ";" + $AddedLocation
Set-ItemProperty -Path "$Reg" -Name PATH -Value $NewPath
salt_bootstrap_url: https://raw.githubusercontent.com/saltstack/salt-bootstrap/develop/bootstrap-salt.ps1
salt_bootstrap_options: ''
salt_bootstrap_options: -version <%= version %>
verifier:
windows: true
types:
- unit
coverage_xml: false
save:
$env:TEMP/salt-runtests.log: artifacts/logs/salt-runtests.log
/salt/var/log/salt/minion: artifacts/logs/minion
- name: windows-2016
driver:
box: mwrock/Windows2016
communicator: winrm
name: vagrant
gui: true
customize:
cpus: 4
memory: 8192
transport:
name: winrm
username: Vagrant
password: vagrant
gui: true
provisioner:
init_environment: |
Clear-Host
$AddedLocation ="c:\salt;c:\salt\bin\Scripts"
$Reg = "Registry::HKLM\System\CurrentControlSet\Control\Session Manager\Environment"
$OldPath = (Get-ItemProperty -Path "$Reg" -Name PATH).Path
$NewPath = $OldPath + ";" + $AddedLocation
Set-ItemProperty -Path "$Reg" -Name PATH -Value $NewPath
salt_bootstrap_url: https://raw.githubusercontent.com/saltstack/salt-bootstrap/develop/bootstrap-salt.ps1
salt_bootstrap_options: ''
salt_bootstrap_options: -version <%= version %>
verifier:
windows: true
types:
- unit
coverage_xml: false
save:
$env:TEMP/salt-runtests.log: artifacts/logs/salt-runtests.log
/salt/var/log/salt/minion: artifacts/logs/minion
<% end %>
<% end %>
suites:
@ -164,10 +177,15 @@ suites:
base:
"*":
- jenkins
"os:Windows":
- match: grain
- windows
jenkins.sls:
testing_dir: /tmp/kitchen/testing
testing_dir: "{{salt.config.get('root_dir')|replace('\\', '\\\\')}}/testing"
clone_repo: false
salttesting_namespec: salttesting==2017.6.1
windows.sls:
virtualenv_path: 'c:\Python27\Scripts\pip.exe'
- name: py3
excludes:
- centos-6
@ -180,11 +198,16 @@ suites:
base:
"*":
- jenkins
"os:Windows":
- match: grain
- windows
jenkins.sls:
testing_dir: /tmp/kitchen/testing
testing_dir: "{{salt.config.get('root_dir')|replace('\\', '\\\\')}}/testing"
clone_repo: false
py3: true
salttesting_namespec: salttesting==2017.6.1
windows.sls:
virtualenv_path: 'c:\Python35\Scripts\pip.exe'
<% if File.exists?(verifierfile) %>
<%= ERB.new(File.read(verifierfile)).result %>

View File

@ -2,7 +2,7 @@
source 'https://rubygems.org'
gem 'test-kitchen', :git => 'https://github.com/test-kitchen/test-kitchen.git'
gem 'test-kitchen', :git => 'https://github.com/gtmanfred/test-kitchen.git'
gem 'kitchen-salt', :git => 'https://github.com/saltstack/kitchen-salt.git'
gem 'kitchen-sync'
gem 'git'
@ -20,7 +20,7 @@ group :windows do
gem 'vagrant-wrapper'
gem 'kitchen-vagrant'
gem 'winrm', '~>2.0'
gem 'winrm-fs', '~>1.0'
gem 'winrm-fs', :git => 'https://github.com/gtmanfred/winrm-fs.git'
end
group :ec2 do

View File

@ -2355,7 +2355,7 @@ Example:
fileserver_backend:
- roots
- git
- gitfs
.. note::
For masterless Salt, this parameter must be specified in the minion config
@ -2538,6 +2538,19 @@ nothing is ignored.
fileserver, it is good practice to include ``'\*.swp'`` in the
:conf_master:`file_ignore_glob`.
.. conf_master:: master_roots
``master_roots``
----------------
Default: ``/srv/salt-master``
A master-only copy of the :conf_master:`file_roots` dictionary, used by the
state compiler.
.. code-block:: yaml
master_roots: /srv/salt-master
roots: Master's Local File Server
---------------------------------
@ -2581,21 +2594,28 @@ Example:
For masterless Salt, this parameter must be specified in the minion config
file.
.. conf_master:: master_roots
.. conf_master:: roots_update_interval
``master_roots``
----------------
``roots_update_interval``
*************************
Default: ``/srv/salt-master``
.. versionadded:: Oxygen
A master-only copy of the file_roots dictionary, used by the state compiler.
Default: ``60``
This option defines the update interval (in seconds) for
:conf_master:`file_roots`.
.. note::
Since ``file_roots`` consists of files local to the minion, the update
process for this fileserver backend just reaps the cache for this backend.
.. code-block:: yaml
master_roots: /srv/salt-master
roots_update_interval: 120
git: Git Remote File Server Backend
-----------------------------------
gitfs: Git Remote File Server Backend
-------------------------------------
.. conf_master:: gitfs_remotes
@ -2891,6 +2911,22 @@ they were created by a different master.
.. __: http://www.gluster.org/
.. conf_master:: gitfs_update_interval
``gitfs_update_interval``
*************************
.. versionadded:: Oxygen
Default: ``60``
This option defines the default update interval (in seconds) for gitfs remotes.
The update interval can also be set for a single repository via a
:ref:`per-remote config option <gitfs-per-remote-config>`
.. code-block:: yaml
gitfs_update_interval: 120
GitFS Authentication Options
****************************
@ -3049,8 +3085,8 @@ can be found in the :ref:`GitFS Walkthrough <gitfs-custom-refspecs>`.
- '+refs/pull/*/head:refs/remotes/origin/pr/*'
- '+refs/pull/*/merge:refs/remotes/origin/merge/*'
hg: Mercurial Remote File Server Backend
----------------------------------------
hgfs: Mercurial Remote File Server Backend
------------------------------------------
.. conf_master:: hgfs_remotes
@ -3249,8 +3285,24 @@ blacklist will be exposed as fileserver environments.
- v1.*
- 'mybranch\d+'
svn: Subversion Remote File Server Backend
------------------------------------------
.. conf_master:: hgfs_update_interval
``hgfs_update_interval``
************************
.. versionadded:: Oxygen
Default: ``60``
This option defines the update interval (in seconds) for
:conf_master:`hgfs_remotes`.
.. code-block:: yaml
hgfs_update_interval: 120
svnfs: Subversion Remote File Server Backend
--------------------------------------------
.. conf_master:: svnfs_remotes
@ -3460,8 +3512,24 @@ will be exposed as fileserver environments.
- v1.*
- 'mybranch\d+'
minion: MinionFS Remote File Server Backend
-------------------------------------------
.. conf_master:: svnfs_update_interval
``svnfs_update_interval``
*************************
.. versionadded:: Oxygen
Default: ``60``
This option defines the update interval (in seconds) for
:conf_master:`svnfs_remotes`.
.. code-block:: yaml
svnfs_update_interval: 120
minionfs: MinionFS Remote File Server Backend
---------------------------------------------
.. conf_master:: minionfs_env
@ -3550,6 +3618,72 @@ exposed.
- dev*
- 'mail\d+.mydomain.tld'
.. conf_master:: minionfs_update_interval
``minionfs_update_interval``
****************************
.. versionadded:: Oxygen
Default: ``60``
This option defines the update interval (in seconds) for :ref:`MinionFS
<tutorial-minionfs>`.
.. note::
Since :ref:`MinionFS <tutorial-minionfs>` consists of files local to the
master, the update process for this fileserver backend just reaps the cache
for this backend.
.. code-block:: yaml
minionfs_update_interval: 120
azurefs: Azure File Server Backend
----------------------------------
.. versionadded:: 2015.8.0
See the :mod:`azurefs documentation <salt.fileserver.azurefs>` for usage
examples.
.. conf_master:: azurefs_update_interval
``azurefs_update_interval``
***************************
.. versionadded:: Oxygen
Default: ``60``
This option defines the update interval (in seconds) for azurefs.
.. code-block:: yaml
azurefs_update_interval: 120
s3fs: S3 File Server Backend
----------------------------
.. versionadded:: 0.16.0
See the :mod:`s3fs documentation <salt.fileserver.s3fs>` for usage examples.
.. conf_master:: s3fs_update_interval
``s3fs_update_interval``
************************
.. versionadded:: Oxygen
Default: ``60``
This option defines the update interval (in seconds) for s3fs.
.. code-block:: yaml
s3fs_update_interval: 120
.. _pillar-configuration-master:

View File

@ -86,7 +86,7 @@ Dependencies
Salt should run on any Unix-like platform so long as the dependencies are met.
* `Python 2.6`_ >= 2.6 <3.0
* `Python 2.7`_ >= 2.7 <3.0
* `msgpack-python`_ - High-performance message interchange format
* `YAML`_ - Python YAML bindings
* `Jinja2`_ - parsing Salt States (configurable in the master settings)

View File

@ -335,8 +335,39 @@ they failed. Here's some example pseudocode:
__context__['retcode'] = 1
return result
Variable Update Intervals for Fileserver Backends
-------------------------------------------------
Prior to this release, fileservers would be updated as part of a dedicated
"maintenance" process, in which various routine maintenance tasks were
performed. This tied the update interval to the :conf_master:`loop_interval`
config option, and also forced all fileservers to update at the same interval.
Oxygen adds the following configuration options for the various fileserver
backends:
- :conf_master:`roots_update_interval`
- :conf_master:`azurefs_update_interval`
- :conf_master:`gitfs_update_interval`
- :conf_master:`hgfs_update_interval`
- :conf_master:`minionfs_update_interval`
- :conf_master:`s3fs_update_interval`
- :conf_master:`svnfs_update_interval`
These allow for update intervals to be set for each individual backend. The
default value for each of these is 60 seconds.
In addition, for :ref:`GitFS <tutorial-gitfs>` it is also possible to apply
intervals to individual remotes. See :ref:`here <gitfs-update-intervals>` for
examples.
.. note::
git_pillar does not yet support variable update intervals; this is targeted
for the next feature release (Fluorine).
LDAP via External Authentication Changes
----------------------------------------
In this release of Salt, if LDAP Bind Credentials are supplied, then
these credentials will be used for all LDAP access except the first
authentication when a job is submitted. The first authentication will

View File

@ -209,13 +209,17 @@ Simple Configuration
To use the gitfs backend, only two configuration changes are required on the
master:
1. Include ``git`` in the :conf_master:`fileserver_backend` list in the master
config file:
1. Include ``gitfs`` in the :conf_master:`fileserver_backend` list in the
master config file:
.. code-block:: yaml
fileserver_backend:
- git
- gitfs
.. note::
``git`` also works here. Prior to the Oxygen release, *only* ``git``
would work.
2. Specify one or more ``git://``, ``https://``, ``file://``, or ``ssh://``
URLs in :conf_master:`gitfs_remotes` to configure which repositories to
@ -334,6 +338,7 @@ configured gitfs remotes):
* :conf_master:`gitfs_refspecs` (new in 2017.7.0)
* :conf_master:`gitfs_disable_saltenv_mapping` (new in Oxygen)
* :conf_master:`gitfs_ref_types` (new in Oxygen)
* :conf_master:`gitfs_update_interval` (new in Oxygen)
.. note::
pygit2 only supports disabling SSL verification in versions 0.23.2 and
@ -354,6 +359,7 @@ tremendous amount of customization. Here's some example usage:
- mountpoint: salt://bar
- base: salt-base
- ssl_verify: False
- update_interval: 120
- https://foo.com/bar.git:
- name: second_bar_repo
- root: other/salt
@ -427,6 +433,8 @@ In the example configuration above, the following is true:
``insecure_auth`` parameter must be used (as in the fourth remote) to
force Salt to authenticate to an ``http://`` remote.
9. The first remote will wait 120 seconds between updates instead of 60.
.. _gitfs-per-saltenv-config:
Per-Saltenv Configuration Parameters
@ -562,6 +570,32 @@ single branch.
- http://foo.com/quux.git:
- all_saltenvs: anything
.. _gitfs-update-intervals:
Update Intervals
================
Prior to the Oxygen release, GitFS would update its fileserver backends as part
of a dedicated "maintenance" process, in which various routine maintenance
tasks were performed. This tied the update interval to the
:conf_master:`loop_interval` config option, and also forced all fileservers to
update at the same interval.
Now it is possible to make GitFS update at its own interval, using
:conf_master:`gitfs_update_interval`:
.. code-block:: yaml
gitfs_update_interval: 180
gitfs_remotes:
- https://foo.com/foo.git
- https://foo.com/bar.git:
- update_interval: 120
Using the above configuration, the first remote would update every three
minutes, while the second remote would update every two minutes.
Configuration Order of Precedence
=================================

View File

@ -73,7 +73,7 @@ pushed files are made available.
Simple Configuration
--------------------
To use the :mod:`minionfs <salt.fileserver.minionfs>` backend, add ``minion``
To use the :mod:`minionfs <salt.fileserver.minionfs>` backend, add ``minionfs``
to the list of backends in the :conf_master:`fileserver_backend` configuration
option on the master:
@ -83,10 +83,13 @@ option on the master:
fileserver_backend:
- roots
- minion
- minionfs
.. note::
As described earlier, ``file_recv: True`` is also needed to enable the
``minion`` also works here. Prior to the Oxygen release, *only* ``minion``
would work.
Also, as described earlier, ``file_recv: True`` is needed to enable the
master to receive files pushed from minions. As always, changes to the
master configuration require a restart of the ``salt-master`` service.
@ -127,7 +130,7 @@ blacklist, can be found below:
fileserver_backend:
- roots
- minion
- minionfs
minionfs_mountpoint: salt://minionfs

View File

@ -247,7 +247,9 @@ Start_Process_and_test_exitcode "$($ini['Settings']['Scripts2Dir'])\pip.exe" "in
# Move DLL's to Python Root
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
Move-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['Python2Dir'])" -Force
# The dlls have to be in Python directory and the site-packages\win32 directory
Copy-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['Python2Dir'])" -Force
Move-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs2Dir'])\win32" -Force
# Create gen_py directory
Write-Output " - $script_name :: Creating gen_py Directory . . ."

View File

@ -246,8 +246,10 @@ DownloadFileWithProgress $url $file
Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "install $file " "pip install PyWin32"
# Move DLL's to Python Root
# The dlls have to be in Python directory and the site-packages\win32 directory
Write-Output " - $script_name :: Moving PyWin32 DLLs . . ."
Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['Python3Dir'])" -Force
Copy-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['Python3Dir'])" -Force
Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs3Dir'])\win32" -Force
# Create gen_py directory
Write-Output " - $script_name :: Creating gen_py Directory . . ."

View File

@ -2,7 +2,7 @@
from __future__ import print_function
import sys
import os.path
import os
import getopt
@ -16,7 +16,9 @@ def display_help():
print('# Parameters: #')
print('# -f, --file : target file #')
print('# -s, --search : term to search for #')
print('# default is "C:\Python" #')
print('# Default is the base path for the python #')
print('# executable that is running this script. #')
print('# In Py2 that would be C:\\Python27 #')
print('# -r, --replace : replace with this #')
print('# default is ".." #')
print('# #')
@ -29,16 +31,12 @@ def display_help():
def main(argv):
target = ''
python_dir = 'Python{0}{1}'.format(sys.version_info[0], sys.version_info[1])
if sys.version_info >= (3, 5):
from win32com.shell import shellcon, shell
search = shell.SHGetFolderPath(0, shellcon.CSIDL_PROGRAM_FILES, 0, 0)
search = os.path.join(search, python_dir)
else:
search = os.path.join('C:\\', python_dir)
search = os.path.dirname(sys.executable)
replace = '..'
try:
opts, args = getopt.getopt(argv,"hf:s:r:",["file=","search=", "replace="])
opts, args = getopt.getopt(argv,
"hf:s:r:",
["file=", "search=", "replace="])
except getopt.GetoptError:
display_help()
for opt, arg in opts:
@ -56,10 +54,10 @@ def main(argv):
if sys.version_info >= (3, 0):
search = search.encode('utf-8')
replace = replace.encode('utf-8')
f = open( target, 'rb' ).read()
f = f.replace( search, replace )
f = f.replace( search.lower(), replace )
open( target, 'wb' ).write(f)
f = open(target, 'rb').read()
f = f.replace(search, replace)
f = f.replace(search.lower(), replace)
open(target, 'wb').write(f)
if __name__ == "__main__":

View File

@ -25,6 +25,7 @@ PyMySQL==0.7.11
pyOpenSSL==17.5.0
python-dateutil==2.6.1
python-gnupg==0.4.1
pythonnet==2.3.0
pyyaml==3.12
pyzmq==16.0.3
requests==2.18.4

View File

@ -9,7 +9,7 @@ found by reading the salt documentation:
'''
# Import python libraries
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import salt libs
import salt.utils.stringutils

View File

@ -14,13 +14,14 @@ so that any external authentication system can be used inside of Salt
# 6. Interface to verify tokens
# Import python libs
from __future__ import absolute_import, print_function
from __future__ import absolute_import, print_function, unicode_literals
import collections
import time
import logging
import random
import getpass
from salt.ext.six.moves import input
from salt.ext import six
# Import salt libs
import salt.config
@ -97,7 +98,7 @@ class LoadAuth(object):
else:
return self.auth[fstr](*fcall['args'])
except Exception as e:
log.debug('Authentication module threw {0}'.format(e))
log.debug('Authentication module threw %s', e)
return False
def time_auth(self, load):
@ -141,7 +142,7 @@ class LoadAuth(object):
try:
return self.auth[fstr](*fcall['args'], **fcall['kwargs'])
except Exception as e:
log.debug('Authentication module threw {0}'.format(e))
log.debug('Authentication module threw %s', e)
return None
def __process_acl(self, load, auth_list):
@ -157,7 +158,7 @@ class LoadAuth(object):
try:
return self.auth[fstr](auth_list, self.opts)
except Exception as e:
log.debug('Authentication module threw {0}'.format(e))
log.debug('Authentication module threw %s', e)
return auth_list
def get_groups(self, load):
@ -382,7 +383,7 @@ class LoadAuth(object):
auth_list = self.__process_acl(load, auth_list)
log.trace("Compiled auth_list: {0}".format(auth_list))
log.trace('Compiled auth_list: %s', auth_list)
return auth_list
@ -549,11 +550,7 @@ class Authorize(object):
try:
token = self.loadauth.get_tok(load['token'])
except Exception as exc:
log.error(
'Exception occurred when generating auth token: {0}'.format(
exc
)
)
log.error('Exception occurred when generating auth token: %s', exc)
yield {}
if not token:
log.warning('Authentication failure of type "token" occurred.')
@ -583,9 +580,7 @@ class Authorize(object):
if not self.loadauth.time_auth(load):
continue
except Exception as exc:
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
log.error('Exception occurred while authenticating: %s', exc)
continue
yield {'sub_auth': sub_auth, 'name': name}
yield {}
@ -671,7 +666,7 @@ class Resolver(object):
def _send_token_request(self, load):
if self.opts['transport'] in ('zeromq', 'tcp'):
master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \
':' + str(self.opts['ret_port'])
':' + six.text_type(self.opts['ret_port'])
channel = salt.transport.client.ReqChannel.factory(self.opts,
crypt='clear',
master_uri=master_uri)

View File

@ -48,7 +48,7 @@ indicated above, though the model DOES NOT have to be named
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import sys
@ -64,7 +64,7 @@ try:
except Exception as exc:
# If Django is installed and is not detected, uncomment
# the following line to display additional information
#log.warning('Could not load Django auth module. Found exception: {0}'.format(exc))
#log.warning('Could not load Django auth module. Found exception: %s', exc)
HAS_DJANGO = False
# pylint: enable=import-error
@ -208,5 +208,5 @@ def acl(username):
if not found:
auth_dict[a.user_fk.username].append({a.minion_or_fn_matcher: [a.minion_fn]})
log.debug('django auth_dict is {0}'.format(repr(auth_dict)))
log.debug('django auth_dict is %s', auth_dict)
return auth_dict

View File

@ -96,7 +96,7 @@ When using ``htdigest`` the ``^realm`` must be set:
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
@ -136,8 +136,8 @@ def _get_file_auth_config():
return False
if not os.path.exists(config['filename']):
log.error('salt.auth.file: The configured external_auth:file:^filename ({0})'
'does not exist on the filesystem'.format(config['filename']))
log.error('salt.auth.file: The configured external_auth:file:^filename (%s)'
'does not exist on the filesystem', config['filename'])
return False
config['username_field'] = int(config['username_field'])
@ -166,14 +166,14 @@ def _text(username, password, **kwargs):
try:
this_username = fields[username_field]
except IndexError:
log.error('salt.auth.file: username field ({0}) does not exist '
'in file {1}'.format(username_field, filename))
log.error('salt.auth.file: username field (%s) does not exist '
'in file %s', username_field, filename)
return False
try:
this_password = fields[password_field]
except IndexError:
log.error('salt.auth.file: password field ({0}) does not exist '
'in file {1}'.format(password_field, filename))
log.error('salt.auth.file: password field (%s) does not exist '
'in file %s', password_field, filename)
return False
if this_username == username:
@ -240,7 +240,7 @@ def _htfile(username, password, **kwargs):
kwargs['passlib_version'] = passlib.__version__
except ImportError:
log.error('salt.auth.file: The python-passlib library is required '
'for {0} filetype'.format(filetype))
'for %s filetype', filetype)
return False
if filetype == 'htdigest':

View File

@ -5,7 +5,7 @@ Provide authentication using OpenStack Keystone
:depends: - keystoneclient Python module
'''
from __future__ import absolute_import, print_function
from __future__ import absolute_import, print_function, unicode_literals
try:
from keystoneclient.v2_0 import client
from keystoneclient.exceptions import AuthorizationFailure, Unauthorized

View File

@ -6,7 +6,7 @@ Provide authentication using simple LDAP binds
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
from salt.ext import six
@ -103,9 +103,7 @@ class _LDAPConnection(object):
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT,
ldap.OPT_X_TLS_NEVER)
self.ldap = ldap.initialize(
'{0}'.format(self.uri)
)
self.ldap = ldap.initialize('{0}'.format(self.uri))
self.ldap.protocol_version = 3 # ldap.VERSION3
self.ldap.set_option(ldap.OPT_REFERRALS, 0) # Needed for AD
@ -222,14 +220,12 @@ def _bind(username, password, anonymous=False, opts=None):
# search for the user's DN to be used for the actual authentication
_ldap = _LDAPConnection(**connargs).ldap
log.debug(
'Running LDAP user dn search with filter:{0}, dn:{1}, '
'scope:{2}'.format(
paramvalues['filter'], basedn, scope
)
'Running LDAP user dn search with filter:%s, dn:%s, '
'scope:%s', paramvalues['filter'], basedn, scope
)
result = _ldap.search_s(basedn, int(scope), paramvalues['filter'])
if len(result) < 1:
log.warning('Unable to find user {0}'.format(username))
log.warning('Unable to find user %s', username)
return False
elif len(result) > 1:
# Active Directory returns something odd. Though we do not
@ -245,10 +241,10 @@ def _bind(username, password, anonymous=False, opts=None):
cns = [tup[0] for tup in result]
total_not_none = sum(1 for c in cns if c is not None)
if total_not_none > 1:
log.error('LDAP lookup found multiple results for user {0}'.format(username))
log.error('LDAP lookup found multiple results for user %s', username)
return False
elif total_not_none == 0:
log.error('LDAP lookup--unable to find CN matching user {0}'.format(username))
log.error('LDAP lookup--unable to find CN matching user %s', username)
return False
connargs['binddn'] = result[0][0]
@ -264,19 +260,15 @@ def _bind(username, password, anonymous=False, opts=None):
if paramvalues['anonymous']:
log.debug('Attempting anonymous LDAP bind')
else:
log.debug('Attempting LDAP bind with user dn: {0}'.format(connargs['binddn']))
log.debug('Attempting LDAP bind with user dn: %s', connargs['binddn'])
try:
ldap_conn = _LDAPConnection(**connargs).ldap
except Exception:
connargs.pop('bindpw', None) # Don't log the password
log.error('Failed to authenticate user dn via LDAP: {0}'.format(connargs))
log.error('Failed to authenticate user dn via LDAP: %s', connargs)
log.debug('Error authenticating user dn via LDAP:', exc_info=True)
return False
log.debug(
'Successfully authenticated user dn via LDAP: {0}'.format(
connargs['binddn']
)
)
log.debug('Successfully authenticated user dn via LDAP: %s', connargs['binddn'])
return ldap_conn
@ -331,27 +323,27 @@ def groups(username, **kwargs):
ldap.SCOPE_SUBTREE,
get_user_dn_search, ['distinguishedName'])
except Exception as e:
log.error('Exception thrown while looking up user DN in AD: {0}'.format(e))
log.error('Exception thrown while looking up user DN in AD: %s', e)
return group_list
if not user_dn_results:
log.error('Could not get distinguished name for user {0}'.format(username))
log.error('Could not get distinguished name for user %s', username)
return group_list
# LDAP results are always tuples. First entry in the tuple is the DN
dn = ldap.filter.escape_filter_chars(user_dn_results[0][0])
ldap_search_string = '(&(member={0})(objectClass={1}))'.format(dn, _config('groupclass'))
log.debug('Running LDAP group membership search: {0}'.format(ldap_search_string))
log.debug('Running LDAP group membership search: %s', ldap_search_string)
try:
search_results = bind.search_s(_config('basedn'),
ldap.SCOPE_SUBTREE,
ldap_search_string,
[_config('accountattributename'), 'cn'])
except Exception as e:
log.error('Exception thrown while retrieving group membership in AD: {0}'.format(e))
log.error('Exception thrown while retrieving group membership in AD: %s', e)
return group_list
for _, entry in search_results:
if 'cn' in entry:
group_list.append(entry['cn'][0])
log.debug('User {0} is a member of groups: {1}'.format(username, group_list))
log.debug('User %s is a member of groups: %s', username, group_list)
elif _config('freeipa'):
escaped_username = ldap.filter.escape_filter_chars(username)
@ -367,7 +359,7 @@ def groups(username, **kwargs):
if username == user.split(',')[0].split('=')[-1]:
group_list.append(entry.split(',')[0].split('=')[-1])
log.debug('User {0} is a member of groups: {1}'.format(username, group_list))
log.debug('User %s is a member of groups: %s', username, group_list)
if not auth(username, kwargs['password']):
log.error('LDAP username and password do not match')
@ -390,7 +382,7 @@ def groups(username, **kwargs):
if username == user.split(',')[0].split('=')[-1]:
for group in entry[_config('groupattribute')]:
group_list.append(group.split(',')[0].split('=')[-1])
log.debug('User {0} is a member of groups: {1}'.format(username, group_list))
log.debug('User %s is a member of groups: %s', username, group_list)
# Only test user auth on first call for job.
# 'show_jid' only exists on first payload so we can use that for the conditional.
@ -465,13 +457,13 @@ def __expand_ldap_entries(entries, opts=None):
for minion_id in retrieved_minion_ids:
acl_tree.append({minion_id: permissions})
log.trace('Expanded acl_tree is: {0}'.format(acl_tree))
log.trace('Expanded acl_tree is: %s', acl_tree)
except ldap.NO_SUCH_OBJECT:
pass
else:
acl_tree.append({minion_or_ou: matchers})
log.trace('__expand_ldap_entries: {0}'.format(acl_tree))
log.trace('__expand_ldap_entries: %s', acl_tree)
return acl_tree

View File

@ -49,7 +49,7 @@ Enable MySQL authentication.
:depends: - MySQL-python Python module
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
@ -75,7 +75,7 @@ def __get_connection_info():
conn_info['auth_sql'] = __opts__['mysql_auth']['auth_sql']
except KeyError as e:
log.error('{0} does not exist'.format(e))
log.error('%s does not exist', e)
return None
return conn_info

View File

@ -36,7 +36,7 @@ authenticated against. This defaults to `login`
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
from ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof
from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int
from ctypes.util import find_library

View File

@ -16,7 +16,7 @@ TODO: Add a 'ca_dir' option to configure a directory of CA files, a la Apache.
:depends: - pyOpenSSL module
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import third party libs
@ -78,8 +78,8 @@ def auth(username, password, **kwargs):
cacert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, f.read())
log.debug('Attempting to authenticate via pki.')
log.debug('Using CA file: {0}'.format(cacert_file))
log.debug('Certificate contents: {0}'.format(pem))
log.debug('Using CA file: %s', cacert_file)
log.debug('Certificate contents: %s', pem)
# Get the signing algorithm
algo = cert.get_signature_algorithm()
@ -122,8 +122,8 @@ def auth(username, password, **kwargs):
try:
c.verify(cacert, sig, der_cert, algo)
assert dict(cert.get_subject().get_components())['CN'] == username, "Certificate's CN should match the username"
log.info('Successfully authenticated certificate: {0}'.format(pem))
log.info('Successfully authenticated certificate: %s', pem)
return True
except (OpenSSL.crypto.Error, AssertionError):
log.info('Failed to authenticate certificate: {0}'.format(pem))
log.info('Failed to authenticate certificate: %s', pem)
return False

View File

@ -24,7 +24,7 @@ as above.
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
@ -61,10 +61,10 @@ def auth(username, password):
result = salt.utils.http.query(url, method='POST', data=data, status=True,
decode=True)
if result['status'] == 200:
log.debug('eauth REST call returned 200: {0}'.format(result))
log.debug('eauth REST call returned 200: %s', result)
if result['dict'] is not None:
return result['dict']
return True
else:
log.debug('eauth REST call failed: {0}'.format(result))
log.debug('eauth REST call failed: %s', result)
return False

View File

@ -31,7 +31,7 @@ frontal.
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)

View File

@ -39,7 +39,7 @@ the API key will be updated on all the YubiCloud servers.
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
from __future__ import print_function
import logging
@ -80,7 +80,7 @@ def auth(username, password):
try:
return client.verify(password)
except yubico_exceptions.StatusCodeError as e:
log.info('Unable to verify YubiKey `{0}`'.format(e))
log.info('Unable to verify YubiKey `%s`', e)
return False

View File

@ -77,6 +77,12 @@ class SSHHighState(salt.state.BaseHighState):
self.matcher = salt.minion.Matcher(self.opts)
self.tops = salt.loader.tops(self.opts)
self._pydsl_all_decls = {}
self._pydsl_render_stack = []
def push_active(self):
salt.state.HighState.stack.append(self)
def load_dynamic(self, matches):
'''
Stub out load_dynamic

View File

@ -92,6 +92,28 @@ def _merge_extra_filerefs(*args):
return ','.join(ret)
def _cleanup_slsmod_low_data(low_data):
'''
Set "slsmod" keys to None to make
low_data JSON serializable
'''
for i in low_data:
if 'slsmod' in i:
i['slsmod'] = None
def _cleanup_slsmod_high_data(high_data):
'''
Set "slsmod" keys to None to make
high_data JSON serializable
'''
for i in six.itervalues(high_data):
if 'stateconf' in i:
stateconf_data = i['stateconf'][1]
if 'slsmod' in stateconf_data:
stateconf_data['slsmod'] = None
def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
'''
Create the seed file for a state.sls run
@ -105,6 +127,7 @@ def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
__pillar__,
__salt__,
__context__['fileclient'])
st_.push_active()
if isinstance(mods, six.string_types):
mods = mods.split(',')
high_data, errors = st_.render_highstate({saltenv: mods})
@ -140,6 +163,7 @@ def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
_cleanup_slsmod_low_data(chunks)
trans_tar = salt.client.ssh.state.prep_trans_tar(
opts,
__context__['fileclient'],
@ -361,6 +385,7 @@ def high(data, **kwargs):
__pillar__,
__salt__,
__context__['fileclient'])
st_.push_active()
chunks = st_.state.compile_high_data(data)
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
@ -374,6 +399,7 @@ def high(data, **kwargs):
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
_cleanup_slsmod_low_data(chunks)
trans_tar = salt.client.ssh.state.prep_trans_tar(
opts,
__context__['fileclient'],
@ -598,6 +624,7 @@ def highstate(test=None, **kwargs):
__pillar__,
__salt__,
__context__['fileclient'])
st_.push_active()
chunks = st_.compile_low_chunks()
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
@ -616,6 +643,7 @@ def highstate(test=None, **kwargs):
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
_cleanup_slsmod_low_data(chunks)
trans_tar = salt.client.ssh.state.prep_trans_tar(
opts,
__context__['fileclient'],
@ -684,6 +712,7 @@ def top(topfn, test=None, **kwargs):
__salt__,
__context__['fileclient'])
st_.opts['state_top'] = os.path.join('salt://', topfn)
st_.push_active()
chunks = st_.compile_low_chunks()
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
@ -697,6 +726,7 @@ def top(topfn, test=None, **kwargs):
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
_cleanup_slsmod_low_data(chunks)
trans_tar = salt.client.ssh.state.prep_trans_tar(
opts,
__context__['fileclient'],
@ -756,7 +786,10 @@ def show_highstate(**kwargs):
__pillar__,
__salt__,
__context__['fileclient'])
return st_.compile_highstate()
st_.push_active()
chunks = st_.compile_highstate()
_cleanup_slsmod_high_data(chunks)
return chunks
def show_lowstate(**kwargs):
@ -776,7 +809,10 @@ def show_lowstate(**kwargs):
__pillar__,
__salt__,
__context__['fileclient'])
return st_.compile_low_chunks()
st_.push_active()
chunks = st_.compile_low_chunks()
_cleanup_slsmod_low_data(chunks)
return chunks
def sls_id(id_, mods, test=None, queue=False, **kwargs):
@ -892,6 +928,7 @@ def show_sls(mods, saltenv='base', test=None, **kwargs):
__pillar__,
__salt__,
__context__['fileclient'])
st_.push_active()
if isinstance(mods, six.string_types):
mods = mods.split(',')
high_data, errors = st_.render_highstate({saltenv: mods})
@ -906,6 +943,7 @@ def show_sls(mods, saltenv='base', test=None, **kwargs):
# Verify that the high data is structurally sound
if errors:
return errors
_cleanup_slsmod_high_data(high_data)
return high_data
@ -935,6 +973,7 @@ def show_low_sls(mods, saltenv='base', test=None, **kwargs):
__pillar__,
__salt__,
__context__['fileclient'])
st_.push_active()
if isinstance(mods, six.string_types):
mods = mods.split(',')
high_data, errors = st_.render_highstate({saltenv: mods})
@ -950,6 +989,7 @@ def show_low_sls(mods, saltenv='base', test=None, **kwargs):
if errors:
return errors
ret = st_.state.compile_high_data(high_data)
_cleanup_slsmod_low_data(ret)
return ret

View File

@ -12,8 +12,7 @@ Primary interfaces for the salt-cloud system
# the VM data will be in opts['profiles']
# Import python libs
from __future__ import print_function
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import logging
@ -77,7 +76,7 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
# Logfile is not using Syslog, verify
verify_files([logfile], salt_master_user)
except (IOError, OSError) as err:
log.error('Error while verifying the environment: {0}'.format(err))
log.error('Error while verifying the environment: %s', err)
sys.exit(err.errno)
# Setup log file logging
@ -131,9 +130,7 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
self.handle_exception(msg, exc)
elif self.config.get('map', None):
log.info(
'Applying map from \'{0}\'.'.format(self.config['map'])
)
log.info('Applying map from \'%s\'.', self.config['map'])
try:
ret = mapper.interpolated_map(
query=self.selected_query_option
@ -189,7 +186,7 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
'instances.'.format(map_file, names)
self.handle_exception(msg, SaltCloudSystemExit)
log.info('Applying map from \'{0}\'.'.format(map_file))
log.info('Applying map from \'%s\'.', map_file)
matching = mapper.delete_map(query='list_nodes')
else:
matching = mapper.get_running_by_names(
@ -220,9 +217,7 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
elif self.options.action and (self.config.get('names', None) or
self.config.get('map', None)):
if self.config.get('map', None):
log.info(
'Applying map from \'{0}\'.'.format(self.config['map'])
)
log.info('Applying map from \'%s\'.', self.config['map'])
try:
names = mapper.get_vmnames_by_action(self.options.action)
except SaltCloudException as exc:
@ -304,9 +299,7 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
ret = {}
run_map = True
log.info(
'Applying map from \'{0}\'.'.format(self.config['map'])
)
log.info('Applying map from \'%s\'.', self.config['map'])
dmap = mapper.map_data()
msg = ''
@ -423,21 +416,19 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
msg = 'Error: {0}'.format(msg)
self.exit(
exc.exit_code,
'{0}\n'.format(
msg.format(str(exc).rstrip())
)
msg.format(exc).rstrip() + '\n'
)
# It's not a system exit but it's an error we can
# handle
self.error(
msg.format(str(exc))
)
self.error(msg.format(exc))
# This is a generic exception, log it, include traceback if
# debug logging is enabled and exit.
# pylint: disable=str-format-in-logging
log.error(
msg.format(exc),
# Show the traceback if the debug logging level is
# enabled
exc_info_on_loglevel=logging.DEBUG
)
# pylint: enable=str-format-in-logging
self.exit(salt.defaults.exitcodes.EX_GENERIC)

View File

@ -26,7 +26,7 @@ Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
@ -138,7 +138,7 @@ def avail_locations(call=None):
for region in items['Regions']['Region']:
ret[region['RegionId']] = {}
for item in region:
ret[region['RegionId']][item] = str(region[item])
ret[region['RegionId']][item] = six.text_type(region[item])
return ret
@ -173,7 +173,7 @@ def avail_images(kwargs=None, call=None):
for image in items['Images']['Image']:
ret[image['ImageId']] = {}
for item in image:
ret[image['ImageId']][item] = str(image[item])
ret[image['ImageId']][item] = six.text_type(image[item])
return ret
@ -195,7 +195,7 @@ def avail_sizes(call=None):
for image in items['InstanceTypes']['InstanceType']:
ret[image['InstanceTypeId']] = {}
for item in image:
ret[image['InstanceTypeId']][item] = str(image[item])
ret[image['InstanceTypeId']][item] = six.text_type(image[item])
return ret
@ -232,7 +232,7 @@ def list_availability_zones(call=None):
for zone in items['Zones']['Zone']:
ret[zone['ZoneId']] = {}
for item in zone:
ret[zone['ZoneId']][item] = str(zone[item])
ret[zone['ZoneId']][item] = six.text_type(zone[item])
return ret
@ -256,8 +256,9 @@ def list_nodes_min(call=None):
}
nodes = query(params)
log.debug('Total {0} instance found in Region {1}'.format(
nodes['TotalCount'], location)
log.debug(
'Total %s instance found in Region %s',
nodes['TotalCount'], location
)
if 'Code' in nodes or nodes['TotalCount'] == 0:
return ret
@ -289,7 +290,7 @@ def list_nodes(call=None):
'public_ips': node['public_ips'],
'private_ips': node['private_ips'],
'size': node['size'],
'state': str(node['state']),
'state': six.text_type(node['state']),
}
return ret
@ -313,8 +314,9 @@ def list_nodes_full(call=None):
}
result = query(params=params)
log.debug('Total {0} instance found in Region {1}'.format(
result['TotalCount'], location)
log.debug(
'Total %s instance found in Region %s',
result['TotalCount'], location
)
if 'Code' in result or result['TotalCount'] == 0:
return ret
@ -336,7 +338,7 @@ def list_nodes_full(call=None):
}
items = query(params=params)
if 'Code' in items:
log.warning('Query instance:{0} attribute failed'.format(instanceId))
log.warning('Query instance:%s attribute failed', instanceId)
continue
name = items['InstanceName']
@ -350,7 +352,7 @@ def list_nodes_full(call=None):
for item in items:
value = items[item]
if value is not None:
value = str(value)
value = six.text_type(value)
if item == "PublicIpAddress":
ret[name]['public_ips'] = items[item]['IpAddress']
if item == "InnerIpAddress" and 'private_ips' not in ret[name]:
@ -414,14 +416,14 @@ def get_image(vm_):
Return the image object to use
'''
images = avail_images()
vm_image = str(config.get_cloud_config_value(
vm_image = six.text_type(config.get_cloud_config_value(
'image', vm_, __opts__, search_global=False
))
if not vm_image:
raise SaltCloudNotFound('No image specified for this VM.')
if vm_image and str(vm_image) in images:
if vm_image and six.text_type(vm_image) in images:
return images[vm_image]['ImageId']
raise SaltCloudNotFound(
'The specified image, \'{0}\', could not be found.'.format(vm_image)
@ -440,7 +442,7 @@ def get_securitygroup(vm_):
if not securitygroup:
raise SaltCloudNotFound('No securitygroup ID specified for this VM.')
if securitygroup and str(securitygroup) in sgs:
if securitygroup and six.text_type(securitygroup) in sgs:
return sgs[securitygroup]['SecurityGroupId']
raise SaltCloudNotFound(
'The specified security group, \'{0}\', could not be found.'.format(
@ -453,14 +455,14 @@ def get_size(vm_):
Return the VM's size. Used by create_node().
'''
sizes = avail_sizes()
vm_size = str(config.get_cloud_config_value(
vm_size = six.text_type(config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
))
if not vm_size:
raise SaltCloudNotFound('No size specified for this VM.')
if vm_size and str(vm_size) in sizes:
if vm_size and six.text_type(vm_size) in sizes:
return sizes[vm_size]['InstanceTypeId']
raise SaltCloudNotFound(
@ -473,14 +475,14 @@ def __get_location(vm_):
Return the VM's location
'''
locations = avail_locations()
vm_location = str(config.get_cloud_config_value(
vm_location = six.text_type(config.get_cloud_config_value(
'location', vm_, __opts__, search_global=False
))
if not vm_location:
raise SaltCloudNotFound('No location specified for this VM.')
if vm_location and str(vm_location) in locations:
if vm_location and six.text_type(vm_location) in locations:
return locations[vm_location]['RegionId']
raise SaltCloudNotFound(
'The specified location, \'{0}\', could not be found.'.format(
@ -504,7 +506,7 @@ def start(name, call=None):
'The stop action must be called with -a or --action.'
)
log.info('Starting node {0}'.format(name))
log.info('Starting node %s', name)
instanceId = _get_node(name)['InstanceId']
@ -531,14 +533,14 @@ def stop(name, force=False, call=None):
'The stop action must be called with -a or --action.'
)
log.info('Stopping node {0}'.format(name))
log.info('Stopping node %s', name)
instanceId = _get_node(name)['InstanceId']
params = {
'Action': 'StopInstance',
'InstanceId': instanceId,
'ForceStop': str(force).lower()
'ForceStop': six.text_type(force).lower()
}
result = query(params)
@ -560,7 +562,7 @@ def reboot(name, call=None):
'The stop action must be called with -a or --action.'
)
log.info('Rebooting node {0}'.format(name))
log.info('Rebooting node %s', name)
instance_id = _get_node(name)['InstanceId']
@ -628,7 +630,7 @@ def create(vm_):
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
log.info('Creating Cloud VM %s', vm_['name'])
kwargs = {
'name': vm_['name'],
'size_id': get_size(vm_),
@ -641,9 +643,9 @@ def create(vm_):
if 'internet_chargetype' in vm_:
kwargs['InternetChargeType'] = vm_['internet_chargetype']
if 'internet_maxbandwidthin' in vm_:
kwargs['InternetMaxBandwidthIn'] = str(vm_['internet_maxbandwidthin'])
kwargs['InternetMaxBandwidthIn'] = six.text_type(vm_['internet_maxbandwidthin'])
if 'internet_maxbandwidthout' in vm_:
kwargs['InternetMaxBandwidthOut'] = str(vm_['internet_maxbandwidthOut'])
kwargs['InternetMaxBandwidthOut'] = six.text_type(vm_['internet_maxbandwidthOut'])
if 'hostname' in vm_:
kwargs['HostName'] = vm_['hostname']
if 'password' in vm_:
@ -666,12 +668,10 @@ def create(vm_):
ret = create_node(kwargs)
except Exception as exc:
log.error(
'Error creating {0} on Aliyun ECS\n\n'
'Error creating %s on Aliyun ECS\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: {1}'.format(
vm_['name'],
str(exc)
),
'run the initial deployment: %s',
vm_['name'], six.text_type(exc),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -706,7 +706,7 @@ def create(vm_):
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))
if len(data['public_ips']) > 0:
ssh_ip = data['public_ips'][0]
@ -715,18 +715,17 @@ def create(vm_):
else:
log.info('No available ip:cant connect to salt')
return False
log.debug('VM {0} is now running'.format(ssh_ip))
log.debug('VM %s is now running', ssh_ip)
vm_['ssh_host'] = ssh_ip
# The instance is booted and accessible, let's Salt it!
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](
@ -798,7 +797,7 @@ def query(params=None):
'AccessKeyId': access_key_id,
'SignatureVersion': '1.0',
'SignatureMethod': 'HMAC-SHA1',
'SignatureNonce': str(uuid.uuid1()),
'SignatureNonce': six.text_type(uuid.uuid1()),
'TimeStamp': timestamp,
}
@ -872,7 +871,7 @@ def show_disk(name, call=None):
for disk in items['Disks']['Disk']:
ret[disk['DiskId']] = {}
for item in disk:
ret[disk['DiskId']][item] = str(disk[item])
ret[disk['DiskId']][item] = six.text_type(disk[item])
return ret
@ -912,7 +911,7 @@ def list_monitor_data(kwargs=None, call=None):
for data in monitorData['InstanceMonitorData']:
ret[data['InstanceId']] = {}
for item in data:
ret[data['InstanceId']][item] = str(data[item])
ret[data['InstanceId']][item] = six.text_type(data[item])
return ret
@ -937,10 +936,8 @@ def _get_node(name):
except KeyError:
attempts -= 1
log.debug(
'Failed to get the data for node \'{0}\'. Remaining '
'attempts: {1}'.format(
name, attempts
)
'Failed to get the data for node \'%s\'. Remaining '
'attempts: %s', name, attempts
)
# Just a little delay between attempts...
time.sleep(0.5)
@ -980,14 +977,15 @@ def show_image(kwargs, call=None):
if 'Code' in items or len(items['Images']['Image']) == 0:
raise SaltCloudNotFound('The specified image could not be found.')
log.debug('Total {0} image found in Region {1}'.format(
items['TotalCount'], location)
log.debug(
'Total %s image found in Region %s',
items['TotalCount'], location
)
for image in items['Images']['Image']:
ret[image['ImageId']] = {}
for item in image:
ret[image['ImageId']][item] = str(image[item])
ret[image['ImageId']][item] = six.text_type(image[item])
return ret

View File

@ -52,7 +52,7 @@ Example ``/etc/salt/cloud.providers`` or
# pylint: disable=E0102
# pylint: disable=wrong-import-position,wrong-import-order
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import os
import os.path
import time
@ -264,7 +264,7 @@ def avail_locations(conn=None, call=None): # pylint: disable=unused-argument
if hasattr(regions, 'value'):
regions = regions.value
for location in regions: # pylint: disable=no-member
lowername = str(location.name).lower().replace(' ', '')
lowername = six.text_type(location.name).lower().replace(' ', '')
ret[lowername] = object_to_dict(location)
return ret
@ -768,8 +768,7 @@ def show_interface(call=None, kwargs=None): # pylint: disable=unused-argument
)
data['ip_configurations'][ip_.name]['public_ip_address']['ip_address'] = pubip.ip_address
except Exception as exc:
log.warning('There was a cloud error: {0}'.format(exc))
log.warning('{0}'.format(type(exc)))
log.warning('There was a %s cloud error: %s', type(exc), exc)
continue
return data
@ -924,7 +923,7 @@ def create_interface(call=None, kwargs=None): # pylint: disable=unused-argument
)
if pub_ip_data.ip_address: # pylint: disable=no-member
ip_kwargs['public_ip_address'] = PublicIPAddress(
str(pub_ip_data.id), # pylint: disable=no-member
six.text_type(pub_ip_data.id), # pylint: disable=no-member
)
ip_configurations = [
NetworkInterfaceIPConfiguration(
@ -936,7 +935,7 @@ def create_interface(call=None, kwargs=None): # pylint: disable=unused-argument
]
break
except CloudError as exc:
log.error('There was a cloud error: {0}'.format(exc))
log.error('There was a cloud error: %s', exc)
count += 1
if count > 120:
raise ValueError('Timed out waiting for public IP Address.')
@ -1092,7 +1091,7 @@ def request_instance(call=None, kwargs=None): # pylint: disable=unused-argument
'name', volume.get(
'name', volume.get('name', '{0}-datadisk{1}'.format(
vm_['name'],
str(lun),
six.text_type(lun),
),
)
)
@ -1214,7 +1213,7 @@ def request_instance(call=None, kwargs=None): # pylint: disable=unused-argument
try:
poller.wait()
except CloudError as exc:
log.warning('There was a cloud error: {0}'.format(exc))
log.warning('There was a cloud error: %s', exc)
log.warning('This may or may not indicate an actual problem')
try:
@ -1245,7 +1244,7 @@ def create(vm_):
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
log.info('Creating Cloud VM %s', vm_['name'])
global compconn # pylint: disable=global-statement,invalid-name
if not compconn:
@ -1283,7 +1282,7 @@ def create(vm_):
try:
log.warning(exc)
finally:
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))
# calling _query_ip_address() causes Salt to attempt to build the VM again.
#hostname = _query_ip_address()
@ -1304,11 +1303,10 @@ def create(vm_):
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
data = show_instance(vm_['name'], call='action')
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
ret.update(data)

View File

@ -3,7 +3,7 @@
CenturyLink Cloud Module
===================
.. versionadded:: 0yxgen
.. versionadded:: Oyxgen
The CLC cloud module allows you to manage CLC Via the CLC SDK.
@ -62,7 +62,7 @@ cloud configuration at
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import importlib
import logging
import time
@ -71,6 +71,7 @@ import time
import salt.config as config
import salt.utils.json
from salt.exceptions import SaltCloudSystemExit
from salt.ext import six
# Get logging started
log = logging.getLogger(__name__)
@ -286,7 +287,7 @@ def get_build_status(req_id, nodename):
get the build status from CLC to make sure we dont return to early
'''
counter = 0
req_id = str(req_id)
req_id = six.text_type(req_id)
while counter < 10:
queue = clc.v1.Blueprint.GetStatus(request_id=(req_id))
if queue["PercentComplete"] == 100:
@ -298,7 +299,8 @@ def get_build_status(req_id, nodename):
return internal_ip_address
else:
counter = counter + 1
log.info("Creating Cloud VM " + nodename + " Time out in " + str(10 - counter) + " minutes")
log.info('Creating Cloud VM %s Time out in %s minutes',
nodename, six.text_type(10 - counter))
time.sleep(60)

View File

@ -24,7 +24,7 @@ Use of this module requires the ``apikey``, ``secretkey``, ``host`` and
# pylint: disable=invalid-name,function-redefined
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
@ -36,6 +36,7 @@ from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wild
from salt.utils.functools import namespaced_function
from salt.exceptions import SaltCloudSystemExit
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.ext import six
# CloudStackNetwork will be needed during creation of a new node
# pylint: disable=import-error
@ -162,7 +163,7 @@ def get_location(conn, vm_):
# Default to Dallas if not otherwise set
loc = config.get_cloud_config_value('location', vm_, __opts__, default=2)
for location in locations:
if str(loc) in (str(location.id), str(location.name)):
if six.text_type(loc) in (six.text_type(location.id), six.text_type(location.name)):
return location
@ -254,10 +255,10 @@ def get_project(conn, vm_):
return False
for project in projects:
if str(projid) in (str(project.id), str(project.name)):
if six.text_type(projid) in (six.text_type(project.id), six.text_type(project.name)):
return project
log.warning("Couldn't find project {0} in projects".format(projid))
log.warning("Couldn't find project %s in projects", projid)
return False
@ -284,7 +285,7 @@ def create(vm_):
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
log.info('Creating Cloud VM %s', vm_['name'])
conn = get_conn()
kwargs = {
'name': vm_['name'],
@ -358,11 +359,10 @@ def create(vm_):
)
except Exception as exc:
log.error(
'Error creating volume {0} on CLOUDSTACK\n\n'
'Error creating volume %s on CLOUDSTACK\n\n'
'The following exception was thrown by libcloud when trying to '
'requesting a volume: \n{1}'.format(
ex_blockdevicemapping['VirtualName'], exc
),
'requesting a volume: \n%s',
ex_blockdevicemapping['VirtualName'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -373,11 +373,10 @@ def create(vm_):
data = conn.create_node(**kwargs)
except Exception as exc:
log.error(
'Error creating {0} on CLOUDSTACK\n\n'
'Error creating %s on CLOUDSTACK\n\n'
'The following exception was thrown by libcloud when trying to '
'run the initial deployment: \n{1}'.format(
vm_['name'], str(exc)
),
'run the initial deployment: \n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -388,11 +387,10 @@ def create(vm_):
conn.attach_volume(data, volumes[device_name], device_name)
except Exception as exc:
log.error(
'Error attaching volume {0} on CLOUDSTACK\n\n'
'Error attaching volume %s on CLOUDSTACK\n\n'
'The following exception was thrown by libcloud when trying to '
'attach a volume: \n{1}'.format(
ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc
),
'attach a volume: \n%s',
ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc,
# Show the traceback if the debug logging level is enabled
exc_info=log.isEnabledFor(logging.DEBUG)
)
@ -412,11 +410,10 @@ def create(vm_):
if 'password' in data.extra:
del data.extra['password']
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data.__dict__)
)
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data.__dict__)
)
__utils__['cloud.fire_event'](
@ -454,18 +451,19 @@ def destroy(name, conn=None, call=None):
node = get_node(conn, name)
if node is None:
log.error('Unable to find the VM {0}'.format(name))
log.error('Unable to find the VM %s', name)
volumes = conn.list_volumes(node)
if volumes is None:
log.error('Unable to find volumes of the VM {0}'.format(name))
log.error('Unable to find volumes of the VM %s', name)
# TODO add an option like 'delete_sshkeys' below
for volume in volumes:
if volume.extra['volume_type'] != 'DATADISK':
log.info('Ignoring volume type {0}: {1}'.format(
volume.extra['volume_type'], volume.name)
log.info(
'Ignoring volume type %s: %s',
volume.extra['volume_type'], volume.name
)
continue
log.info('Detaching volume: {0}'.format(volume.name))
log.info('Detaching volume: %s', volume.name)
__utils__['cloud.fire_event'](
'event',
'detaching volume',
@ -474,9 +472,9 @@ def destroy(name, conn=None, call=None):
args={'name': volume.name},
)
if not conn.detach_volume(volume):
log.error('Failed to Detach volume: {0}'.format(volume.name))
log.error('Failed to Detach volume: %s', volume.name)
return False
log.info('Detached volume: {0}'.format(volume.name))
log.info('Detached volume: %s', volume.name)
__utils__['cloud.fire_event'](
'event',
'detached volume',
@ -485,7 +483,7 @@ def destroy(name, conn=None, call=None):
args={'name': volume.name},
)
log.info('Destroying volume: {0}'.format(volume.name))
log.info('Destroying volume: %s', volume.name)
__utils__['cloud.fire_event'](
'event',
'destroying volume',
@ -494,9 +492,9 @@ def destroy(name, conn=None, call=None):
args={'name': volume.name},
)
if not conn.destroy_volume(volume):
log.error('Failed to Destroy volume: {0}'.format(volume.name))
log.error('Failed to Destroy volume: %s', volume.name)
return False
log.info('Destroyed volume: {0}'.format(volume.name))
log.info('Destroyed volume: %s', volume.name)
__utils__['cloud.fire_event'](
'event',
'destroyed volume',
@ -504,12 +502,12 @@ def destroy(name, conn=None, call=None):
sock_dir=__opts__['sock_dir'],
args={'name': volume.name},
)
log.info('Destroying VM: {0}'.format(name))
log.info('Destroying VM: %s', name)
ret = conn.destroy_node(node)
if not ret:
log.error('Failed to Destroy VM: {0}'.format(name))
log.error('Failed to Destroy VM: %s', name)
return False
log.info('Destroyed VM: {0}'.format(name))
log.info('Destroyed VM: %s', name)
# Fire destroy action
event = salt.utils.event.SaltEvent('master', __opts__['sock_dir'])
__utils__['cloud.fire_event'](

View File

@ -26,7 +26,7 @@ under the "SSH Keys" section.
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import decimal
import logging
import os
@ -37,6 +37,7 @@ import time
import salt.utils.cloud
import salt.utils.files
import salt.utils.json
import salt.utils.stringutils
import salt.config as config
from salt.exceptions import (
SaltCloudConfigError,
@ -114,7 +115,7 @@ def avail_locations(call=None):
for region in items['regions']:
ret[region['name']] = {}
for item in six.iterkeys(region):
ret[region['name']][item] = str(region[item])
ret[region['name']][item] = six.text_type(region[item])
return ret
@ -134,7 +135,7 @@ def avail_images(call=None):
ret = {}
while fetch:
items = query(method='images', command='?page=' + str(page) + '&per_page=200')
items = query(method='images', command='?page=' + six.text_type(page) + '&per_page=200')
for image in items['images']:
ret[image['name']] = {}
@ -165,7 +166,7 @@ def avail_sizes(call=None):
for size in items['sizes']:
ret[size['slug']] = {}
for item in six.iterkeys(size):
ret[size['slug']][item] = str(size[item])
ret[size['slug']][item] = six.text_type(size[item])
return ret
@ -210,7 +211,7 @@ def get_image(vm_):
'image', vm_, __opts__, search_global=False
)
if not isinstance(vm_image, six.string_types):
vm_image = str(vm_image)
vm_image = six.text_type(vm_image)
for image in images:
if vm_image in (images[image]['name'],
@ -229,7 +230,7 @@ def get_size(vm_):
Return the VM's size. Used by create_node().
'''
sizes = avail_sizes()
vm_size = str(config.get_cloud_config_value(
vm_size = six.text_type(config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
))
for size in sizes:
@ -245,7 +246,7 @@ def get_location(vm_):
Return the VM's location
'''
locations = avail_locations()
vm_location = str(config.get_cloud_config_value(
vm_location = six.text_type(config.get_cloud_config_value(
'location', vm_, __opts__, search_global=False
))
@ -291,7 +292,7 @@ def create(vm_):
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
log.info('Creating Cloud VM %s', vm_['name'])
kwargs = {
'name': vm_['name'],
@ -340,7 +341,7 @@ def create(vm_):
)
if ssh_interface in ['private', 'public']:
log.info("ssh_interface: Setting interface for ssh to {}".format(ssh_interface))
log.info("ssh_interface: Setting interface for ssh to %s", ssh_interface)
kwargs['ssh_interface'] = ssh_interface
else:
raise SaltCloudConfigError(
@ -391,7 +392,7 @@ def create(vm_):
try:
with salt.utils.files.fopen(userdata_file, 'r') as fp_:
kwargs['user_data'] = salt.utils.cloud.userdata_template(
__opts__, vm_, fp_.read()
__opts__, vm_, salt.utils.stringutils.to_unicode(fp_.read())
)
except Exception as exc:
log.exception(
@ -410,7 +411,7 @@ def create(vm_):
default_dns_hostname = '.'.join(dns_domain_name[:-2])
default_dns_domain = '.'.join(dns_domain_name[-2:])
else:
log.debug("create_dns_record: can't infer dns_domain from {0}".format(vm_['name']))
log.debug("create_dns_record: can't infer dns_domain from %s", vm_['name'])
default_dns_hostname = dns_domain_name[0]
dns_hostname = config.get_cloud_config_value(
@ -420,13 +421,13 @@ def create(vm_):
'dns_domain', vm_, __opts__, search_global=False, default=default_dns_domain,
)
if dns_hostname and dns_domain:
log.info('create_dns_record: using dns_hostname="{0}", dns_domain="{1}"'.format(dns_hostname, dns_domain))
log.info('create_dns_record: using dns_hostname="%s", dns_domain="%s"', dns_hostname, dns_domain)
__add_dns_addr__ = lambda t, d: post_dns_record(dns_domain=dns_domain,
name=dns_hostname,
record_type=t,
record_data=d)
log.debug('create_dns_record: {0}'.format(__add_dns_addr__))
log.debug('create_dns_record: %s', __add_dns_addr__)
else:
log.error('create_dns_record: could not determine dns_hostname and/or dns_domain')
raise SaltCloudConfigError(
@ -447,12 +448,10 @@ def create(vm_):
ret = create_node(kwargs)
except Exception as exc:
log.error(
'Error creating {0} on DIGITALOCEAN\n\n'
'Error creating %s on DIGITALOCEAN\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: {1}'.format(
vm_['name'],
str(exc)
),
'run the initial deployment: %s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -485,7 +484,7 @@ def create(vm_):
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))
if not vm_.get('ssh_host'):
vm_['ssh_host'] = None
@ -496,7 +495,7 @@ def create(vm_):
for facing, addr_family, ip_address in [(net['type'], family, net['ip_address'])
for family in addr_families
for net in data['networks'][family]]:
log.info('found {0} IP{1} interface for "{2}"'.format(facing, addr_family, ip_address))
log.info('found %s IP%s interface for "%s"', facing, addr_family, ip_address)
dns_rec_type = arec_map[addr_family]
if facing == 'public':
if create_dns_record:
@ -510,17 +509,19 @@ def create(vm_):
'No suitable IP addresses found for ssh minion bootstrapping: {0}'.format(repr(data['networks']))
)
log.debug('Found public IP address to use for ssh minion bootstrapping: {0}'.format(vm_['ssh_host']))
log.debug(
'Found public IP address to use for ssh minion bootstrapping: %s',
vm_['ssh_host']
)
vm_['key_filename'] = key_filename
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](
@ -539,7 +540,7 @@ def query(method='droplets', droplet_id=None, command=None, args=None, http_meth
'''
Make a web call to DigitalOcean
'''
base_path = str(config.get_cloud_config_value(
base_path = six.text_type(config.get_cloud_config_value(
'api_root',
get_configured_provider(),
__opts__,
@ -629,10 +630,8 @@ def _get_node(name):
except KeyError:
attempts -= 1
log.debug(
'Failed to get the data for node \'{0}\'. Remaining '
'attempts: {1}'.format(
name, attempts
)
'Failed to get the data for node \'%s\'. Remaining '
'attempts: %s', name, attempts
)
# Just a little delay between attempts...
time.sleep(0.5)
@ -655,7 +654,7 @@ def list_keypairs(call=None):
ret = {}
while fetch:
items = query(method='account/keys', command='?page=' + str(page) +
items = query(method='account/keys', command='?page=' + six.text_type(page) +
'&per_page=100')
for key_pair in items['ssh_keys']:
@ -671,7 +670,7 @@ def list_keypairs(call=None):
)
ret[name] = {}
for item in six.iterkeys(key_pair):
ret[name][item] = str(key_pair[item])
ret[name][item] = six.text_type(key_pair[item])
page += 1
try:
@ -701,7 +700,7 @@ def show_keypair(kwargs=None, call=None):
keypairs = list_keypairs(call='function')
keyid = keypairs[kwargs['keyname']]['id']
log.debug('Key ID is {0}'.format(keyid))
log.debug('Key ID is %s', keyid)
details = query(method='account/keys', command=keyid)
@ -720,7 +719,7 @@ def import_keypair(kwargs=None, call=None):
keyname(mandatory): public key name in the provider
'''
with salt.utils.files.fopen(kwargs['file'], 'r') as public_key_filename:
public_key_content = public_key_filename.read()
public_key_content = salt.utils.stringutils.to_unicode(public_key_filename.read())
digitalocean_kwargs = {
'name': kwargs['keyname'],
@ -835,17 +834,17 @@ def destroy(name, call=None):
'\'delete_dns_record\' should be a boolean value.'
)
# When the "to do" a few lines up is resolved, remove these lines and use the if/else logic below.
log.debug('Deleting DNS records for {0}.'.format(name))
log.debug('Deleting DNS records for %s.', name)
destroy_dns_records(name)
# Until the "to do" from line 754 is taken care of, we don't need this logic.
# if delete_dns_record:
# log.debug('Deleting DNS records for {0}.'.format(name))
# log.debug('Deleting DNS records for %s.', name)
# destroy_dns_records(name)
# else:
# log.debug('delete_dns_record : {0}'.format(delete_dns_record))
# log.debug('delete_dns_record : %s', delete_dns_record)
# for line in pprint.pformat(dir()).splitlines():
# log.debug('delete context: {0}'.format(line))
# log.debug('delete context: %s', line)
__utils__['cloud.fire_event'](
'event',
@ -875,7 +874,7 @@ def post_dns_record(**kwargs):
if kwargs[i]:
pass
else:
error = '{0}="{1}" ## all mandatory args must be provided: {2}'.format(i, kwargs[i], str(mandatory_kwargs))
error = '{0}="{1}" ## all mandatory args must be provided: {2}'.format(i, kwargs[i], mandatory_kwargs)
raise SaltInvocationError(error)
domain = query(method='domains', droplet_id=kwargs['dns_domain'])
@ -905,24 +904,24 @@ def destroy_dns_records(fqdn):
except SaltCloudSystemExit:
log.debug('Failed to find domains.')
return False
log.debug("found DNS records: {0}".format(pprint.pformat(response)))
log.debug("found DNS records: %s", pprint.pformat(response))
records = response['domain_records']
if records:
record_ids = [r['id'] for r in records if r['name'].decode() == hostname]
log.debug("deleting DNS record IDs: {0}".format(repr(record_ids)))
for id in record_ids:
log.debug("deleting DNS record IDs: %s", record_ids)
for id_ in record_ids:
try:
log.info('deleting DNS record {0}'.format(id))
log.info('deleting DNS record %s', id_)
ret = query(
method='domains',
droplet_id=domain,
command='records/{0}'.format(id),
command='records/{0}'.format(id_),
http_method='delete'
)
except SaltCloudSystemExit:
log.error('failed to delete DNS domain {0} record ID {1}.'.format(domain, hostname))
log.debug('DNS deletion REST call returned: {0}'.format(pprint.pformat(ret)))
log.error('failed to delete DNS domain %s record ID %s.', domain, hostname)
log.debug('DNS deletion REST call returned: %s', pprint.pformat(ret))
return False
@ -990,7 +989,7 @@ def list_floating_ips(call=None):
while fetch:
items = query(method='floating_ips',
command='?page=' + str(page) + '&per_page=200')
command='?page=' + six.text_type(page) + '&per_page=200')
for floating_ip in items['floating_ips']:
ret[floating_ip['ip']] = {}
@ -1032,7 +1031,7 @@ def show_floating_ip(kwargs=None, call=None):
return False
floating_ip = kwargs['floating_ip']
log.debug('Floating ip is {0}'.format(floating_ip))
log.debug('Floating ip is %s', floating_ip)
details = query(method='floating_ips', command=floating_ip)
@ -1107,7 +1106,7 @@ def delete_floating_ip(kwargs=None, call=None):
return False
floating_ip = kwargs['floating_ip']
log.debug('Floating ip is {0}'.format('floating_ip'))
log.debug('Floating ip is %s', kwargs['floating_ip'])
result = query(method='floating_ips',
command=floating_ip,
@ -1192,7 +1191,7 @@ def _list_nodes(full=False, for_output=False):
while fetch:
items = query(method='droplets',
command='?page=' + str(page) + '&per_page=200')
command='?page=' + six.text_type(page) + '&per_page=200')
for node in items['droplets']:
name = node['name']
ret[name] = {}
@ -1207,7 +1206,7 @@ def _list_nodes(full=False, for_output=False):
'private_ips': private_ips,
'public_ips': public_ips,
'size': node['size_slug'],
'state': str(node['status']),
'state': six.text_type(node['status']),
}
page += 1
@ -1339,7 +1338,7 @@ def _get_full_output(node, for_output=False):
for item in six.iterkeys(node):
value = node[item]
if value is not None and for_output:
value = str(value)
value = six.text_type(value)
ret[item] = value
return ret

View File

@ -23,7 +23,7 @@ using the existing Libcloud driver for Dimension Data.
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import pprint
@ -297,7 +297,7 @@ def create(vm_):
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))
log.debug('VM is now running')
if ssh_interface(vm_) == 'private_ips':
@ -329,11 +329,10 @@ def create(vm_):
if 'password' in data.extra:
del data.extra['password']
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data.__dict__)
)
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data.__dict__)
)
__utils__['cloud.fire_event'](

View File

@ -67,7 +67,7 @@ To use the EC2 cloud module, set up the cloud configuration at
# pylint: disable=invalid-name,function-redefined
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import stat
@ -91,6 +91,7 @@ import salt.utils.cloud
import salt.utils.files
import salt.utils.hashutils
import salt.utils.json
import salt.utils.stringutils
import salt.utils.yaml
from salt._compat import ElementTree as ET
import salt.utils.http as http
@ -324,7 +325,7 @@ def query(params=None, setname=None, requesturl=None, location=None,
return {'error': endpoint_err}, requesturl
return {'error': endpoint_err}
log.debug('Using EC2 endpoint: {0}'.format(endpoint))
log.debug('Using EC2 endpoint: %s', endpoint)
# AWS v4 signature
method = 'GET'
@ -381,21 +382,16 @@ def query(params=None, setname=None, requesturl=None, location=None,
', ' + 'Signature=' + signature
headers = {'x-amz-date': amz_date, 'Authorization': authorization_header}
log.debug('EC2 Request: {0}'.format(requesturl))
log.trace('EC2 Request Parameters: {0}'.format(params_with_headers))
log.debug('EC2 Request: %s', requesturl)
log.trace('EC2 Request Parameters: %s', params_with_headers)
try:
result = requests.get(requesturl, headers=headers, params=params_with_headers)
log.debug(
'EC2 Response Status Code: {0}'.format(
# result.getcode()
result.status_code
)
)
log.trace(
'EC2 Response Text: {0}'.format(
result.text
)
'EC2 Response Status Code: %s',
# result.getcode()
result.status_code
)
log.trace('EC2 Response Text: %s', result.text)
result.raise_for_status()
break
except requests.exceptions.HTTPError as exc:
@ -407,28 +403,25 @@ def query(params=None, setname=None, requesturl=None, location=None,
if attempts > 0 and err_code and err_code in EC2_RETRY_CODES:
attempts -= 1
log.error(
'EC2 Response Status Code and Error: [{0} {1}] {2}; '
'Attempts remaining: {3}'.format(
exc.response.status_code, exc, data, attempts
)
'EC2 Response Status Code and Error: [%s %s] %s; '
'Attempts remaining: %s',
exc.response.status_code, exc, data, attempts
)
# Wait a bit before continuing to prevent throttling
time.sleep(2)
continue
log.error(
'EC2 Response Status Code and Error: [{0} {1}] {2}'.format(
exc.response.status_code, exc, data
)
'EC2 Response Status Code and Error: [%s %s] %s',
exc.response.status_code, exc, data
)
if return_url is True:
return {'error': data}, requesturl
return {'error': data}
else:
log.error(
'EC2 Response Status Code and Error: [{0} {1}] {2}'.format(
exc.response.status_code, exc, data
)
'EC2 Response Status Code and Error: [%s %s] %s',
exc.response.status_code, exc, data
)
if return_url is True:
return {'error': data}, requesturl
@ -501,16 +494,13 @@ def _wait_for_spot_instance(update_callback,
while True:
log.debug(
'Waiting for spot instance reservation. Giving up in '
'00:{0:02d}:{1:02d}'.format(
int(timeout // 60),
int(timeout % 60)
)
'00:%02d:%02d', int(timeout // 60), int(timeout % 60)
)
data = update_callback(*update_args, **update_kwargs)
if data is False:
log.debug(
'update_callback has returned False which is considered a '
'failure. Remaining Failures: {0}'.format(max_failures)
'failure. Remaining Failures: %s', max_failures
)
max_failures -= 1
if max_failures <= 0:
@ -537,7 +527,7 @@ def _wait_for_spot_instance(update_callback,
if interval > timeout:
interval = timeout + 1
log.info('Interval multiplier in effect; interval is '
'now {0}s'.format(interval))
'now %ss', interval)
def avail_sizes(call=None):
@ -1033,11 +1023,11 @@ def ssh_interface(vm_):
search_global=False
)
if ret not in ('public_ips', 'private_ips'):
log.warning((
'Invalid ssh_interface: {0}. '
log.warning(
'Invalid ssh_interface: %s. '
'Allowed options are ("public_ips", "private_ips"). '
'Defaulting to "public_ips".'
).format(ret))
'Defaulting to "public_ips".', ret
)
ret = 'public_ips'
return ret
@ -1207,9 +1197,9 @@ def _get_subnetname_id(subnetname):
tags = [tags]
for tag in tags:
if tag['key'] == 'Name' and tag['value'] == subnetname:
log.debug('AWS Subnet ID of {0} is {1}'.format(
subnetname,
subnet['subnetId'])
log.debug(
'AWS Subnet ID of %s is %s',
subnetname, subnet['subnetId']
)
return subnet['subnetId']
return None
@ -1244,9 +1234,9 @@ def _get_securitygroupname_id(securitygroupname_list):
for sg in aws.query(params, location=get_location(),
provider=get_provider(), opts=__opts__, sigver='4'):
if sg['groupName'] in securitygroupname_list:
log.debug('AWS SecurityGroup ID of {0} is {1}'.format(
sg['groupName'],
sg['groupId'])
log.debug(
'AWS SecurityGroup ID of %s is %s',
sg['groupName'], sg['groupId']
)
securitygroupid_set.add(sg['groupId'])
return list(securitygroupid_set)
@ -1280,8 +1270,9 @@ def securitygroupid(vm_):
for sg in aws.query(params, location=get_location(),
provider=get_provider(), opts=__opts__, sigver='4'):
if sg['groupName'] in securitygroupname_list:
log.debug('AWS SecurityGroup ID of {0} is {1}'.format(
sg['groupName'], sg['groupId'])
log.debug(
'AWS SecurityGroup ID of %s is %s',
sg['groupName'], sg['groupId']
)
securitygroupid_set.add(sg['groupId'])
return list(securitygroupid_set)
@ -1433,9 +1424,8 @@ def _create_eni_if_necessary(interface, vm_):
eni_id = eni_desc.get('networkInterfaceId')
log.debug(
'Created network interface {0} inst {1}'.format(
eni_id, interface['DeviceIndex']
)
'Created network interface %s inst %s',
eni_id, interface['DeviceIndex']
)
associate_public_ip = interface.get('AssociatePublicIpAddress', False)
@ -1598,9 +1588,8 @@ def _associate_eip_with_interface(eni_id, eip_id, private_ip=None, vm_=None):
break
log.debug(
'Associated ElasticIP address {0} with interface {1}'.format(
eip_id, eni_id
)
'Associated ElasticIP address %s with interface %s',
eip_id, eni_id
)
return result[2].get('associationId')
@ -1623,7 +1612,7 @@ def _update_enis(interfaces, instance, vm_=None):
'Duplicate DeviceIndex in profile. Cannot update ENIs.'
)
return None
config_enis[str(interface['DeviceIndex'])] = interface
config_enis[six.text_type(interface['DeviceIndex'])] = interface
query_enis = instance[0]['instancesSet']['item']['networkInterfaceSet']['item']
if isinstance(query_enis, list):
for query_eni in query_enis:
@ -1714,7 +1703,7 @@ def _param_from_config(key, data):
else:
if isinstance(data, bool):
# convert boolean True/False to 'true'/'false'
param.update({key: str(data).lower()})
param.update({key: six.text_type(data).lower()})
else:
param.update({key: data})
@ -1791,10 +1780,10 @@ def request_instance(vm_=None, call=None):
'userdata', vm_, __opts__, search_global=False, default=None
)
else:
log.trace('userdata_file: {0}'.format(userdata_file))
log.trace('userdata_file: %s', userdata_file)
if os.path.exists(userdata_file):
with salt.utils.files.fopen(userdata_file, 'r') as fh_:
userdata = fh_.read()
userdata = salt.utils.stringutils.to_unicode(fh_.read())
userdata = salt.utils.cloud.userdata_template(__opts__, vm_, userdata)
@ -1883,7 +1872,7 @@ def request_instance(vm_=None, call=None):
if network_interfaces:
eni_devices = []
for interface in network_interfaces:
log.debug('Create network interface: {0}'.format(interface))
log.debug('Create network interface: %s', interface)
_new_eni = _create_eni_if_necessary(interface, vm_)
eni_devices.append(_new_eni)
params.update(_param_from_config(spot_prefix + 'NetworkInterface',
@ -1916,8 +1905,8 @@ def request_instance(vm_=None, call=None):
# as Ubuntu and CentOS (and most likely other OSs)
# use different device identifiers
log.info('Attempting to look up root device name for image id {0} on '
'VM {1}'.format(image_id, vm_['name']))
log.info('Attempting to look up root device name for image id %s on '
'VM %s', image_id, vm_['name'])
rd_params = {
'Action': 'DescribeImages',
@ -1931,11 +1920,11 @@ def request_instance(vm_=None, call=None):
sigver='4')
if 'error' in rd_data:
return rd_data['error']
log.debug('EC2 Response: \'{0}\''.format(rd_data))
log.debug('EC2 Response: \'%s\'', rd_data)
except Exception as exc:
log.error(
'Error getting root device name for image id {0} for '
'VM {1}: \n{2}'.format(image_id, vm_['name'], exc),
'Error getting root device name for image id %s for '
'VM %s: \n%s', image_id, vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -1961,7 +1950,7 @@ def request_instance(vm_=None, call=None):
# Grab the volume type
rd_type = item['ebs'].get('volumeType', None)
log.info('Found root device name: {0}'.format(rd_name))
log.info('Found root device name: %s', rd_name)
if rd_name is not None:
if ex_blockdevicemappings:
@ -1985,7 +1974,7 @@ def request_instance(vm_=None, call=None):
# Set the termination value
termination_key = '{0}BlockDeviceMapping.{1}.Ebs.DeleteOnTermination'.format(spot_prefix, dev_index)
params[termination_key] = str(set_del_root_vol_on_destroy).lower()
params[termination_key] = six.text_type(set_del_root_vol_on_destroy).lower()
# Use default volume type if not specified
if ex_blockdevicemappings and dev_index < len(ex_blockdevicemappings) and 'Ebs.VolumeType' not in ex_blockdevicemappings[dev_index]:
@ -2028,10 +2017,8 @@ def request_instance(vm_=None, call=None):
return data['error']
except Exception as exc:
log.error(
'Error creating {0} on EC2 when trying to run the initial '
'deployment: \n{1}'.format(
vm_['name'], exc
),
'Error creating %s on EC2 when trying to run the initial '
'deployment: \n%s', vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -2058,14 +2045,11 @@ def request_instance(vm_=None, call=None):
return False
if isinstance(data, dict) and 'error' in data:
log.warning(
'There was an error in the query. {0}'
.format(data['error'])
)
log.warning('There was an error in the query. %s', data['error'])
# Trigger a failure in the wait for spot instance method
return False
log.debug('Returned query data: {0}'.format(data))
log.debug('Returned query data: %s', data)
state = data[0].get('state')
@ -2074,9 +2058,7 @@ def request_instance(vm_=None, call=None):
if state == 'open':
# Still waiting for an active state
log.info('Spot instance status: {0}'.format(
data[0]['status']['message']
))
log.info('Spot instance status: %s', data[0]['status']['message'])
return None
if state in ['cancelled', 'failed', 'closed']:
@ -2112,7 +2094,7 @@ def request_instance(vm_=None, call=None):
__opts__,
default=10),
)
log.debug('wait_for_spot_instance data {0}'.format(data))
log.debug('wait_for_spot_instance data %s', data)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
@ -2125,13 +2107,13 @@ def request_instance(vm_=None, call=None):
opts=__opts__,
sigver='4')
log.debug('Canceled spot instance request {0}. Data '
'returned: {1}'.format(sir_id, data))
log.debug('Canceled spot instance request %s. Data '
'returned: %s', sir_id, data)
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))
return data, vm_
@ -2158,7 +2140,7 @@ def query_instance(vm_=None, call=None):
transport=__opts__['transport']
)
log.debug('The new VM instance_id is {0}'.format(instance_id))
log.debug('The new VM instance_id is %s', instance_id)
params = {'Action': 'DescribeInstances',
'InstanceId.1': instance_id}
@ -2173,14 +2155,12 @@ def query_instance(vm_=None, call=None):
opts=__opts__,
return_url=True,
sigver='4')
log.debug('The query returned: {0}'.format(data))
log.debug('The query returned: %s', data)
if isinstance(data, dict) and 'error' in data:
log.warning(
'There was an error in the query. {0} attempts '
'remaining: {1}'.format(
attempts, data['error']
)
'There was an error in the query. %s attempts '
'remaining: %s', attempts, data['error']
)
attempts -= 1
# Just a little delay between attempts...
@ -2189,8 +2169,8 @@ def query_instance(vm_=None, call=None):
if isinstance(data, list) and not data:
log.warning(
'Query returned an empty list. {0} attempts '
'remaining.'.format(attempts)
'Query returned an empty list. %s attempts '
'remaining.', attempts
)
attempts -= 1
# Just a little delay between attempts...
@ -2218,13 +2198,11 @@ def query_instance(vm_=None, call=None):
return False
if isinstance(data, dict) and 'error' in data:
log.warning(
'There was an error in the query. {0}'.format(data['error'])
)
log.warning('There was an error in the query. %s', data['error'])
# Trigger a failure in the wait for IP function
return False
log.debug('Returned query data: {0}'.format(data))
log.debug('Returned query data: %s', data)
if ssh_interface(vm_) == 'public_ips':
if 'ipAddress' in data[0]['instancesSet']['item']:
@ -2260,7 +2238,7 @@ def query_instance(vm_=None, call=None):
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))
if 'reactor' in vm_ and vm_['reactor'] is True:
__utils__['cloud.fire_event'](
@ -2451,7 +2429,7 @@ def wait_for_instance(
keys += '\n{0} {1}'.format(ip_address, line)
with salt.utils.files.fopen(known_hosts_file, 'a') as fp_:
fp_.write(keys)
fp_.write(salt.utils.stringutils.to_str(keys))
fp_.close()
for user in vm_['usernames']:
@ -2581,7 +2559,7 @@ def create(vm_=None, call=None):
location = get_location(vm_)
vm_['location'] = location
log.info('Creating Cloud VM {0} in {1}'.format(vm_['name'], location))
log.info('Creating Cloud VM %s in %s', vm_['name'], location)
vm_['usernames'] = salt.utils.cloud.ssh_usernames(
vm_,
__opts__,
@ -2599,7 +2577,7 @@ def create(vm_=None, call=None):
# This was probably created via another process, and doesn't have
# things like salt keys created yet, so let's create them now.
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'{0[name]}\''.format(vm_))
log.debug('Generating minion keys for \'%s\'', vm_['name'])
vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
@ -2620,7 +2598,7 @@ def create(vm_=None, call=None):
# If data is a str, it's an error
if isinstance(data, six.string_types):
log.error('Error requesting instance: {0}'.format(data))
log.error('Error requesting instance: %s', data)
return {}
# Pull the instance ID, valid for both spot and normal instances
@ -2691,25 +2669,25 @@ def create(vm_=None, call=None):
# At this point, the node is created and tagged, and now needs to be
# bootstrapped, once the necessary port is available.
log.info('Created node {0}'.format(vm_['name']))
log.info('Created node %s', vm_['name'])
instance = data[0]['instancesSet']['item']
# Wait for the necessary port to become available to bootstrap
if ssh_interface(vm_) == 'private_ips':
ip_address = instance['privateIpAddress']
log.info('Salt node data. Private_ip: {0}'.format(ip_address))
log.info('Salt node data. Private_ip: %s', ip_address)
else:
ip_address = instance['ipAddress']
log.info('Salt node data. Public_ip: {0}'.format(ip_address))
log.info('Salt node data. Public_ip: %s', ip_address)
vm_['ssh_host'] = ip_address
if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
salt_ip_address = instance['privateIpAddress']
log.info('Salt interface set to: {0}'.format(salt_ip_address))
log.info('Salt interface set to: %s', salt_ip_address)
else:
salt_ip_address = instance['ipAddress']
log.debug('Salt interface set to: {0}'.format(salt_ip_address))
log.debug('Salt interface set to: %s', salt_ip_address)
vm_['salt_host'] = salt_ip_address
if deploy:
@ -2741,7 +2719,7 @@ def create(vm_=None, call=None):
transport=__opts__['transport']
)
log.info('Create and attach volumes to node {0}'.format(vm_['name']))
log.info('Create and attach volumes to node %s', vm_['name'])
created = create_attach_volumes(
vm_['name'],
{
@ -2759,7 +2737,7 @@ def create(vm_=None, call=None):
'ssm_document', vm_, __opts__, None, search_global=False
)
if ssm_document:
log.debug('Associating with ssm document: {0}'.format(ssm_document))
log.debug('Associating with ssm document: %s', ssm_document)
assoc = ssm_create_association(
vm_['name'],
{'ssm_document': ssm_document},
@ -2767,19 +2745,19 @@ def create(vm_=None, call=None):
call='action'
)
if isinstance(assoc, dict) and assoc.get('error', None):
log.error('Failed to associate instance {0} with ssm document {1}'.format(
log.error(
'Failed to associate instance %s with ssm document %s',
vm_['instance_id'], ssm_document
))
)
return {}
for key, value in six.iteritems(__utils__['cloud.bootstrap'](vm_, __opts__)):
ret.setdefault(key, value)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(instance)
)
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(instance)
)
event_data = {
@ -2921,7 +2899,7 @@ def stop(name, call=None):
'The stop action must be called with -a or --action.'
)
log.info('Stopping node {0}'.format(name))
log.info('Stopping node %s', name)
instance_id = _get_node(name)['instanceId']
@ -2945,7 +2923,7 @@ def start(name, call=None):
'The start action must be called with -a or --action.'
)
log.info('Starting node {0}'.format(name))
log.info('Starting node %s', name)
instance_id = _get_node(name)['instanceId']
@ -3010,7 +2988,7 @@ def set_tags(name=None,
params = {'Action': 'CreateTags',
'ResourceId.1': instance_id}
log.debug('Tags to set for {0}: {1}'.format(name, tags))
log.debug('Tags to set for %s: %s', name, tags)
if kwargs and not tags:
tags = kwargs
@ -3032,7 +3010,7 @@ def set_tags(name=None,
instance_id=instance_id, call='action', location=location
)
log.debug('Setting the tags returned: {0}'.format(settags))
log.debug('Setting the tags returned: %s', settags)
failed_to_set_tags = False
for tag in settags:
@ -3044,18 +3022,17 @@ def set_tags(name=None,
# This is a correctly set tag with no value
continue
if str(tags.get(tag['key'])) != str(tag['value']):
if six.text_type(tags.get(tag['key'])) != six.text_type(tag['value']):
# Not set to the proper value!?
log.debug('Setting the tag {0} returned {1} instead of {2}'.format(tag['key'], tags.get(tag['key']), tag['value']))
log.debug(
'Setting the tag %s returned %s instead of %s',
tag['key'], tags.get(tag['key']), tag['value']
)
failed_to_set_tags = True
break
if failed_to_set_tags:
log.warning(
'Failed to set tags. Remaining attempts {0}'.format(
attempts
)
)
log.warning('Failed to set tags. Remaining attempts %s', attempts)
attempts -= 1
# Just a little delay between attempts...
time.sleep(1)
@ -3179,7 +3156,7 @@ def rename(name, kwargs, call=None):
'The rename action must be called with -a or --action.'
)
log.info('Renaming {0} to {1}'.format(name, kwargs['newname']))
log.info('Renaming %s to %s', name, kwargs['newname'])
set_tags(name, {'Name': kwargs['newname']}, call='action')
@ -3244,10 +3221,8 @@ def destroy(name, call=None):
newname = '{0}-DEL{1}'.format(name, uuid.uuid4().hex)
rename(name, kwargs={'newname': newname}, call='action')
log.info(
'Machine will be identified as {0} until it has been '
'cleaned up.'.format(
newname
)
'Machine will be identified as %s until it has been '
'cleaned up.', newname
)
ret['newname'] = newname
@ -3386,7 +3361,7 @@ def _get_node(name=None, instance_id=None, location=None):
params = {'Action': 'DescribeInstances'}
if str(name).startswith('i-') and (len(name) == 10 or len(name) == 19):
if six.text_type(name).startswith('i-') and (len(name) == 10 or len(name) == 19):
instance_id = name
if instance_id:
@ -3412,10 +3387,8 @@ def _get_node(name=None, instance_id=None, location=None):
except IndexError:
attempts -= 1
log.debug(
'Failed to get the data for node \'{0}\'. Remaining '
'attempts: {1}'.format(
instance_id or name, attempts
)
'Failed to get the data for node \'%s\'. Remaining '
'attempts: %s', instance_id or name, attempts
)
# Just a little delay between attempts...
time.sleep(0.5)
@ -3624,10 +3597,8 @@ def show_term_protect(name=None, instance_id=None, call=None, quiet=False):
log.log(
logging.DEBUG if quiet is True else logging.INFO,
'Termination Protection is {0} for {1}'.format(
disable_protect == 'true' and 'enabled' or 'disabled',
name
)
'Termination Protection is %s for %s',
disable_protect == 'true' and 'enabled' or 'disabled', name
)
return disable_protect
@ -3642,7 +3613,7 @@ def show_detailed_monitoring(name=None, instance_id=None, call=None, quiet=False
'The show_detailed_monitoring action must be called with -a or --action.'
)
location = get_location()
if str(name).startswith('i-') and (len(name) == 10 or len(name) == 19):
if six.text_type(name).startswith('i-') and (len(name) == 10 or len(name) == 19):
instance_id = name
if not name and not instance_id:
@ -3653,7 +3624,7 @@ def show_detailed_monitoring(name=None, instance_id=None, call=None, quiet=False
matched = _get_node(name=name, instance_id=instance_id, location=location)
log.log(
logging.DEBUG if quiet is True else logging.INFO,
'Detailed Monitoring is {0} for {1}'.format(matched['monitoring'], name)
'Detailed Monitoring is %s for %s', matched['monitoring'], name
)
return matched['monitoring']
@ -4140,9 +4111,9 @@ def __attach_vol_to_instance(params, kws, instance_id):
sigver='4')
if data[0]:
log.warning(
('Error attaching volume {0} '
'to instance {1}. Retrying!').format(kws['volume_id'],
instance_id))
'Error attaching volume %s to instance %s. Retrying!',
kws['volume_id'], instance_id
)
return False
return data
@ -4368,7 +4339,7 @@ def import_keypair(kwargs=None, call=None):
if os.path.exists(public_key_file):
with salt.utils.files.fopen(public_key_file, 'r') as fh_:
public_key = fh_.read()
public_key = salt.utils.stringutils.to_unicode(fh_.read())
if public_key is not None:
params['PublicKeyMaterial'] = base64.b64encode(public_key)
@ -4757,7 +4728,7 @@ def get_password_data(
if 'key' not in kwargs:
if 'key_file' in kwargs:
with salt.utils.files.fopen(kwargs['key_file'], 'r') as kf_:
kwargs['key'] = kf_.read()
kwargs['key'] = salt.utils.stringutils.to_unicode(kf_.read())
if 'key' in kwargs:
pwdata = ret.get('passwordData', None)
@ -4925,7 +4896,7 @@ def show_pricing(kwargs=None, call=None):
update_pricing({'type': name}, 'function')
with salt.utils.files.fopen(pricefile, 'r') as fhi:
ec2_price = msgpack.load(fhi)
ec2_price = salt.utils.stringutils.to_unicode(msgpack.load(fhi))
region = get_location(profile)
size = profile.get('size', None)

View File

@ -47,7 +47,7 @@ Example Provider Configuration
# pylint: disable=invalid-name,function-redefined
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import re
@ -447,10 +447,10 @@ def __get_host(node, vm_):
'''
if __get_ssh_interface(vm_) == 'private_ips' or vm_['external_ip'] is None:
ip_address = node.private_ips[0]
log.info('Salt node data. Private_ip: {0}'.format(ip_address))
log.info('Salt node data. Private_ip: %s', ip_address)
else:
ip_address = node.public_ips[0]
log.info('Salt node data. Public_ip: {0}'.format(ip_address))
log.info('Salt node data. Public_ip: %s', ip_address)
if len(ip_address) > 0:
return ip_address
@ -551,7 +551,7 @@ def _parse_allow(allow):
if len(seen_protos[k]) > 0:
d['ports'] = seen_protos[k]
allow_dict.append(d)
log.debug("firewall allowed protocols/ports: {0}".format(allow_dict))
log.debug("firewall allowed protocols/ports: %s", allow_dict)
return allow_dict
@ -675,9 +675,8 @@ def delete_network(kwargs=None, call=None):
)
except ResourceNotFoundError as exc:
log.error(
'Nework {0} was not found. Exception was: {1}'.format(
name, exc),
exc_info_on_loglevel=logging.DEBUG
'Nework %s was not found. Exception was: %s',
name, exc, exc_info_on_loglevel=logging.DEBUG
)
return False
@ -848,9 +847,8 @@ def delete_subnetwork(kwargs=None, call=None):
result = conn.ex_destroy_subnetwork(name, region)
except ResourceNotFoundError as exc:
log.error(
'Subnetwork {0} was not found. Exception was: {1}'.format(
name, exc),
exc_info_on_loglevel=logging.DEBUG
'Subnetwork %s was not found. Exception was: %s',
name, exc, exc_info_on_loglevel=logging.DEBUG
)
return False
@ -1021,9 +1019,8 @@ def delete_fwrule(kwargs=None, call=None):
)
except ResourceNotFoundError as exc:
log.error(
'Rule {0} was not found. Exception was: {1}'.format(
name, exc),
exc_info_on_loglevel=logging.DEBUG
'Rule %s was not found. Exception was: %s',
name, exc, exc_info_on_loglevel=logging.DEBUG
)
return False
@ -1181,9 +1178,8 @@ def delete_hc(kwargs=None, call=None):
)
except ResourceNotFoundError as exc:
log.error(
'Health check {0} was not found. Exception was: {1}'.format(
name, exc),
exc_info_on_loglevel=logging.DEBUG
'Health check %s was not found. Exception was: %s',
name, exc, exc_info_on_loglevel=logging.DEBUG
)
return False
@ -1330,9 +1326,8 @@ def delete_address(kwargs=None, call=None):
)
except ResourceNotFoundError as exc:
log.error(
'Address {0} in region {1} was not found. Exception was: {2}'.format(
name, ex_region, exc),
exc_info_on_loglevel=logging.DEBUG
'Address %s in region %s was not found. Exception was: %s',
name, ex_region, exc, exc_info_on_loglevel=logging.DEBUG
)
return False
@ -1506,9 +1501,8 @@ def delete_lb(kwargs=None, call=None):
)
except ResourceNotFoundError as exc:
log.error(
'Load balancer {0} was not found. Exception was: {1}'.format(
name, exc),
exc_info_on_loglevel=logging.DEBUG
'Load balancer %s was not found. Exception was: %s',
name, exc, exc_info_on_loglevel=logging.DEBUG
)
return False
@ -1642,9 +1636,8 @@ def detach_lb(kwargs=None, call=None):
if not remove_member:
log.error(
'The specified member {0} was not a member of LB {1}.'.format(
kwargs['member'], kwargs['name']
)
'The specified member %s was not a member of LB %s.',
kwargs['member'], kwargs['name']
)
return False
@ -1711,9 +1704,8 @@ def delete_snapshot(kwargs=None, call=None):
)
except ResourceNotFoundError as exc:
log.error(
'Snapshot {0} was not found. Exception was: {1}'.format(
name, exc),
exc_info_on_loglevel=logging.DEBUG
'Snapshot %s was not found. Exception was: %s',
name, exc, exc_info_on_loglevel=logging.DEBUG
)
return False
@ -1772,10 +1764,9 @@ def delete_disk(kwargs=None, call=None):
result = conn.destroy_volume(disk)
except ResourceInUseError as exc:
log.error(
'Disk {0} is in use and must be detached before deleting.\n'
'The following exception was thrown by libcloud:\n{1}'.format(
disk.name, exc),
exc_info_on_loglevel=logging.DEBUG
'Disk %s is in use and must be detached before deleting.\n'
'The following exception was thrown by libcloud:\n%s',
disk.name, exc, exc_info_on_loglevel=logging.DEBUG
)
return False
@ -1916,9 +1907,8 @@ def create_snapshot(kwargs=None, call=None):
disk = conn.ex_get_volume(disk_name)
except ResourceNotFoundError as exc:
log.error(
'Disk {0} was not found. Exception was: {1}'.format(
disk_name, exc),
exc_info_on_loglevel=logging.DEBUG
'Disk %s was not found. Exception was: %s',
disk_name, exc, exc_info_on_loglevel=logging.DEBUG
)
return False
@ -2287,12 +2277,10 @@ def destroy(vm_name, call=None):
node = conn.ex_get_node(vm_name)
except Exception as exc: # pylint: disable=W0703
log.error(
'Could not locate instance {0}\n\n'
'Could not locate instance %s\n\n'
'The following exception was thrown by libcloud when trying to '
'run the initial deployment: \n{1}'.format(
vm_name, exc
),
exc_info_on_loglevel=logging.DEBUG
'run the initial deployment: \n%s',
vm_name, exc, exc_info_on_loglevel=logging.DEBUG
)
raise SaltCloudSystemExit(
'Could not find instance {0}.'.format(vm_name)
@ -2326,12 +2314,10 @@ def destroy(vm_name, call=None):
inst_deleted = conn.destroy_node(node)
except Exception as exc: # pylint: disable=W0703
log.error(
'Could not destroy instance {0}\n\n'
'Could not destroy instance %s\n\n'
'The following exception was thrown by libcloud when trying to '
'run the initial deployment: \n{1}'.format(
vm_name, exc
),
exc_info_on_loglevel=logging.DEBUG
'run the initial deployment: \n%s',
vm_name, exc, exc_info_on_loglevel=logging.DEBUG
)
raise SaltCloudSystemExit(
'Could not destroy instance {0}.'.format(vm_name)
@ -2365,12 +2351,10 @@ def destroy(vm_name, call=None):
# to allow completion of instance deletion. Just log the error
# and keep going.
log.error(
'Could not destroy disk {0}\n\n'
'Could not destroy disk %s\n\n'
'The following exception was thrown by libcloud when trying '
'to run the initial deployment: \n{1}'.format(
vm_name, exc
),
exc_info_on_loglevel=logging.DEBUG
'to run the initial deployment: \n%s',
vm_name, exc, exc_info_on_loglevel=logging.DEBUG
)
__utils__['cloud.fire_event'](
'event',
@ -2513,10 +2497,11 @@ def request_instance(vm_):
'\'pd-standard\', \'pd-ssd\''
)
log.info('Creating GCE instance {0} in {1}'.format(vm_['name'],
kwargs['location'].name)
log.info(
'Creating GCE instance %s in %s',
vm_['name'], kwargs['location'].name
)
log.debug('Create instance kwargs {0}'.format(str(kwargs)))
log.debug('Create instance kwargs %s', kwargs)
__utils__['cloud.fire_event'](
'event',
@ -2531,12 +2516,10 @@ def request_instance(vm_):
node_data = conn.create_node(**kwargs)
except Exception as exc: # pylint: disable=W0703
log.error(
'Error creating {0} on GCE\n\n'
'Error creating %s on GCE\n\n'
'The following exception was thrown by libcloud when trying to '
'run the initial deployment: \n{1}'.format(
vm_['name'], exc
),
exc_info_on_loglevel=logging.DEBUG
'run the initial deployment: \n%s',
vm_['name'], exc, exc_info_on_loglevel=logging.DEBUG
)
return False
@ -2554,7 +2537,7 @@ def request_instance(vm_):
transport=__opts__['transport']
)
log.info('Create and attach volumes to node {0}'.format(vm_['name']))
log.info('Create and attach volumes to node %s', vm_['name'])
create_attach_volumes(
vm_['name'],
{
@ -2598,11 +2581,10 @@ def create(vm_=None, call=None):
ret.update(node_dict)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.trace(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(node_dict)
)
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(node_dict)
)
__utils__['cloud.fire_event'](

View File

@ -37,7 +37,7 @@ Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
argument should not be used on maps referencing GoGrid instances.
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import pprint
@ -49,6 +49,7 @@ import salt.config as config
import salt.utils.cloud
import salt.utils.hashutils
from salt.exceptions import SaltCloudSystemExit, SaltCloudException
from salt.ext import six
# Get logging started
log = logging.getLogger(__name__)
@ -104,7 +105,7 @@ def create(vm_):
if len(vm_['name']) > 20:
raise SaltCloudException('VM names must not be longer than 20 characters')
log.info('Creating Cloud VM {0}'.format(vm_['name']))
log.info('Creating Cloud VM %s', vm_['name'])
image_id = avail_images()[vm_['image']]['id']
if 'assign_public_ip' in vm_:
host_ip = vm_['assign_public_ip']
@ -136,11 +137,9 @@ def create(vm_):
data = _query('grid', 'server/add', args=create_kwargs)
except Exception:
log.error(
'Error creating {0} on GOGRID\n\n'
'Error creating %s on GOGRID\n\n'
'The following exception was thrown when trying to '
'run the initial deployment:\n'.format(
vm_['name']
),
'run the initial deployment:\n', vm_['name'],
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -172,11 +171,10 @@ def create(vm_):
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](
@ -527,12 +525,12 @@ def _query(action=None,
if command:
path += '/{0}'.format(command)
log.debug('GoGrid URL: {0}'.format(path))
log.debug('GoGrid URL: %s', path)
if not isinstance(args, dict):
args = {}
epoch = str(int(time.time()))
epoch = six.text_type(int(time.time()))
hashtext = ''.join((apikey, sharedsecret, epoch))
args['sig'] = salt.utils.hashutils.md5_digest(hashtext)
args['format'] = 'json'
@ -562,10 +560,6 @@ def _query(action=None,
status=True,
opts=__opts__,
)
log.debug(
'GoGrid Response Status Code: {0}'.format(
result['status']
)
)
log.debug('GoGrid Response Status Code: %s', result['status'])
return result['dict']

View File

@ -51,7 +51,7 @@ included:
# pylint: disable=invalid-name,function-redefined
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import os
import logging
import base64
@ -138,7 +138,7 @@ def get_image(vm_):
vm_image = config.get_cloud_config_value('image', vm_, __opts__)
if vm_image and str(vm_image) in images:
if vm_image and six.text_type(vm_image) in images:
images[vm_image]['name'] = images[vm_image]['id']
return images[vm_image]
@ -156,7 +156,7 @@ def get_size(vm_):
if not vm_size:
raise SaltCloudNotFound('No size specified for this VM.')
if vm_size and str(vm_size) in sizes:
if vm_size and six.text_type(vm_size) in sizes:
return sizes[vm_size]
raise SaltCloudNotFound(
@ -196,13 +196,11 @@ def query_instance(vm_=None, call=None):
return False
if isinstance(data, dict) and 'error' in data:
log.warning(
'There was an error in the query {0}'.format(data.get('error'))
)
log.warning('There was an error in the query %s', data.get('error'))
# Trigger a failure in the wait for IP function
return False
log.debug('Returned query data: {0}'.format(data))
log.debug('Returned query data: %s', data)
if 'primaryIp' in data[1]:
# Wait for SSH to be fully configured on the remote side
@ -227,7 +225,7 @@ def query_instance(vm_=None, call=None):
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))
return data
@ -266,10 +264,8 @@ def create(vm_):
)
log.info(
'Creating Cloud VM {0} in {1}'.format(
vm_['name'],
vm_.get('location', DEFAULT_LOCATION)
)
'Creating Cloud VM %s in %s',
vm_['name'], vm_.get('location', DEFAULT_LOCATION)
)
# added . for fqdn hostnames
@ -298,7 +294,7 @@ def create(vm_):
data = create_node(**kwargs)
if data == {}:
log.error('Error creating {0} on JOYENT'.format(vm_['name']))
log.error('Error creating %s on JOYENT', vm_['name'])
return False
query_instance(vm_)
@ -364,9 +360,7 @@ def create_node(**kwargs):
if ret[0] in VALID_RESPONSE_CODES:
return ret[1]
else:
log.error(
'Failed to create node {0}: {1}'.format(name, ret[1])
)
log.error('Failed to create node %s: %s', name, ret[1])
return {}
@ -510,13 +504,13 @@ def take_action(name=None, call=None, command=None, data=None, method='GET',
ret = query(command=command, data=data, method=method,
location=location)
log.info('Success {0} for node {1}'.format(caller, name))
log.info('Success %s for node %s', caller, name)
except Exception as exc:
if 'InvalidState' in str(exc):
if 'InvalidState' in six.text_type(exc):
ret = [200, {}]
else:
log.error(
'Failed to invoke {0} node {1}: {2}'.format(caller, name, exc),
'Failed to invoke %s node %s: %s', caller, name, exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -595,11 +589,7 @@ def has_method(obj, method_name):
if method_name in dir(obj):
return True
log.error(
'Method \'{0}\' not yet supported!'.format(
method_name
)
)
log.error('Method \'%s\' not yet supported!', method_name)
return False
@ -746,7 +736,7 @@ def list_nodes(full=False, call=None):
node['location'] = location
ret[node['name']] = reformat_node(item=node, full=full)
else:
log.error('Invalid response when listing Joyent nodes: {0}'.format(result[1]))
log.error('Invalid response when listing Joyent nodes: %s', result[1])
else:
location = get_location()
@ -947,13 +937,11 @@ def import_key(kwargs=None, call=None):
return False
if not os.path.isfile(kwargs['keyfile']):
log.error('The specified keyfile ({0}) does not exist.'.format(
kwargs['keyfile']
))
log.error('The specified keyfile (%s) does not exist.', kwargs['keyfile'])
return False
with salt.utils.files.fopen(kwargs['keyfile'], 'r') as fp_:
kwargs['key'] = fp_.read()
kwargs['key'] = salt.utils.stringutils.to_unicode(fp_.read())
send_data = {'name': kwargs['keyname'], 'key': kwargs['key']}
kwargs['data'] = salt.utils.json.dumps(send_data)
@ -1064,7 +1052,7 @@ def query(action=None,
if command:
path += '/{0}'.format(command)
log.debug('User: \'{0}\' on PATH: {1}'.format(user, path))
log.debug('User: \'%s\' on PATH: %s', user, path)
if (not user) or (not ssh_keyfile) or (not ssh_keyname) or (not location):
return None
@ -1117,11 +1105,7 @@ def query(action=None,
verify_ssl=verify_ssl,
opts=__opts__,
)
log.debug(
'Joyent Response Status Code: {0}'.format(
result['status']
)
)
log.debug('Joyent Response Status Code: %s', result['status'])
if 'headers' not in result:
return [result['status'], result['error']]

View File

@ -55,7 +55,7 @@ Tested on:
# manage domains that we actually created
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import uuid
@ -104,7 +104,7 @@ def libvirt_error_handler(ctx, error): # pylint: disable=unused-argument
'''
Redirect stderr prints from libvirt to salt logging.
'''
log.debug("libvirt error {0}".format(error))
log.debug("libvirt error %s", error)
if HAS_LIBVIRT:
@ -250,14 +250,14 @@ def get_domain_ips(domain, ip_source):
try:
addresses = domain.interfaceAddresses(ip_source, 0)
except libvirt.libvirtError as error:
log.info("Exception polling address {0}".format(error))
log.info("Exception polling address %s", error)
return ips
for (name, val) in six.iteritems(addresses):
if val['addrs']:
for addr in val['addrs']:
tp = to_ip_addr_type(addr['type'])
log.info("Found address {0}".format(addr))
log.info("Found address %s", addr)
if tp == "ipv4":
ips.append(addr['addr'])
return ips
@ -291,7 +291,7 @@ def create(vm_):
validate_xml = vm_.get('validate_xml') if vm_.get('validate_xml') is not None else True
log.info("Cloning '{0}' with strategy '{1}' validate_xml='{2}'".format(vm_['name'], clone_strategy, validate_xml))
log.info("Cloning '%s' with strategy '%s' validate_xml='%s'", vm_['name'], clone_strategy, validate_xml)
try:
# Check for required profile parameters before sending any API calls.
@ -358,7 +358,7 @@ def create(vm_):
transport=__opts__['transport']
)
log.debug("Source machine XML '{0}'".format(xml))
log.debug("Source machine XML '%s'", xml)
domain_xml = ElementTree.fromstring(xml)
domain_xml.find('./name').text = name
@ -389,7 +389,7 @@ def create(vm_):
if source_element and 'path' in source_element.attrib:
path = source_element.attrib['path']
new_path = path.replace('/domain-{0}/'.format(base), '/domain-{0}/'.format(name))
log.debug("Rewriting agent socket path to {0}".format(new_path))
log.debug("Rewriting agent socket path to %s", new_path)
source_element.attrib['path'] = new_path
for disk in domain_xml.findall("""./devices/disk[@device='disk'][@type='file']"""):
@ -400,7 +400,7 @@ def create(vm_):
# Err on the safe side
raise SaltCloudExecutionFailure("Non qemu driver disk encountered bailing out.")
disk_type = driver.attrib.get('type')
log.info("disk attributes {0}".format(disk.attrib))
log.info("disk attributes %s", disk.attrib)
if disk_type == 'qcow2':
source = disk.find("./source").attrib['file']
pool, volume = find_pool_and_volume(conn, source)
@ -423,7 +423,7 @@ def create(vm_):
raise SaltCloudExecutionFailure("Disk type '{0}' not supported".format(disk_type))
clone_xml = ElementTree.tostring(domain_xml)
log.debug("Clone XML '{0}'".format(clone_xml))
log.debug("Clone XML '%s'", clone_xml)
validate_flags = libvirt.VIR_DOMAIN_DEFINE_VALIDATE if validate_xml else 0
clone_domain = conn.defineXMLFlags(clone_xml, validate_flags)
@ -431,7 +431,7 @@ def create(vm_):
cleanup.append({'what': 'domain', 'item': clone_domain})
clone_domain.createWithFlags(libvirt.VIR_DOMAIN_START_FORCE_BOOT)
log.debug("VM '{0}'".format(vm_))
log.debug("VM '%s'", vm_)
if ip_source == 'qemu-agent':
ip_source = libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT
@ -446,7 +446,7 @@ def create(vm_):
interval_multiplier=config.get_cloud_config_value('wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
)
log.info('Address = {0}'.format(address))
log.info('Address = %s', address)
vm_['ssh_host'] = address
@ -492,23 +492,23 @@ def do_cleanup(cleanup):
what = leftover['what']
item = leftover['item']
if what == 'domain':
log.info('Cleaning up {0} {1}'.format(what, item.name()))
log.info('Cleaning up %s %s', what, item.name())
try:
item.destroy()
log.debug('{0} {1} forced off'.format(what, item.name()))
log.debug('%s %s forced off', what, item.name())
except libvirtError:
pass
try:
item.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE+
libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA+
libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
log.debug('{0} {1} undefined'.format(what, item.name()))
log.debug('%s %s undefined', what, item.name())
except libvirtError:
pass
if what == 'volume':
try:
item.delete()
log.debug('{0} {1} cleaned up'.format(what, item.name()))
log.debug('%s %s cleaned up', what, item.name())
except libvirtError:
pass
@ -532,7 +532,7 @@ def destroy(name, call=None):
@return: True if all went well, otherwise an error message
@rtype: bool|str
"""
log.info("Attempting to delete instance {0}".format(name))
log.info("Attempting to delete instance %s", name)
if call == 'function':
raise SaltCloudSystemExit(
@ -546,7 +546,7 @@ def destroy(name, call=None):
providers_to_check = [_f for _f in [cfg.get('libvirt') for cfg in six.itervalues(providers)] if _f]
for provider in providers_to_check:
conn = __get_conn(provider['url'])
log.info("looking at {0}".format(provider['url']))
log.info("looking at %s", provider['url'])
try:
domain = conn.lookupByName(name)
found.append({'domain': domain, 'conn': conn})
@ -581,17 +581,17 @@ def destroy(name, call=None):
def destroy_domain(conn, domain):
log.info('Destroying domain {0}'.format(domain.name()))
log.info('Destroying domain %s', domain.name())
try:
domain.destroy()
except libvirtError:
pass
volumes = get_domain_volumes(conn, domain)
for volume in volumes:
log.debug('Removing volume {0}'.format(volume.name()))
log.debug('Removing volume %s', volume.name())
volume.delete()
log.debug('Undefining domain {0}'.format(domain.name()))
log.debug('Undefining domain %s', domain.name())
domain.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE+
libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA+
libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
@ -612,11 +612,11 @@ def create_volume_xml(volume):
volume_xml = ElementTree.fromstring(template)
# TODO: generate name
volume_xml.find('name').text = generate_new_name(volume.name())
log.debug("Volume: {0}".format(dir(volume)))
volume_xml.find('capacity').text = str(volume.info()[1])
log.debug("Volume: %s", dir(volume))
volume_xml.find('capacity').text = six.text_type(volume.info()[1])
volume_xml.find('./target/path').text = volume.path()
xml_string = ElementTree.tostring(volume_xml)
log.debug("Creating {0}".format(xml_string))
log.debug("Creating %s", xml_string)
return xml_string
@ -638,11 +638,11 @@ def create_volume_with_backing_store_xml(volume):
volume_xml = ElementTree.fromstring(template)
# TODO: generate name
volume_xml.find('name').text = generate_new_name(volume.name())
log.debug("volume: {0}".format(dir(volume)))
volume_xml.find('capacity').text = str(volume.info()[1])
log.debug("volume: %s", dir(volume))
volume_xml.find('capacity').text = six.text_type(volume.info()[1])
volume_xml.find('./backingStore/path').text = volume.path()
xml_string = ElementTree.tostring(volume_xml)
log.debug("Creating {0}".format(xml_string))
log.debug("Creating %s", xml_string)
return xml_string
@ -674,5 +674,5 @@ def get_domain_volumes(conn, domain):
pool, volume = find_pool_and_volume(conn, source)
volumes.append(volume)
except libvirtError:
log.warning("Disk not found '{0}'".format(source))
log.warning("Disk not found '%s'", source)
return volumes

View File

@ -26,7 +26,7 @@ Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/c
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
@ -266,7 +266,7 @@ def boot(name=None, kwargs=None, call=None):
boot_job_id = response['JobID']
if not _wait_for_job(linode_id, boot_job_id):
log.error('Boot failed for Linode {0}.'.format(linode_item))
log.error('Boot failed for Linode %s.', linode_item)
return False
return True
@ -347,7 +347,7 @@ def create(vm_):
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(name))
log.info('Creating Cloud VM %s', name)
data = {}
kwargs = {'name': name}
@ -394,14 +394,10 @@ def create(vm_):
'plan_id': plan_id})
except Exception as err:
log.error(
'Error cloning \'{0}\' on Linode.\n\n'
'Error cloning \'%s\' on Linode.\n\n'
'The following exception was thrown by Linode when trying to '
'clone the specified machine:\n'
'{1}'.format(
clonefrom_name,
err
),
exc_info_on_loglevel=logging.DEBUG
'clone the specified machine:\n%s',
clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
)
return False
else:
@ -415,25 +411,20 @@ def create(vm_):
})
except Exception as err:
log.error(
'Error creating {0} on Linode\n\n'
'Error creating %s on Linode\n\n'
'The following exception was thrown by Linode when trying to '
'run the initial deployment:\n'
'{1}'.format(
name,
err
),
exc_info_on_loglevel=logging.DEBUG
'run the initial deployment:\n%s',
name, err, exc_info_on_loglevel=logging.DEBUG
)
return False
if 'ERRORARRAY' in result:
for error_data in result['ERRORARRAY']:
log.error('Error creating {0} on Linode\n\n'
'The Linode API returned the following: {1}\n'.format(
name,
error_data['ERRORMESSAGE']
)
)
log.error(
'Error creating %s on Linode\n\n'
'The Linode API returned the following: %s\n',
name, error_data['ERRORMESSAGE']
)
return False
__utils__['cloud.fire_event'](
@ -450,14 +441,14 @@ def create(vm_):
if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
log.error(
'Error creating {0} on LINODE\n\n'
'while waiting for initial ready status'.format(name),
exc_info_on_loglevel=logging.DEBUG
'Error creating %s on LINODE\n\n'
'while waiting for initial ready status',
name, exc_info_on_loglevel=logging.DEBUG
)
# Update the Linode's Label to reflect the given VM name
update_linode(node_id, update_args={'Label': name})
log.debug('Set name for {0} - was linode{1}.'.format(name, node_id))
log.debug('Set name for %s - was linode%s.', name, node_id)
# Add private IP address if requested
private_ip_assignment = get_private_ip(vm_)
@ -477,7 +468,7 @@ def create(vm_):
config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
else:
# Create disks and get ids
log.debug('Creating disks for {0}'.format(name))
log.debug('Creating disks for %s', name)
root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
@ -531,12 +522,8 @@ def create(vm_):
ret.update(data)
log.info('Created Cloud VM \'{0}\''.format(name))
log.debug(
'\'{0}\' VM creation details:\n{1}'.format(
name, pprint.pformat(data)
)
)
log.info('Created Cloud VM \'%s\'', name)
log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
__utils__['cloud.fire_event'](
'event',
@ -901,7 +888,7 @@ def get_ips(linode_id=None):
ret = {}
for item in ips:
node_id = str(item['LINODEID'])
node_id = six.text_type(item['LINODEID'])
if item['ISPUBLIC'] == 1:
key = 'public_ips'
else:
@ -915,7 +902,7 @@ def get_ips(linode_id=None):
# dictionary based on the linode ID as a key.
if linode_id:
_all_ips = {'public_ips': [], 'private_ips': []}
matching_id = ret.get(str(linode_id))
matching_id = ret.get(six.text_type(linode_id))
if matching_id:
_all_ips['private_ips'] = matching_id['private_ips']
_all_ips['public_ips'] = matching_id['public_ips']
@ -1177,7 +1164,7 @@ def list_nodes_min(call=None):
for node in nodes:
name = node['LABEL']
this_node = {
'id': str(node['LINODEID']),
'id': six.text_type(node['LINODEID']),
'state': _get_status_descr_by_id(int(node['STATUS']))
}
@ -1221,7 +1208,7 @@ def reboot(name, call=None):
reboot_jid = data['JobID']
if not _wait_for_job(node_id, reboot_jid):
log.error('Reboot failed for {0}.'.format(name))
log.error('Reboot failed for %s.', name)
return False
return data
@ -1434,7 +1421,7 @@ def _list_linodes(full=False):
ret = {}
for node in nodes:
this_node = {}
linode_id = str(node['LINODEID'])
linode_id = six.text_type(node['LINODEID'])
this_node['id'] = linode_id
this_node['image'] = node['DISTRIBUTIONVENDOR']
@ -1514,11 +1501,7 @@ def _query(action=None,
opts=__opts__,
)
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
log.debug(
'Linode Response Status Code: {0}'.format(
result['status']
)
)
log.debug('Linode Response Status Code: %s', result['status'])
return result['dict']
@ -1550,16 +1533,10 @@ def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
return True
time.sleep(interval)
if not quiet:
log.info('Still waiting on Job {0} for Linode {1}.'.format(
job_id,
linode_id)
)
else:
log.debug('Still waiting on Job {0} for Linode {1}.'.format(
job_id,
linode_id)
)
log.log(
logging.INFO if not quiet else logging.DEBUG,
'Still waiting on Job %s for Linode %s.', job_id, linode_id
)
return False
@ -1596,18 +1573,11 @@ def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
status_desc_result = _get_status_descr_by_id(result['STATUS'])
time.sleep(interval)
if quiet:
log.info('Status for Linode {0} is \'{1}\', waiting for \'{2}\'.'.format(
linode_id,
status_desc_result,
status_desc_waiting)
)
else:
log.debug('Status for Linode {0} is \'{1}\', waiting for \'{2}\'.'.format(
linode_id,
status_desc_result,
status_desc_waiting)
)
log.log(
logging.INFO if not quiet else logging.DEBUG,
'Status for Linode %s is \'%s\', waiting for \'%s\'.',
linode_id, status_desc_result, status_desc_waiting
)
return False
@ -1644,7 +1614,7 @@ def _validate_name(name):
name
The VM name to validate
'''
name = str(name)
name = six.text_type(name)
name_length = len(name)
regex = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

View File

@ -9,12 +9,12 @@ Please read :ref:`core config documentation <config_lxc>`.
'''
# Import python libs
from __future__ import absolute_import
import os
import logging
from __future__ import absolute_import, print_function, unicode_literals
import copy
import logging
import os
import pprint
import time
from pprint import pformat
# Import salt cloud libs
import salt.utils.cloud
@ -185,7 +185,7 @@ def _salt(fun, *args, **kw):
except Exception:
ping = False
ping_retries += 1
log.error('{0} unreachable, retrying'.format(target))
log.error('%s unreachable, retrying', target)
if not ping:
raise SaltCloudSystemExit('Target {0} unreachable'.format(target))
jid = conn.cmd_async(tgt=target,
@ -213,7 +213,7 @@ def _salt(fun, *args, **kw):
break
if running and (time.time() > endto):
raise Exception('Timeout {0}s for {1} is elapsed'.format(
timeout, pformat(rkwargs)))
timeout, pprint.pformat(rkwargs)))
time.sleep(poll)
# timeout for the master to return data about a specific job
wait_for_res = float({
@ -448,7 +448,7 @@ def create(vm_, call=None):
ret = {'name': vm_['name'], 'changes': {}, 'result': True, 'comment': ''}
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for {0}'.format(vm_['name']))
log.debug('Generating minion keys for %s', vm_['name'])
vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize', vm_, __opts__))
@ -564,6 +564,7 @@ def get_configured_provider(vm_=None):
return data
else:
log.error(
'Configured provider {0} minion: {1} is unreachable'.format(
__active_provider_name__, data['target']))
'Configured provider %s minion: %s is unreachable',
__active_provider_name__, data['target']
)
return False

View File

@ -39,7 +39,7 @@ Example ``/etc/salt/cloud.providers`` or
# pylint: disable=E0102
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import copy
import logging
import pprint
@ -432,7 +432,7 @@ def create(vm_):
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
log.info('Creating Cloud VM %s', vm_['name'])
conn = get_conn()
label = vm_.get('label', vm_['name'])
@ -528,7 +528,7 @@ def create(vm_):
if 'subnet_name' in vm_:
network_config.subnet_names.append(vm_['subnet_name'])
log.debug('vm_kwargs: {0}'.format(vm_kwargs))
log.debug('vm_kwargs: %s', vm_kwargs)
event_kwargs = {'service_kwargs': service_kwargs.copy(),
'vm_kwargs': vm_kwargs.copy()}
@ -543,7 +543,7 @@ def create(vm_):
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('vm_kwargs: {0}'.format(vm_kwargs))
log.debug('vm_kwargs: %s', vm_kwargs)
# Azure lets you open winrm on a new VM
# Can open up specific ports in Azure; but not on Windows
@ -553,31 +553,29 @@ def create(vm_):
log.debug('Cloud service already exists')
except Exception as exc:
error = 'The hosted service name is invalid.'
if error in str(exc):
if error in six.text_type(exc):
log.error(
'Error creating {0} on Azure.\n\n'
'Error creating %s on Azure.\n\n'
'The hosted service name is invalid. The name can contain '
'only letters, numbers, and hyphens. The name must start with '
'a letter and must end with a letter or a number.'.format(
vm_['name']
),
'a letter and must end with a letter or a number.',
vm_['name'],
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
else:
log.error(
'Error creating {0} on Azure\n\n'
'Error creating %s on Azure\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: \n{1}'.format(
vm_['name'], str(exc)
),
'run the initial deployment: \n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
try:
result = conn.create_virtual_machine_deployment(**vm_kwargs)
log.debug('Request ID for machine: {0}'.format(result.request_id))
log.debug('Request ID for machine: %s', result.request_id)
_wait_for_async(conn, result.request_id)
except AzureConflictHttpError:
log.debug('Conflict error. The deployment may already exist, trying add_role')
@ -589,28 +587,26 @@ def create(vm_):
_wait_for_async(conn, result.request_id)
except Exception as exc:
error = 'The hosted service name is invalid.'
if error in str(exc):
if error in six.text_type(exc):
log.error(
'Error creating {0} on Azure.\n\n'
'Error creating %s on Azure.\n\n'
'The VM name is invalid. The name can contain '
'only letters, numbers, and hyphens. The name must start with '
'a letter and must end with a letter or a number.'.format(
vm_['name']
),
'a letter and must end with a letter or a number.',
vm_['name'],
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
else:
log.error(
'Error creating {0} on Azure.\n\n'
'Error creating %s on Azure.\n\n'
'The Virtual Machine could not be created. If you '
'are using an already existing Cloud Service, '
'make sure you set up the `port` variable corresponding '
'to the SSH port exists and that the port number is not '
'already in use.\nThe following exception was thrown when trying to '
'run the initial deployment: \n{1}'.format(
vm_['name'], str(exc)
),
'run the initial deployment: \n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -623,7 +619,7 @@ def create(vm_):
try:
conn.get_role(service_name, service_name, vm_['name'])
data = show_instance(vm_['name'], call='action')
if 'url' in data and data['url'] != str(''):
if 'url' in data and data['url'] != six.text_type(''):
return data['url']
except AzureMissingResourceHttpError:
pass
@ -660,7 +656,7 @@ def create(vm_):
transport=__opts__['transport']
)
log.info('Create and attach volumes to node {0}'.format(vm_['name']))
log.info('Create and attach volumes to node %s', vm_['name'])
created = create_attach_volumes(
vm_['name'],
{
@ -676,12 +672,8 @@ def create(vm_):
ret['Attached Volumes'] = created
data = show_instance(vm_['name'], call='action')
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
)
log.info('Created Cloud VM \'%s\'', vm_)
log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))
ret.update(data)
@ -787,7 +779,7 @@ def create_attach_volumes(name, kwargs, call=None, wait_to_finish=True):
log.info(msg)
ret.append(msg)
else:
log.error('Error attaching {0} on Azure'.format(volume_dict))
log.error('Error attaching %s on Azure', volume_dict)
return ret
@ -961,7 +953,7 @@ def destroy(name, conn=None, call=None, kwargs=None):
'cleanup_vhds',
get_configured_provider(), __opts__, search_global=False, default=False,
))
log.debug('Deleting disk {0}'.format(disk_name))
log.debug('Deleting disk %s', disk_name)
if cleanup_vhds:
log.debug('Deleting vhd')
@ -994,7 +986,7 @@ def destroy(name, conn=None, call=None, kwargs=None):
get_configured_provider(), __opts__, search_global=False, default=False
)
if cleanup_services:
log.debug('Deleting service {0}'.format(service_name))
log.debug('Deleting service %s', service_name)
def wait_for_disk_delete():
'''
@ -1591,7 +1583,10 @@ def cleanup_unattached_disks(kwargs=None, conn=None, call=None):
'name': disks[disk]['name'],
'delete_vhd': kwargs.get('delete_vhd', False)
}
log.info('Deleting disk {name}, deleting VHD: {delete_vhd}'.format(**del_kwargs))
log.info(
'Deleting disk %s, deleting VHD: %s',
del_kwargs['name'], del_kwargs['delete_vhd']
)
data = delete_disk(kwargs=del_kwargs, call='function')
return True
@ -2138,7 +2133,7 @@ def update_input_endpoint(kwargs=None, conn=None, call=None, activity='update'):
if 'enable_direct_server_return' not in kwargs:
kwargs['enable_direct_server_return'] = False
kwargs['enable_direct_server_return'] = str(kwargs['enable_direct_server_return']).lower()
kwargs['enable_direct_server_return'] = six.text_type(kwargs['enable_direct_server_return']).lower()
if 'timeout_for_tcp_idle_connection' not in kwargs:
kwargs['timeout_for_tcp_idle_connection'] = 4

View File

@ -201,7 +201,7 @@ rackconnect v3 cloud network as its variable.
# pylint: disable=E0102
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import os
import logging
import socket
@ -363,7 +363,7 @@ def get_image(conn, vm_):
raise SaltCloudNotFound(
'The specified image, \'{0}\', could not be found: {1}'.format(
vm_image,
str(exc)
exc
)
)
@ -404,7 +404,7 @@ def get_size(conn, vm_):
return sizes[0]
for size in sizes:
if vm_size and str(vm_size) in (str(sizes[size]['id']), str(size)):
if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)):
return sizes[size]['id']
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
@ -444,9 +444,7 @@ def ignore_cidr(vm_, ip):
'ignore_cidr', vm_, __opts__, default='', search_global=False
)
if cidr != '' and all_matching_cidrs(ip, [cidr]):
log.warning(
'IP "{0}" found within "{1}"; ignoring it.'.format(ip, cidr)
)
log.warning('IP "%s" found within "%s"; ignoring it.', ip, cidr)
return True
return False
@ -532,7 +530,7 @@ def destroy(name, conn=None, call=None):
node = conn.server_by_name(name)
profiles = get_configured_provider()['profiles'] # pylint: disable=E0602
if node is None:
log.error('Unable to find the VM {0}'.format(name))
log.error('Unable to find the VM %s', name)
profile = None
if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
profile = node.extra['metadata']['profile']
@ -542,18 +540,15 @@ def destroy(name, conn=None, call=None):
flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
if flush_mine_on_destroy:
log.info('Clearing Salt Mine: {0}'.format(name))
log.info('Clearing Salt Mine: %s', name)
salt_client = salt.client.get_local_client(__opts__['conf_file'])
minions = salt_client.cmd(name, 'mine.flush')
log.info('Clearing Salt Mine: {0}, {1}'.format(
name,
flush_mine_on_destroy
))
log.info('Destroying VM: {0}'.format(name))
log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy)
log.info('Destroying VM: %s', name)
ret = conn.delete(node.id)
if ret:
log.info('Destroyed VM: {0}'.format(name))
log.info('Destroyed VM: %s', name)
# Fire destroy action
__utils__['cloud.fire_event'](
'event',
@ -570,7 +565,7 @@ def destroy(name, conn=None, call=None):
__utils__['cloud.cachedir_index_del'](name)
return True
log.error('Failed to Destroy VM: {0}'.format(name))
log.error('Failed to Destroy VM: %s', name)
return False
@ -587,7 +582,7 @@ def request_instance(vm_=None, call=None):
raise SaltCloudSystemExit(
'The request_instance action must be called with -a or --action.'
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
log.info('Creating Cloud VM %s', vm_['name'])
salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-')
conn = get_conn()
kwargs = vm_.copy()
@ -739,22 +734,15 @@ def request_instance(vm_=None, call=None):
floating_ip = fl_ip
break
if floating_ip is None:
log.error('No IP addresses available to allocate for this server: {0}'.format(vm_['name']))
log.error('No IP addresses available to allocate for this server: %s', vm_['name'])
def __query_node_data(vm_):
try:
node = show_instance(vm_['name'], 'action')
log.debug(
'Loaded node data for {0}:\n{1}'.format(
vm_['name'],
pprint.pformat(node)
)
)
log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node))
except Exception as err:
log.error(
'Failed to get nodes list: {0}'.format(
err
),
'Failed to get nodes list: %s', err,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -777,7 +765,7 @@ def request_instance(vm_=None, call=None):
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))
try:
conn.floating_ip_associate(vm_['name'], floating_ip)
@ -800,12 +788,13 @@ def request_instance(vm_=None, call=None):
def _query_node_data(vm_, data, conn):
try:
node = show_instance(vm_['name'], 'action')
log.debug('Loaded node data for {0}:'
'\n{1}'.format(vm_['name'], pprint.pformat(node)))
log.debug('Loaded node data for %s:\n%s', vm_['name'], pprint.pformat(node))
except Exception as err:
# Show the traceback if the debug logging level is enabled
log.error('Failed to get nodes list: {0}'.format(err),
exc_info_on_loglevel=logging.DEBUG)
log.error(
'Failed to get nodes list: %s', err,
exc_info_on_loglevel=logging.DEBUG
)
# Trigger a failure in the wait for IP function
return False
@ -887,13 +876,13 @@ def _query_node_data(vm_, data, conn):
if private_ip is False:
continue
if salt.utils.cloud.is_public_ip(private_ip):
log.warning('{0} is a public IP'.format(private_ip))
log.warning('%s is a public IP', private_ip)
data.public_ips.append(private_ip)
log.warning('Public IP address was not ready when we last checked. '
'Appending public IP address now.')
public = data.public_ips
else:
log.warning('{0} is a private IP'.format(private_ip))
log.warning('%s is a private IP', private_ip)
ignore_ip = ignore_cidr(vm_, private_ip)
if private_ip not in data.private_ips and not ignore_ip:
result.append(private_ip)
@ -924,13 +913,13 @@ def _query_node_data(vm_, data, conn):
non_private_ips.append(fixed)
if non_private_ips:
log.debug('result = {0}'.format(non_private_ips))
log.debug('result = %s', non_private_ips)
data.private_ips = result
if ssh_interface(vm_) != 'private_ips':
return data
if result:
log.debug('result = {0}'.format(result))
log.debug('result = %s', result)
data.private_ips = result
if ssh_interface(vm_) == 'private_ips':
return data
@ -977,7 +966,7 @@ def create(vm_):
# This was probably created via another process, and doesn't have
# things like salt keys created yet, so let's create them now.
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'{0[name]}\''.format(vm_))
log.debug('Generating minion keys for \'%s\'', vm_['name'])
vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
@ -1013,7 +1002,7 @@ def create(vm_):
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))
log.debug('VM is now running')
@ -1025,20 +1014,20 @@ def create(vm_):
ip_address = preferred_ip(vm_, data.floating_ips)
else:
ip_address = preferred_ip(vm_, data.public_ips)
log.debug('Using IP address {0}'.format(ip_address))
log.debug('Using IP address %s', ip_address)
if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
salt_ip_address = preferred_ip(vm_, data.private_ips)
log.info('Salt interface set to: {0}'.format(salt_ip_address))
log.info('Salt interface set to: %s', salt_ip_address)
elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips':
salt_ip_address = preferred_ip(vm_, data.fixed_ips)
log.info('Salt interface set to: {0}'.format(salt_ip_address))
log.info('Salt interface set to: %s', salt_ip_address)
elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips':
salt_ip_address = preferred_ip(vm_, data.floating_ips)
log.info('Salt interface set to: {0}'.format(salt_ip_address))
log.info('Salt interface set to: %s', salt_ip_address)
else:
salt_ip_address = preferred_ip(vm_, data.public_ips)
log.debug('Salt interface set to: {0}'.format(salt_ip_address))
log.debug('Salt interface set to: %s', salt_ip_address)
if not ip_address:
raise SaltCloudSystemExit('A valid IP address was not found')
@ -1053,11 +1042,10 @@ def create(vm_):
if 'password' in ret['extra']:
del ret['extra']['password']
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data.__dict__)
)
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data.__dict__)
)
event_data = {

View File

@ -80,7 +80,7 @@ Set ``deploy`` to False if Salt should not be installed on the node.
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import pprint
@ -99,6 +99,7 @@ import salt.utils.files
# Import salt.cloud libs
import salt.utils.cloud
import salt.utils.stringutils
from salt.ext import six
try:
@ -545,12 +546,10 @@ def create(vm_):
data['id'])
except Exception as exc: # pylint: disable=W0703
log.error(
'Error creating {0} on 1and1\n\n'
'Error creating %s on 1and1\n\n'
'The following exception was thrown by the 1and1 library '
'when trying to run the initial deployment: \n{1}'.format(
vm_['name'], exc
),
exc_info_on_loglevel=logging.DEBUG
'when trying to run the initial deployment: \n%s',
vm_['name'], exc, exc_info_on_loglevel=logging.DEBUG
)
return False
@ -567,17 +566,14 @@ def create(vm_):
if not data:
return False
log.debug(
'Loaded node data for {0}:\nname: {1}\nstate: {2}'.format(
vm_['name'],
pprint.pformat(data['name']),
data['status']['state']
)
'Loaded node data for %s:\nname: %s\nstate: %s',
vm_['name'],
pprint.pformat(data['name']),
data['status']['state']
)
except Exception as err:
log.error(
'Failed to get nodes list: {0}'.format(
err
),
'Failed to get nodes list: %s', err,
# Show the trackback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -609,15 +605,11 @@ def create(vm_):
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc.message))
raise SaltCloudSystemExit(six.text_type(exc.message))
log.debug('VM is now running')
log.info('Created Cloud VM {0}'.format(vm_))
log.debug(
'{0} VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
)
log.info('Created Cloud VM %s', vm_)
log.debug('%s VM creation details:\n%s', vm_, pprint.pformat(data))
__utils__['cloud.fire_event'](
'event',
@ -806,7 +798,7 @@ def load_public_key(vm_):
)
with salt.utils.files.fopen(public_key_filename, 'r') as public_key:
key = public_key.read().replace('\n', '')
key = salt.utils.stringutils.to_unicode(public_key.read().replace('\n', ''))
return key

View File

@ -61,7 +61,7 @@ to find the IP of the new VM.
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import pprint
@ -80,6 +80,7 @@ import salt.utils.data
import salt.utils.files
# Import Third Party Libs
from salt.ext import six
try:
import salt.ext.six.moves.xmlrpc_client # pylint: disable=E0611
from lxml import etree
@ -446,7 +447,7 @@ def reboot(name, call=None):
'The start action must be called with -a or --action.'
)
log.info('Rebooting node {0}'.format(name))
log.info('Rebooting node %s', name)
return vm_action(name, kwargs={'action': 'reboot'}, call=call)
@ -471,7 +472,7 @@ def start(name, call=None):
'The start action must be called with -a or --action.'
)
log.info('Starting node {0}'.format(name))
log.info('Starting node %s', name)
return vm_action(name, kwargs={'action': 'resume'}, call=call)
@ -496,7 +497,7 @@ def stop(name, call=None):
'The start action must be called with -a or --action.'
)
log.info('Stopping node {0}'.format(name))
log.info('Stopping node %s', name)
return vm_action(name, kwargs={'action': 'stop'}, call=call)
@ -644,7 +645,7 @@ def get_image(vm_):
The VM dictionary for which to obtain an image.
'''
images = avail_images()
vm_image = str(config.get_cloud_config_value(
vm_image = six.text_type(config.get_cloud_config_value(
'image', vm_, __opts__, search_global=False
))
for image in images:
@ -699,7 +700,7 @@ def get_location(vm_):
The VM dictionary for which to obtain a location.
'''
locations = avail_locations()
vm_location = str(config.get_cloud_config_value(
vm_location = six.text_type(config.get_cloud_config_value(
'location', vm_, __opts__, search_global=False
))
@ -833,7 +834,7 @@ def get_template(vm_):
The VM dictionary for which to obtain a template.
'''
vm_template = str(config.get_cloud_config_value(
vm_template = six.text_type(config.get_cloud_config_value(
'template', vm_, __opts__, search_global=False
))
try:
@ -1011,7 +1012,7 @@ def create(vm_):
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
log.info('Creating Cloud VM %s', vm_['name'])
kwargs = {
'name': vm_['name'],
'template_id': get_template(vm_),
@ -1050,7 +1051,7 @@ def create(vm_):
for disk in get_disks:
template.append(_get_device_template(disk, get_disks[disk],
template=template_name))
if 'CLONE' not in str(template):
if 'CLONE' not in six.text_type(template):
raise SaltCloudSystemExit(
'Missing an image disk to clone. Must define a clone disk alongside all other disk definitions.'
)
@ -1067,24 +1068,20 @@ def create(vm_):
template_args)
if not cret[0]:
log.error(
'Error creating {0} on OpenNebula\n\n'
'Error creating %s on OpenNebula\n\n'
'The following error was returned when trying to '
'instantiate the template: {1}'.format(
vm_['name'],
cret[1]
),
'instantiate the template: %s',
vm_['name'], cret[1],
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
except Exception as exc:
log.error(
'Error creating {0} on OpenNebula\n\n'
'Error creating %s on OpenNebula\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: {1}'.format(
vm_['name'],
str(exc)
),
'run the initial deployment: %s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -1120,7 +1117,7 @@ def create(vm_):
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))
key_filename = config.get_cloud_config_value(
'private_key', vm_, __opts__, search_global=False, default=None
@ -1165,11 +1162,10 @@ def create(vm_):
ret['private_ips'] = private_ip
ret['public_ips'] = []
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](
@ -2729,7 +2725,7 @@ def vm_action(name, kwargs=None, call=None):
response = server.one.vm.action(auth, action, vm_id)
data = {
'action': 'vm.action.' + str(action),
'action': 'vm.action.' + six.text_type(action),
'actioned': response[0],
'vm_id': response[1],
'error_code': response[2],
@ -4488,10 +4484,8 @@ def _get_node(name):
except KeyError:
attempts -= 1
log.debug(
'Failed to get the data for node \'{0}\'. Remaining '
'attempts: {1}'.format(
name, attempts
)
'Failed to get the data for node \'%s\'. Remaining '
'attempts: %s', name, attempts
)
# Just a little delay between attempts...
@ -4598,7 +4592,7 @@ def _xml_to_dict(xml):
key = item.tag.lower()
idx = 1
while key in dicts:
key += str(idx)
key += six.text_type(idx)
idx += 1
if item.text is None:
dicts[key] = _xml_to_dict(item)

View File

@ -190,7 +190,7 @@ Anything else from the create_server_ docs can be passed through here.
.. _vendor: https://docs.openstack.org/os-client-config/latest/user/vendor-support.html
.. _os-client-config: https://docs.openstack.org/os-client-config/latest/user/configuration.html#config-files
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python Libs
import copy
@ -202,7 +202,7 @@ import socket
# Import Salt Libs
import salt.utils.json
import salt.config as config
import salt.ext.six as six
from salt.ext import six
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudExecutionTimeout,
@ -585,7 +585,7 @@ def _clean_create_kwargs(**kwargs):
if key in VALID_OPTS:
if isinstance(value, VALID_OPTS[key]):
continue
log.error('Error {0}: {1} is not of type {2}'.format(key, value, VALID_OPTS[key]))
log.error('Error %s: %s is not of type %s', key, value, VALID_OPTS[key])
kwargs.pop(key)
return __utils__['dictupdate.update'](kwargs, extra)
@ -601,7 +601,7 @@ def request_instance(vm_, conn=None, call=None):
'The request_instance action must be called with -a or --action.'
)
kwargs = copy.deepcopy(vm_)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
log.info('Creating Cloud VM %s', vm_['name'])
__utils__['cloud.check_name'](vm_['name'], 'a-zA-Z0-9._-')
conn = get_conn()
userdata = config.get_cloud_config_value(
@ -627,7 +627,7 @@ def request_instance(vm_, conn=None, call=None):
except shade.exc.OpenStackCloudException as exc:
log.error('Error creating server %s: %s', vm_['name'], exc)
destroy(vm_['name'], conn=conn, call='action')
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))
return show_instance(vm_['name'], conn=conn, call='action')
@ -663,7 +663,7 @@ def create(vm_):
# This was probably created via another process, and doesn't have
# things like salt keys created yet, so let's create them now.
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'{0[name]}\''.format(vm_))
log.debug('Generating minion keys for \'%s\'', vm_['name'])
vm_['priv_key'], vm_['pub_key'] = __utils__['cloud.gen_keys'](
config.get_cloud_config_value(
'keysize',
@ -693,12 +693,12 @@ def create(vm_):
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
log.debug('Using IP address {0}'.format(ip_address))
raise SaltCloudSystemExit(six.text_type(exc))
log.debug('Using IP address %s', ip_address)
salt_interface = __utils__['cloud.get_salt_interface'](vm_, __opts__)
salt_ip_address = preferred_ip(vm_, data[salt_interface])
log.debug('Salt interface set to: {0}'.format(salt_ip_address))
log.debug('Salt interface set to: %s', salt_ip_address)
if not ip_address:
raise SaltCloudSystemExit('A valid IP address was not found')
@ -709,11 +709,10 @@ def create(vm_):
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
event_data = {
@ -761,10 +760,10 @@ def destroy(name, conn=None, call=None):
if not conn:
conn = get_conn()
node = show_instance(name, conn=conn, call='action')
log.info('Destroying VM: {0}'.format(name))
log.info('Destroying VM: %s', name)
ret = conn.delete_server(name)
if ret:
log.info('Destroyed VM: {0}'.format(name))
log.info('Destroyed VM: %s', name)
# Fire destroy action
__utils__['cloud.fire_event'](
'event',
@ -781,7 +780,7 @@ def destroy(name, conn=None, call=None):
__utils__['cloud.cachedir_index_del'](name)
return True
log.error('Failed to Destroy VM: {0}'.format(name))
log.error('Failed to Destroy VM: %s', name)
return False
@ -824,4 +823,4 @@ def call(conn=None, call=None, kwargs=None):
return getattr(conn, func)(**kwargs)
except shade.exc.OpenStackCloudException as exc:
log.error('Error running %s: %s', func, exc)
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))

View File

@ -49,7 +49,7 @@ This driver requires Packet's client library: https://pypi.python.org/pypi/packe
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import time
@ -255,18 +255,11 @@ def _wait_for_status(status_type, object_id, status=None, timeout=500, quiet=Tru
return obj
time.sleep(interval)
if quiet:
log.info('Status for Packet {0} is \'{1}\', waiting for \'{2}\'.'.format(
object_id,
obj.state,
status)
)
else:
log.debug('Status for Packet {0} is \'{1}\', waiting for \'{2}\'.'.format(
object_id,
obj.state,
status)
)
log.log(
logging.INFO if not quiet else logging.DEBUG,
'Status for Packet %s is \'%s\', waiting for \'%s\'.',
object_id, obj.state, status
)
return obj
@ -290,8 +283,9 @@ def is_profile_configured(vm_):
for key in required_keys:
if profile_data.get(key) is None:
log.error(
'both storage_size and storage_tier required for profile {profile}. '
'Please check your profile configuration'.format(profile=vm_['profile'])
'both storage_size and storage_tier required for '
'profile %s. Please check your profile configuration',
vm_['profile']
)
return False
@ -301,10 +295,10 @@ def is_profile_configured(vm_):
if location['code'] == profile_data['location']:
if 'storage' not in location['features']:
log.error(
'Choosen location {location} for profile {profile} does not support storage feature. '
'Please check your profile configuration'.format(
location=location['code'], profile=vm_['profile']
)
'Chosen location %s for profile %s does not '
'support storage feature. Please check your '
'profile configuration',
location['code'], vm_['profile']
)
return False
@ -314,8 +308,10 @@ def is_profile_configured(vm_):
for key in required_keys:
if profile_data.get(key) is None:
log.error(
'both storage_snapshot_count and storage_snapshot_frequency required for profile {profile}. '
'Please check your profile configuration'.format(profile=vm_['profile'])
'both storage_snapshot_count and '
'storage_snapshot_frequency required for profile '
'%s. Please check your profile configuration',
vm_['profile']
)
return False
@ -343,7 +339,7 @@ def create(vm_):
transport=__opts__['transport']
)
log.info('Creating Packet VM {0}'.format(name))
log.info('Creating Packet VM %s', name)
manager = packet.Manager(auth_token=vm_['token'])
@ -365,9 +361,9 @@ def create(vm_):
if device.state != "active":
log.error(
'Error creating {0} on PACKET\n\n'
'while waiting for initial ready status'.format(name),
exc_info_on_loglevel=logging.DEBUG
'Error creating %s on PACKET\n\n'
'while waiting for initial ready status',
name, exc_info_on_loglevel=logging.DEBUG
)
# Define which ssh_interface to use
@ -412,19 +408,18 @@ def create(vm_):
if volume.state != "active":
log.error(
'Error creating {0} on PACKET\n\n'
'while waiting for initial ready status'.format(name),
exc_info_on_loglevel=logging.DEBUG
'Error creating %s on PACKET\n\n'
'while waiting for initial ready status',
name, exc_info_on_loglevel=logging.DEBUG
)
ret.update({'volume': volume.__dict__})
log.info('Created Cloud VM \'{0}\''.format(name))
log.info('Created Cloud VM \'%s\'', name)
log.debug(
'\'{0}\' VM creation details:\n{1}'.format(
name, pprint.pformat(device.__dict__)
)
'\'%s\' VM creation details:\n%s',
name, pprint.pformat(device.__dict__)
)
__utils__['cloud.fire_event'](

View File

@ -21,7 +21,7 @@ Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
@ -176,7 +176,7 @@ def get_image(vm_):
'image', vm_, __opts__, search_global=False
)
for image in images:
if str(vm_image) in (images[image]['name'], images[image]['id']):
if six.text_type(vm_image) in (images[image]['name'], images[image]['id']):
return images[image]['id']
raise SaltCloudNotFound('The specified image could not be found.')
@ -293,17 +293,16 @@ def create(vm_):
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
log.info('Creating Cloud VM %s', vm_['name'])
try:
data = create_node(vm_)
except Exception as exc:
log.error(
'Error creating {0} on PARALLELS\n\n'
'Error creating %s on PARALLELS\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: \n{1}'.format(
vm_['name'], str(exc)
),
'run the initial deployment: \n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -340,7 +339,7 @@ def create(vm_):
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))
comps = data['network']['public-ip']['address'].split('/')
public_ip = comps[0]
@ -348,11 +347,10 @@ def create(vm_):
vm_['ssh_host'] = public_ip
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](
@ -412,17 +410,13 @@ def query(action=None, command=None, args=None, method='GET', data=None):
req.get_method = lambda: method
log.debug('{0} {1}'.format(method, req.get_full_url()))
log.debug('%s %s', method, req.get_full_url())
if data:
log.debug(data)
try:
result = _urlopen(req)
log.debug(
'PARALLELS Response Status Code: {0}'.format(
result.getcode()
)
)
log.debug('PARALLELS Response Status Code: %s', result.getcode())
if 'content-length' in result.headers:
content = result.read()
@ -432,12 +426,7 @@ def query(action=None, command=None, args=None, method='GET', data=None):
return {}
except URLError as exc:
log.error(
'PARALLELS Response Status Code: {0} {1}'.format(
exc.code,
exc.msg
)
)
log.error('PARALLELS Response Status Code: %s %s', exc.code, exc.msg)
root = ET.fromstring(exc.read())
log.error(root)
return {'error': root}

View File

@ -94,7 +94,7 @@ Set ``deploy`` to False if Salt should not be installed on the node.
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import pprint
@ -104,6 +104,7 @@ from salt.utils.versions import LooseVersion
# Import salt libs
import salt.utils.cloud
import salt.utils.files
import salt.utils.stringutils
import salt.config as config
from salt.exceptions import (
SaltCloudConfigError,
@ -347,7 +348,7 @@ def get_size(vm_):
return sizes['Small Instance']
for size in sizes:
if vm_size and str(vm_size) in (str(sizes[size]['id']), str(size)):
if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)):
return sizes[size]
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
@ -567,8 +568,7 @@ def list_nodes(conn=None, call=None):
try:
nodes = conn.list_servers(datacenter_id=datacenter_id)
except PBNotFoundError:
log.error('Failed to get nodes list from datacenter: {0}'.format(
datacenter_id))
log.error('Failed to get nodes list from datacenter: %s', datacenter_id)
raise
for item in nodes['items']:
@ -735,7 +735,7 @@ def get_public_keys(vm_):
ssh_keys = []
with salt.utils.files.fopen(key_filename) as rfh:
for key in rfh.readlines():
ssh_keys.append(key)
ssh_keys.append(salt.utils.stringutils.to_unicode(key))
return ssh_keys
@ -821,19 +821,19 @@ def create(vm_):
try:
data = conn.create_server(datacenter_id=datacenter_id, server=server)
log.info('Create server request ID: {0}'.format(data['requestId']),
exc_info_on_loglevel=logging.DEBUG)
log.info(
'Create server request ID: %s',
data['requestId'], exc_info_on_loglevel=logging.DEBUG
)
_wait_for_completion(conn, data, get_wait_timeout(vm_),
'create_server')
except PBError as exc:
log.error(
'Error creating {0} on ProfitBricks\n\n'
'Error creating %s on ProfitBricks\n\n'
'The following exception was thrown by the profitbricks library '
'when trying to run the initial deployment: \n{1}:\n{2}'.format(
vm_['name'], exc, exc.content
),
exc_info_on_loglevel=logging.DEBUG
'when trying to run the initial deployment: \n%s',
vm_['name'], exc, exc_info_on_loglevel=logging.DEBUG
)
return False
except Exception as exc: # pylint: disable=W0703
@ -858,17 +858,12 @@ def create(vm_):
if not data:
return False
log.debug(
'Loaded node data for {0}:\nname: {1}\nstate: {2}'.format(
vm_['name'],
pprint.pformat(data['name']),
data['state']
)
'Loaded node data for %s:\nname: %s\nstate: %s',
vm_['name'], pprint.pformat(data['name']), data['state']
)
except Exception as err:
log.error(
'Failed to get nodes list: {0}'.format(
err
),
'Failed to get nodes list: %s', err,
# Show the trackback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -904,15 +899,11 @@ def create(vm_):
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc.message))
raise SaltCloudSystemExit(six.text_type(exc.message))
log.debug('VM is now running')
log.info('Created Cloud VM {0}'.format(vm_))
log.debug(
'{0} VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
)
log.info('Created Cloud VM %s', vm_)
log.debug('%s VM creation details:\n%s', vm_, pprint.pformat(data))
signal_event(vm_, 'created', 'created instance')
@ -1227,12 +1218,13 @@ def _wait_for_completion(conn, promise, wait_timeout, msg):
elif operation_result['metadata']['status'] == "FAILED":
raise Exception(
"Request: {0}, requestId: {1} failed to complete:\n{2}".format(
msg, str(promise['requestId']),
msg, six.text_type(promise['requestId']),
operation_result['metadata']['message']
)
)
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
'Timed out waiting for async operation {0} "{1}" to complete.'.format(
msg, six.text_type(promise['requestId'])
)
)

View File

@ -27,7 +27,7 @@ Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
@ -137,7 +137,7 @@ def _authenticate():
full_url, verify=verify_ssl, data=connect_data).json()
ticket = {'PVEAuthCookie': returned_data['data']['ticket']}
csrf = str(returned_data['data']['CSRFPreventionToken'])
csrf = six.text_type(returned_data['data']['CSRFPreventionToken'])
def query(conn_type, option, post_data=None):
@ -150,7 +150,7 @@ def query(conn_type, option, post_data=None):
full_url = 'https://{0}:8006/api2/json/{1}'.format(url, option)
log.debug('{0}: {1} ({2})'.format(conn_type, full_url, post_data))
log.debug('%s: %s (%s)', conn_type, full_url, post_data)
httpheaders = {'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
@ -199,7 +199,7 @@ def _get_vm_by_name(name, allDetails=False):
if name in vms:
return vms[name]
log.info('VM with name "{0}" could not be found.'.format(name))
log.info('VM with name "%s" could not be found.', name)
return False
@ -208,10 +208,10 @@ def _get_vm_by_id(vmid, allDetails=False):
Retrieve a VM based on the ID.
'''
for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)):
if str(vm_details['vmid']) == str(vmid):
if six.text_type(vm_details['vmid']) == six.text_type(vmid):
return vm_details
log.info('VM with ID "{0}" could not be found.'.format(vmid))
log.info('VM with ID "%s" could not be found.', vmid)
return False
@ -232,10 +232,10 @@ def _check_ip_available(ip_addr):
for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)):
vm_config = vm_details['config']
if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr:
log.debug('IP "{0}" is already defined'.format(ip_addr))
log.debug('IP "%s" is already defined', ip_addr)
return False
log.debug('IP \'{0}\' is available to be defined'.format(ip_addr))
log.debug('IP \'%s\' is available to be defined', ip_addr)
return True
@ -250,18 +250,18 @@ def _parse_proxmox_upid(node, vm_=None):
# Parse node response
node = node.split(':')
if node[0] == 'UPID':
ret['node'] = str(node[1])
ret['pid'] = str(node[2])
ret['pstart'] = str(node[3])
ret['starttime'] = str(node[4])
ret['type'] = str(node[5])
ret['vmid'] = str(node[6])
ret['user'] = str(node[7])
ret['node'] = six.text_type(node[1])
ret['pid'] = six.text_type(node[2])
ret['pstart'] = six.text_type(node[3])
ret['starttime'] = six.text_type(node[4])
ret['type'] = six.text_type(node[5])
ret['vmid'] = six.text_type(node[6])
ret['user'] = six.text_type(node[7])
# include the upid again in case we'll need it again
ret['upid'] = str(upid)
ret['upid'] = six.text_type(upid)
if vm_ is not None and 'technology' in vm_:
ret['technology'] = str(vm_['technology'])
ret['technology'] = six.text_type(vm_['technology'])
return ret
@ -271,13 +271,13 @@ def _lookup_proxmox_task(upid):
Retrieve the (latest) logs and retrieve the status for a UPID.
This can be used to verify whether a task has completed.
'''
log.debug('Getting creation status for upid: {0}'.format(upid))
log.debug('Getting creation status for upid: %s', upid)
tasks = query('get', 'cluster/tasks')
if tasks:
for task in tasks:
if task['upid'] == upid:
log.debug('Found upid task: {0}'.format(task))
log.debug('Found upid task: %s', task)
return task
return False
@ -292,7 +292,7 @@ def get_resources_nodes(call=None, resFilter=None):
salt-cloud -f get_resources_nodes my-proxmox-config
'''
log.debug('Getting resource: nodes.. (filter: {0})'.format(resFilter))
log.debug('Getting resource: nodes.. (filter: %s)', resFilter)
resources = query('get', 'cluster/resources')
ret = {}
@ -302,11 +302,11 @@ def get_resources_nodes(call=None, resFilter=None):
ret[name] = resource
if resFilter is not None:
log.debug('Filter given: {0}, returning requested '
'resource: nodes'.format(resFilter))
log.debug('Filter given: %s, returning requested '
'resource: nodes', resFilter)
return ret[resFilter]
log.debug('Filter not given: {0}, returning all resource: nodes'.format(ret))
log.debug('Filter not given: %s, returning all resource: nodes', ret)
return ret
@ -320,7 +320,7 @@ def get_resources_vms(call=None, resFilter=None, includeConfig=True):
salt-cloud -f get_resources_vms my-proxmox-config
'''
log.debug('Getting resource: vms.. (filter: {0})'.format(resFilter))
log.debug('Getting resource: vms.. (filter: %s)', resFilter)
resources = query('get', 'cluster/resources')
ret = {}
@ -338,11 +338,11 @@ def get_resources_vms(call=None, resFilter=None, includeConfig=True):
)
if resFilter is not None:
log.debug('Filter given: {0}, returning requested '
'resource: nodes'.format(resFilter))
log.debug('Filter given: %s, returning requested '
'resource: nodes', resFilter)
return ret[resFilter]
log.debug('Filter not given: {0}, returning all resource: nodes'.format(ret))
log.debug('Filter not given: %s, returning all resource: nodes', ret)
return ret
@ -431,15 +431,15 @@ def list_nodes(call=None):
ret = {}
for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)):
log.debug('VM_Name: {0}'.format(vm_name))
log.debug('vm_details: {0}'.format(vm_details))
log.debug('VM_Name: %s', vm_name)
log.debug('vm_details: %s', vm_details)
# Limit resultset on what Salt-cloud demands:
ret[vm_name] = {}
ret[vm_name]['id'] = str(vm_details['vmid'])
ret[vm_name]['image'] = str(vm_details['vmid'])
ret[vm_name]['size'] = str(vm_details['disk'])
ret[vm_name]['state'] = str(vm_details['status'])
ret[vm_name]['id'] = six.text_type(vm_details['vmid'])
ret[vm_name]['image'] = six.text_type(vm_details['vmid'])
ret[vm_name]['size'] = six.text_type(vm_details['disk'])
ret[vm_name]['state'] = six.text_type(vm_details['status'])
# Figure out which is which to put it in the right column
private_ips = []
@ -449,9 +449,9 @@ def list_nodes(call=None):
ips = vm_details['config']['ip_address'].split(' ')
for ip_ in ips:
if IP(ip_).iptype() == 'PRIVATE':
private_ips.append(str(ip_))
private_ips.append(six.text_type(ip_))
else:
public_ips.append(str(ip_))
public_ips.append(six.text_type(ip_))
ret[vm_name]['private_ips'] = private_ips
ret[vm_name]['public_ips'] = public_ips
@ -523,29 +523,28 @@ def create(vm_):
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
log.info('Creating Cloud VM %s', vm_['name'])
if 'use_dns' in vm_ and 'ip_address' not in vm_:
use_dns = vm_['use_dns']
if use_dns:
from socket import gethostbyname, gaierror
try:
ip_address = gethostbyname(str(vm_['name']))
ip_address = gethostbyname(six.text_type(vm_['name']))
except gaierror:
log.debug('Resolving of {hostname} failed'.format(hostname=str(vm_['name'])))
log.debug('Resolving of %s failed', vm_['name'])
else:
vm_['ip_address'] = str(ip_address)
vm_['ip_address'] = six.text_type(ip_address)
try:
newid = _get_next_vmid()
data = create_node(vm_, newid)
except Exception as exc:
log.error(
'Error creating {0} on PROXMOX\n\n'
'Error creating %s on PROXMOX\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: \n{1}'.format(
vm_['name'], str(exc)
),
'run the initial deployment: \n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -562,15 +561,15 @@ def create(vm_):
# Determine which IP to use in order of preference:
if 'ip_address' in vm_:
ip_address = str(vm_['ip_address'])
ip_address = six.text_type(vm_['ip_address'])
elif 'public_ips' in data:
ip_address = str(data['public_ips'][0]) # first IP
ip_address = six.text_type(data['public_ips'][0]) # first IP
elif 'private_ips' in data:
ip_address = str(data['private_ips'][0]) # first IP
ip_address = six.text_type(data['private_ips'][0]) # first IP
else:
raise SaltCloudExecutionFailure # err.. not a good idea i reckon
log.debug('Using IP address {0}'.format(ip_address))
log.debug('Using IP address %s', ip_address)
# wait until the vm has been created so we can start it
if not wait_for_created(data['upid'], timeout=300):
@ -578,11 +577,11 @@ def create(vm_):
# VM has been created. Starting..
if not start(name, vmid, call='action'):
log.error('Node {0} ({1}) failed to start!'.format(name, vmid))
log.error('Node %s (%s) failed to start!', name, vmid)
raise SaltCloudExecutionFailure
# Wait until the VM has fully started
log.debug('Waiting for state "running" for vm {0} on {1}'.format(vmid, host))
log.debug('Waiting for state "running" for vm %s on %s', vmid, host)
if not wait_for_state(vmid, 'running'):
return {'Error': 'Unable to start {0}, command timed out'.format(name)}
@ -602,11 +601,10 @@ def create(vm_):
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
# Report success!
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](
@ -659,7 +657,7 @@ def _get_properties(path="", method="GET", forced_params=None):
# get list of properties for requested method
props = sub['info'][method]['parameters']['properties'].keys()
except KeyError as exc:
log.error('method not found: "{0}"'.format(str(exc)))
log.error('method not found: "%s"', exc)
except:
raise
for prop in props:
@ -668,7 +666,7 @@ def _get_properties(path="", method="GET", forced_params=None):
# "prop[n]"
if numerical:
for i in range(10):
parameters.add(numerical.group(1) + str(i))
parameters.add(numerical.group(1) + six.text_type(i))
else:
parameters.add(prop)
return parameters
@ -766,8 +764,7 @@ def create_node(vm_, newid):
sock_dir=__opts__['sock_dir'],
)
log.debug('Preparing to generate a node using these parameters: {0} '.format(
newnode))
log.debug('Preparing to generate a node using these parameters: %s ', newnode)
if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu':
postParams = {}
postParams['newid'] = newnode['vmid']
@ -847,18 +844,16 @@ def wait_for_state(vmid, state, timeout=300):
while True:
if node['status'] == state:
log.debug('Host {0} is now in "{1}" state!'.format(
node['name'], state
))
log.debug('Host %s is now in "%s" state!', node['name'], state)
return True
time.sleep(1)
if time.time() - start_time > timeout:
log.debug('Timeout reached while waiting for {0} to '
'become {1}'.format(node['name'], state))
log.debug('Timeout reached while waiting for %s to become %s',
node['name'], state)
return False
node = get_vm_status(vmid=vmid)
log.debug('State for {0} is: "{1}" instead of "{2}"'.format(
node['name'], node['status'], state))
log.debug('State for %s is: "%s" instead of "%s"',
node['name'], node['status'], state)
def destroy(name, call=None):
@ -921,30 +916,30 @@ def set_vm_status(status, name=None, vmid=None):
'''
Convenience function for setting VM status
'''
log.debug('Set status to {0} for {1} ({2})'.format(status, name, vmid))
log.debug('Set status to %s for %s (%s)', status, name, vmid)
if vmid is not None:
log.debug('set_vm_status: via ID - VMID {0} ({1}): {2}'.format(
vmid, name, status))
log.debug('set_vm_status: via ID - VMID %s (%s): %s',
vmid, name, status)
vmobj = _get_vm_by_id(vmid)
else:
log.debug('set_vm_status: via name - VMID {0} ({1}): {2}'.format(
vmid, name, status))
log.debug('set_vm_status: via name - VMID %s (%s): %s',
vmid, name, status)
vmobj = _get_vm_by_name(name)
if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj:
log.error('Unable to set status {0} for {1} ({2})'.format(
status, name, vmid))
log.error('Unable to set status %s for %s (%s)',
status, name, vmid)
raise SaltCloudExecutionTimeout
log.debug("VM_STATUS: Has desired info ({0}). Setting status..".format(vmobj))
log.debug("VM_STATUS: Has desired info (%s). Setting status..", vmobj)
data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format(
vmobj['node'], vmobj['type'], vmobj['vmid'], status))
result = _parse_proxmox_upid(data, vmobj)
if result is not False and result is not None:
log.debug('Set_vm_status action result: {0}'.format(result))
log.debug('Set_vm_status action result: %s', result)
return True
return False
@ -955,20 +950,20 @@ def get_vm_status(vmid=None, name=None):
Get the status for a VM, either via the ID or the hostname
'''
if vmid is not None:
log.debug('get_vm_status: VMID {0}'.format(vmid))
log.debug('get_vm_status: VMID %s', vmid)
vmobj = _get_vm_by_id(vmid)
elif name is not None:
log.debug('get_vm_status: name {0}'.format(name))
log.debug('get_vm_status: name %s', name)
vmobj = _get_vm_by_name(name)
else:
log.debug("get_vm_status: No ID or NAME given")
raise SaltCloudExecutionFailure
log.debug('VM found: {0}'.format(vmobj))
log.debug('VM found: %s', vmobj)
if vmobj is not None and 'node' in vmobj:
log.debug("VM_STATUS: Has desired info. Retrieving.. ({0})".format(
vmobj['name']))
log.debug("VM_STATUS: Has desired info. Retrieving.. (%s)",
vmobj['name'])
data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format(
vmobj['node'], vmobj['type'], vmobj['vmid']))
return data
@ -992,9 +987,9 @@ def start(name, vmid=None, call=None):
'The start action must be called with -a or --action.'
)
log.debug('Start: {0} ({1}) = Start'.format(name, vmid))
log.debug('Start: %s (%s) = Start', name, vmid)
if not set_vm_status('start', name, vmid=vmid):
log.error('Unable to bring VM {0} ({1}) up..'.format(name, vmid))
log.error('Unable to bring VM %s (%s) up..', name, vmid)
raise SaltCloudExecutionFailure
# xxx: TBD: Check here whether the status was actually changed to 'started'
@ -1018,7 +1013,7 @@ def stop(name, vmid=None, call=None):
)
if not set_vm_status('stop', name, vmid=vmid):
log.error('Unable to bring VM {0} ({1}) down..'.format(name, vmid))
log.error('Unable to bring VM %s (%s) down..', name, vmid)
raise SaltCloudExecutionFailure
# xxx: TBD: Check here whether the status was actually changed to 'stopped'
@ -1042,7 +1037,7 @@ def shutdown(name=None, vmid=None, call=None):
)
if not set_vm_status('shutdown', name, vmid=vmid):
log.error('Unable to shut VM {0} ({1}) down..'.format(name, vmid))
log.error('Unable to shut VM %s (%s) down..', name, vmid)
raise SaltCloudExecutionFailure
# xxx: TBD: Check here whether the status was actually changed to 'stopped'

View File

@ -10,7 +10,7 @@ module instead.
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import salt libs
import salt.utils.data

View File

@ -27,7 +27,7 @@ Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import time
import pprint
import logging
@ -36,6 +36,7 @@ import base64
from hashlib import sha256
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves.urllib.parse import quote as _quote # pylint: disable=import-error,no-name-in-module
from salt.ext.six.moves import range
import salt.utils.cloud
@ -114,7 +115,7 @@ def _compute_signature(parameters, access_key_secret, method, path):
keys = sorted(parameters.keys())
pairs = []
for key in keys:
val = str(parameters[key]).encode('utf-8')
val = six.text_type(parameters[key]).encode('utf-8')
pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
qs = '&'.join(pairs)
string_to_sign += qs
@ -226,7 +227,7 @@ def avail_locations(call=None):
for region in items['zone_set']:
result[region['zone_id']] = {}
for key in region:
result[region['zone_id']][key] = str(region[key])
result[region['zone_id']][key] = six.text_type(region[key])
return result
@ -237,7 +238,7 @@ def _get_location(vm_=None):
'''
locations = avail_locations()
vm_location = str(config.get_cloud_config_value(
vm_location = six.text_type(config.get_cloud_config_value(
'zone', vm_, __opts__, search_global=False
))
@ -308,7 +309,7 @@ def _get_image(vm_):
Return the VM's image. Used by create().
'''
images = avail_images()
vm_image = str(config.get_cloud_config_value(
vm_image = six.text_type(config.get_cloud_config_value(
'image', vm_, __opts__, search_global=False
))
@ -429,7 +430,7 @@ def _get_size(vm_):
'''
sizes = avail_sizes()
vm_size = str(config.get_cloud_config_value(
vm_size = six.text_type(config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
))
@ -496,9 +497,7 @@ def list_nodes_full(call=None):
}
items = query(params=params)
log.debug('Total {0} instances found in zone {1}'.format(
items['total_count'], zone)
)
log.debug('Total %s instances found in zone %s', items['total_count'], zone)
result = {}
@ -675,7 +674,7 @@ def create(vm_):
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
log.info('Creating Cloud VM %s', vm_['name'])
# params
params = {
@ -721,24 +720,20 @@ def create(vm_):
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))
private_ip = data['private_ips'][0]
log.debug('VM {0} is now running'.format(private_ip))
log.debug('VM %s is now running', private_ip)
vm_['ssh_host'] = private_ip
# The instance is booted and accessible, let's Salt it!
__utils__['cloud.bootstrap'](vm_, __opts__)
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
)
log.debug('\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data))
__utils__['cloud.fire_event'](
'event',
@ -783,7 +778,7 @@ def start(instance_id, call=None):
'The stop action must be called with -a or --action.'
)
log.info('Starting instance {0}'.format(instance_id))
log.info('Starting instance %s', instance_id)
params = {
'action': 'StartInstances',
@ -811,7 +806,7 @@ def stop(instance_id, force=False, call=None):
'The stop action must be called with -a or --action.'
)
log.info('Stopping instance {0}'.format(instance_id))
log.info('Stopping instance %s', instance_id)
params = {
'action': 'StopInstances',
@ -839,7 +834,7 @@ def reboot(instance_id, call=None):
'The stop action must be called with -a or --action.'
)
log.info('Rebooting instance {0}'.format(instance_id))
log.info('Rebooting instance %s', instance_id)
params = {
'action': 'RestartInstances',

View File

@ -18,7 +18,7 @@ files as described in the
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time

View File

@ -24,13 +24,14 @@ the cloud configuration at ``/etc/salt/cloud.providers`` or
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import os
import time
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves import range
import salt.utils.cloud
import salt.utils.json
@ -103,7 +104,7 @@ def avail_images(call=None):
for image in items['images']:
ret[image['id']] = {}
for item in image:
ret[image['id']][item] = str(image[item])
ret[image['id']][item] = six.text_type(image[item])
return ret
@ -177,7 +178,7 @@ def get_image(server_):
''' Return the image object to use.
'''
images = avail_images()
server_image = str(config.get_cloud_config_value(
server_image = six.text_type(config.get_cloud_config_value(
'image', server_, __opts__, search_global=False
))
for image in images:
@ -226,7 +227,7 @@ def create(server_):
transport=__opts__['transport']
)
log.info('Creating a BareMetal server {0}'.format(server_['name']))
log.info('Creating a BareMetal server %s', server_['name'])
access_key = config.get_cloud_config_value(
'access_key', get_configured_provider(), __opts__, search_global=False
@ -273,12 +274,10 @@ def create(server_):
ret = create_node(kwargs)
except Exception as exc:
log.error(
'Error creating {0} on Scaleway\n\n'
'Error creating %s on Scaleway\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: {1}'.format(
server_['name'],
str(exc)
),
'run the initial deployment: %s',
server_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -308,7 +307,7 @@ def create(server_):
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
raise SaltCloudSystemExit(six.text_type(exc))
server_['ssh_host'] = data['public_ip']['address']
server_['ssh_password'] = ssh_password
@ -317,11 +316,10 @@ def create(server_):
ret.update(data)
log.info('Created BareMetal server \'{0[name]}\''.format(server_))
log.info('Created BareMetal server \'%s\'', server_['name'])
log.debug(
'\'{0[name]}\' BareMetal server creation details:\n{1}'.format(
server_, pprint.pformat(data)
)
'\'%s\' BareMetal server creation details:\n%s',
server_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](
@ -340,7 +338,7 @@ def query(method='servers', server_id=None, command=None, args=None,
http_method='get'):
''' Make a call to the Scaleway API.
'''
base_path = str(config.get_cloud_config_value(
base_path = six.text_type(config.get_cloud_config_value(
'api_root',
get_configured_provider(),
__opts__,
@ -419,10 +417,8 @@ def _get_node(name):
return list_nodes_full()[name]
except KeyError:
log.debug(
'Failed to get the data for node \'{0}\'. Remaining '
'attempts: {1}'.format(
name, attempt
)
'Failed to get the data for node \'%s\'. Remaining '
'attempts: %s', name, attempt
)
# Just a little delay between attempts...
time.sleep(0.5)

View File

@ -26,7 +26,7 @@ SoftLayer salt.cloud modules. See: https://pypi.python.org/pypi/SoftLayer
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
@ -278,7 +278,7 @@ def create(vm_):
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(name))
log.info('Creating Cloud VM %s', name)
conn = get_conn()
kwargs = {
'hostname': hostname,
@ -299,7 +299,7 @@ def create(vm_):
disks = vm_['disk_size']
if isinstance(disks, int):
disks = [str(disks)]
disks = [six.text_type(disks)]
elif isinstance(disks, six.string_types):
disks = [size.strip() for size in disks.split(',')]
@ -308,18 +308,18 @@ def create(vm_):
# device number '1' is reserved for the SWAP disk
if count == 1:
count += 1
block_device = {'device': str(count),
'diskImage': {'capacity': str(disk)}}
block_device = {'device': six.text_type(count),
'diskImage': {'capacity': six.text_type(disk)}}
kwargs['blockDevices'].append(block_device)
count += 1
# Upper bound must be 5 as we're skipping '1' for the SWAP disk ID
if count > 5:
log.warning('More that 5 disks were specified for {0} .'
log.warning('More that 5 disks were specified for %s .'
'The first 5 disks will be applied to the VM, '
'but the remaining disks will be ignored.\n'
'Please adjust your cloud configuration to only '
'specify a maximum of 5 disks.'.format(name))
'specify a maximum of 5 disks.', name)
break
elif 'global_identifier' in vm_:
@ -392,11 +392,9 @@ def create(vm_):
response = conn.createObject(kwargs)
except Exception as exc:
log.error(
'Error creating {0} on SoftLayer\n\n'
'Error creating %s on SoftLayer\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: \n{1}'.format(
name, str(exc)
),
'run the initial deployment: \n%s', name, exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -551,7 +549,7 @@ def list_nodes(call=None):
if 'primaryBackendIpAddress' in nodes[node]:
ret[node]['private_ips'] = nodes[node]['primaryBackendIpAddress']
if 'status' in nodes[node]:
ret[node]['state'] = str(nodes[node]['status']['name'])
ret[node]['state'] = six.text_type(nodes[node]['status']['name'])
return ret

View File

@ -26,7 +26,7 @@ SoftLayer salt.cloud modules. See: https://pypi.python.org/pypi/SoftLayer
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import decimal
@ -246,7 +246,7 @@ def create(vm_):
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(name))
log.info('Creating Cloud VM %s', name)
conn = get_conn(service='SoftLayer_Product_Order')
kwargs = {
'complexType': 'SoftLayer_Container_Product_Order_Hardware_Server',
@ -338,11 +338,9 @@ def create(vm_):
#response = conn.verifyOrder(kwargs)
except Exception as exc:
log.error(
'Error creating {0} on SoftLayer\n\n'
'Error creating %s on SoftLayer\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: \n{1}'.format(
name, str(exc)
),
'run the initial deployment: \n%s', name, exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)

View File

@ -16,7 +16,7 @@ files as described in the
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import tempfile

View File

@ -18,7 +18,7 @@ Dicts provided by salt:
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
@ -200,7 +200,7 @@ def create(vm_info):
if len(ips):
ip = ips[interface_index]
log.info("[ {0} ] IPv4 is: {1}".format(vm_name, ip))
log.info("[ %s ] IPv4 is: %s", vm_name, ip)
# ssh or smb using ip and install salt only if deploy is True
if deploy:
vm_info['key_filename'] = key_filename
@ -363,13 +363,13 @@ def destroy(name, call=None):
def start(name, call=None):
"""
'''
Start a machine.
@param name: Machine to start
@type name: str
@param call: Must be "action"
@type call: str
"""
'''
if call != 'action':
raise SaltCloudSystemExit(
'The instance action must be called with -a or --action.'

File diff suppressed because it is too large Load Diff

View File

@ -34,7 +34,7 @@ Set up the cloud profile at ``/etc/salt/cloud.profiles`` or
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import pprint
import logging
import time
@ -218,7 +218,7 @@ def _lookup_vultrid(which_key, availkey, keyname):
if DETAILS == {}:
_cache_provider_details()
which_key = str(which_key)
which_key = six.text_type(which_key)
try:
return DETAILS[availkey][which_key][keyname]
except KeyError:
@ -255,17 +255,17 @@ def create(vm_):
osid = _lookup_vultrid(vm_['image'], 'avail_images', 'OSID')
if not osid:
log.error('Vultr does not have an image with id or name {0}'.format(vm_['image']))
log.error('Vultr does not have an image with id or name %s', vm_['image'])
return False
vpsplanid = _lookup_vultrid(vm_['size'], 'avail_sizes', 'VPSPLANID')
if not vpsplanid:
log.error('Vultr does not have a size with id or name {0}'.format(vm_['size']))
log.error('Vultr does not have a size with id or name %s', vm_['size'])
return False
dcid = _lookup_vultrid(vm_['location'], 'avail_locations', 'DCID')
if not dcid:
log.error('Vultr does not have a location with id or name {0}'.format(vm_['location']))
log.error('Vultr does not have a location with id or name %s', vm_['location'])
return False
kwargs = {
@ -277,7 +277,7 @@ def create(vm_):
'enable_private_network': enable_private_network,
}
log.info('Creating Cloud VM {0}'.format(vm_['name']))
log.info('Creating Cloud VM %s', vm_['name'])
__utils__['cloud.fire_event'](
'event',
@ -293,8 +293,10 @@ def create(vm_):
try:
data = _query('server/create', method='POST', data=_urlencode(kwargs))
if int(data.get('status', '200')) >= 300:
log.error('Error creating {0} on Vultr\n\n'
'Vultr API returned {1}\n'.format(vm_['name'], data))
log.error(
'Error creating %s on Vultr\n\n'
'Vultr API returned %s\n', vm_['name'], data
)
log.error('Status 412 may mean that you are requesting an\n'
'invalid location, image, or size.')
@ -309,11 +311,10 @@ def create(vm_):
return False
except Exception as exc:
log.error(
'Error creating {0} on Vultr\n\n'
'Error creating %s on Vultr\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: \n{1}'.format(
vm_['name'], str(exc)
),
'run the initial deployment:\n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
@ -332,7 +333,7 @@ def create(vm_):
Wait for the IP address to become available
'''
data = show_instance(vm_['name'], call='action')
main_ip = str(data.get('main_ip', '0'))
main_ip = six.text_type(data.get('main_ip', '0'))
if main_ip.startswith('0'):
time.sleep(3)
return False
@ -345,7 +346,7 @@ def create(vm_):
data = show_instance(vm_['name'], call='action')
# print("Waiting for default password")
# pprint.pprint(data)
if str(data.get('default_password', '')) == '':
if six.text_type(data.get('default_password', '')) == '':
time.sleep(1)
return False
return data['default_password']
@ -357,7 +358,7 @@ def create(vm_):
data = show_instance(vm_['name'], call='action')
# print("Waiting for status normal")
# pprint.pprint(data)
if str(data.get('status', '')) != 'active':
if six.text_type(data.get('status', '')) != 'active':
time.sleep(1)
return False
return data['default_password']
@ -369,7 +370,7 @@ def create(vm_):
data = show_instance(vm_['name'], call='action')
# print("Waiting for server state ok")
# pprint.pprint(data)
if str(data.get('server_state', '')) != 'ok':
if six.text_type(data.get('server_state', '')) != 'ok':
time.sleep(1)
return False
return data['default_password']
@ -408,11 +409,10 @@ def create(vm_):
ret.update(show_instance(vm_['name'], call='action'))
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](

View File

@ -60,13 +60,14 @@ Example profile configuration:
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
@ -160,23 +161,21 @@ def _get_session():
)
try:
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug('url: {} user: {} password: {}, originator: {}'.format(
url,
user,
'XXX-pw-redacted-XXX',
originator))
log.debug(
'url: %s user: %s password: %s, originator: %s',
url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
except XenAPI.Failure as ex:
pool_master_addr = str(ex.__dict__['details'][1])
pool_master_addr = six.text_type(ex.__dict__['details'][1])
slash_parts = url.split('/')
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
session = XenAPI.Session(new_url)
log.debug('session is -> url: {} user: {} password: {}, originator:{}'.format(
new_url,
user,
'XXX-pw-redacted-XXX',
originator))
log.debug(
'session is -> url: %s user: %s password: %s, originator:%s',
new_url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
return session
@ -201,8 +200,10 @@ def list_nodes():
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug('VM {}, doesnt have base_template_name attribute'.format(
record['name_label']))
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
@ -241,7 +242,7 @@ def get_vm_ip(name=None, session=None, call=None):
cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
ret, subnet = cidr.split('/')
log.debug(
'VM vif returned for instance: {} ip: {}'.format(name, ret))
'VM vif returned for instance: %s ip: %s', name, ret)
return ret
# -- try to get ip from get tools metrics
vgm = session.xenapi.VM.get_guest_metrics(vm)
@ -249,9 +250,9 @@ def get_vm_ip(name=None, session=None, call=None):
net = session.xenapi.VM_guest_metrics.get_networks(vgm)
if "0/ip" in net.keys():
log.debug(
'VM guest metrics returned for instance: {} 0/ip: {}'.format(
name,
net["0/ip"]))
'VM guest metrics returned for instance: %s 0/ip: %s',
name, net["0/ip"]
)
ret = net["0/ip"]
# except Exception as ex:
except XenAPI.Failure:
@ -274,8 +275,10 @@ def set_vm_ip(name=None,
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug('Setting name: {} ipv4_cidr: {} ipv4_gw: {} mode: {}'.format(
name, ipv4_cidr, ipv4_gw, mode))
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
@ -286,8 +289,8 @@ def set_vm_ip(name=None,
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
log.debug('There are {} vifs.'.format(len(vifs)))
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
@ -321,8 +324,10 @@ def list_nodes_full(session=None):
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug('VM {}, doesnt have base_template_name attribute'.format(
record['name_label']))
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
@ -339,9 +344,9 @@ def list_nodes_full(session=None):
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: {}'.format(ret))
log.debug('provider: {}'.format(provider))
log.debug('__opts__: {}'.format(__opts__))
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
@ -378,7 +383,7 @@ def vdi_list(call=None, kwargs=None):
if call == 'action':
raise SaltCloudException(
'This function must be called with -f or --function.')
log.debug('kwargs is {}'.format(kwargs))
log.debug('kwargs is %s', kwargs)
if kwargs is not None:
if 'terse' in kwargs:
if kwargs['terse'] == 'True':
@ -476,7 +481,7 @@ def show_instance(name, session=None, call=None):
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
log.debug('show_instance-> name: {} session: {}'.format(name, session))
log.debug('show_instance-> name: %s session: %s', name, session)
if session is None:
session = _get_session()
vm = _get_vm(name, session=session)
@ -486,8 +491,10 @@ def show_instance(name, session=None, call=None):
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug('VM {}, doesnt have base_template_name attribute'.format(
record['name_label']))
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
@ -519,7 +526,7 @@ def _determine_resource_pool(session, vm_):
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('resource pool: {}'.format(pool_record['name_label']))
log.debug('resource pool: %s', pool_record['name_label'])
return resource_pool
@ -535,11 +542,11 @@ def _determine_storage_repo(session, resource_pool, vm_):
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: {}'.format(sr_record['name_label']))
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: {}'.format(storage_repo))
log.debug('storage repository: %s', storage_repo)
return storage_repo
@ -576,7 +583,7 @@ def create(vm_):
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding {} to cloud cache.'.format(name))
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
@ -595,7 +602,7 @@ def create(vm_):
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: {} '.format(clone))
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
@ -630,15 +637,15 @@ def create(vm_):
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to {}'.format(deploy))
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: {}'.format(
vm_['deploy'])
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
@ -667,12 +674,12 @@ def _deploy_salt_minion(name, session, vm_):
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
log.debug('{} has IP of {}'.format(name, vm_['ssh_host']))
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on {0}'.format(name))
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: {}'.format(boot_ret))
log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
@ -705,8 +712,10 @@ def _wait_for_ip(name, session):
status = None
check_time = datetime.now()
delta = check_time - start_time
log.debug('Waited {} seconds for {} to report ip address...'.format(
delta.seconds, name))
log.debug(
'Waited %s seconds for %s to report ip address...',
delta.seconds, name
)
if delta.seconds > 180:
log.warn('Timeout getting IP address')
break
@ -720,12 +729,12 @@ def _run_async_task(task=None, session=None):
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running {}'.format(task_name))
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress {}%'.format(str(progress)))
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task {}'.format(task_name))
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
@ -739,7 +748,7 @@ def _clone_vm(image=None, name=None, session=None):
'''
if session is None:
session = _get_session()
log.debug('Creating VM {0} by cloning {1}'.format(name, image))
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
@ -759,7 +768,7 @@ def _copy_vm(template=None, name=None, session=None, sr=None):
'''
if session is None:
session = _get_session()
log.debug('Creating VM {0} by copying {1}'.format(name, template))
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
@ -771,7 +780,7 @@ def _provision_vm(name=None, session=None):
'''
if session is None:
session = _get_session()
log.info('Provisioning VM {0}'.format(name))
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session)
@ -792,7 +801,7 @@ def start(name, call=None, session=None):
)
if session is None:
session = _get_session()
log.info('Starting VM {0}'.format(name))
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.start(vm, False, True)
_run_async_task(task, session)
@ -814,7 +823,7 @@ def pause(name, call=None, session=None):
)
if session is None:
session = _get_session()
log.info('Pausing VM {0}'.format(name))
log.info('Pausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.pause(vm)
_run_async_task(task, session)
@ -836,7 +845,7 @@ def unpause(name, call=None, session=None):
)
if session is None:
session = _get_session()
log.info('Unpausing VM {0}'.format(name))
log.info('Unpausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.unpause(vm)
_run_async_task(task, session)
@ -858,7 +867,7 @@ def suspend(name, call=None, session=None):
)
if session is None:
session = _get_session()
log.info('Suspending VM {0}'.format(name))
log.info('Suspending VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.suspend(vm)
_run_async_task(task, session)
@ -880,7 +889,7 @@ def resume(name, call=None, session=None):
)
if session is None:
session = _get_session()
log.info('Resuming VM {0}'.format(name))
log.info('Resuming VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.resume(vm, False, True)
_run_async_task(task, session)
@ -919,7 +928,7 @@ def shutdown(name, call=None, session=None):
)
if session is None:
session = _get_session()
log.info('Starting VM {0}'.format(name))
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.shutdown(vm)
_run_async_task(task, session)
@ -941,7 +950,7 @@ def reboot(name, call=None, session=None):
)
if session is None:
session = _get_session()
log.info('Starting VM {0}'.format(name))
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
power_state = session.xenapi.VM.get_power_state(vm)
if power_state == 'Running':

View File

@ -7,7 +7,7 @@
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import salt libs
import salt.defaults.exitcodes

View File

@ -3,12 +3,11 @@
The generic libcloud template used to create the connections and deploy the
cloud virtual machines
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
import logging
from salt.ext.six import string_types
from salt.ext import six
from salt.ext.six.moves import zip
@ -80,7 +79,7 @@ def node_state(id_):
'paused': 'PAUSED',
'reconfiguring': 'RECONFIGURING'
}
return states_str[id_] if isinstance(id_, string_types) else states_int[id_]
return states_str[id_] if isinstance(id_, six.string_types) else states_int[id_]
def check_libcloud_version(reqver=LIBCLOUD_MINIMAL_VERSION, why=None):
@ -99,7 +98,7 @@ def check_libcloud_version(reqver=LIBCLOUD_MINIMAL_VERSION, why=None):
except ImportError:
raise ImportError(
'salt-cloud requires >= libcloud {0} which is not installed'.format(
'.'.join([str(num) for num in reqver])
'.'.join([six.text_type(num) for num in reqver])
)
)
@ -108,7 +107,7 @@ def check_libcloud_version(reqver=LIBCLOUD_MINIMAL_VERSION, why=None):
errormsg = 'Your version of libcloud is {0}. '.format(libcloud.__version__)
errormsg += 'salt-cloud requires >= libcloud {0}'.format(
'.'.join([str(num) for num in reqver])
'.'.join([six.text_type(num) for num in reqver])
)
if why:
errormsg += ' for {0}'.format(why)
@ -144,10 +143,10 @@ def avail_locations(conn=None, call=None):
locations = conn.list_locations()
ret = {}
for img in locations:
if isinstance(img.name, string_types) and not six.PY3:
if isinstance(img.name, six.string_types) and not six.PY3:
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name)
img_name = str(img.name) # future lint: disable=blacklisted-function
ret[img_name] = {}
for attr in dir(img):
@ -155,7 +154,7 @@ def avail_locations(conn=None, call=None):
continue
attr_value = getattr(img, attr)
if isinstance(attr_value, string_types) and not six.PY3:
if isinstance(attr_value, six.string_types) and not six.PY3:
attr_value = attr_value.encode(
'ascii', 'salt-cloud-force-ascii'
)
@ -181,17 +180,17 @@ def avail_images(conn=None, call=None):
images = conn.list_images()
ret = {}
for img in images:
if isinstance(img.name, string_types) and not six.PY3:
if isinstance(img.name, six.string_types) and not six.PY3:
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name)
img_name = str(img.name) # future lint: disable=blacklisted-function
ret[img_name] = {}
for attr in dir(img):
if attr.startswith('_') or attr in ('driver', 'get_uuid'):
continue
attr_value = getattr(img, attr)
if isinstance(attr_value, string_types) and not six.PY3:
if isinstance(attr_value, six.string_types) and not six.PY3:
attr_value = attr_value.encode(
'ascii', 'salt-cloud-force-ascii'
)
@ -216,10 +215,10 @@ def avail_sizes(conn=None, call=None):
sizes = conn.list_sizes()
ret = {}
for size in sizes:
if isinstance(size.name, string_types) and not six.PY3:
if isinstance(size.name, six.string_types) and not six.PY3:
size_name = size.name.encode('ascii', 'salt-cloud-force-ascii')
else:
size_name = str(size.name)
size_name = str(size.name) # future lint: disable=blacklisted-function
ret[size_name] = {}
for attr in dir(size):
@ -231,7 +230,7 @@ def avail_sizes(conn=None, call=None):
except Exception:
pass
if isinstance(attr_value, string_types) and not six.PY3:
if isinstance(attr_value, six.string_types) and not six.PY3:
attr_value = attr_value.encode(
'ascii', 'salt-cloud-force-ascii'
)
@ -251,15 +250,15 @@ def get_location(conn, vm_):
)
for img in locations:
if isinstance(img.id, string_types) and not six.PY3:
if isinstance(img.id, six.string_types) and not six.PY3:
img_id = img.id.encode('ascii', 'salt-cloud-force-ascii')
else:
img_id = str(img.id)
img_id = str(img.id) # future lint: disable=blacklisted-function
if isinstance(img.name, string_types) and not six.PY3:
if isinstance(img.name, six.string_types) and not six.PY3:
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name)
img_name = str(img.name) # future lint: disable=blacklisted-function
if vm_location and vm_location in (img_id, img_name):
return img
@ -282,15 +281,15 @@ def get_image(conn, vm_):
vm_image = vm_image.encode('ascii', 'salt-cloud-force-ascii')
for img in images:
if isinstance(img.id, string_types) and not six.PY3:
if isinstance(img.id, six.string_types) and not six.PY3:
img_id = img.id.encode('ascii', 'salt-cloud-force-ascii')
else:
img_id = str(img.id)
img_id = str(img.id) # future lint: disable=blacklisted-function
if isinstance(img.name, string_types) and not six.PY3:
if isinstance(img.name, six.string_types) and not six.PY3:
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name)
img_name = str(img.name) # future lint: disable=blacklisted-function
if vm_image and vm_image in (img_id, img_name):
return img
@ -310,7 +309,7 @@ def get_size(conn, vm_):
return sizes[0]
for size in sizes:
if vm_size and str(vm_size) in (str(size.id), str(size.name)):
if vm_size and str(vm_size) in (str(size.id), str(size.name)): # pylint: disable=blacklisted-function
return size
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
@ -358,7 +357,7 @@ def destroy(name, conn=None, call=None):
node = get_node(conn, name)
profiles = get_configured_provider()['profiles'] # pylint: disable=E0602
if node is None:
log.error('Unable to find the VM {0}'.format(name))
log.error('Unable to find the VM %s', name)
profile = None
if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
profile = node.extra['metadata']['profile']
@ -368,7 +367,7 @@ def destroy(name, conn=None, call=None):
flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
if flush_mine_on_destroy:
log.info('Clearing Salt Mine: {0}'.format(name))
log.info('Clearing Salt Mine: %s', name)
mopts_ = salt.config.DEFAULT_MINION_OPTS
conf_path = '/'.join(__opts__['conf_file'].split('/')[:-1])
@ -378,11 +377,11 @@ def destroy(name, conn=None, call=None):
client = salt.client.get_local_client(mopts_)
minions = client.cmd(name, 'mine.flush')
log.info('Clearing Salt Mine: {0}, {1}'.format(name, flush_mine_on_destroy))
log.info('Destroying VM: {0}'.format(name))
log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy)
log.info('Destroying VM: %s', name)
ret = conn.destroy_node(node)
if ret:
log.info('Destroyed VM: {0}'.format(name))
log.info('Destroyed VM: %s', name)
# Fire destroy action
__utils__['cloud.fire_event'](
'event',
@ -406,7 +405,7 @@ def destroy(name, conn=None, call=None):
return True
log.error('Failed to Destroy VM: {0}'.format(name))
log.error('Failed to Destroy VM: %s', name)
return False
@ -419,11 +418,11 @@ def reboot(name, conn=None):
node = get_node(conn, name)
if node is None:
log.error('Unable to find the VM {0}'.format(name))
log.info('Rebooting VM: {0}'.format(name))
log.error('Unable to find the VM %s', name)
log.info('Rebooting VM: %s', name)
ret = conn.reboot_node(node)
if ret:
log.info('Rebooted VM: {0}'.format(name))
log.info('Rebooted VM: %s', name)
# Fire reboot action
__utils__['cloud.fire_event'](
'event',
@ -435,7 +434,7 @@ def reboot(name, conn=None):
)
return True
log.error('Failed to reboot VM: {0}'.format(name))
log.error('Failed to reboot VM: %s', name)
return False
@ -524,9 +523,5 @@ def conn_has_method(conn, method_name):
if method_name in dir(conn):
return True
log.error(
'Method \'{0}\' not yet supported!'.format(
method_name
)
)
log.error('Method \'%s\' not yet supported!', method_name)
return False

File diff suppressed because it is too large Load Diff

View File

@ -10,7 +10,7 @@
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import salt libs
from salt.utils.schema import (Schema,

View File

@ -10,7 +10,7 @@
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
from salt.utils.schema import (Schema,

View File

@ -10,7 +10,7 @@
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
from salt.utils.schema import (Schema,

View File

@ -10,7 +10,7 @@
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
from salt.utils.schema import (DefinitionsSchema,

View File

@ -9,7 +9,7 @@
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
from salt.utils.schema import (DefinitionsSchema,
ComplexSchemaItem,

View File

@ -9,7 +9,7 @@
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import salt libs
from salt.utils.schema import (Schema,

View File

@ -10,7 +10,7 @@
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
from salt.utils.schema import (Schema,

View File

@ -10,7 +10,7 @@
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
from salt.utils.schema import (Schema,

View File

@ -3,7 +3,7 @@
The daemons package is used to store implementations of the Salt Master and
Minion enabling different transports.
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import Python Libs
import sys
from collections import namedtuple, Iterable, Sequence, Mapping

View File

@ -19,7 +19,7 @@ opts['caller_floscript']
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import os
# Import modules

View File

@ -6,7 +6,7 @@ The core behaviors used by minion and master
# pylint: disable=3rd-party-module-not-gated
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import os
import time
import random
@ -399,7 +399,7 @@ class SaltRaetRoadStackJoiner(ioflo.base.deeding.Deed):
ha=mha,
kind=kinds.applKinds.master))
except gaierror as ex:
log.warning("Unable to connect to master {0}: {1}".format(mha, ex))
log.warning("Unable to connect to master %s: %s", mha, ex)
if self.opts.value.get(u'master_type') not in (u'failover', u'distributed'):
raise ex
if not stack.remotes:
@ -664,10 +664,8 @@ class SaltLoadModules(ioflo.base.deeding.Deed):
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.value.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug(
'modules_max_memory set, enforcing a maximum of {0}'.format(
self.opts.value['modules_max_memory'])
)
log.debug('modules_max_memory set, enforcing a maximum of %s',
self.opts.value['modules_max_memory'])
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
@ -1106,22 +1104,22 @@ class SaltRaetRouterMaster(SaltRaetRouter):
s_estate, s_yard, s_share = msg['route']['src']
d_estate, d_yard, d_share = msg['route']['dst']
except (ValueError, IndexError):
log.error('Received invalid message: {0}'.format(msg))
log.error('Received invalid message: %s', msg)
return
if s_estate is None: # drop
return
log.debug("**** Road Router rxMsg **** id={0} estate={1} yard={2}\n"
" msg= {3}\n".format(
self.opts.value['id'],
self.road_stack.value.local.name,
self.lane_stack.value.local.name,
msg))
log.debug(
'**** Road Router rxMsg **** id=%s estate=%s yard=%s\nmsg=%s',
self.opts.value['id'],
self.road_stack.value.local.name,
self.lane_stack.value.local.name,
msg
)
if d_estate is not None and d_estate != self.road_stack.value.local.name:
log.error(
'Road Router Received message for wrong estate: {0}'.format(d_estate))
log.error('Road Router Received message for wrong estate: %s', d_estate)
return
if d_yard is not None:
@ -1132,15 +1130,15 @@ class SaltRaetRouterMaster(SaltRaetRouter):
return
if d_share is None:
# No queue destination!
log.error('Received message without share: {0}'.format(msg))
log.error('Received message without share: %s', msg)
return
elif d_share == 'event_fire': # rebroadcast events from other masters
self.event.value.append(msg)
#log.debug("\n**** Event Fire \n {0}\n".format(msg))
#log.debug("\n**** Event Fire \n %s\n", msg)
return
elif d_share == 'local_cmd':
# Refuse local commands over the wire
log.error('Received local command remotely! Ignoring: {0}'.format(msg))
log.error('Received local command remotely! Ignoring: %s', msg)
return
elif d_share == 'remote_cmd':
# Send it to a remote worker
@ -1162,7 +1160,7 @@ class SaltRaetRouterMaster(SaltRaetRouter):
s_estate, s_yard, s_share = msg['route']['src']
d_estate, d_yard, d_share = msg['route']['dst']
except (ValueError, IndexError):
log.error('Lane Router Received invalid message: {0}'.format(msg))
log.error('Lane Router Received invalid message: %s', msg)
return
if s_yard is None:
@ -1172,12 +1170,13 @@ class SaltRaetRouterMaster(SaltRaetRouter):
s_estate = self.road_stack.value.local.name
msg['route']['src'] = (s_estate, s_yard, s_share)
log.debug("**** Lane Router rxMsg **** id={0} estate={1} yard={2}\n"
" msg={3}\n".format(
self.opts.value['id'],
self.road_stack.value.local.name,
self.lane_stack.value.local.name,
msg))
log.debug(
'**** Lane Router rxMsg **** id=%s estate=%s yard=%s\nmsg=%s',
self.opts.value['id'],
self.road_stack.value.local.name,
self.lane_stack.value.local.name,
msg
)
if d_estate is None:
pass
@ -1204,23 +1203,23 @@ class SaltRaetRouterMaster(SaltRaetRouter):
return
if d_share is None:
# No queue destination!
log.error('Lane Router Received message without share: {0}'.format(msg))
log.error('Lane Router Received message without share: %s', msg)
return
elif d_share == 'local_cmd':
self.lane_stack.value.transmit(msg,
self.lane_stack.value.fetchUidByName(next(self.workers.value)))
elif d_share == 'event_req':
self.event_req.value.append(msg)
#log.debug("\n**** Event Subscribe \n {0}\n".format(msg))
#log.debug("\n**** Event Subscribe \n %s\n", msg)
elif d_share == 'event_fire':
self.event.value.append(msg)
#log.debug("\n**** Event Fire \n {0}\n".format(msg))
#log.debug("\n**** Event Fire \n %s\n", msg)
elif d_share == 'presence_req':
self.presence_req.value.append(msg)
#log.debug("\n**** Presence Request \n {0}\n".format(msg))
#log.debug("\n**** Presence Request \n %s\n", msg)
elif d_share == 'stats_req':
self.stats_req.value.append(msg)
#log.debug("\n**** Stats Request \n {0}\n".format(msg))
#log.debug("\n**** Stats Request \n %s\n", msg)
class SaltRaetRouterMinion(SaltRaetRouter):
@ -1241,22 +1240,22 @@ class SaltRaetRouterMinion(SaltRaetRouter):
s_estate, s_yard, s_share = msg['route']['src']
d_estate, d_yard, d_share = msg['route']['dst']
except (ValueError, IndexError):
log.error('Received invalid message: {0}'.format(msg))
log.error('Received invalid message: %s', msg)
return
if s_estate is None: # drop
return
log.debug("**** Road Router rxMsg **** id={0} estate={1} yard={2}\n"
" msg= {3}\n".format(
self.opts.value['id'],
self.road_stack.value.local.name,
self.lane_stack.value.local.name,
msg))
log.debug(
'**** Road Router rxMsg **** id=%s estate=%s yard=%s\nmsg=%s',
self.opts.value['id'],
self.road_stack.value.local.name,
self.lane_stack.value.local.name,
msg
)
if d_estate is not None and d_estate != self.road_stack.value.local.name:
log.error(
'Road Router Received message for wrong estate: {0}'.format(d_estate))
log.error('Road Router Received message for wrong estate: %s', d_estate)
return
if d_yard is not None:
@ -1268,7 +1267,7 @@ class SaltRaetRouterMinion(SaltRaetRouter):
return
if d_share is None:
# No queue destination!
log.error('Received message without share: {0}'.format(msg))
log.error('Received message without share: %s', msg)
return
elif d_share == 'fun':
@ -1276,7 +1275,7 @@ class SaltRaetRouterMinion(SaltRaetRouter):
self.fun.value.append(msg)
elif d_share == 'stats_req':
self.stats_req.value.append(msg)
#log.debug("\n**** Stats Request \n {0}\n".format(msg))
#log.debug("\n**** Stats Request \n %s\n", msg)
def _process_lane_rxmsg(self, msg, sender):
'''
@ -1290,7 +1289,7 @@ class SaltRaetRouterMinion(SaltRaetRouter):
s_estate, s_yard, s_share = msg['route']['src']
d_estate, d_yard, d_share = msg['route']['dst']
except (ValueError, IndexError):
log.error('Lane Router Received invalid message: {0}'.format(msg))
log.error('Lane Router Received invalid message: %s', msg)
return
if s_yard is None:
@ -1300,12 +1299,13 @@ class SaltRaetRouterMinion(SaltRaetRouter):
s_estate = self.road_stack.value.local.name
msg['route']['src'] = (s_estate, s_yard, s_share)
log.debug("**** Lane Router rxMsg **** id={0} estate={1} yard={2}\n"
" msg={3}\n".format(
self.opts.value['id'],
self.road_stack.value.local.name,
self.lane_stack.value.local.name,
msg))
log.debug(
'**** Lane Router rxMsg **** id=%s estate=%s yard=%s\nmsg=%s',
self.opts.value['id'],
self.road_stack.value.local.name,
self.lane_stack.value.local.name,
msg
)
if d_estate is None:
pass
@ -1327,31 +1327,31 @@ class SaltRaetRouterMinion(SaltRaetRouter):
return
if d_share is None:
# No queue destination!
log.error('Lane Router Received message without share: {0}'.format(msg))
log.error('Lane Router Received message without share: %s', msg)
return
elif d_share == 'event_req':
self.event_req.value.append(msg)
#log.debug("\n**** Event Subscribe \n {0}\n".format(msg))
#log.debug("\n**** Event Subscribe \n %s\n", msg)
elif d_share == 'event_fire':
self.event.value.append(msg)
#log.debug("\n**** Event Fire \n {0}\n".format(msg))
#log.debug("\n**** Event Fire \n %s\n", msg)
elif d_share == 'remote_cmd': # assume minion to master or salt-call
if not self.road_stack.value.remotes:
log.error("**** Lane Router: Missing joined master. Unable to route "
"remote_cmd. Requeuing".format())
"remote_cmd. Requeuing")
self.laters.value.append((msg, sender))
return
d_estate = self._get_master_estate_name(clustered=self.opts.get('cluster_mode', False))
if not d_estate:
log.error("**** Lane Router: No available destination estate for 'remote_cmd'."
"Unable to route. Requeuing".format())
"Unable to route. Requeuing")
self.laters.value.append((msg, sender))
return
msg['route']['dst'] = (d_estate, d_yard, d_share)
log.debug("**** Lane Router: Missing destination estate for 'remote_cmd'. "
"Using default route={0}.".format(msg['route']['dst']))
"Using default route=%s.", msg['route']['dst'])
self.road_stack.value.message(msg,
self.road_stack.value.nameRemotes[d_estate].uid)
@ -1527,7 +1527,7 @@ class SaltRaetPresenter(ioflo.base.deeding.Deed):
minions = states[state].value
except KeyError:
# error: wrong/unknown state requested
log.error('Lane Router Received invalid message: {0}'.format(msg))
log.error('Lane Router Received invalid message: %s', msg)
return
result = odict()
@ -1581,7 +1581,7 @@ class SaltRaetStatsEventer(ioflo.base.deeding.Deed):
elif tag == tagify('lane', 'stats'):
return self.lane_stack.value.stats
else:
log.error('Missing or invalid tag: {0}'.format(tag))
log.error('Missing or invalid tag: %s', tag)
return None
def action(self):

View File

@ -15,7 +15,7 @@ without the need for a swarm of real minions.
# pylint: disable=3rd-party-module-not-gated
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
@ -39,7 +39,7 @@ class SaltDummyPublisher(ioflo.base.deeding.Deed):
def action(self):
while self.publish.value:
pub = self.publish.value.popleft()
log.debug('Dummy publisher publishing: {0}'.format(pub))
log.debug('Dummy publisher publishing: %s', pub)
msg = self._fill_tmpl(pub)
self.lane_stack.value.transmit(msg, self.lane_stack.value.fetchUidByName(next(self.workers.value)))
@ -64,5 +64,5 @@ class SaltDummyPublisher(ioflo.base.deeding.Deed):
}
}
log.debug('Dummy publisher faking return with: {0}'.format(msg))
log.debug('Dummy publisher faking return with: %s', msg)
return msg

View File

@ -6,7 +6,7 @@ Jobber Behaviors
# pylint: disable=3rd-party-module-not-gated
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import types
@ -238,13 +238,15 @@ class SaltRaetNixJobber(ioflo.base.deeding.Deed):
continue
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data))
'User %s Executing command %s with jid %s',
data['user'], data['fun'], data['jid']
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
'Executing command %s with jid %s',
data['fun'], data['jid']
)
log.debug('Command details %s', data)
if is_windows():
# SaltRaetNixJobber is not picklable. Pickling is necessary
@ -255,9 +257,7 @@ class SaltRaetNixJobber(ioflo.base.deeding.Deed):
try:
self.proc_run(msg)
except Exception as exc:
log.error(
'Exception caught by jobber: {0}'.format(exc),
exc_info=True)
log.error('Exception caught by jobber: %s', exc, exc_info=True)
else:
process = multiprocessing.Process(
target=self.proc_run,
@ -302,11 +302,13 @@ class SaltRaetNixJobber(ioflo.base.deeding.Deed):
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
raise SaltInvocationError(
'Wrong executors specification: {0}. String or '
'non-empty list expected'.format(executors)
)
if self.opts.get('sudo_user', '') and executors[-1] != 'sudo':
executors[-1] = 'sudo.get' # replace
log.trace("Executors list {0}".format(executors))
log.trace("Executors list %s", executors)
for name in executors:
if name not in self.module_executors.value:
@ -326,7 +328,7 @@ class SaltRaetNixJobber(ioflo.base.deeding.Deed):
iret = []
iret.append(single)
tag = tagify(
[data['jid'], 'prog', self.opts['id'], str(ind)],
[data['jid'], 'prog', self.opts['id'], six.text_type(ind)],
'job')
event_data = {'return': single}
self._fire_master(event_data, tag) # Need to look into this
@ -347,19 +349,15 @@ class SaltRaetNixJobber(ioflo.base.deeding.Deed):
ret['return'] = '{0}: {1}'.format(msg, exc)
except CommandExecutionError as exc:
log.error(
'A command in \'{0}\' had a problem: {1}'.format(
function_name,
exc
),
'A command in \'%s\' had a problem: %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
except SaltInvocationError as exc:
log.error(
'Problem executing \'{0}\': {1}'.format(
function_name,
exc
),
'Problem executing \'%s\': %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
@ -389,12 +387,7 @@ class SaltRaetNixJobber(ioflo.base.deeding.Deed):
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
log.error('The return failed for job %s %s', data['jid'], exc)
console.concise("Closing Jobber Stack {0}\n".format(stack.name))
stack.server.close()
salt.transport.jobber_stack = None

View File

@ -3,7 +3,7 @@
Define the behaviors used in the maintenance process
'''
# pylint: disable=3rd-party-module-not-gated
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import multiprocessing
import os

View File

@ -3,7 +3,7 @@
Start the reactor!
'''
# pylint: disable=3rd-party-module-not-gated
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import salt libs
import salt.utils.reactor
import salt.utils.event

View File

@ -5,7 +5,7 @@ The core behaviors used by minion and master
# pylint: disable=W0232
# pylint: disable=3rd-party-module-not-gated
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import time
@ -210,10 +210,10 @@ class SaltRaetWorkerRouter(ioflo.base.deeding.Deed):
s_estate, s_yard, s_share = msg['route']['src']
d_estate, d_yard, d_share = msg['route']['dst']
except (ValueError, IndexError):
log.error('Received invalid message: {0}'.format(msg))
log.error('Received invalid message: %s', msg)
return
log.debug("**** Worker Router rxMsg\n msg= {0}\n".format(msg))
log.debug("**** Worker Router rxMsg\nmsg=%s", msg)
if 'load' in msg:
cmd = msg['load'].get('cmd')

View File

@ -6,7 +6,7 @@ IoFlo behaviors for running a ZeroMQ based master
# pylint: disable=3rd-party-module-not-gated
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import os
import logging
import hashlib
@ -86,7 +86,7 @@ class ZmqRet(multiprocessing.Process):
'''
self.context = zmq.Context(self.opts['worker_threads'])
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
log.info('ZMQ Ret port binding to {0}'.format(self.uri))
log.info('ZMQ Ret port binding to %s', self.uri)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
@ -186,7 +186,7 @@ class SaltZmqPublisher(ioflo.base.deeding.Deed):
self.pub_sock.setsockopt(zmq.IPV4ONLY, 0)
self.pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
self.pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts.value)
log.info('Starting the Salt ZeroMQ Publisher on {0}'.format(self.pub_uri))
log.info('Starting the Salt ZeroMQ Publisher on %s', self.pub_uri)
self.pub_sock.bind(self.pub_uri)
self.created = True
# Don't pop the publish messages! The raet behavior still needs them

View File

@ -3,7 +3,7 @@
This module contains all of the routines needed to set up a master server, this
involves preparing the three listeners and the workers needed by the master.
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import fnmatch
@ -99,13 +99,13 @@ def clean_fsbackend(opts):
'envs.p'
)
if os.path.isfile(env_cache):
log.debug('Clearing {0}fs env cache'.format(backend))
log.debug('Clearing %sfs env cache', backend)
try:
os.remove(env_cache)
except OSError as exc:
log.critical(
'Unable to clear env cache file {0}: {1}'
.format(env_cache, exc)
'Unable to clear env cache file %s: %s',
env_cache, exc
)
file_lists_dir = os.path.join(
@ -123,8 +123,8 @@ def clean_fsbackend(opts):
os.remove(cache_file)
except OSError as exc:
log.critical(
'Unable to file_lists cache file {0}: {1}'
.format(cache_file, exc)
'Unable to file_lists cache file %s: %s',
cache_file, exc
)
@ -194,7 +194,7 @@ def mk_key(opts, user):
)
if os.path.exists(keyfile):
log.debug('Removing stale keyfile: {0}'.format(keyfile))
log.debug('Removing stale keyfile: %s', keyfile)
if salt.utils.platform.is_windows() and not os.access(keyfile, os.W_OK):
# Cannot delete read-only files on Windows.
os.chmod(keyfile, stat.S_IRUSR | stat.S_IWUSR)
@ -203,7 +203,7 @@ def mk_key(opts, user):
key = salt.crypt.Crypticle.generate_key_string()
cumask = os.umask(191)
with salt.utils.files.fopen(keyfile, 'w+') as fp_:
fp_.write(key)
fp_.write(salt.utils.stringutils.to_str(key))
os.umask(cumask)
# 600 octal: Read and write access to the owner only.
# Write access is necessary since on subsequent runs, if the file
@ -252,8 +252,8 @@ def access_keys(opts):
def fileserver_update(fileserver):
'''
Update the fileserver backends, requires that a built fileserver object
be passed in
Update the fileserver backends, requires that a salt.fileserver.Fileserver
object be passed in
'''
try:
if not fileserver.servers:
@ -265,7 +265,7 @@ def fileserver_update(fileserver):
fileserver.update()
except Exception as exc:
log.error(
'Exception {0} occurred in file server update'.format(exc),
'Exception %s occurred in file server update', exc,
exc_info_on_loglevel=logging.DEBUG
)
@ -311,8 +311,7 @@ class AutoKey(object):
return False
if not self.check_permissions(signing_file):
message = 'Wrong permissions for {0}, ignoring content'
log.warning(message.format(signing_file))
log.warning('Wrong permissions for %s, ignoring content', signing_file)
return False
with salt.utils.files.fopen(signing_file, 'r') as fp_:
@ -340,7 +339,7 @@ class AutoKey(object):
stub_file = os.path.join(autosign_dir, f)
mtime = os.path.getmtime(stub_file)
if mtime < min_time:
log.warning('Autosign keyid expired {0}'.format(stub_file))
log.warning('Autosign keyid expired %s', stub_file)
os.remove(stub_file)
stub_file = os.path.join(autosign_dir, keyid)
@ -353,24 +352,26 @@ class AutoKey(object):
'''
Check for matching grains in the autosign_grains_dir.
'''
if not autosign_grains or u'autosign_grains_dir' not in self.opts:
if not autosign_grains or 'autosign_grains_dir' not in self.opts:
return False
autosign_grains_dir = self.opts[u'autosign_grains_dir']
autosign_grains_dir = self.opts['autosign_grains_dir']
for root, dirs, filenames in os.walk(autosign_grains_dir):
for grain in filenames:
if grain in autosign_grains:
grain_file = os.path.join(autosign_grains_dir, grain)
if not self.check_permissions(grain_file):
message = 'Wrong permissions for {0}, ignoring content'
log.warning(message.format(grain_file))
log.warning(
'Wrong permissions for %s, ignoring content',
grain_file
)
continue
with salt.utils.files.fopen(grain_file, u'r') as f:
with salt.utils.files.fopen(grain_file, 'r') as f:
for line in f:
line = line.strip()
if line.startswith(u'#'):
if line.startswith('#'):
continue
if autosign_grains[grain] == line:
return True
@ -535,10 +536,8 @@ class RemoteFuncs(object):
except Exception as exc:
# If anything happens in the top generation, log it and move on
log.error(
'Top function {0} failed with error {1} for minion '
'{2}'.format(
fun, exc, load['id']
)
'Top function %s failed with error %s for minion %s',
fun, exc, load['id']
)
return ret
@ -566,11 +565,11 @@ class RemoteFuncs(object):
expr_form = load.get('expr_form')
if expr_form is not None and 'tgt_type' not in load:
salt.utils.versions.warn_until(
u'Neon',
u'_mine_get: minion {0} uses pre-Nitrogen API key '
u'"expr_form". Accepting for backwards compatibility '
u'but this is not guaranteed '
u'after the Neon release'.format(load['id'])
'Neon',
'_mine_get: minion {0} uses pre-Nitrogen API key '
'"expr_form". Accepting for backwards compatibility '
'but this is not guaranteed '
'after the Neon release'.format(load['id'])
)
match_type = expr_form
else:
@ -664,9 +663,8 @@ class RemoteFuncs(object):
if len(load['data']) + load.get('loc', 0) > file_recv_max_size:
log.error(
'Exceeding file_recv_max_size limit: {0}'.format(
file_recv_max_size
)
'Exceeding file_recv_max_size limit: %s',
file_recv_max_size
)
return False
# Normalize Windows paths
@ -704,7 +702,7 @@ class RemoteFuncs(object):
if any(key not in load for key in ('id', 'grains')):
return False
# pillar = salt.pillar.Pillar(
log.debug('Master _pillar using ext: {0}'.format(load.get('ext')))
log.debug('Master _pillar using ext: %s', load.get('ext'))
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
@ -762,7 +760,7 @@ class RemoteFuncs(object):
# save the load, since we don't have it
saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[saveload_fstr](load['jid'], load)
log.info('Got return from {id} for job {jid}'.format(**load))
log.info('Got return from %s for job %s', load['id'], load['jid'])
self.event.fire_event(load, load['jid']) # old dup event
self.event.fire_event(load, salt.utils.event.tagify([load['jid'], 'ret', load['id']], 'job'))
self.event.fire_ret_load(load)
@ -822,11 +820,7 @@ class RemoteFuncs(object):
if not good:
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
# Prepare the runner object
opts = {}
@ -912,7 +906,7 @@ class RemoteFuncs(object):
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, str(ret['jid']))
jid_fn = os.path.join(auth_cache, six.text_type(ret['jid']))
with salt.utils.files.fopen(jid_fn, 'w+') as fp_:
fp_.write(load['id'])
return ret
@ -1077,11 +1071,10 @@ class LocalFuncs(object):
load.get('kwarg', {}),
username)
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
log.exception('Exception occurred while introspecting %s')
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': str(exc)}}
'message': six.text_type(exc)}}
def wheel(self, load):
'''
@ -1136,8 +1129,7 @@ class LocalFuncs(object):
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
log.exception('Exception occurred while introspecting %s', fun)
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
@ -1179,12 +1171,9 @@ class LocalFuncs(object):
if publisher_acl.user_is_blacklisted(load['user']) or \
publisher_acl.cmd_is_blacklisted(load['fun']):
log.error(
'{user} does not have permissions to run {function}. Please '
'contact your local administrator if you believe this is in '
'error.\n'.format(
user=load['user'],
function=load['fun']
)
'%s does not have permissions to run %s. Please contact '
'your local administrator if you believe this is in error.',
load['user'], load['fun']
)
return ''
@ -1237,7 +1226,7 @@ class LocalFuncs(object):
if auth_type == 'token':
username = auth_check.get('username')
load['user'] = username
log.debug('Minion tokenized user = "{0}"'.format(username))
log.debug('Minion tokenized user = "%s"', username)
elif auth_type == 'eauth':
# The username we are attempting to auth with
load['user'] = self.loadauth.load_name(extra)
@ -1284,13 +1273,12 @@ class LocalFuncs(object):
except KeyError:
log.critical(
'The specified returner used for the external job cache '
'"{0}" does not have a save_load function!'.format(
self.opts['ext_job_cache']
)
'"%s" does not have a save_load function!',
self.opts['ext_job_cache']
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
'The specified returner threw a stack trace:',
exc_info=True
)
@ -1301,13 +1289,12 @@ class LocalFuncs(object):
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"{0}" does not have a save_load function!'.format(
self.opts['master_job_cache']
)
'"%s" does not have a save_load function!',
self.opts['master_job_cache']
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
'The specified returner threw a stack trace:',
exc_info=True
)
# Altering the contents of the publish load is serious!! Changes here
@ -1347,18 +1334,16 @@ class LocalFuncs(object):
if 'user' in load:
log.info(
'User {user} Published command {fun} with jid {jid}'.format(
**load
)
'User %s Published command %s with jid %s',
load['user'], load['fun'], load['jid']
)
pub_load['user'] = load['user']
else:
log.info(
'Published command {fun} with jid {jid}'.format(
**load
)
'Published command %s with jid %s',
load['fun'], load['jid']
)
log.debug('Published command details {0}'.format(pub_load))
log.debug('Published command details %s', pub_load)
return {'ret': {
'jid': load['jid'],

View File

@ -3,7 +3,7 @@
salting.py module of salt specific interfaces to raet
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# pylint: skip-file
# pylint: disable=W0611

View File

@ -9,14 +9,14 @@ from salt.daemons import test
test.run()
'''
# pylint: skip-file
# pylint: disable=C0103,3rd-party-module-not-gated
from __future__ import absolute_import, print_function, unicode_literals
import sys
# pylint: disable=blacklisted-import
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# pylint: enable=blacklisted-import
import os
@ -24,6 +24,7 @@ from ioflo.base.consoling import getConsole
console = getConsole()
console.reinit(verbosity=console.Wordage.concise)
def run(start=None):
'''
Run unittests starting at directory given by start
@ -37,7 +38,7 @@ def run(start=None):
console.terse("\nRunning all salt.daemons unit tests in '{0}', starting at '{1}'\n".format(top, start))
loader = unittest.TestLoader()
suite = loader.discover(start, 'test_*.py', top )
suite = loader.discover(start, 'test_*.py', top)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":

View File

@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
from . import actors
__all__ = ['actors']

View File

@ -2,9 +2,7 @@
'''
Test behaviors used by test plans
'''
# pylint: skip-file
# pylint: disable=C0103
from __future__ import absolute_import, print_function, unicode_literals
import os
import stat
import time
@ -28,7 +26,7 @@ from salt.daemons import salting
from salt.utils.event import tagify
class DeedTestWrapper():
class DeedTestWrapper(object):
def assertTrue(self, condition):
if not condition:
self.failure.value = 'Fail'
@ -94,7 +92,7 @@ class TestOptsSetup(ioflo.base.deeding.Deed):
raet_port=self.raet_port,
transport='raet',
client_acl=dict(),
publisher_acl = dict(),
publisher_acl=dict(),
pki_dir=pkiDirpath,
sock_dir=sockDirpath,
cachedir=cacheDirpath,
@ -243,9 +241,9 @@ class TestPresenceAvailable(ioflo.base.deeding.Deed):
'ival': set()}}
def action(self):
"""
'''
Test Presenter 'available' request (A1, B*)
"""
'''
console.terse("{0}\n".format(self.action.__doc__))
# Prepare
@ -306,9 +304,9 @@ class TestPresenceJoined(ioflo.base.deeding.Deed):
'ival': odict()}}
def action(self):
"""
'''
Test Presenter 'joined' request (A2)
"""
'''
console.terse("{0}\n".format(self.action.__doc__))
# Prepare
@ -350,9 +348,9 @@ class TestPresenceAllowed(ioflo.base.deeding.Deed):
'ival': odict()}}
def action(self):
"""
'''
Test Presenter 'allowed' request (A3)
"""
'''
console.terse("{0}\n".format(self.action.__doc__))
# Prepare
@ -394,9 +392,9 @@ class TestPresenceAlived(ioflo.base.deeding.Deed):
'ival': odict()}}
def action(self):
"""
'''
Test Presenter 'alived' request (A4)
"""
'''
console.terse("{0}\n".format(self.action.__doc__))
# Prepare
@ -438,9 +436,9 @@ class TestPresenceReaped(ioflo.base.deeding.Deed):
'ival': odict()}}
def action(self):
"""
'''
Test Presenter 'reaped' request (A5)
"""
'''
console.terse("{0}\n".format(self.action.__doc__))
# Prepare
@ -479,14 +477,11 @@ class TestPresenceNoRequest(ioflo.base.deeding.Deed):
Ioinits = {}
def action(self):
"""
'''
Test Presenter with no requests (C1)
"""
'''
console.terse("{0}\n".format(self.action.__doc__))
# Prepare
pass # do nothing
class TestPresenceNoRequestCheck(ioflo.base.deeding.Deed, DeedTestWrapper):
Ioinits = {'event_stack': '.salt.test.lane.stack',
@ -505,9 +500,9 @@ class TestPresenceUnknownSrc(ioflo.base.deeding.Deed, DeedTestWrapper):
'failure': '.meta.failure'}
def action(self):
"""
'''
Test Presenter handles request from unknown (disconnected) source (C2)
"""
'''
console.terse("{0}\n".format(self.action.__doc__))
# Prepare
@ -537,9 +532,9 @@ class TestPresenceAvailableNoMinions(ioflo.base.deeding.Deed):
'event_stack': '.salt.test.lane.stack'}
def action(self):
"""
'''
Test Presenter 'available' request with no minions in the state (D1)
"""
'''
console.terse("{0}\n".format(self.action.__doc__))
# Prepare
@ -580,9 +575,9 @@ class TestPresenceAvailableOneMinion(ioflo.base.deeding.Deed):
'ival': set()}}
def action(self):
"""
'''
Test Presenter 'available' request with one minions in the state (D2)
"""
'''
console.terse("{0}\n".format(self.action.__doc__))
# Prepare
@ -626,9 +621,9 @@ class TestPresenceAvailableUnknownIp(ioflo.base.deeding.Deed):
'ival': set()}}
def action(self):
"""
'''
Test Presenter 'available' request with one minions in the state (D3)
"""
'''
console.terse("{0}\n".format(self.action.__doc__))
# Prepare
@ -673,9 +668,9 @@ class TestPresenceAllowedNoMinions(ioflo.base.deeding.Deed):
'event_stack': '.salt.test.lane.stack'}
def action(self):
"""
'''
Test Presenter 'allowed' request with no minions in the state (D4)
"""
'''
console.terse("{0}\n".format(self.action.__doc__))
# Prepare
@ -713,9 +708,9 @@ class TestPresenceAllowedOneMinion(ioflo.base.deeding.Deed):
'ival': odict()}}
def action(self):
"""
'''
Test Presenter 'allowed' request with one minion in the state (D5)
"""
'''
console.terse("{0}\n".format(self.action.__doc__))
# Prepare
@ -745,7 +740,7 @@ class TestPresenceAllowedOneMinionCheck(ioflo.base.deeding.Deed, DeedTestWrapper
self.assertTrue(msg == {'route': {'src': [None, 'manor', None],
'dst': [None, None, 'event_fire']},
'tag': tag,
'data': {'allowed': {'alpha':'1.1.1.1'}}})
'data': {'allowed': {'alpha': '1.1.1.1'}}})
class StatsMasterTestSetup(ioflo.base.deeding.Deed):

View File

@ -4,10 +4,7 @@
Runs minion floscript
'''
from __future__ import print_function
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# pylint: skip-file
import os
import stat

View File

@ -3,15 +3,11 @@
'''
Runs minion floscript
'''
from __future__ import absolute_import
from __future__ import print_function
# pylint: skip-file
from __future__ import absolute_import, print_function, unicode_literals
import os
import stat
from ioflo.aid.odicting import odict
from ioflo.base.consoling import getConsole
console = getConsole()
@ -21,6 +17,7 @@ FLO_DIR_PATH = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'flo'
)
def test():
""" Execute run.start """
@ -48,37 +45,35 @@ def test():
mode = os.stat(localFilepath).st_mode
print(mode)
cacheDirpath = os.path.join('/tmp/raet', 'cache', 'minion')
if not os.path.exists(cacheDirpath):
os.makedirs(cacheDirpath)
sockDirpath = os.path.join('/tmp/raet', 'sock', 'minion')
if not os.path.exists(sockDirpath):
os.makedirs(sockDirpath)
os.makedirs(sockDirpath)
#filepath = os.path.join(FLO_DIR_PATH, 'minion.flo')
filepath = 'minion.flo'
opts = dict(
id="minion",
__role='minion',
ioflo_period=0.1,
ioflo_realtime=True,
minion_floscript=filepath,
ioflo_verbose=2,
interface="",
raet_port=7531,
master_port=7530,
master='127.0.0.1',
transport='raet',
client_acl=dict(),
publisher_acl=dict(),
pki_dir=pkiDirpath,
sock_dir=sockDirpath,
cachedir=cacheDirpath,
open_mode=True,
auto_accept=True)
id="minion",
__role='minion',
ioflo_period=0.1,
ioflo_realtime=True,
minion_floscript=filepath,
ioflo_verbose=2,
interface="",
raet_port=7531,
master_port=7530,
master='127.0.0.1',
transport='raet',
client_acl=dict(),
publisher_acl=dict(),
pki_dir=pkiDirpath,
sock_dir=sockDirpath,
cachedir=cacheDirpath,
open_mode=True,
auto_accept=True)
minion = salt.daemons.flo.IofloMinion(opts=opts)
minion.start(behaviors=['raet.flo.behaving'])

View File

@ -3,41 +3,33 @@
Tests of utilities that support multiple masters in Salt Raet
'''
from __future__ import absolute_import
# pylint: skip-file
# pylint: disable=C0103
from __future__ import absolute_import, print_function, unicode_literals
import sys
from salt.ext.six.moves import map
# pylint: disable=blacklisted-import
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# pylint: enable=blacklisted-import
import os
import stat
import time
import tempfile
import shutil
from ioflo.aid.odicting import odict
from ioflo.aid.timing import Timer, StoreTimer
from ioflo.aid.timing import StoreTimer
from ioflo.base import storing
from ioflo.base.consoling import getConsole
console = getConsole()
from raet import raeting
from salt.daemons import parse_hostname, extract_masters
from salt.daemons import parse_hostname, extract_masters
def setUpModule():
console.reinit(verbosity=console.Wordage.concise)
def tearDownModule():
pass
class BasicTestCase(unittest.TestCase):
""""""
class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
def setUp(self):
self.store = storing.Store(stamp=0.0)
@ -48,8 +40,6 @@ class BasicTestCase(unittest.TestCase):
def tearDown(self):
pass
def testParseHostname(self):
'''
Test parsing hostname provided according to syntax for opts['master']
@ -88,7 +78,6 @@ class BasicTestCase(unittest.TestCase):
self.assertEquals(parse_hostname(' fe80::1%lo0 ', self.port),
('fe80::1%lo0', 4506))
self.assertEquals(parse_hostname('localhost 4510', self.port),
('localhost', 4510))
self.assertEquals(parse_hostname('127.0.0.1 4510', self.port),
@ -105,7 +94,6 @@ class BasicTestCase(unittest.TestCase):
self.assertEquals(parse_hostname('fe80::1%lo0 4510', self.port),
('fe80::1%lo0', 4510))
self.assertEquals(parse_hostname(' localhost 4510 ', self.port),
('localhost', 4510))
self.assertEquals(parse_hostname(' 127.0.0.1 4510 ', self.port),
@ -122,7 +110,6 @@ class BasicTestCase(unittest.TestCase):
self.assertEquals(parse_hostname(' fe80::1%lo0 4510 ', self.port),
('fe80::1%lo0', 4510))
self.assertEquals(parse_hostname('localhost abcde', self.port), None)
self.assertEquals(parse_hostname('127.0.0.1 a4510', self.port), None)
self.assertEquals(parse_hostname(list([1, 2, 3]), self.port), None)
@ -150,7 +137,6 @@ class BasicTestCase(unittest.TestCase):
self.assertEquals(parse_hostname('localhost::4510', self.port),
('localhost::4510', 4506))
def testExtractMastersSingle(self):
'''
Test extracting from master provided according to syntax for opts['master']
@ -189,7 +175,6 @@ class BasicTestCase(unittest.TestCase):
internal=None),
])
master = '10.0.2.23'
self.opts.update(master=master)
self.assertEquals(extract_masters(self.opts),
@ -240,8 +225,7 @@ class BasicTestCase(unittest.TestCase):
master = dict(internal='10.0.2.23 4510')
self.opts.update(master=master)
self.assertEquals(extract_masters(self.opts),[])
self.assertEquals(extract_masters(self.opts), [])
def testExtractMastersMultiple(self):
'''
@ -332,22 +316,24 @@ def runOne(test):
suite = unittest.TestSuite([test])
unittest.TextTestRunner(verbosity=2).run(suite)
def runSome():
'''
Unittest runner
'''
tests = []
tests = []
names = [
'testParseHostname',
'testExtractMastersSingle',
'testExtractMastersMultiple',
]
'testParseHostname',
'testExtractMastersSingle',
'testExtractMastersMultiple',
]
tests.extend(list(list(map(BasicTestCase, names))))
suite = unittest.TestSuite(tests)
unittest.TextTestRunner(verbosity=2).run(suite)
def runAll():
'''
Unittest runner
@ -357,12 +343,13 @@ def runAll():
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__' and __package__ is None:
#console.reinit(verbosity=console.Wordage.concise)
#runAll() #run all unittests
#runAll() # run all unittests
runSome()#only run some
runSome() # only run some
#runOne('testParseHostname')

View File

@ -9,7 +9,7 @@ Runs all the example FloScripts
# pylint: disable=3rd-party-module-not-gated
# Import Python Libs
from __future__ import absolute_import, print_function
from __future__ import absolute_import, print_function, unicode_literals
import os
# Import 3rd-party libs

View File

@ -1,15 +1,16 @@
# -*- coding: utf-8 -*-
"""
'''
Raet Ioflo Behavior Unittests
"""
# pylint: skip-file
# pylint: disable=C0103
'''
from __future__ import absolute_import, print_function, unicode_literals
import sys
from salt.ext.six.moves import map
# pylint: disable=blacklisted-import
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# pylint: enable=blacklisted-import
from ioflo.base.consoling import getConsole
console = getConsole()
@ -21,37 +22,31 @@ from raet.stacking import Stack
from salt.utils.event import tagify
# Import Ioflo Deeds
from salt.daemons.flo import core
from salt.daemons.test.plan import actors
def setUpModule():
console.reinit(verbosity=console.Wordage.concise)
def tearDownModule():
pass
class PresenterTestCase(testing.FrameIofloTestCase):
"""
'''
Test case for Salt Raet Presenter deed
"""
'''
def setUp(self):
"""
'''
Call super if override so House Framer and Frame are setup correctly
"""
'''
super(PresenterTestCase, self).setUp()
def tearDown(self):
"""
'''
Call super if override so House Framer and Frame are torn down correctly
"""
'''
super(PresenterTestCase, self).tearDown()
def addPresenceInfo(self, stateGrp, name, ip, port):
self.assertIn(stateGrp, ('alloweds', 'aliveds', 'reapeds'))
group = self.store.fetch('.salt.var.presence.{0}'.format(stateGrp))
@ -61,19 +56,17 @@ class PresenterTestCase(testing.FrameIofloTestCase):
remote.ha = (ip, port)
group.value[name] = remote
def addAvailable(self, name):
availables = self.store.fetch('.salt.var.presence.availables')
if availables.value is None:
availables.value = set()
availables.value.add(name)
def testContextSetup(self):
"""
'''
Test the context setup procedure used in all the consequence tests works as expected
This test intended to avoid some checks in other tests
"""
'''
console.terse("{0}\n".format(self.testContextSetup.__doc__))
act = self.addEnterDeed("TestOptsSetupMaster")
@ -118,11 +111,10 @@ class PresenterTestCase(testing.FrameIofloTestCase):
if testStack:
testStack.value.server.close()
def testPresenceAvailable(self):
"""
'''
Test Presenter 'available' request (A1, B*)
"""
'''
console.terse("{0}\n".format(self.testPresenceAvailable.__doc__))
# Bootstrap
@ -187,11 +179,10 @@ class PresenterTestCase(testing.FrameIofloTestCase):
if testStack:
testStack.value.server.close()
def testPresenceJoined(self):
"""
'''
Test Presenter 'joined' request (A2)
"""
'''
console.terse("{0}\n".format(self.testPresenceJoined.__doc__))
# Bootstrap
@ -238,11 +229,10 @@ class PresenterTestCase(testing.FrameIofloTestCase):
if testStack:
testStack.value.server.close()
def testPresenceAllowed(self):
"""
'''
Test Presenter 'allowed' request (A3)
"""
'''
console.terse("{0}\n".format(self.testPresenceAllowed.__doc__))
# Bootstrap
@ -288,11 +278,10 @@ class PresenterTestCase(testing.FrameIofloTestCase):
if testStack:
testStack.value.server.close()
def testPresenceAlived(self):
"""
'''
Test Presenter 'alived' request (A4)
"""
'''
console.terse("{0}\n".format(self.testPresenceAlived.__doc__))
# Bootstrap
@ -338,11 +327,10 @@ class PresenterTestCase(testing.FrameIofloTestCase):
if testStack:
testStack.value.server.close()
def testPresenceReaped(self):
"""
'''
Test Presenter 'reaped' request (A5)
"""
'''
console.terse("{0}\n".format(self.testPresenceReaped.__doc__))
# Bootstrap
@ -388,11 +376,10 @@ class PresenterTestCase(testing.FrameIofloTestCase):
if testStack:
testStack.value.server.close()
def testPresenceNoRequest(self):
"""
'''
Test Presenter with no requests (C1)
"""
'''
console.terse("{0}\n".format(self.testPresenceNoRequest.__doc__))
# Bootstrap
@ -418,11 +405,10 @@ class PresenterTestCase(testing.FrameIofloTestCase):
if testStack:
testStack.value.server.close()
def testPresenceUnknownSrc(self):
"""
'''
Test Presenter handles request from unknown (disconnected) source (C2)
"""
'''
console.terse("{0}\n".format(self.testPresenceUnknownSrc.__doc__))
# Bootstrap
@ -458,11 +444,10 @@ class PresenterTestCase(testing.FrameIofloTestCase):
if testStack:
testStack.value.server.close()
def testPresenceAvailableNoMinions(self):
"""
'''
Test Presenter 'available' request with no minions in the state (D1)
"""
'''
console.terse("{0}\n".format(self.testPresenceAvailableNoMinions.__doc__))
# Bootstrap
@ -503,11 +488,10 @@ class PresenterTestCase(testing.FrameIofloTestCase):
if testStack:
testStack.value.server.close()
def testPresenceAvailableOneMinion(self):
"""
'''
Test Presenter 'available' request with one minion in the state (D2)
"""
'''
console.terse("{0}\n".format(self.testPresenceAvailableOneMinion.__doc__))
# Bootstrap
@ -551,11 +535,10 @@ class PresenterTestCase(testing.FrameIofloTestCase):
if testStack:
testStack.value.server.close()
def testPresenceAvailableSomeIpUnknown(self):
"""
'''
Test Presenter 'available' request with some minion addresses aren't known (D3)
"""
'''
console.terse("{0}\n".format(self.testPresenceAvailableSomeIpUnknown.__doc__))
# Bootstrap
@ -604,11 +587,10 @@ class PresenterTestCase(testing.FrameIofloTestCase):
if testStack:
testStack.value.server.close()
def testPresenceAllowedNoMinions(self):
"""
'''
Test Presenter 'allowed' request with no minions in the state (D4)
"""
'''
console.terse("{0}\n".format(self.testPresenceAllowedNoMinions.__doc__))
# Bootstrap
@ -650,11 +632,10 @@ class PresenterTestCase(testing.FrameIofloTestCase):
if testStack:
testStack.value.server.close()
def testPresenceAllowedOneMinion(self):
"""
'''
Test Presenter 'allowed' request with one minion in the state (D5)
"""
'''
console.terse("{0}\n".format(self.testPresenceAllowedOneMinion.__doc__))
# Bootstrap
@ -709,7 +690,9 @@ def runOne(test):
def runSome():
""" Unittest runner """
'''
Unittest runner
'''
tests = []
names = [
'testContextSetup',
@ -725,14 +708,16 @@ def runSome():
'testPresenceAvailableSomeIpUnknown',
'testPresenceAllowedNoMinions',
'testPresenceAllowedOneMinion',
]
tests.extend(map(PresenterTestCase, names))
]
tests.extend(list(map(PresenterTestCase, names)))
suite = unittest.TestSuite(tests)
unittest.TextTestRunner(verbosity=2).run(suite)
def runAll():
""" Unittest runner """
'''
Unittest runner
'''
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(PresenterTestCase))
unittest.TextTestRunner(verbosity=2).run(suite)

View File

@ -3,7 +3,7 @@
Tests to try out salt key.RaetKey Potentially ephemeral
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# pylint: skip-file
# pylint: disable=C0103
import sys

View File

@ -1,19 +1,16 @@
# -*- coding: utf-8 -*-
'''
Tests to try out salt key.RaetKey Potentially ephemeral
'''
from __future__ import print_function
from __future__ import absolute_import
# pylint: skip-file
# pylint: disable=C0103
from __future__ import absolute_import, print_function, unicode_literals
import sys
from salt.ext.six.moves import map
# pylint: disable=blacklisted-import
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# pylint: enable=blacklisted-import
import os
import stat
@ -22,27 +19,27 @@ import tempfile
import shutil
from ioflo.aid.odicting import odict
from ioflo.aid.timing import Timer, StoreTimer
from ioflo.aid.timing import StoreTimer
from ioflo.base import storing
from ioflo.base.consoling import getConsole
console = getConsole()
from raet import raeting, nacling
from raet.road import estating, keeping, stacking
from raet.road import estating, stacking
from salt.key import RaetKey
from salt.daemons import salting
from salt import daemons
import salt.utils.kinds as kinds
def setUpModule():
console.reinit(verbosity=console.Wordage.concise)
def tearDownModule():
pass
class BasicTestCase(unittest.TestCase):
""""""
class BasicTestCase(unittest.TestCase): # pylint: disable=moved-test-case-class
def setUp(self):
self.store = storing.Store(stamp=0.0)
@ -68,7 +65,7 @@ class BasicTestCase(unittest.TestCase):
'''
pkiDirpath = os.path.join(dirpath, 'pki', role, 'raet')
if not os.path.exists(pkiDirpath):
os.makedirs(pkiDirpath)
os.makedirs(pkiDirpath)
acceptedDirpath = os.path.join(pkiDirpath, 'accepted')
if not os.path.exists(acceptedDirpath):
@ -103,7 +100,7 @@ class BasicTestCase(unittest.TestCase):
)
return opts
def createRoadData(self, role, kind=kinds.APPL_KIND_NAMES[kinds.applKinds.master], cachedirpath=''):
def createRoadData(self, role, kind=kinds.APPL_KIND_NAMES[kinds.applKinds.master], cachedirpath=''):
'''
Creates odict and populates with data to setup road stack
{
@ -116,9 +113,9 @@ class BasicTestCase(unittest.TestCase):
}
'''
data = odict()
data['name'] = "{0}_{1}".format(role, kind )
data['name'] = "{0}_{1}".format(role, kind)
data['role'] = role
data['kind'] = kinds.APPL_KINDS[kind] # convert to integer from kind name
data['kind'] = kinds.APPL_KINDS[kind] # convert to integer from kind name
data['basedirpath'] = os.path.join(cachedirpath, 'raet')
signer = nacling.Signer()
data['sighex'] = signer.keyhex
@ -127,10 +124,9 @@ class BasicTestCase(unittest.TestCase):
data['prihex'] = privateer.keyhex
data['pubhex'] = privateer.pubhex
return data
def createRoadStack(self, data, keep, uid=None, main=None, ha=None, mutable=None):
def createRoadStack(self, data, keep, uid=None, main=None, ha=None, mutable=None):
'''
Creates stack and local estate from data with
local estate.uid = uid
@ -144,7 +140,6 @@ class BasicTestCase(unittest.TestCase):
returns stack
'''
stack = stacking.RoadStack(store=self.store,
name=data['name'],
keep=keep,
@ -166,8 +161,8 @@ class BasicTestCase(unittest.TestCase):
console.terse("\nJoin Transaction **************\n")
if not initiator.remotes:
remote = initiator.addRemote(estating.RemoteEstate(stack=initiator,
fuid=0, # vacuous join
sid=0, # always 0 for join
fuid=0, # vacuous join
sid=0, # always 0 for join
ha=correspondent.local.ha))
deid = remote.uid
initiator.join(uid=deid)
@ -181,7 +176,7 @@ class BasicTestCase(unittest.TestCase):
other.allow()
self.service(main, other, duration=duration)
def message(self, main, other, mains, others, duration=2.0):
def message(self, main, other, mains, others, duration=2.0):
'''
Utility to send messages both ways
'''
@ -218,7 +213,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=False)
mainData = self.createRoadData(cachedirpath=opts['cachedir'],
role=opts['id'],
kind=opts['__role'] )
kind=opts['__role'])
mainKeep = salting.SaltKeep(opts=opts,
basedirpath=mainData['basedirpath'],
stackname=mainData['name'])
@ -228,7 +223,7 @@ class BasicTestCase(unittest.TestCase):
main = self.createRoadStack(data=mainData,
main=True,
ha=None, #default ha is ("", raeting.RAET_PORT)
ha=None, # default ha is ("", raeting.RAET_PORT)
keep=mainKeep)
console.terse("{0}\nkeep dirpath = {1}\n".format(
@ -341,7 +336,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=False)
otherData = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
otherKeep = salting.SaltKeep(opts=opts,
basedirpath=otherData['basedirpath'],
stackname=otherData['name'])
@ -468,7 +463,7 @@ class BasicTestCase(unittest.TestCase):
main = self.createRoadStack(data=mainData,
main=True,
ha=None, #default ha is ("", raeting.RAET_PORT)
ha=None, # default ha is ("", raeting.RAET_PORT)
keep=mainKeep)
console.terse("{0}\nkeep dirpath = {1}\n".format(
@ -579,7 +574,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=True)
otherData = self.createRoadData(role='other',
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
otherKeep = salting.SaltKeep(opts=opts,
basedirpath=otherData['basedirpath'],
stackname=otherData['name'])
@ -596,7 +591,7 @@ class BasicTestCase(unittest.TestCase):
other.name, other.keep.dirpath))
self.assertTrue(other.keep.dirpath.endswith(os.path.join('other', 'raet', 'other_minion')))
self.assertEqual(other.ha, ("0.0.0.0", raeting.RAET_TEST_PORT))
self.assertIs(other.keep.auto,raeting.AutoMode.always.value)
self.assertIs(other.keep.auto, raeting.AutoMode.always.value)
self.assertDictEqual(other.keep.loadLocalData(),
{
@ -696,7 +691,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=True)
mainData = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
mainKeep = salting.SaltKeep(opts=opts,
basedirpath=mainData['basedirpath'],
stackname=mainData['name'])
@ -706,14 +701,14 @@ class BasicTestCase(unittest.TestCase):
main = self.createRoadStack(data=mainData,
main=True,
ha=None, #default ha is ("", raeting.RAET_PORT)
ha=None, # default ha is ("", raeting.RAET_PORT)
keep=mainKeep)
console.terse("{0}\nkeep dirpath = {1}\n".format(
main.name, main.keep.dirpath))
self.assertTrue(main.keep.dirpath.endswith(os.path.join('main', 'raet', 'main_master')))
self.assertTrue(main.ha, ("0.0.0.0", raeting.RAET_PORT))
self.assertIs(main.keep.auto, raeting.AutoMode.once.value)
self.assertIs(main.keep.auto, raeting.AutoMode.once.value)
self.assertDictEqual(main.keep.loadLocalData(), {
'name': mainData['name'],
'uid': 1,
@ -819,7 +814,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=True)
otherData = self.createRoadData(role='other',
kind=kinds.APPL_KIND_NAMES[kinds.applKinds.minion],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
otherKeep = salting.SaltKeep(opts=opts,
basedirpath=otherData['basedirpath'],
stackname=otherData['name'])
@ -947,7 +942,7 @@ class BasicTestCase(unittest.TestCase):
main = self.createRoadStack(data=mainData,
main=True,
ha=None, #default ha is ("", raeting.RAET_PORT)
ha=None, # default ha is ("", raeting.RAET_PORT)
keep=mainKeep)
console.terse("{0}\nkeep dirpath = {1}\n".format(
@ -982,7 +977,7 @@ class BasicTestCase(unittest.TestCase):
role=data1['role'],
verkey=data1['verhex'],
pubkey=data1['pubhex'],
) )
))
data2 = self.createRoadData(role='primary',
kind=kinds.APPL_KIND_NAMES[kinds.applKinds.caller],
@ -995,7 +990,7 @@ class BasicTestCase(unittest.TestCase):
role=data2['role'],
verkey=data2['verhex'],
pubkey=data2['pubhex'],
) )
))
main.dumpRemotes()
@ -1095,7 +1090,7 @@ class BasicTestCase(unittest.TestCase):
main = self.createRoadStack(data=mainData,
main=True,
ha=None, #default ha is ("", raeting.RAET_PORT)
ha=None, # default ha is ("", raeting.RAET_PORT)
keep=mainKeep)
console.terse("{0}\nkeep dirpath = {1}\n".format(
@ -1129,7 +1124,7 @@ class BasicTestCase(unittest.TestCase):
ha=('127.0.0.1', 7532),
role=data1['role'],
verkey=data1['verhex'],
pubkey=data1['pubhex'],) )
pubkey=data1['pubhex'],))
data2 = self.createRoadData(role='primary',
kind='syndic',
@ -1140,9 +1135,9 @@ class BasicTestCase(unittest.TestCase):
ha=('127.0.0.1', 7533),
role=data2['role'],
verkey=data2['verhex'],
pubkey=data2['pubhex'],) )
pubkey=data2['pubhex'],))
main.dumpRemotes() # second one keys will clobber first one keys
main.dumpRemotes() # second one keys will clobber first one keys
self.assertDictEqual(main.keep.loadAllRemoteData(),
{
@ -1239,7 +1234,7 @@ class BasicTestCase(unittest.TestCase):
main = self.createRoadStack(data=mainData,
main=True,
ha=None, #default ha is ("", raeting.RAET_PORT)
ha=None, # default ha is ("", raeting.RAET_PORT)
keep=mainKeep)
console.terse("{0}\nkeep dirpath = {1}\n".format(
@ -1275,7 +1270,7 @@ class BasicTestCase(unittest.TestCase):
role=data1['role'],
verkey=data1['verhex'],
pubkey=data1['pubhex'],
) )
))
data2 = self.createRoadData(role='primary',
kind='syndic',
@ -1288,7 +1283,7 @@ class BasicTestCase(unittest.TestCase):
role=data2['role'],
verkey=data2['verhex'],
pubkey=data2['pubhex'],
) )
))
main.dumpRemotes()
@ -1365,7 +1360,6 @@ class BasicTestCase(unittest.TestCase):
self.assertEqual(remote.pubber.keyhex, data1['pubhex'])
self.assertEqual(remote.verfer.keyhex, data1['verhex'])
main.server.close()
def testBootstrapNever(self):
@ -1381,7 +1375,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=False)
mainData = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
mainKeep = salting.SaltKeep(opts=opts,
basedirpath=mainData['basedirpath'],
stackname=mainData['name'])
@ -1391,7 +1385,7 @@ class BasicTestCase(unittest.TestCase):
main = self.createRoadStack(data=mainData,
main=True,
ha=None, #default ha is ("", raeting.RAET_PORT)
ha=None, # default ha is ("", raeting.RAET_PORT)
keep=mainKeep)
console.terse("{0}\nkeep dirpath = {1}\n".format(
@ -1422,7 +1416,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=True)
otherData = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
otherKeep = salting.SaltKeep(opts=opts,
basedirpath=otherData['basedirpath'],
stackname=otherData['name'])
@ -1439,7 +1433,7 @@ class BasicTestCase(unittest.TestCase):
other.name, other.keep.dirpath))
self.assertTrue(other.keep.dirpath.endswith(os.path.join('other', 'raet', 'other_minion')))
self.assertEqual(other.ha, ("0.0.0.0", raeting.RAET_TEST_PORT))
self.assertIs(other.keep.auto, raeting.AutoMode.once.value)
self.assertIs(other.keep.auto, raeting.AutoMode.once.value)
self.assertDictEqual(other.keep.loadLocalData(),
{
'name': otherData['name'],
@ -1458,7 +1452,7 @@ class BasicTestCase(unittest.TestCase):
})
self.join(other, main)
self.assertEqual(len(main.transactions), 1) # pending
self.assertEqual(len(main.transactions), 1) # pending
main.keep.acceptRemote(main.nameRemotes[other.local.name])
self.service(main, other, duration=1.0)
@ -1482,7 +1476,6 @@ class BasicTestCase(unittest.TestCase):
"{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
self.assertTrue(os.path.exists(path))
# now delete a key and see if road keep file is also deleted
main.keep.saltRaetKey.delete_key(match=other.local.role)
remote = main.remotes[2]
@ -1507,7 +1500,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=True)
mainData = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
mainKeep = salting.SaltKeep(opts=opts,
basedirpath=mainData['basedirpath'],
stackname=mainData['name'])
@ -1517,7 +1510,7 @@ class BasicTestCase(unittest.TestCase):
main = self.createRoadStack(data=mainData,
main=True,
ha=None, #default ha is ("", raeting.RAET_PORT)
ha=None, # default ha is ("", raeting.RAET_PORT)
keep=mainKeep)
console.terse("{0}\nkeep dirpath = {1}\n".format(
@ -1548,7 +1541,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=True)
otherData = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
otherKeep = salting.SaltKeep(opts=opts,
basedirpath=otherData['basedirpath'],
stackname=otherData['name'])
@ -1604,7 +1597,6 @@ class BasicTestCase(unittest.TestCase):
"{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
self.assertTrue(os.path.exists(path))
# now delete a key and see if road keep file is also deleted
main.keep.saltRaetKey.delete_key(match=other.local.role)
remote = main.remotes[2]
@ -1629,7 +1621,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=True)
mainData = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
mainKeep = salting.SaltKeep(opts=opts,
basedirpath=mainData['basedirpath'],
stackname=mainData['name'])
@ -1639,7 +1631,7 @@ class BasicTestCase(unittest.TestCase):
main = self.createRoadStack(data=mainData,
main=True,
ha=None, #default ha is ("", raeting.RAET_PORT)
ha=None, # default ha is ("", raeting.RAET_PORT)
keep=mainKeep)
console.terse("{0}\nkeep dirpath = {1}\n".format(
@ -1670,7 +1662,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=True)
otherData = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
otherKeep = salting.SaltKeep(opts=opts,
basedirpath=otherData['basedirpath'],
stackname=otherData['name'])
@ -1726,7 +1718,6 @@ class BasicTestCase(unittest.TestCase):
"{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
self.assertTrue(os.path.exists(path))
# now delete a key and see if road keep file is also deleted
main.keep.saltRaetKey.delete_key(match=other.local.role)
remote = main.remotes[2]
@ -1751,7 +1742,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=False)
mainData = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
mainKeep = salting.SaltKeep(opts=opts,
basedirpath=mainData['basedirpath'],
stackname=mainData['name'])
@ -1761,7 +1752,7 @@ class BasicTestCase(unittest.TestCase):
main = self.createRoadStack(data=mainData,
main=True,
ha=None, #default ha is ("", raeting.RAET_PORT)
ha=None, # default ha is ("", raeting.RAET_PORT)
keep=mainKeep)
console.terse("{0}\nkeep dirpath = {1}\n".format(
@ -1792,7 +1783,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=True)
other1Data = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
other1Keep = salting.SaltKeep(opts=opts,
basedirpath=other1Data['basedirpath'],
stackname=other1Data['name'])
@ -1828,7 +1819,7 @@ class BasicTestCase(unittest.TestCase):
})
self.join(other1, main)
self.assertEqual(len(main.transactions), 1) # pending
self.assertEqual(len(main.transactions), 1) # pending
main.keep.acceptRemote(main.nameRemotes[other1.local.name])
self.service(main, other1, duration=1.0)
@ -1860,7 +1851,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=True)
other2Data = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
other2Keep = salting.SaltKeep(opts=opts,
basedirpath=other2Data['basedirpath'],
stackname=other2Data['name'])
@ -1897,7 +1888,7 @@ class BasicTestCase(unittest.TestCase):
# should not join since role same but keys different
self.join(other2, main)
self.assertEqual(len(main.transactions), 0) # rejected since not same keys
self.assertEqual(len(main.transactions), 0) # rejected since not same keys
self.assertEqual(len(other2.remotes), 0)
self.assertEqual(len(main.remotes), 1)
#main.removeRemote(main.nameRemotes[other2.local.name], clear=True)
@ -1918,7 +1909,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=True)
other2Data = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
other2Data['sighex'] = other1Data['sighex']
other2Data['prihex'] = other1Data['prihex']
other2Keep = salting.SaltKeep(opts=opts,
@ -2006,7 +1997,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=True)
mainData = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
mainKeep = salting.SaltKeep(opts=opts,
basedirpath=mainData['basedirpath'],
stackname=mainData['name'])
@ -2016,7 +2007,7 @@ class BasicTestCase(unittest.TestCase):
main = self.createRoadStack(data=mainData,
main=True,
ha=None, #default ha is ("", raeting.RAET_PORT)
ha=None, # default ha is ("", raeting.RAET_PORT)
keep=mainKeep)
console.terse("{0}\nkeep dirpath = {1}\n".format(
@ -2047,7 +2038,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=True)
other1Data = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
other1Keep = salting.SaltKeep(opts=opts,
basedirpath=other1Data['basedirpath'],
stackname=other1Data['name'])
@ -2111,7 +2102,7 @@ class BasicTestCase(unittest.TestCase):
autoAccept=True)
other2Data = self.createRoadData(role=opts['id'],
kind=opts['__role'],
cachedirpath=opts['cachedir'] )
cachedirpath=opts['cachedir'])
other2Data['sighex'] = other1Data['sighex']
other2Data['prihex'] = other1Data['prihex']
@ -2172,7 +2163,6 @@ class BasicTestCase(unittest.TestCase):
"{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
self.assertTrue(os.path.exists(path))
# now delete a key and see if both road keep file are also deleted
main.keep.saltRaetKey.delete_key(match=other1.local.role)
remote = main.remotes[2]
@ -2188,6 +2178,7 @@ class BasicTestCase(unittest.TestCase):
stack.server.close()
stack.clearAllKeeps()
def runOne(test):
'''
Unittest Runner
@ -2196,11 +2187,12 @@ def runOne(test):
suite = unittest.TestSuite([test])
unittest.TextTestRunner(verbosity=2).run(suite)
def runSome():
'''
Unittest runner
'''
tests = []
tests = []
names = ['testBasic',
'testBasicOpen',
'testBasicAuto',
@ -2214,11 +2206,12 @@ def runSome():
'testBootstrapRoleAuto',
]
tests.extend(map(BasicTestCase, names))
tests.extend(list(map(BasicTestCase, names)))
suite = unittest.TestSuite(tests)
unittest.TextTestRunner(verbosity=2).run(suite)
def runAll():
'''
Unittest runner
@ -2232,8 +2225,8 @@ if __name__ == '__main__' and __package__ is None:
#console.reinit(verbosity=console.Wordage.concise)
#runAll() #run all unittests
#runAll() # run all unittests
runSome()#only run some
runSome() # only run some
#runOne('testBootstrapRoleAuto')

View File

@ -1,15 +1,16 @@
# -*- coding: utf-8 -*-
"""
'''
Raet Ioflo Behavior Unittests
"""
# pylint: skip-file
# pylint: disable=C0103
'''
from __future__ import absolute_import, print_function, unicode_literals
import sys
from salt.ext.six.moves import map
# pylint: disable=blacklisted-import
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# pylint: enable=blacklisted-import
import time
from ioflo.base.consoling import getConsole
@ -20,14 +21,9 @@ from ioflo.test import testing
from raet.abiding import ns2u
from raet.lane.stacking import LaneStack
from raet.road.stacking import RoadStack
from raet.stacking import Stack
from salt.utils.event import tagify
# Import Ioflo Deeds
from salt.daemons.flo import core
from salt.daemons.test.plan import actors
def setUpModule():
console.reinit(verbosity=console.Wordage.concise)
@ -38,29 +34,27 @@ def tearDownModule():
class StatsEventerTestCase(testing.FrameIofloTestCase):
"""
'''
Test case for Salt Raet Stats Eventer Master and Minion deeds
"""
'''
def setUp(self):
"""
'''
Call super if override so House Framer and Frame are setup correctly
"""
'''
super(StatsEventerTestCase, self).setUp()
def tearDown(self):
"""
'''
Call super if override so House Framer and Frame are torn down correctly
"""
'''
super(StatsEventerTestCase, self).tearDown()
def testMasterContextSetup(self):
"""
'''
Test the context setup procedure used in all the consequence tests works as expected
This test intended to avoid some checks in other tests
"""
'''
console.terse("{0}\n".format(self.testMasterContextSetup.__doc__))
act = self.addEnterDeed("TestOptsSetupMaster")
@ -105,9 +99,9 @@ class StatsEventerTestCase(testing.FrameIofloTestCase):
act.actor.road_stack.value.server.close()
def testMasterRoadStats(self):
"""
'''
Test Master Road Stats request (A1)
"""
'''
console.terse("{0}\n".format(self.testMasterRoadStats.__doc__))
# Bootstrap
@ -160,9 +154,9 @@ class StatsEventerTestCase(testing.FrameIofloTestCase):
time.sleep(0.1)
def testMasterLaneStats(self):
"""
'''
Test Master Road Stats request (A2)
"""
'''
console.terse("{0}\n".format(self.testMasterLaneStats.__doc__))
# Bootstrap
@ -214,9 +208,9 @@ class StatsEventerTestCase(testing.FrameIofloTestCase):
testStack.value.server.close()
def testMasterStatsWrongMissingTag(self):
"""
'''
Test Master Stats requests with unknown and missing tag (A3, A4)
"""
'''
console.terse("{0}\n".format(self.testMasterStatsWrongMissingTag.__doc__))
# Bootstrap
@ -268,9 +262,9 @@ class StatsEventerTestCase(testing.FrameIofloTestCase):
testStack.value.server.close()
def testMasterStatsUnknownRemote(self):
"""
'''
Test Master Stats request with unknown remote (B1)
"""
'''
console.terse("{0}\n".format(self.testMasterStatsUnknownRemote.__doc__))
# Bootstrap
@ -318,9 +312,9 @@ class StatsEventerTestCase(testing.FrameIofloTestCase):
testStack.value.server.close()
def testMasterStatsNoRequest(self):
"""
'''
Test Master Stats no requests (nothing to do) (B2)
"""
'''
console.terse("{0}\n".format(self.testMasterStatsNoRequest.__doc__))
# Bootstrap
@ -364,10 +358,10 @@ class StatsEventerTestCase(testing.FrameIofloTestCase):
testStack.value.server.close()
def testMinionContextSetup(self):
"""
'''
Test the context setup procedure used in all the consequence tests works as expected
This test intended to avoid some checks in other tests
"""
'''
console.terse("{0}\n".format(self.testMinionContextSetup.__doc__))
act = self.addEnterDeed("TestOptsSetupMinion")
@ -412,9 +406,9 @@ class StatsEventerTestCase(testing.FrameIofloTestCase):
act.actor.road_stack.value.server.close()
def testMinionRoadStats(self):
"""
'''
Test Minion Road Stats request (A1)
"""
'''
console.terse("{0}\n".format(self.testMinionRoadStats.__doc__))
# Bootstrap
@ -469,9 +463,9 @@ class StatsEventerTestCase(testing.FrameIofloTestCase):
testStack.value.server.close()
def testMinionLaneStats(self):
"""
'''
Test Minion Road Stats request (A2)
"""
'''
console.terse("{0}\n".format(self.testMinionLaneStats.__doc__))
# Bootstrap
@ -526,9 +520,9 @@ class StatsEventerTestCase(testing.FrameIofloTestCase):
testStack.value.server.close()
def testMinionStatsWrongMissingTag(self):
"""
'''
Test Minion Stats requests with unknown and missing tag (A3, A4)
"""
'''
console.terse("{0}\n".format(self.testMinionStatsWrongMissingTag.__doc__))
# Bootstrap
@ -582,9 +576,9 @@ class StatsEventerTestCase(testing.FrameIofloTestCase):
testStack.value.server.close()
def testMinionStatsUnknownRemote(self):
"""
'''
Test Minion Stats request with unknown remote (B1)
"""
'''
console.terse("{0}\n".format(self.testMinionStatsUnknownRemote.__doc__))
# Bootstrap
@ -633,9 +627,9 @@ class StatsEventerTestCase(testing.FrameIofloTestCase):
testStack.value.server.close()
def testMinionStatsNoRequest(self):
"""
'''
Test Minion Stats no requests (nothing to do) (B2)
"""
'''
console.terse("{0}\n".format(self.testMinionStatsNoRequest.__doc__))
# Bootstrap
@ -679,6 +673,7 @@ class StatsEventerTestCase(testing.FrameIofloTestCase):
if testStack:
testStack.value.server.close()
def runOne(test):
'''
Unittest Runner
@ -689,7 +684,9 @@ def runOne(test):
def runSome():
""" Unittest runner """
'''
Unittest runner
'''
tests = []
names = [
'testMasterContextSetup',
@ -705,13 +702,15 @@ def runSome():
'testMinionStatsUnknownRemote',
'testMinionStatsNoRequest',
]
tests.extend(map(StatsEventerTestCase, names))
tests.extend(list(map(StatsEventerTestCase, names)))
suite = unittest.TestSuite(tests)
unittest.TextTestRunner(verbosity=2).run(suite)
def runAll():
""" Unittest runner """
'''
Unittest runner
'''
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(StatsEventerTestCase))
unittest.TextTestRunner(verbosity=2).run(suite)
@ -723,6 +722,6 @@ if __name__ == '__main__' and __package__ is None:
#runAll() # run all unittests
runSome() #only run some
runSome() # only run some
#runOne('testMasterLaneStats')

View File

@ -4,7 +4,7 @@ Initialize the engines system. This plugin system allows for
complex services to be encapsulated within the salt plugin environment
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import multiprocessing
import logging
@ -50,7 +50,7 @@ def start_engines(opts, proc_mgr, proxy=None):
if fun in engines:
start_func = engines[fun]
name = '{0}.Engine({1})'.format(__name__, start_func.__module__)
log.info('Starting Engine {0}'.format(name))
log.info('Starting Engine %s', name)
proc_mgr.add_process(
Engine,
args=(
@ -127,4 +127,4 @@ class Engine(SignalHandlingMultiprocessingProcess):
try:
self.engine[self.fun](**kwargs)
except Exception as exc:
log.critical('Engine {0} could not be started! Error: {1}'.format(self.engine, exc))
log.critical('Engine %s could not be started! Error: %s', self.engine, exc)

View File

@ -5,7 +5,7 @@ Send events from Docker events
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import traceback

View File

@ -35,7 +35,7 @@ keys make the engine interactive.
wait_time: 1
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import os
@ -52,6 +52,7 @@ import salt.utils.event
import salt.utils.files
import salt.utils.http
import salt.utils.json
import salt.utils.stringutils
import salt.runner
import salt.client
import salt.loader
@ -70,7 +71,9 @@ _DEFAULT_MAX_ROOMS = 1000
def _publish_file(token, room, filepath, message='', outputter=None, api_url=None):
""" Send file to a HipChat room via API version 2
'''
Send file to a HipChat room via API version 2
Parameters
----------
token : str
@ -83,7 +86,7 @@ def _publish_file(token, room, filepath, message='', outputter=None, api_url=Non
Message to send to room
api_url: str, optional
Hipchat API URL to use, defaults to http://api.hipchat.com
"""
'''
if not os.path.isfile(filepath):
raise ValueError("File '{0}' does not exist".format(filepath))
@ -97,7 +100,7 @@ def _publish_file(token, room, filepath, message='', outputter=None, api_url=Non
# future lint: disable=blacklisted-function
with salt.utils.files.fopen(filepath, 'rb') as rfh:
payload = str("""\
payload = str('''\
--boundary123456
Content-Type: application/json; charset=UTF-8
Content-Disposition: attachment; name="metadata"
@ -110,7 +113,7 @@ Content-Disposition: attachment; name="file"; filename="{1}"
{2}
--boundary123456--\
""").format(msg,
''').format(msg,
os.path.basename(salt.utils.stringutils.to_str(filepath)),
salt.utils.stringutils.to_str(rfh.read()))
# future lint: enable=blacklisted-function
@ -313,7 +316,7 @@ def start(token,
if a_room['name'] == room:
target_room = a_room
if not target_room:
log.debug("Unable to connect to room {0}".format(room))
log.debug("Unable to connect to room %s", room)
# wait for a bit as to not burn through api calls
time.sleep(30)
raise UserWarning("Unable to connect to room {0}".format(room))

View File

@ -29,7 +29,7 @@ them onto a logstash endpoint via HTTP requests.
- bgp.config
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import python lib
import fnmatch

View File

@ -54,7 +54,7 @@ Example of usage
[DEBUG ] Sending event: tag = salt/engines/ircbot/test/tag/ircbot; data = {'_stamp': '2016-11-28T14:34:16.633623', 'data': [u'irc', u'is', u'usefull']}
'''
from __future__ import absolute_import, print_function
from __future__ import absolute_import, print_function, unicode_literals
# Import python libraries
import base64

View File

@ -83,11 +83,11 @@ Below is a sample syslog event which is received from the junos device:
The source for parsing the syslog messages is taken from:
https://gist.github.com/leandrosilva/3651640#file-xlog-py
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import re
from time import strftime
import logging
import time
try:
from twisted.internet.protocol import DatagramProtocol
@ -178,7 +178,7 @@ class _Parser(object):
payload["priority"] = int(parsed[0])
payload["severity"] = payload["priority"] & 0x07
payload["facility"] = payload["priority"] >> 3
payload["timestamp"] = strftime("%Y-%m-%d %H:%M:%S")
payload["timestamp"] = time.strftime("%Y-%m-%d %H:%M:%S")
payload["hostname"] = parsed[4]
payload["daemon"] = 'unknown'
payload["message"] = parsed[5]
@ -190,7 +190,7 @@ class _Parser(object):
payload["priority"] = int(parsed[0])
payload["severity"] = payload["priority"] & 0x07
payload["facility"] = payload["priority"] >> 3
payload["timestamp"] = strftime("%Y-%m-%d %H:%M:%S")
payload["timestamp"] = time.strftime("%Y-%m-%d %H:%M:%S")
payload["hostname"] = parsed[4]
payload["daemon"] = parsed[5]
payload["message"] = parsed[6]
@ -205,7 +205,7 @@ class _Parser(object):
payload["priority"] = int(parsed[0])
payload["severity"] = payload["priority"] & 0x07
payload["facility"] = payload["priority"] >> 3
payload["timestamp"] = strftime("%Y-%m-%d %H:%M:%S")
payload["timestamp"] = time.strftime("%Y-%m-%d %H:%M:%S")
payload["hostname"] = parsed[4]
payload["daemon"] = parsed[5]
payload["pid"] = parsed[6]
@ -223,7 +223,7 @@ class _Parser(object):
payload["priority"] = int(parsed[1])
payload["severity"] = payload["priority"] & 0x07
payload["facility"] = payload["priority"] >> 3
payload["timestamp"] = strftime("%Y-%m-%d %H:%M:%S")
payload["timestamp"] = time.strftime("%Y-%m-%d %H:%M:%S")
payload["hostname"] = parsed[5]
payload["daemon"] = parsed[6]
payload["pid"] = parsed[7]
@ -302,19 +302,20 @@ class _SyslogServerFactory(DatagramProtocol):
data = self.obj.parse(data)
data['hostip'] = host
log.debug(
'Junos Syslog - received {0} from {1}, \
sent from port {2}'.format(data, host, port))
'Junos Syslog - received %s from %s, sent from port %s',
data, host, port
)
send_this_event = True
for key in options:
if key in data:
if isinstance(options[key], (six.string_types, int)):
if str(options[key]) != str(data[key]):
if six.text_type(options[key]) != six.text_type(data[key]):
send_this_event = False
break
elif isinstance(options[key], list):
for opt in options[key]:
if str(opt) == str(data[key]):
if six.text_type(opt) == six.text_type(data[key]):
break
else:
send_this_event = False
@ -332,10 +333,11 @@ class _SyslogServerFactory(DatagramProtocol):
topic = 'jnpr/syslog'
for i in range(2, len(self.title)):
topic += '/' + str(data[self.title[i]])
topic += '/' + six.text_type(data[self.title[i]])
log.debug(
'Junos Syslog - sending this event on the bus: \
{0} from {1}'.format(data, host))
'Junos Syslog - sending this event on the bus: %s from %s',
data, host
)
result = {'send': True, 'data': data, 'topic': topic}
return result
else:
@ -386,6 +388,6 @@ class _SyslogServerFactory(DatagramProtocol):
def start(port=516, **kwargs):
log.info('Starting junos syslog engine (port {0})'.format(port))
log.info('Starting junos syslog engine (port %s)', port)
reactor.listenUDP(port, _SyslogServerFactory(kwargs))
reactor.run()

View File

@ -21,7 +21,7 @@ them onto a logstash endpoint.
'''
# Import python libraries
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs

View File

@ -167,7 +167,7 @@ the variable name ``openconfig_structure``. Inside the Jinja template, the user
can process the object from ``openconfig_structure`` and define the bussiness
logic as required.
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
# Import python stdlib
import logging
@ -233,7 +233,7 @@ def _get_transport_recv(name='zmq',
port=49017,
**kwargs):
if name not in TRANSPORT_FUN_MAP:
log.error('Invalid transport: {0}. Falling back to ZeroMQ.'.format(name))
log.error('Invalid transport: %s. Falling back to ZeroMQ.', name)
name = 'zmq'
return TRANSPORT_FUN_MAP[name](address, port, **kwargs)
@ -342,7 +342,7 @@ def start(transport='zmq',
whitelist=os_whitelist,
blacklist=os_blacklist)
if not valid_os:
log.info('Ignoring NOS {} as per whitelist/blacklist'.format(event_os))
log.info('Ignoring NOS %s as per whitelist/blacklist', event_os)
continue
event_error = dict_object['error']
if error_blacklist or error_whitelist:
@ -351,7 +351,7 @@ def start(transport='zmq',
whitelist=error_whitelist,
blacklist=error_blacklist)
if not valid_error:
log.info('Ignoring error {} as per whitelist/blacklist'.format(event_error))
log.info('Ignoring error %s as per whitelist/blacklist', event_error)
continue
event_host = dict_object.get('host') or dict_object.get('ip')
if host_blacklist or host_whitelist:
@ -360,7 +360,7 @@ def start(transport='zmq',
whitelist=host_whitelist,
blacklist=host_blacklist)
if not valid_host:
log.info('Ignoring messages from {} as per whitelist/blacklist'.format(event_host))
log.info('Ignoring messages from %s as per whitelist/blacklist', event_host)
continue
tag = 'napalm/syslog/{os}/{error}/{host}'.format(
os=event_os,
@ -371,7 +371,7 @@ def start(transport='zmq',
log.warning('Missing keys from the napalm-logs object:', exc_info=True)
log.warning(dict_object)
continue # jump to the next object in the queue
log.debug('Sending event {0}'.format(tag))
log.debug('Sending event %s', tag)
log.debug(raw_object)
if master:
event.get_master_event(__opts__,

Some files were not shown because too many files have changed in this diff Show More