From ad168ed2ac2b1409d92d2c7f5ae3abab0d5c6557 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Thu, 8 Jun 2017 15:30:44 -0500 Subject: [PATCH 001/639] nfs3: create add_exports() function --- salt/modules/nfs3.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/salt/modules/nfs3.py b/salt/modules/nfs3.py index f4f7d227ee..855c853709 100644 --- a/salt/modules/nfs3.py +++ b/salt/modules/nfs3.py @@ -71,6 +71,22 @@ def del_export(exports='/etc/exports', path=None): _write_exports(exports, edict) return edict +def add_export(exports='/etc/exports', path=None, hosts=None, options=['ro']): + ''' + Add an export + + CLI Example: + + .. code-block:: bash + + salt '*' nfs3.add_export path='/srv/test' hosts=['127.0.0.1'] options=['rw'] + ''' + edict = list_exports(exports) + new = [{'hosts': hosts, 'options': options}] + edict[path] = new + _write_exports(exports, edict) + + return new def _write_exports(exports, edict): ''' From 13788d49a6bb4cd818e1346e00406c4c5b3ae989 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Thu, 8 Jun 2017 15:31:16 -0500 Subject: [PATCH 002/639] nfs3: add reload_exports() function This should be all the execution modules needed to write an nfs exports State. --- salt/modules/nfs3.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/salt/modules/nfs3.py b/salt/modules/nfs3.py index 855c853709..5bfce511d7 100644 --- a/salt/modules/nfs3.py +++ b/salt/modules/nfs3.py @@ -109,3 +109,26 @@ def _write_exports(exports, edict): options = ','.join(perms['options']) line += ' {0}({1})'.format(hosts, options) efh.write('{0}\n'.format(line)) + +# exportfs doesn't take a file path argument +# so we don't either +def reload_exports(): + ''' + Trigger a reload of the exports file to apply changes + + CLI Example: + + .. 
code-block:: bash + + salt '*' nfs3.reload_exports + ''' + ret = {} + + command = 'exportfs -r' + + output = __salt__['cmd.run_all'](command) + ret['stdout'] = output['stdout'] + ret['stderr'] = output['stderr'] + ret['result'] = not output['retcode'] + + return ret From dbe436cbc832ee61950a855701679cc214b09960 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Mon, 12 Jun 2017 13:51:12 -0500 Subject: [PATCH 003/639] nfs_export: first draft of arguments --- salt/states/nfs_export.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 salt/states/nfs_export.py diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py new file mode 100644 index 0000000000..4ed3b5fd2c --- /dev/null +++ b/salt/states/nfs_export.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +''' +Management of NFS exports +=============================================== + +.. code-block:: yaml + +To ensure an NFS export exists: + + add_export: + nfs_export.present: + - name: '/srv/nfs' + - hosts: + - '10.0.2.0/24' + - options: + - 'rw' + +To have different options for different hosts on the same export, define a separate state. + +To ensure an NFS export is absent: + + delete_export: + nfs_export.absent: + - name: '/srv/nfs' + +''' + +#from __future__ import absolute_import From 1daa0333d32d20b392a015d5f06e24ab361b2c03 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Mon, 12 Jun 2017 16:06:25 -0500 Subject: [PATCH 004/639] nfs3: fix bug in list_exports() with similar lines Before, defining multiple exports with the same path, which is valid in /etc/exports, would result in all but the last one being lost. 
--- salt/modules/nfs3.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/modules/nfs3.py b/salt/modules/nfs3.py index 5bfce511d7..2e10a4574b 100644 --- a/salt/modules/nfs3.py +++ b/salt/modules/nfs3.py @@ -40,7 +40,11 @@ def list_exports(exports='/etc/exports'): if line.startswith('#'): continue comps = line.split() - ret[comps[0]] = [] + + # Handle the case where the same path is given twice + if not comps[0] in ret: + ret[comps[0]] = [] + newshares = [] for perm in comps[1:]: if perm.startswith('/'): From 7ff25466cfed3ae80af29433e9034c74e7a3493b Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Mon, 12 Jun 2017 16:20:16 -0500 Subject: [PATCH 005/639] nfs_export: second draft of argument layout --- salt/states/nfs_export.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 4ed3b5fd2c..1d18767b80 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -3,22 +3,30 @@ Management of NFS exports =============================================== -.. code-block:: yaml - To ensure an NFS export exists: +.. code-block:: yaml + add_export: nfs_export.present: - name: '/srv/nfs' - - hosts: - - '10.0.2.0/24' - - options: - - 'rw' + - exports: + - hosts: + - '10.0.2.0/24' + - options: + - 'rw' -To have different options for different hosts on the same export, define a separate state. +This creates the following in /etc/exports: + +.. code-block:: bash + /srv/nfs 10.0.2.0/24(rw) + +Any export of the given path will be modified to match the one specified. To ensure an NFS export is absent: +.. 
code-block:: yaml + delete_export: nfs_export.absent: - name: '/srv/nfs' From c0d38cf8f62431facd3c8da4724421395020bab5 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Mon, 12 Jun 2017 16:32:18 -0500 Subject: [PATCH 006/639] nfs_export: third draft, with simple & complex modes --- salt/states/nfs_export.py | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 1d18767b80..d5c49355e0 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -7,14 +7,32 @@ To ensure an NFS export exists: .. code-block:: yaml - add_export: + add_simple_export: + nfs_export.present: + - name: '/srv/nfs' + - hosts: '10.0.2.0/24' + - options: 'rw' + +For more complex exports with multiple groups of hosts: + +.. code-block:: yaml + + add_complex_export: nfs_export.present: - name: '/srv/nfs' - exports: + # First export, same as simple one above - hosts: - - '10.0.2.0/24' - - options: - - 'rw' + - '10.0.2.0/24' + options: + - 'rw' + # Second export + - hosts: + - '192.168.0.0/24' + - '172.19.0.0/16' + options: + - 'ro' + - 'subtree_check' This creates the following in /etc/exports: From da69e6cb0e2694c5428a32677eb154e4a45b9b85 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Tue, 13 Jun 2017 15:43:05 -0500 Subject: [PATCH 007/639] nfs_export: first draft of absent() --- salt/states/nfs_export.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index d5c49355e0..4656be1447 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -52,3 +52,22 @@ To ensure an NFS export is absent: ''' #from __future__ import absolute_import + +def absent(name, exports='/etc/exports'): + path = name + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': ''} + + old = __salt__['nfs3.list_exports'](exports) + if path in old: + __salt__['nfs3.del_export'](exports, path) + ret['comment'] = 
'Export {0} removed'.format(path) + ret['changes'] = {'path': path} + ret['result'] = True + else: + ret['comment'] = 'Export {0} already absent'.format(path) + ret['result'] = True + + return ret From 1873f8eca4ff96ff987d50e119882bfee363d0b4 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Tue, 13 Jun 2017 16:03:11 -0500 Subject: [PATCH 008/639] nfs_export.absent: return better Changes and support test=True --- salt/states/nfs_export.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 4656be1447..6bee5ff2ba 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -62,9 +62,14 @@ def absent(name, exports='/etc/exports'): old = __salt__['nfs3.list_exports'](exports) if path in old: + if __opts__['test']: + ret['comment'] = 'Export {0} would be removed'.format(path) + ret['result'] = None + return ret + __salt__['nfs3.del_export'](exports, path) ret['comment'] = 'Export {0} removed'.format(path) - ret['changes'] = {'path': path} + ret['changes'][path] = old[path] ret['result'] = True else: ret['comment'] = 'Export {0} already absent'.format(path) From b0bcccb54978c34f4efbaf237d399c773f2d0391 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Tue, 13 Jun 2017 16:06:31 -0500 Subject: [PATCH 009/639] nfs_export.absent: add docstring --- salt/states/nfs_export.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 6bee5ff2ba..2f00a4b527 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- ''' -Management of NFS exports +Management of NFS exports =============================================== To ensure an NFS export exists: @@ -54,6 +54,13 @@ To ensure an NFS export is absent: #from __future__ import absolute_import def absent(name, exports='/etc/exports'): + ''' + Ensure that the named path is not exported + + name + The export path to 
remove + ''' + path = name ret = {'name': name, 'changes': {}, From 460d5d519e0dd491f7f0f4dcb908577072964af3 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Tue, 13 Jun 2017 16:45:57 -0500 Subject: [PATCH 010/639] nfs_export: first draft of docs for present() --- salt/states/nfs_export.py | 71 +++++++++++++++++++++++++++++++++------ 1 file changed, 61 insertions(+), 10 deletions(-) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 2f00a4b527..9ee3e234eb 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -11,25 +11,28 @@ To ensure an NFS export exists: nfs_export.present: - name: '/srv/nfs' - hosts: '10.0.2.0/24' - - options: 'rw' + - options: + - 'rw' -For more complex exports with multiple groups of hosts: +This creates the following in /etc/exports: + +.. code-block:: bash + /srv/nfs 10.0.2.0/24(rw) + +For more complex exports with multiple groups of hosts, use 'clients': .. code-block:: yaml add_complex_export: nfs_export.present: - name: '/srv/nfs' - - exports: + - clients: # First export, same as simple one above - - hosts: - - '10.0.2.0/24' + - hosts: '10.0.2.0/24' options: - 'rw' # Second export - - hosts: - - '192.168.0.0/24' - - '172.19.0.0/16' + - hosts: '*.example.com' options: - 'ro' - 'subtree_check' @@ -37,7 +40,7 @@ For more complex exports with multiple groups of hosts: This creates the following in /etc/exports: .. code-block:: bash - /srv/nfs 10.0.2.0/24(rw) + /srv/nfs 10.0.2.0/24(rw) 192.168.0.0/24,172.19.0.0/16(ro,subtree_check) Any export of the given path will be modified to match the one specified. @@ -51,7 +54,55 @@ To ensure an NFS export is absent: ''' -#from __future__ import absolute_import +def present(name, clients=None, hosts=None, options=None, exports='/etc/exports'): + ''' + Ensure that the named export is present with the given options + + name + The export path to configure + + clients + A list of hosts and the options applied to them. 
+ This option may not be used in combination with + the 'hosts' or 'options' shortcuts. + + ... code-block:: yaml + + - clients: + # First export + - hosts: '10.0.2.0/24' + options: + - 'rw' + # Second export + - hosts: '*.example.com' + options: + - 'ro' + - 'subtree_check' + + hosts + A string matching a number of hosts, for example: + + ... code-block:: yaml + + hosts: '10.0.2.123' + + hosts: '10.0.2.0/24' + + hosts: 'minion1.example.com' + + hosts: '*.example.com' + + options + A list of NFS options, for example: + + ... code-block:: yaml + + options: + - 'rw' + - 'subtree_check' + + ''' + def absent(name, exports='/etc/exports'): ''' From 2736a0d8094175ab5fb067ca753bbb742ffb834d Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 14 Jun 2017 10:12:33 -0500 Subject: [PATCH 011/639] nfs3.add_export(): fix use of list as default argument --- salt/modules/nfs3.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/modules/nfs3.py b/salt/modules/nfs3.py index 2e10a4574b..3538e525b3 100644 --- a/salt/modules/nfs3.py +++ b/salt/modules/nfs3.py @@ -75,7 +75,7 @@ def del_export(exports='/etc/exports', path=None): _write_exports(exports, edict) return edict -def add_export(exports='/etc/exports', path=None, hosts=None, options=['ro']): +def add_export(exports='/etc/exports', path=None, hosts=None, options=None): ''' Add an export @@ -85,6 +85,8 @@ def add_export(exports='/etc/exports', path=None, hosts=None, options=['ro']): salt '*' nfs3.add_export path='/srv/test' hosts=['127.0.0.1'] options=['rw'] ''' + if options == None: + options = [] edict = list_exports(exports) new = [{'hosts': hosts, 'options': options}] edict[path] = new From 3fb4eb1ca817334b12c17c7d31c958b44fc8060b Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 14 Jun 2017 10:58:00 -0500 Subject: [PATCH 012/639] nfs3: bugfix: /etc/exports does not support comma-sep hosts --- salt/modules/nfs3.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git 
a/salt/modules/nfs3.py b/salt/modules/nfs3.py index 3538e525b3..8b3774ba9d 100644 --- a/salt/modules/nfs3.py +++ b/salt/modules/nfs3.py @@ -52,7 +52,10 @@ def list_exports(exports='/etc/exports'): continue permcomps = perm.split('(') permcomps[1] = permcomps[1].replace(')', '') - hosts = permcomps[0].split(',') + hosts = permcomps[0] + if not type(hosts) is str: + # Lists, etc would silently mangle /etc/exports + raise TypeError('hosts argument must be a string') options = permcomps[1].split(',') ret[comps[0]].append({'hosts': hosts, 'options': options}) for share in newshares: @@ -83,10 +86,13 @@ def add_export(exports='/etc/exports', path=None, hosts=None, options=None): .. code-block:: bash - salt '*' nfs3.add_export path='/srv/test' hosts=['127.0.0.1'] options=['rw'] + salt '*' nfs3.add_export path='/srv/test' hosts='127.0.0.1' options=['rw'] ''' if options == None: options = [] + if not type(hosts) is str: + # Lists, etc would silently mangle /etc/exports + raise TypeError('hosts argument must be a string') edict = list_exports(exports) new = [{'hosts': hosts, 'options': options}] edict[path] = new @@ -111,7 +117,7 @@ def _write_exports(exports, edict): for export in edict: line = export for perms in edict[export]: - hosts = ','.join(perms['hosts']) + hosts = perms['hosts'] options = ','.join(perms['options']) line += ' {0}({1})'.format(hosts, options) efh.write('{0}\n'.format(line)) From 0ced12c1a6f4bbfd6a8b81df1f9b2d825de7e165 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 14 Jun 2017 14:52:09 -0500 Subject: [PATCH 013/639] nfs_export: fix docstring syntax and add to index --- doc/ref/states/all/index.rst | 1 + doc/ref/states/all/salt.states.nfs_export.rst | 6 ++++++ salt/states/nfs_export.py | 10 +++++++--- 3 files changed, 14 insertions(+), 3 deletions(-) create mode 100644 doc/ref/states/all/salt.states.nfs_export.rst diff --git a/doc/ref/states/all/index.rst b/doc/ref/states/all/index.rst index 8c02f0d7e3..84a3107ab5 100644 --- 
a/doc/ref/states/all/index.rst +++ b/doc/ref/states/all/index.rst @@ -174,6 +174,7 @@ state modules netusers network netyang + nfs_export nftables npm ntp diff --git a/doc/ref/states/all/salt.states.nfs_export.rst b/doc/ref/states/all/salt.states.nfs_export.rst new file mode 100644 index 0000000000..231992626b --- /dev/null +++ b/doc/ref/states/all/salt.states.nfs_export.rst @@ -0,0 +1,6 @@ +====================== +salt.states.nfs_export +====================== + +.. automodule:: salt.states.nfs_export + :members: \ No newline at end of file diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 9ee3e234eb..a1fc5116b2 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -17,6 +17,7 @@ To ensure an NFS export exists: This creates the following in /etc/exports: .. code-block:: bash + /srv/nfs 10.0.2.0/24(rw) For more complex exports with multiple groups of hosts, use 'clients': @@ -40,6 +41,7 @@ For more complex exports with multiple groups of hosts, use 'clients': This creates the following in /etc/exports: .. code-block:: bash + /srv/nfs 10.0.2.0/24(rw) 192.168.0.0/24,172.19.0.0/16(ro,subtree_check) Any export of the given path will be modified to match the one specified. @@ -66,7 +68,7 @@ def present(name, clients=None, hosts=None, options=None, exports='/etc/exports' This option may not be used in combination with the 'hosts' or 'options' shortcuts. - ... code-block:: yaml + .. code-block:: yaml - clients: # First export @@ -82,7 +84,7 @@ def present(name, clients=None, hosts=None, options=None, exports='/etc/exports' hosts A string matching a number of hosts, for example: - ... code-block:: yaml + .. code-block:: yaml hosts: '10.0.2.123' @@ -92,10 +94,12 @@ def present(name, clients=None, hosts=None, options=None, exports='/etc/exports' hosts: '*.example.com' + hosts: '*' + options A list of NFS options, for example: - ... code-block:: yaml + .. 
code-block:: yaml options: - 'rw' From f752a4b73139116daa388b13f77a19ef0ceacc62 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 14 Jun 2017 16:12:03 -0500 Subject: [PATCH 014/639] nfs_export: add present() --- salt/states/nfs_export.py | 43 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index a1fc5116b2..5252a04e6e 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -106,7 +106,50 @@ def present(name, clients=None, hosts=None, options=None, exports='/etc/exports' - 'subtree_check' ''' + path = name + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': ''} + if not clients: + if not hosts: + ret['result'] = False + ret['comment'] = 'Either \'clients\' or \'hosts\' must be defined' + return ret + # options being None is handled by add_export() + clients = [{'hosts': hosts, 'options': options}] + + old = __salt__['nfs3.list_exports'](exports) + if path in old: + if old[path] == clients: + ret['result'] = True + ret['comment'] = 'Export {0} already configured'.format(path) + return ret + + ret['changes']['new'] = clients + ret['changes']['old'] = old[path] + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'Export {0} would be changed'.format(path) + return ret + + __salt__['nfs3.del_export'](exports, path) + + else: + ret['changes']['old'] = None + ret['changes']['new'] = clients + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'Export {0} would be added'.format(path) + return ret + + for exp in clients: + __salt__['nfs3.add_export'](exports, path, exp['hosts'], exp['options']) + + ret['result'] = True + ret['changes']['new'] = clients + return ret def absent(name, exports='/etc/exports'): ''' From 5e0d5c8dc5e329b92c79ce8a49f2684fd2382941 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 14 Jun 2017 16:13:40 -0500 Subject: [PATCH 015/639] nfs3: allow multiple exports of the same path --- 
salt/modules/nfs3.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/salt/modules/nfs3.py b/salt/modules/nfs3.py index 8b3774ba9d..10469ccf9d 100644 --- a/salt/modules/nfs3.py +++ b/salt/modules/nfs3.py @@ -94,8 +94,10 @@ def add_export(exports='/etc/exports', path=None, hosts=None, options=None): # Lists, etc would silently mangle /etc/exports raise TypeError('hosts argument must be a string') edict = list_exports(exports) - new = [{'hosts': hosts, 'options': options}] - edict[path] = new + if not path in edict: + edict[path] = [] + new = {'hosts': hosts, 'options': options} + edict[path].append(new) _write_exports(exports, edict) return new From cb060793f4e683a7f311a0d9d7a46dc3c8b388e3 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Thu, 15 Jun 2017 12:48:00 -0500 Subject: [PATCH 016/639] nfs3: linting --- salt/modules/nfs3.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/salt/modules/nfs3.py b/salt/modules/nfs3.py index 10469ccf9d..090cd4c87f 100644 --- a/salt/modules/nfs3.py +++ b/salt/modules/nfs3.py @@ -53,7 +53,7 @@ def list_exports(exports='/etc/exports'): permcomps = perm.split('(') permcomps[1] = permcomps[1].replace(')', '') hosts = permcomps[0] - if not type(hosts) is str: + if type(hosts) is not str: # Lists, etc would silently mangle /etc/exports raise TypeError('hosts argument must be a string') options = permcomps[1].split(',') @@ -78,6 +78,7 @@ def del_export(exports='/etc/exports', path=None): _write_exports(exports, edict) return edict + def add_export(exports='/etc/exports', path=None, hosts=None, options=None): ''' Add an export @@ -102,6 +103,7 @@ def add_export(exports='/etc/exports', path=None, hosts=None, options=None): return new + def _write_exports(exports, edict): ''' Write an exports file to disk @@ -124,8 +126,7 @@ def _write_exports(exports, edict): line += ' {0}({1})'.format(hosts, options) efh.write('{0}\n'.format(line)) -# exportfs doesn't take a file path argument -# so we don't 
either + def reload_exports(): ''' Trigger a reload of the exports file to apply changes From adf7911cbbe4ce4de07488297c9e29b261e76061 Mon Sep 17 00:00:00 2001 From: Rico Gloeckner Date: Tue, 20 Jun 2017 08:22:01 +0200 Subject: [PATCH 017/639] adding (new) parameter attrs from modules/file.py:get_managed() to modules/napalm_network.py --- salt/modules/napalm_network.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/modules/napalm_network.py b/salt/modules/napalm_network.py index 5042eee025..e76272efcb 100644 --- a/salt/modules/napalm_network.py +++ b/salt/modules/napalm_network.py @@ -1036,6 +1036,7 @@ def load_template(template_name, template_user='root', template_group='root', template_mode='755', + template_attrs="--------------e----", saltenv=None, template_engine='jinja', skip_verify=False, @@ -1340,6 +1341,7 @@ def load_template(template_name, user=template_user, group=template_group, mode=template_mode, + attrs=template_attrs, template=template_engine, context=template_vars, defaults=defaults, From 9f146d7919e15f7bf3ad3db801bd8d692be38ba4 Mon Sep 17 00:00:00 2001 From: Rico Gloeckner Date: Tue, 27 Jun 2017 16:03:06 +0200 Subject: [PATCH 018/639] per request in PR#41845, propagate new template_attrs up, amend documentation and add versionadded tag --- salt/modules/napalm_network.py | 5 +++++ salt/states/netconfig.py | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/salt/modules/napalm_network.py b/salt/modules/napalm_network.py index e76272efcb..a27657ddf2 100644 --- a/salt/modules/napalm_network.py +++ b/salt/modules/napalm_network.py @@ -1128,6 +1128,11 @@ def load_template(template_name, .. versionadded:: 2016.11.2 + template_attrs: "--------------e----" + attributes of file. (see `man lsattr`) + + .. versionadded: oxygen + saltenv: base Specifies the template environment. This will influence the relative imports inside the templates. 
diff --git a/salt/states/netconfig.py b/salt/states/netconfig.py index 9318e227b4..c232b272b7 100644 --- a/salt/states/netconfig.py +++ b/salt/states/netconfig.py @@ -60,6 +60,7 @@ def _update_config(template_name, template_user='root', template_group='root', template_mode='755', + template_attrs='--------------e----', saltenv=None, template_engine='jinja', skip_verify=False, @@ -83,6 +84,7 @@ def _update_config(template_name, template_user=template_user, template_group=template_group, template_mode=template_mode, + template_attrs=template_attrs, saltenv=saltenv, template_engine=template_engine, skip_verify=skip_verify, @@ -107,6 +109,7 @@ def managed(name, template_user='root', template_group='root', template_mode='755', + template_attrs="--------------e----", saltenv=None, template_engine='jinja', skip_verify=True, @@ -181,6 +184,11 @@ def managed(name, template_user: 755 Permissions of file + template_attrs: "--------------e----" + Attributes of file (see `man lsattr`) + + .. versionadded: oxygen + saltenv: base Specifies the template environment. This will influence the relative imports inside the templates. 
@@ -337,6 +345,7 @@ def managed(name, template_user=template_user, template_group=template_group, template_mode=template_mode, + tempalte_attrs=template_attrs, saltenv=saltenv, template_engine=template_engine, skip_verify=skip_verify, From b2c10ce99ae361564a2f61725cf1b01623b5f094 Mon Sep 17 00:00:00 2001 From: Rico Gloeckner Date: Tue, 27 Jun 2017 16:07:15 +0200 Subject: [PATCH 019/639] another amend for PR#41845: fix slight syntax error in versionadded tag; also fix for (old) copy&paste error --- salt/modules/napalm_network.py | 4 ++-- salt/states/netconfig.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/modules/napalm_network.py b/salt/modules/napalm_network.py index a27657ddf2..63479ae4ac 100644 --- a/salt/modules/napalm_network.py +++ b/salt/modules/napalm_network.py @@ -1123,7 +1123,7 @@ def load_template(template_name, .. versionadded:: 2016.11.2 - template_user: 755 + template_mode: 755 Permissions of file. .. versionadded:: 2016.11.2 @@ -1131,7 +1131,7 @@ def load_template(template_name, template_attrs: "--------------e----" attributes of file. (see `man lsattr`) - .. versionadded: oxygen + .. versionadded:: oxygen saltenv: base Specifies the template environment. diff --git a/salt/states/netconfig.py b/salt/states/netconfig.py index c232b272b7..80fc2d9117 100644 --- a/salt/states/netconfig.py +++ b/salt/states/netconfig.py @@ -181,13 +181,13 @@ def managed(name, template_user: root Group owner of file. - template_user: 755 + template_mode: 755 Permissions of file template_attrs: "--------------e----" Attributes of file (see `man lsattr`) - .. versionadded: oxygen + .. versionadded:: oxygen saltenv: base Specifies the template environment. This will influence the relative imports inside the templates. 
From 508347e834f91e7c73fdbac413e7d2802748d0b5 Mon Sep 17 00:00:00 2001 From: Rico Gloeckner Date: Tue, 27 Jun 2017 16:09:52 +0200 Subject: [PATCH 020/639] amend PR#41845: make quotation marks "" / '' consistent --- salt/modules/napalm_network.py | 2 +- salt/states/netconfig.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/napalm_network.py b/salt/modules/napalm_network.py index 63479ae4ac..f5ee2ae92d 100644 --- a/salt/modules/napalm_network.py +++ b/salt/modules/napalm_network.py @@ -1036,7 +1036,7 @@ def load_template(template_name, template_user='root', template_group='root', template_mode='755', - template_attrs="--------------e----", + template_attrs='--------------e----', saltenv=None, template_engine='jinja', skip_verify=False, diff --git a/salt/states/netconfig.py b/salt/states/netconfig.py index 80fc2d9117..781ad274d1 100644 --- a/salt/states/netconfig.py +++ b/salt/states/netconfig.py @@ -109,7 +109,7 @@ def managed(name, template_user='root', template_group='root', template_mode='755', - template_attrs="--------------e----", + template_attrs='--------------e----', saltenv=None, template_engine='jinja', skip_verify=True, From f3a031077f5b6e5770d0263fef74f053c4a40773 Mon Sep 17 00:00:00 2001 From: Arnold Bechtoldt Date: Tue, 4 Jul 2017 14:47:36 +0200 Subject: [PATCH 021/639] stripping whitespaces/newlines in authorized key file lines --- salt/modules/ssh.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py index afe00a035b..249a9e1fd6 100644 --- a/salt/modules/ssh.py +++ b/salt/modules/ssh.py @@ -151,6 +151,9 @@ def _replace_auth_key( # open the file for both reading AND writing with salt.utils.fopen(full, 'r') as _fh: for line in _fh: + # We don't need any whitespace-only containing lines or arbitrary newlines + line = line.strip() + if line.startswith('#'): # Commented Line lines.append(line) @@ -181,6 +184,9 @@ def _validate_keys(key_file, fingerprint_hash_type): 
try: with salt.utils.fopen(key_file, 'r') as _fh: for line in _fh: + # We don't need any whitespace-only containing lines or arbitrary newlines + line = line.strip() + if line.startswith('#'): # Commented Line continue @@ -570,6 +576,9 @@ def rm_auth_key(user, # and then write out the correct one. Open the file once with salt.utils.fopen(full, 'r') as _fh: for line in _fh: + # We don't need any whitespace-only containing lines or arbitrary newlines + line = line.strip() + if line.startswith('#'): # Commented Line lines.append(line) @@ -777,6 +786,9 @@ def _parse_openssh_output(lines, fingerprint_hash_type=None): and yield dict with keys information, one by one. ''' for line in lines: + # We don't need any whitespace-only containing lines or arbitrary newlines + line = line.strip() + if line.startswith('#'): continue try: From 844e3f65bca1d469ad7a401c4e97c4bb61ffd70d Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 26 Jun 2017 12:32:38 -0600 Subject: [PATCH 022/639] Fix unit tests for Windows --- tests/unit/returners/test_local_cache.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/unit/returners/test_local_cache.py b/tests/unit/returners/test_local_cache.py index 20d5ad1720..14f37640ac 100644 --- a/tests/unit/returners/test_local_cache.py +++ b/tests/unit/returners/test_local_cache.py @@ -72,6 +72,12 @@ class LocalCacheCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin): # Make sure there are no files in the directory before continuing self.assertEqual(jid_file, None) + # Windows needs some time to release the file handles to the new temp + # dir before trying to clean the job cache to avoid a race condition + if salt.utils.is_windows(): + import time + time.sleep(1) + # Call clean_old_jobs function, patching the keep_jobs value with a # very small value to force the call to clean the job. 
with patch.dict(local_cache.__opts__, {'keep_jobs': 0.00000001}): @@ -95,7 +101,10 @@ class LocalCacheCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin): local_cache.clean_old_jobs() # Get the name of the JID directory that was created to test against - jid_dir_name = jid_dir.rpartition('/')[2] + if salt.utils.is_windows(): + jid_dir_name = jid_dir.rpartition('\\')[2] + else: + jid_dir_name = jid_dir.rpartition('/')[2] # Assert the JID directory is still present to be cleaned after keep_jobs interval self.assertEqual([jid_dir_name], os.listdir(TMP_JID_DIR)) From 9b61533b0920cc0bbae65fba3d6c41151399a566 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 5 Jul 2017 12:19:20 -0600 Subject: [PATCH 023/639] Get more accurate currnet time in local_cache --- salt/returners/local_cache.py | 3 ++- tests/unit/returners/test_local_cache.py | 6 ------ 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/salt/returners/local_cache.py b/salt/returners/local_cache.py index bd66d26756..053c3e4563 100644 --- a/salt/returners/local_cache.py +++ b/salt/returners/local_cache.py @@ -391,7 +391,6 @@ def clean_old_jobs(): Clean out the old jobs from the job cache ''' if __opts__['keep_jobs'] != 0: - cur = time.time() jid_root = _job_dir() if not os.path.exists(jid_root): @@ -421,6 +420,7 @@ def clean_old_jobs(): shutil.rmtree(t_path) elif os.path.isfile(jid_file): jid_ctime = os.stat(jid_file).st_ctime + cur = time.time() hours_difference = (cur - jid_ctime) / 3600.0 if hours_difference > __opts__['keep_jobs'] and os.path.exists(t_path): # Remove the entire t_path from the original JID dir @@ -435,6 +435,7 @@ def clean_old_jobs(): # Checking the time again prevents a possible race condition where # t_path JID dirs were created, but not yet populated by a jid file. 
t_path_ctime = os.stat(t_path).st_ctime + cur = time.time() hours_difference = (cur - t_path_ctime) / 3600.0 if hours_difference > __opts__['keep_jobs']: shutil.rmtree(t_path) diff --git a/tests/unit/returners/test_local_cache.py b/tests/unit/returners/test_local_cache.py index 14f37640ac..54b3550925 100644 --- a/tests/unit/returners/test_local_cache.py +++ b/tests/unit/returners/test_local_cache.py @@ -72,12 +72,6 @@ class LocalCacheCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin): # Make sure there are no files in the directory before continuing self.assertEqual(jid_file, None) - # Windows needs some time to release the file handles to the new temp - # dir before trying to clean the job cache to avoid a race condition - if salt.utils.is_windows(): - import time - time.sleep(1) - # Call clean_old_jobs function, patching the keep_jobs value with a # very small value to force the call to clean the job. with patch.dict(local_cache.__opts__, {'keep_jobs': 0.00000001}): From 35b79ecde60bfb54bef9d6fc065cccf78f488f6e Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 5 Jul 2017 12:54:17 -0600 Subject: [PATCH 024/639] Remove `cur` variable, use time.time() in comparison --- salt/returners/local_cache.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/salt/returners/local_cache.py b/salt/returners/local_cache.py index 053c3e4563..f4106c94d5 100644 --- a/salt/returners/local_cache.py +++ b/salt/returners/local_cache.py @@ -420,8 +420,7 @@ def clean_old_jobs(): shutil.rmtree(t_path) elif os.path.isfile(jid_file): jid_ctime = os.stat(jid_file).st_ctime - cur = time.time() - hours_difference = (cur - jid_ctime) / 3600.0 + hours_difference = (time.time()- jid_ctime) / 3600.0 if hours_difference > __opts__['keep_jobs'] and os.path.exists(t_path): # Remove the entire t_path from the original JID dir shutil.rmtree(t_path) @@ -435,8 +434,7 @@ def clean_old_jobs(): # Checking the time again prevents a possible race condition where # t_path JID dirs were 
created, but not yet populated by a jid file. t_path_ctime = os.stat(t_path).st_ctime - cur = time.time() - hours_difference = (cur - t_path_ctime) / 3600.0 + hours_difference = (time.time() - t_path_ctime) / 3600.0 if hours_difference > __opts__['keep_jobs']: shutil.rmtree(t_path) From ed531304e2321186b58fccbab3baad4d6f0a487e Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Tue, 11 Jul 2017 15:05:25 -0500 Subject: [PATCH 025/639] test_nfs3: pass string, not list --- tests/unit/modules/test_nfs3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/modules/test_nfs3.py b/tests/unit/modules/test_nfs3.py index 737d73c0bc..70a7c41d3a 100644 --- a/tests/unit/modules/test_nfs3.py +++ b/tests/unit/modules/test_nfs3.py @@ -37,7 +37,7 @@ class NfsTestCase(TestCase, LoaderModuleMockMixin): mock_open(read_data=file_d), create=True) as mfi: mfi.return_value.__iter__.return_value = file_d.splitlines() self.assertDictEqual(nfs3.list_exports(), - {'A': [{'hosts': ['B1'], 'options': ['23']}]}) + {'A': [{'hosts': 'B1', 'options': ['23']}]}) def test_del_export(self): ''' From 178274f320006a21303357e2a9da6ac25d4728ed Mon Sep 17 00:00:00 2001 From: Arnold Bechtoldt Date: Fri, 14 Jul 2017 09:40:40 +0200 Subject: [PATCH 026/639] actually add newline (per line) to form a valid file --- salt/modules/ssh.py | 21 +++++++++++++++++---- tests/unit/modules/test_ssh.py | 2 +- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py index 249a9e1fd6..d636643947 100644 --- a/salt/modules/ssh.py +++ b/salt/modules/ssh.py @@ -151,8 +151,11 @@ def _replace_auth_key( # open the file for both reading AND writing with salt.utils.fopen(full, 'r') as _fh: for line in _fh: - # We don't need any whitespace-only containing lines or arbitrary newlines + # We don't need any whitespace-only containing lines or arbitrary doubled newlines line = line.strip() + if line == '': + continue + line += '\n' if line.startswith('#'): # Commented 
Line @@ -160,6 +163,7 @@ def _replace_auth_key( continue comps = re.findall(r'((.*)\s)?(ssh-[a-z0-9-]+|ecdsa-[a-z0-9-]+)\s([a-zA-Z0-9+/]+={0,2})(\s(.*))?', line) if len(comps) > 0 and len(comps[0]) > 3 and comps[0][3] == key: + # Found our key, replace it lines.append(auth_line) else: lines.append(line) @@ -184,8 +188,11 @@ def _validate_keys(key_file, fingerprint_hash_type): try: with salt.utils.fopen(key_file, 'r') as _fh: for line in _fh: - # We don't need any whitespace-only containing lines or arbitrary newlines + # We don't need any whitespace-only containing lines or arbitrary doubled newlines line = line.strip() + if line == '': + continue + line += '\n' if line.startswith('#'): # Commented Line @@ -576,8 +583,11 @@ def rm_auth_key(user, # and then write out the correct one. Open the file once with salt.utils.fopen(full, 'r') as _fh: for line in _fh: - # We don't need any whitespace-only containing lines or arbitrary newlines + # We don't need any whitespace-only containing lines or arbitrary doubled newlines line = line.strip() + if line == '': + continue + line += '\n' if line.startswith('#'): # Commented Line @@ -786,8 +796,11 @@ def _parse_openssh_output(lines, fingerprint_hash_type=None): and yield dict with keys information, one by one. 
''' for line in lines: - # We don't need any whitespace-only containing lines or arbitrary newlines + # We don't need any whitespace-only containing lines or arbitrary doubled newlines line = line.strip() + if line == '': + continue + line += '\n' if line.startswith('#'): continue diff --git a/tests/unit/modules/test_ssh.py b/tests/unit/modules/test_ssh.py index fd0d4d7176..62d045fe02 100644 --- a/tests/unit/modules/test_ssh.py +++ b/tests/unit/modules/test_ssh.py @@ -90,7 +90,7 @@ class SSHAuthKeyTestCase(TestCase, LoaderModuleMockMixin): options = 'command="/usr/local/lib/ssh-helper"' email = 'github.com' empty_line = '\n' - comment_line = '# this is a comment \n' + comment_line = '# this is a comment\n' # Write out the authorized key to a temporary file if salt.utils.is_windows(): From 8c6bc010b9d42b4160fb22001e4cea6ecc5bd769 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Thu, 20 Jul 2017 18:53:45 -0500 Subject: [PATCH 027/639] pylint fixes, initial unit test --- salt/modules/saltcheck.py | 500 +++++++++++++++++++++++++++ tests/unit/modules/test_saltcheck.py | 36 ++ 2 files changed, 536 insertions(+) create mode 100644 salt/modules/saltcheck.py create mode 100644 tests/unit/modules/test_saltcheck.py diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py new file mode 100644 index 0000000000..a4122cbf7d --- /dev/null +++ b/salt/modules/saltcheck.py @@ -0,0 +1,500 @@ +# -*- coding: utf-8 -*- +''' +This module should be saved as salt/modules/saltcheck.py +''' +from __future__ import absolute_import +import logging +import os +import time +import yaml +try: + import salt.utils + import salt.client + import salt.exceptions +except ImportError: + pass + +log = logging.getLogger(__name__) + +__virtualname__ = 'saltcheck' + + +def __virtual__(): + ''' + Check dependencies + ''' + return __virtualname__ + + +def update_master_cache(): + ''' + Updates the master cache onto the minion - to transfer all salt-check-tests + Should be done one time before 
running tests, and if tests are updated + + CLI Example: + salt '*' salt_check.update_master_cache + ''' + __salt__['cp.cache_master'](args=None, kwargs=None) + return True + + +def run_test(**kwargs): + ''' + Enables running one salt_check test via cli + CLI Example:: + salt '*' salt_check.run_test + test='{"module_and_function": "test.echo", + "assertion": "assertEqual", + "expected-return": "This works!", + "args":["This works!"] }' + ''' + # salt converts the string to a dictionary auto-magically + scheck = SaltCheck() + test = kwargs.get('test', None) + if test and isinstance(test, dict): + return scheck.run_test(test) + else: + return "test must be dictionary" + + +def run_state_tests(state): + ''' + Returns the output of running all salt check test for a state + CLI Example:: + salt '*' salt_check.run_state_tests postfix_ubuntu_16_04 + ''' + scheck = SaltCheck() + paths = scheck.get_state_search_path_list() + stl = StateTestLoader(search_paths=paths) + sls_list = scheck.get_state_sls(state) + sls_paths = stl.convert_sls_to_paths(sls_list) + for mypath in sls_paths: + stl.add_test_files_for_sls(mypath) + stl.load_test_suite() + results_dict = {} + for key, value in stl.test_dict.items(): + result = scheck.run_test(value) + results_dict[key] = result + return {state: results_dict} + + +class SaltCheck(object): + ''' + This class implements the saltcheck + ''' + + def __init__(self): + self.sls_list_top = [] + self.sls_list_state = [] + self.modules = [] + self.results_dict = {} + self.results_dict_summary = {} + self.assertions_list = '''assertEqual assertNotEqual + assertTrue assertFalse + assertIn assertGreater + assertGreaterEqual + assertLess assertLessEqual'''.split() + # self.modules = self.populate_salt_modules_list() + # call when needed self.populate_salt_modules_list() + self.salt_lc = salt.client.Caller(mopts=__opts__) + + @staticmethod + def update_master_cache(): + '''Easy way to update the master files on the minion''' + 
__salt__['cp.cache_master'](args=None, kwargs=None) + return + + def get_top_sls(self): + ''' equivalent to a salt cli: salt web state.show_lowstate''' + # sls_list = [] + try: + returned = __salt__['state.show_lowstate']() + for i in returned: + if i['__sls__'] not in self.sls_list_top: + self.sls_list_top.append(i['__sls__']) + except Exception: + raise + # self.sls_list = sls_list + return self.sls_list_top + + def get_state_sls(self, state): + ''' equivalent to a salt cli: salt web state.show_low_sls STATE''' + try: + returned = __salt__['state.show_low_sls'](state) + for i in returned: + if i['__sls__'] not in self.sls_list_state: + self.sls_list_state.append(i['__sls__']) + except Exception: + raise + return self.sls_list_state + + def populate_salt_modules_list(self): + '''return a list of all modules available on minion''' + self.modules = __salt__['sys.list_modules']() + return + + def __is_valid_module(self, module_name): + '''Determines if a module is valid on a minion''' + if module_name not in self.modules: + return False + else: + return True + + @staticmethod + def __is_valid_function(module_name, function): + '''Determine if a function is valid for a module''' + try: + functions = __salt__['sys.list_functions'](module_name) + except salt.exceptions.SaltException: + functions = ["unable to look up functions"] + return "{0}.{1}".format(module_name, function) in functions + + def __is_valid_test(self, test_dict): + '''Determine if a test contains: + a test name, + a valid module and function, + a valid assertion, + an expected return value''' + tots = 0 # need 6 to pass test + m_and_f = test_dict.get('module_and_function', None) + assertion = test_dict.get('assertion', None) + expected_return = test_dict.get('expected-return', None) + if m_and_f: + tots += 1 + module, function = m_and_f.split('.') + if self.__is_valid_module(module): + tots += 1 + if self.__is_valid_function(module, function): + tots += 1 + if assertion: + tots += 1 + if assertion in 
self.assertions_list: + tots += 1 + if expected_return: + tots += 1 + return tots >= 6 + + def call_salt_command(self, + fun, + args=None, + kwargs=None): + '''Generic call of salt Caller command''' + value = False + try: + if args and kwargs: + value = self.salt_lc.function(fun, *args, **kwargs) + elif args and not kwargs: + value = self.salt_lc.function(fun, *args) + elif not args and kwargs: + value = self.salt_lc.function(fun, **kwargs) + else: + value = self.salt_lc.function(fun) + except salt.exceptions.SaltException: + raise + except Exception: + raise + return value + + def call_salt_command_test(self, + fun + ): + '''Generic call of salt Caller command''' + value = False + try: + value = self.salt_lc.function(fun) + except salt.exceptions.SaltException: + raise + return value + + def run_test(self, test_dict): + '''Run a single salt_check test''' + if self.__is_valid_test(test_dict): + mod_and_func = test_dict['module_and_function'] + args = test_dict.get('args', None) + assertion = test_dict['assertion'] + expected_return = test_dict['expected-return'] + kwargs = test_dict.get('kwargs', None) + actual_return = self.call_salt_command(mod_and_func, args, kwargs) + # checking for membership in a list does not require a type cast + if assertion != "assertIn": + expected_return = self.cast_expected_to_returned_type(expected_return, actual_return) + # return actual_return + if assertion == "assertEqual": + value = self.__assert_equal(expected_return, actual_return) + elif assertion == "assertNotEqual": + value = self.__assert_not_equal(expected_return, actual_return) + elif assertion == "assertTrue": + value = self.__assert_true(expected_return) + elif assertion == "assertFalse": + value = self.__assert_false(expected_return) + elif assertion == "assertIn": + value = self.__assert_in(expected_return, actual_return) + elif assertion == "assertNotIn": + value = self.__assert_not_in(expected_return, actual_return) + elif assertion == "assertGreater": + value = 
self.__assert_greater(expected_return, actual_return) + elif assertion == "assertGreaterEqual": + value = self.__assert_greater_equal(expected_return, actual_return) + elif assertion == "assertLess": + value = self.__assert_less(expected_return, actual_return) + elif assertion == "assertLessEqual": + value = self.__assert_less_equal(expected_return, actual_return) + else: + value = False + else: + return False + return value + + @staticmethod + def cast_expected_to_returned_type(expected, returned): + ''' + Determine the type of variable returned + Cast the expected to the type of variable returned + ''' + ret_type = type(returned) + new_expected = expected + if expected == "False" and ret_type == bool: + expected = False + try: + new_expected = ret_type(expected) + except ValueError: + log.info("Unable to cast expected into type of returned") + log.info("returned = {}".format(returned)) + log.info("type of returned = {}".format(type(returned))) + log.info("expected = {}".format(expected)) + log.info("type of expected = {}".format(type(expected))) + return new_expected + + @staticmethod + def __assert_equal(expected, returned): + ''' + Test if two objects are equal + ''' + result = True + + try: + assert (expected == returned), "{0} is not equal to {1}".format(expected, returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_not_equal(expected, returned): + ''' + Test if two objects are not equal + ''' + result = (True) + try: + assert (expected != returned), "{0} is equal to {1}".format(expected, returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_true(returned): + ''' + Test if an boolean is True + ''' + result = (True) + try: + assert (returned is True), "{0} not True".format(returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_false(returned): + ''' + Test 
if an boolean is False + ''' + result = (True) + if isinstance(returned, str): + try: + returned = bool(returned) + except ValueError: + raise + try: + assert (returned is False), "{0} not False".format(returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_in(expected, returned): + ''' + Test if a value is in the list of returned values + ''' + result = (True) + try: + assert (expected in returned), "{0} not False".format(returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_not_in(expected, returned): + ''' + Test if a value is in the list of returned values + ''' + result = (True) + try: + assert (expected not in returned), "{0} not False".format(returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_greater(expected, returned): + ''' + Test if a value is in the list of returned values + ''' + result = (True) + try: + assert (expected > returned), "{0} not False".format(returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_greater_equal(expected, returned): + ''' + Test if a value is in the list of returned values + ''' + result = (True) + try: + assert (expected >= returned), "{0} not False".format(returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_less(expected, returned): + ''' + Test if a value is in the list of returned values + ''' + result = (True) + try: + assert (expected < returned), "{0} not False".format(returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_less_equal(expected, returned): + ''' + Test if a value is in the list of returned values + ''' + result = (True) + try: + assert (expected <= returned), "{0} not False".format(returned) + 
except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __show_minion_options(): + '''gather and return minion config options''' + cachedir = __opts__['cachedir'] + root_dir = __opts__['root_dir'] + states_dirs = __opts__['states_dirs'] + environment = __opts__['environment'] + file_roots = __opts__['file_roots'] + return {'cachedir': cachedir, + 'root_dir': root_dir, + 'states_dirs': states_dirs, + 'environment': environment, + 'file_roots': file_roots} + + @staticmethod + def get_state_search_path_list(): + '''For the state file system, return a + list of paths to search for states''' + # state cache should be updated before running this method + search_list = [] + cachedir = __opts__.get('cachedir', None) + environment = __opts__['environment'] + if environment: + path = cachedir + os.sep + "files" + os.sep + environment + search_list.append(path) + path = cachedir + os.sep + "files" + os.sep + "base" + search_list.append(path) + return search_list + + def __get_state_dir(self): + ''''return the path of the state dir''' + paths = self.get_state_search_path_list() + return paths + + +class StateTestLoader(object): + ''' + Class loads in test files for a state + e.g. 
state_dir/salt-check-tests/[1.tst, 2.tst, 3.tst] + ''' + + def __init__(self, search_paths): + self.search_paths = search_paths + self.path_type = None + self.test_files = [] # list of file paths + self.test_dict = {} + + def load_test_suite(self): + '''load tests either from one file, or a set of files''' + for myfile in self.test_files: + self.load_file(myfile) + + def load_file(self, filepath): + ''' + loads in one test file + ''' + try: + with salt.utils.fopen(filepath, 'r') as myfile: + # myfile = open(filepath, 'r') + contents_yaml = yaml.load(myfile) + for key, value in contents_yaml.items(): + self.test_dict[key] = value + except: + raise + return + + def gather_files(self, filepath): + '''gather files for a test suite''' + log.info("gather_files: {}".format(time.time())) + filepath = filepath + os.sep + 'salt-check-tests' + rootdir = filepath + for dirname, filelist in os.walk(rootdir): + for fname in filelist: + if fname.endswith('.tst'): + start_path = dirname + os.sep + fname + full_path = os.path.abspath(start_path) + self.test_files.append(full_path) + return + + @staticmethod + def convert_sls_to_paths(sls_list): + '''Converting sls to paths''' + new_sls_list = [] + for sls in sls_list: + sls = sls.replace(".", os.sep) + new_sls_list.append(sls) + return new_sls_list + + def add_test_files_for_sls(self, sls_path): + '''Adding test files''' + # state_path = None + for path in self.search_paths: + full_path = path + os.sep + sls_path + rootdir = full_path + if os.path.isdir(full_path): + log.info("searching path= {}".format(full_path)) + for dirname, subdirlist in os.walk(rootdir, topdown=True): + if "salt-check-tests" in subdirlist: + self.gather_files(dirname) + log.info("test_files list: {}".format(self.test_files)) + log.info("found subdir match in = {}".format(dirname)) + else: + log.info("did not find subdir match in = {}".format(dirname)) + del subdirlist[:] + else: + log.info("path is not a directory= {}".format(full_path)) + return diff --git 
a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py new file mode 100644 index 0000000000..cabbae8244 --- /dev/null +++ b/tests/unit/modules/test_saltcheck.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Import python libs +from __future__ import absolute_import, print_function + +# Import Salt Testing libs +from tests.support.unit import skipIf, TestCase +from salt.exceptions import CommandExecutionError + +# Import salt libs +import salt.modules.saltcheck as saltcheck + + +class SaltCheckTestCase(TestCase): + ''' SaltCheckTestCase''' + + def test_ping(self): + #self.assertTrue(True) + self.assertTrue(saltcheck.ping) + + def test_update_master_cache(self): + self.assertTrue(saltcheck.update_master_cache) + + #def test_sc_update_master_cache(self): + # sc = saltcheck.SaltCheck() + # self.assertTrue(sc.update_master_cache) + + def test_get_top_sls(self): + self.assertTrue(saltcheck.get_top_sls) + + def test_sc_add_nums(self): + sc = saltcheck.SaltCheck() + val = sc.add_nums(10, 1) + self.assertEqual(val, 11) + + From 82aa2d0971e1cb87ce18ced6c608c7030a68abb0 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Fri, 21 Jul 2017 12:11:27 -0500 Subject: [PATCH 028/639] added lots of unit tests --- salt/modules/saltcheck.py | 139 +++++++++++++------- tests/unit/modules/test_saltcheck.py | 184 +++++++++++++++++++++++++-- 2 files changed, 262 insertions(+), 61 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index a4122cbf7d..bce5cd5262 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -21,7 +21,7 @@ __virtualname__ = 'saltcheck' def __virtual__(): ''' - Check dependencies + Check dependencies - may be useful in future ''' return __virtualname__ @@ -78,6 +78,25 @@ def run_state_tests(state): return {state: results_dict} +def run_highstate_tests(): + ''' + Returns the output of running all salt checks of states that would apply for a highstate + CLI Example:: + salt '*' 
salt_check.run_highstate_tests + ''' + scheck = SaltCheck() + states = scheck.get_top_states() + all_states = {} + for sta in states: + log.info("State Name = {}".format(sta)) + result_dict = run_state_tests(sta) + log.info("result_dict = {}".format(result_dict)) + key = result_dict.keys()[0] + val = result_dict.values()[0] + all_states[key] = val + return all_states + + class SaltCheck(object): ''' This class implements the saltcheck @@ -95,27 +114,49 @@ class SaltCheck(object): assertGreaterEqual assertLess assertLessEqual'''.split() # self.modules = self.populate_salt_modules_list() - # call when needed self.populate_salt_modules_list() - self.salt_lc = salt.client.Caller(mopts=__opts__) + # self.salt_lc = salt.client.Caller(mopts=__opts__) + self.salt_lc = salt.client.Caller() + + # @staticmethod + # def update_master_cache(): + # '''Easy way to update the master files on the minion''' + # # currently unused, but might be useful later + # __salt__['cp.cache_master'](args=None, kwargs=None) + # return + + # def get_top_sls(self): + # ''' equivalent to a salt cli: salt web state.show_lowstate''' + # # sls_list = [] + # try: + # returned = __salt__['state.show_lowstate']() + # for i in returned: + # if i['__sls__'] not in self.sls_list_top: + # self.sls_list_top.append(i['__sls__']) + # except Exception: + # raise + # # self.sls_list = sls_list + # return self.sls_list_top @staticmethod - def update_master_cache(): - '''Easy way to update the master files on the minion''' - __salt__['cp.cache_master'](args=None, kwargs=None) - return - - def get_top_sls(self): - ''' equivalent to a salt cli: salt web state.show_lowstate''' - # sls_list = [] + def get_top_states(): + ''' equivalent to a salt cli: salt web state.show_top''' try: - returned = __salt__['state.show_lowstate']() - for i in returned: - if i['__sls__'] not in self.sls_list_top: - self.sls_list_top.append(i['__sls__']) + returned = __salt__['state.show_top']() + # returned = 
self.call_salt_command(fun='state.show_top', + # args=None, + # kwargs=None) + # doing this to handle states with periods + # e.g. apache.vhost_web1 + alt_states = [] + for state in returned['base']: + state_bits = state.split(".") + state_name = state_bits[0] + if state_name not in alt_states: + alt_states.append(state_name) except Exception: raise - # self.sls_list = sls_list - return self.sls_list_top + log.info("top states: {}".format(alt_states)) + return alt_states def get_state_sls(self, state): ''' equivalent to a salt cli: salt web state.show_low_sls STATE''' @@ -195,16 +236,16 @@ class SaltCheck(object): raise return value - def call_salt_command_test(self, - fun - ): - '''Generic call of salt Caller command''' - value = False - try: - value = self.salt_lc.function(fun) - except salt.exceptions.SaltException: - raise - return value + # def call_salt_command_test(self, + # fun + # ): + # '''Generic call of salt Caller command''' + # value = False + # try: + # value = self.salt_lc.function(fun) + # except salt.exceptions.SaltException: + # raise + # return value def run_test(self, test_dict): '''Run a single salt_check test''' @@ -334,7 +375,7 @@ class SaltCheck(object): @staticmethod def __assert_not_in(expected, returned): ''' - Test if a value is in the list of returned values + Test if a value is not in the list of returned values ''' result = (True) try: @@ -346,7 +387,7 @@ class SaltCheck(object): @staticmethod def __assert_greater(expected, returned): ''' - Test if a value is in the list of returned values + Test if a value is greater than the returned value ''' result = (True) try: @@ -358,7 +399,7 @@ class SaltCheck(object): @staticmethod def __assert_greater_equal(expected, returned): ''' - Test if a value is in the list of returned values + Test if a value is greater than or equal to the returned value ''' result = (True) try: @@ -370,7 +411,7 @@ class SaltCheck(object): @staticmethod def __assert_less(expected, returned): ''' - Test if a value 
is in the list of returned values + Test if a value is less than the returned value ''' result = (True) try: @@ -382,7 +423,7 @@ class SaltCheck(object): @staticmethod def __assert_less_equal(expected, returned): ''' - Test if a value is in the list of returned values + Test if a value is less than or equal to the returned value ''' result = (True) try: @@ -391,19 +432,19 @@ class SaltCheck(object): result = "False: " + str(err) return result - @staticmethod - def __show_minion_options(): - '''gather and return minion config options''' - cachedir = __opts__['cachedir'] - root_dir = __opts__['root_dir'] - states_dirs = __opts__['states_dirs'] - environment = __opts__['environment'] - file_roots = __opts__['file_roots'] - return {'cachedir': cachedir, - 'root_dir': root_dir, - 'states_dirs': states_dirs, - 'environment': environment, - 'file_roots': file_roots} + # @staticmethod + # def __show_minion_options(): + # '''gather and return minion config options''' + # cachedir = __opts__['cachedir'] + # root_dir = __opts__['root_dir'] + # states_dirs = __opts__['states_dirs'] + # environment = __opts__['environment'] + # file_roots = __opts__['file_roots'] + # return {'cachedir': cachedir, + # 'root_dir': root_dir, + # 'states_dirs': states_dirs, + # 'environment': environment, + # 'file_roots': file_roots} @staticmethod def get_state_search_path_list(): @@ -420,10 +461,10 @@ class SaltCheck(object): search_list.append(path) return search_list - def __get_state_dir(self): - ''''return the path of the state dir''' - paths = self.get_state_search_path_list() - return paths + # def __get_state_dir(self): + # ''''return the path of the state dir''' + # paths = self.get_state_search_path_list() + # return paths class StateTestLoader(object): diff --git a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py index cabbae8244..e43a40f654 100644 --- a/tests/unit/modules/test_saltcheck.py +++ b/tests/unit/modules/test_saltcheck.py @@ -14,23 +14,183 @@ import 
salt.modules.saltcheck as saltcheck class SaltCheckTestCase(TestCase): ''' SaltCheckTestCase''' - def test_ping(self): - #self.assertTrue(True) - self.assertTrue(saltcheck.ping) - def test_update_master_cache(self): self.assertTrue(saltcheck.update_master_cache) - #def test_sc_update_master_cache(self): + def test_call_salt_command(self): + sc = saltcheck.SaltCheck() + returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) + self.assertEqual(returned, 'hello') + + #def test__is_valid_test(self): + # my_dict = {'module_and_function': 'test.echo', + # 'assertion': 'assertEqual', + # 'expected-return': 'True'} # sc = saltcheck.SaltCheck() - # self.assertTrue(sc.update_master_cache) + # mybool = sc.__is_valid_test(my_dict) + # self.assertTrue(mybool) + + #def test_is_valid_module(self): + # sc = saltcheck.SaltCheck() + # returned = sc.is_valid_module('test') + # self.assertTrue(returned) - def test_get_top_sls(self): - self.assertTrue(saltcheck.get_top_sls) + def test__assert_equal1(self): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2} + mybool = sc._SaltCheck__assert_equal(a, b) + self.assertTrue(mybool) - def test_sc_add_nums(self): - sc = saltcheck.SaltCheck() - val = sc.add_nums(10, 1) - self.assertEqual(val, 11) + def test__assert_equal2(self): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2, 'c': 3} + mybool = sc._SaltCheck__assert_equal(False, True) + self.assertNotEqual(mybool, True) + def test__assert_not_equal1(self): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2, 'c': 3} + mybool = sc._SaltCheck__assert_not_equal(a, b) + self.assertTrue(mybool) + def test__assert_not_equal2(self): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2} + mybool = sc._SaltCheck__assert_not_equal(a, b) + self.assertNotEqual(mybool, True) + + def test__assert_true1(self): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_equal(True, True) + 
self.assertTrue(mybool) + + def test__assert_true2(self): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_equal(False, True) + self.assertNotEqual(mybool, True) + + def test__assert_false1(self): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_false(False) + self.assertTrue(mybool) + + def test__assert_false2(self): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_false(True) + self.assertNotEqual(mybool, True) + + def test__assert_in1(self): + sc = saltcheck.SaltCheck() + a = "bob" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_in(a, mylist) + self.assertTrue(mybool, True) + + def test__assert_in2(self): + sc = saltcheck.SaltCheck() + a = "elaine" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_in(a, mylist) + self.assertNotEqual(mybool, True) + + def test__assert_not_in1(self): + sc = saltcheck.SaltCheck() + a = "elaine" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_not_in(a, mylist) + self.assertTrue(mybool, True) + + def test__assert_not_in2(self): + sc = saltcheck.SaltCheck() + a = "bob" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_not_in(a, mylist) + self.assertNotEqual(mybool, True) + + def test__assert_greater1(self): + sc = saltcheck.SaltCheck() + a = 110 + b = 100 + mybool = sc._SaltCheck__assert_greater(a, b) + self.assertTrue(mybool, True) + + def test__assert_greater2(self): + sc = saltcheck.SaltCheck() + a = 100 + b = 110 + mybool = sc._SaltCheck__assert_greater(a, b) + self.assertNotEqual(mybool, True) + + def test__assert_greater3(self): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_greater(a, b) + self.assertNotEqual(mybool, True) + + def test__assert_greater_equal_equal1(self): + sc = saltcheck.SaltCheck() + a = 110 + b = 100 + mybool = sc._SaltCheck__assert_greater_equal(a, b) + self.assertTrue(mybool, True) + + def 
test__assert_greater_equal2(self): + sc = saltcheck.SaltCheck() + a = 100 + b = 110 + mybool = sc._SaltCheck__assert_greater_equal(a, b) + self.assertNotEqual(mybool, True) + + def test__assert_greater_equal3(self): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_greater_equal(a, b) + self.assertEqual(mybool, True) + + def test__assert_less1(self): + sc = saltcheck.SaltCheck() + a = 99 + b = 100 + mybool = sc._SaltCheck__assert_less(a, b) + self.assertTrue(mybool, True) + + def test__assert_less2(self): + sc = saltcheck.SaltCheck() + a = 110 + b = 99 + mybool = sc._SaltCheck__assert_less(a, b) + self.assertNotEqual(mybool, True) + + def test__assert_less3(self): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_less(a, b) + self.assertNotEqual(mybool, True) + + def test__assert_less_equal1(self): + sc = saltcheck.SaltCheck() + a = 99 + b = 100 + mybool = sc._SaltCheck__assert_less_equal(a, b) + self.assertTrue(mybool, True) + + def test__assert_less_equal2(self): + sc = saltcheck.SaltCheck() + a = 110 + b = 99 + mybool = sc._SaltCheck__assert_less_equal(a, b) + self.assertNotEqual(mybool, True) + + def test__assert_less_equal3(self): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_less_equal(a, b) + self.assertEqual(mybool, True) From 629d818bb6c27b862c4339a29128017631428875 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Fri, 21 Jul 2017 16:00:49 -0500 Subject: [PATCH 029/639] new check for updating master cache based on salt config.get --- salt/modules/saltcheck.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index bce5cd5262..ec935ccde1 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -34,7 +34,7 @@ def update_master_cache(): CLI Example: salt '*' salt_check.update_master_cache ''' - __salt__['cp.cache_master'](args=None, 
kwargs=None) + __salt__['cp.cache_master']() return True @@ -103,26 +103,23 @@ class SaltCheck(object): ''' def __init__(self): - self.sls_list_top = [] + # self.sls_list_top = [] self.sls_list_state = [] self.modules = [] self.results_dict = {} self.results_dict_summary = {} + self.auto_update_master_cache = __salt__['config.get']('auto_update_master_cache', False) self.assertions_list = '''assertEqual assertNotEqual assertTrue assertFalse assertIn assertGreater assertGreaterEqual assertLess assertLessEqual'''.split() - # self.modules = self.populate_salt_modules_list() + self.populate_salt_modules_list() + # log.info("modules are: {}".format(self.modules)) # self.salt_lc = salt.client.Caller(mopts=__opts__) self.salt_lc = salt.client.Caller() - - # @staticmethod - # def update_master_cache(): - # '''Easy way to update the master files on the minion''' - # # currently unused, but might be useful later - # __salt__['cp.cache_master'](args=None, kwargs=None) - # return + if self.auto_update_master_cache: + update_master_cache() # def get_top_sls(self): # ''' equivalent to a salt cli: salt web state.show_lowstate''' @@ -489,8 +486,7 @@ class StateTestLoader(object): loads in one test file ''' try: - with salt.utils.fopen(filepath, 'r') as myfile: - # myfile = open(filepath, 'r') + with salt.utils.files.fopen(filepath, 'r') as myfile: contents_yaml = yaml.load(myfile) for key, value in contents_yaml.items(): self.test_dict[key] = value @@ -503,7 +499,8 @@ class StateTestLoader(object): log.info("gather_files: {}".format(time.time())) filepath = filepath + os.sep + 'salt-check-tests' rootdir = filepath - for dirname, filelist in os.walk(rootdir): + # for dirname, subdirlist, filelist in os.walk(rootdir): + for dirname, dummy, filelist in os.walk(rootdir): for fname in filelist: if fname.endswith('.tst'): start_path = dirname + os.sep + fname @@ -528,7 +525,8 @@ class StateTestLoader(object): rootdir = full_path if os.path.isdir(full_path): log.info("searching path= 
{}".format(full_path)) - for dirname, subdirlist in os.walk(rootdir, topdown=True): + # for dirname, subdirlist, filelist in os.walk(rootdir, topdown=True): + for dirname, subdirlist, dummy in os.walk(rootdir, topdown=True): if "salt-check-tests" in subdirlist: self.gather_files(dirname) log.info("test_files list: {}".format(self.test_files)) From 1834dbcbd548e8bb45330e79f3efa014108de1f2 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Tue, 25 Jul 2017 14:11:25 -0700 Subject: [PATCH 030/639] major updates - refactored to make __salt__ work in module with unit tests, squashed deep nesting bug too --- salt/modules/saltcheck.py | 218 +++++++++++++-------------- tests/unit/modules/test_saltcheck.py | 53 ++++--- 2 files changed, 137 insertions(+), 134 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index ec935ccde1..141d9cd8a2 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -57,7 +57,7 @@ def run_test(**kwargs): return "test must be dictionary" -def run_state_tests(state): +def run_state_tests_old(state): ''' Returns the output of running all salt check test for a state CLI Example:: @@ -66,7 +66,7 @@ def run_state_tests(state): scheck = SaltCheck() paths = scheck.get_state_search_path_list() stl = StateTestLoader(search_paths=paths) - sls_list = scheck.get_state_sls(state) + sls_list = _get_state_sls(state) sls_paths = stl.convert_sls_to_paths(sls_list) for mypath in sls_paths: stl.add_test_files_for_sls(mypath) @@ -78,23 +78,96 @@ def run_state_tests(state): return {state: results_dict} +def run_state_tests(state): + ''' + Returns the output of running all salt check test for a state + CLI Example:: + salt '*' salt_check.run_state_tests postfix_ubuntu_16_04 + ''' + scheck = SaltCheck() + paths = scheck.get_state_search_path_list() + stl = StateTestLoader(search_paths=paths) + results = {} + sls_list = _get_state_sls(state) + for state_name in sls_list: + mypath = stl.convert_sls_to_path(state_name) + 
stl.add_test_files_for_sls(mypath) + stl.load_test_suite() + results_dict = {} + for key, value in stl.test_dict.items(): + result = scheck.run_test(value) + results_dict[key] = result + results[state_name] = results_dict + return results + + def run_highstate_tests(): ''' Returns the output of running all salt checks of states that would apply for a highstate CLI Example:: salt '*' salt_check.run_highstate_tests ''' - scheck = SaltCheck() - states = scheck.get_top_states() + states = _get_top_states() all_states = {} for sta in states: log.info("State Name = {}".format(sta)) - result_dict = run_state_tests(sta) - log.info("result_dict = {}".format(result_dict)) - key = result_dict.keys()[0] - val = result_dict.values()[0] - all_states[key] = val - return all_states + all_states.update(run_state_tests(sta)) + # result_dict = run_state_tests(sta) + # log.info("result_dict = {}".format(result_dict)) + # key = result_dict.keys()[0] + # val = result_dict.values()[0] + # all_states[key] = val + return {'highstate_test_result': all_states} + + +def _is_valid_module(module): + '''return a list of all modules available on minion''' + modules = __salt__['sys.list_modules']() + return bool(module in modules) + + +def _get_auto_update_cache_value(): + '''return the config value of auto_update_master_cache''' + __salt__['config.get']('auto_update_master_cache') + return True + + +def _is_valid_function(module_name, function): + '''Determine if a function is valid for a module''' + try: + functions = __salt__['sys.list_functions'](module_name) + except salt.exceptions.SaltException: + functions = ["unable to look up functions"] + return "{0}.{1}".format(module_name, function) in functions + + +def _get_top_states(): + ''' equivalent to a salt cli: salt web state.show_top''' + try: + returned = __salt__['state.show_top']() + alt_states = [] + for state in returned['base']: + state_bits = state.split(".") + state_name = state_bits[0] + if state_name not in alt_states: + 
alt_states.append(state_name) + except Exception: + raise + log.info("top states: {}".format(alt_states)) + return alt_states + + +def _get_state_sls(state): + ''' equivalent to a salt cli: salt web state.show_low_sls STATE''' + sls_list_state = [] + try: + returned = __salt__['state.show_low_sls'](state) + for i in returned: + if i['__sls__'] not in sls_list_state: + sls_list_state.append(i['__sls__']) + except Exception: + raise + return sls_list_state class SaltCheck(object): @@ -108,84 +181,17 @@ class SaltCheck(object): self.modules = [] self.results_dict = {} self.results_dict_summary = {} - self.auto_update_master_cache = __salt__['config.get']('auto_update_master_cache', False) self.assertions_list = '''assertEqual assertNotEqual assertTrue assertFalse assertIn assertGreater assertGreaterEqual assertLess assertLessEqual'''.split() - self.populate_salt_modules_list() + self.auto_update_master_cache = _get_auto_update_cache_value # log.info("modules are: {}".format(self.modules)) # self.salt_lc = salt.client.Caller(mopts=__opts__) self.salt_lc = salt.client.Caller() - if self.auto_update_master_cache: - update_master_cache() - - # def get_top_sls(self): - # ''' equivalent to a salt cli: salt web state.show_lowstate''' - # # sls_list = [] - # try: - # returned = __salt__['state.show_lowstate']() - # for i in returned: - # if i['__sls__'] not in self.sls_list_top: - # self.sls_list_top.append(i['__sls__']) - # except Exception: - # raise - # # self.sls_list = sls_list - # return self.sls_list_top - - @staticmethod - def get_top_states(): - ''' equivalent to a salt cli: salt web state.show_top''' - try: - returned = __salt__['state.show_top']() - # returned = self.call_salt_command(fun='state.show_top', - # args=None, - # kwargs=None) - # doing this to handle states with periods - # e.g. 
apache.vhost_web1 - alt_states = [] - for state in returned['base']: - state_bits = state.split(".") - state_name = state_bits[0] - if state_name not in alt_states: - alt_states.append(state_name) - except Exception: - raise - log.info("top states: {}".format(alt_states)) - return alt_states - - def get_state_sls(self, state): - ''' equivalent to a salt cli: salt web state.show_low_sls STATE''' - try: - returned = __salt__['state.show_low_sls'](state) - for i in returned: - if i['__sls__'] not in self.sls_list_state: - self.sls_list_state.append(i['__sls__']) - except Exception: - raise - return self.sls_list_state - - def populate_salt_modules_list(self): - '''return a list of all modules available on minion''' - self.modules = __salt__['sys.list_modules']() - return - - def __is_valid_module(self, module_name): - '''Determines if a module is valid on a minion''' - if module_name not in self.modules: - return False - else: - return True - - @staticmethod - def __is_valid_function(module_name, function): - '''Determine if a function is valid for a module''' - try: - functions = __salt__['sys.list_functions'](module_name) - except salt.exceptions.SaltException: - functions = ["unable to look up functions"] - return "{0}.{1}".format(module_name, function) in functions + # if self.auto_update_master_cache: + # update_master_cache() def __is_valid_test(self, test_dict): '''Determine if a test contains: @@ -193,16 +199,16 @@ class SaltCheck(object): a valid module and function, a valid assertion, an expected return value''' - tots = 0 # need 6 to pass test + tots = 0 # need total of >= 6 to pass test m_and_f = test_dict.get('module_and_function', None) assertion = test_dict.get('assertion', None) expected_return = test_dict.get('expected-return', None) if m_and_f: tots += 1 module, function = m_and_f.split('.') - if self.__is_valid_module(module): + if _is_valid_module(module): tots += 1 - if self.__is_valid_function(module, function): + if _is_valid_function(module, 
function): tots += 1 if assertion: tots += 1 @@ -233,17 +239,6 @@ class SaltCheck(object): raise return value - # def call_salt_command_test(self, - # fun - # ): - # '''Generic call of salt Caller command''' - # value = False - # try: - # value = self.salt_lc.function(fun) - # except salt.exceptions.SaltException: - # raise - # return value - def run_test(self, test_dict): '''Run a single salt_check test''' if self.__is_valid_test(test_dict): @@ -256,7 +251,6 @@ class SaltCheck(object): # checking for membership in a list does not require a type cast if assertion != "assertIn": expected_return = self.cast_expected_to_returned_type(expected_return, actual_return) - # return actual_return if assertion == "assertEqual": value = self.__assert_equal(expected_return, actual_return) elif assertion == "assertNotEqual": @@ -429,20 +423,6 @@ class SaltCheck(object): result = "False: " + str(err) return result - # @staticmethod - # def __show_minion_options(): - # '''gather and return minion config options''' - # cachedir = __opts__['cachedir'] - # root_dir = __opts__['root_dir'] - # states_dirs = __opts__['states_dirs'] - # environment = __opts__['environment'] - # file_roots = __opts__['file_roots'] - # return {'cachedir': cachedir, - # 'root_dir': root_dir, - # 'states_dirs': states_dirs, - # 'environment': environment, - # 'file_roots': file_roots} - @staticmethod def get_state_search_path_list(): '''For the state file system, return a @@ -458,16 +438,11 @@ class SaltCheck(object): search_list.append(path) return search_list - # def __get_state_dir(self): - # ''''return the path of the state dir''' - # paths = self.get_state_search_path_list() - # return paths - class StateTestLoader(object): ''' Class loads in test files for a state - e.g. state_dir/salt-check-tests/[1.tst, 2.tst, 3.tst] + e.g. 
state_dir/saltcheck-tests/[1.tst, 2.tst, 3.tst] ''' def __init__(self, search_paths): @@ -478,8 +453,10 @@ class StateTestLoader(object): def load_test_suite(self): '''load tests either from one file, or a set of files''' + self.test_dict = {} for myfile in self.test_files: self.load_file(myfile) + self.test_files = [] def load_file(self, filepath): ''' @@ -496,8 +473,9 @@ class StateTestLoader(object): def gather_files(self, filepath): '''gather files for a test suite''' + self.test_files = [] log.info("gather_files: {}".format(time.time())) - filepath = filepath + os.sep + 'salt-check-tests' + filepath = filepath + os.sep + 'saltcheck-tests' rootdir = filepath # for dirname, subdirlist, filelist in os.walk(rootdir): for dirname, dummy, filelist in os.walk(rootdir): @@ -517,6 +495,12 @@ class StateTestLoader(object): new_sls_list.append(sls) return new_sls_list + @staticmethod + def convert_sls_to_path(sls): + '''Converting sls to paths''' + sls = sls.replace(".", os.sep) + return sls + def add_test_files_for_sls(self, sls_path): '''Adding test files''' # state_path = None @@ -527,7 +511,7 @@ class StateTestLoader(object): log.info("searching path= {}".format(full_path)) # for dirname, subdirlist, filelist in os.walk(rootdir, topdown=True): for dirname, subdirlist, dummy in os.walk(rootdir, topdown=True): - if "salt-check-tests" in subdirlist: + if "saltcheck-tests" in subdirlist: self.gather_files(dirname) log.info("test_files list: {}".format(self.test_files)) log.info("found subdir match in = {}".format(dirname)) diff --git a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py index e43a40f654..586b2dc2ea 100644 --- a/tests/unit/modules/test_saltcheck.py +++ b/tests/unit/modules/test_saltcheck.py @@ -4,12 +4,15 @@ from __future__ import absolute_import, print_function # Import Salt Testing libs +from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import skipIf, TestCase -from salt.exceptions import 
CommandExecutionError +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch # Import salt libs +from salt.exceptions import CommandExecutionError import salt.modules.saltcheck as saltcheck +saltcheck.__salt__ = {} class SaltCheckTestCase(TestCase): ''' SaltCheckTestCase''' @@ -17,23 +20,24 @@ class SaltCheckTestCase(TestCase): def test_update_master_cache(self): self.assertTrue(saltcheck.update_master_cache) - def test_call_salt_command(self): - sc = saltcheck.SaltCheck() - returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) - self.assertEqual(returned, 'hello') - #def test__is_valid_test(self): - # my_dict = {'module_and_function': 'test.echo', - # 'assertion': 'assertEqual', - # 'expected-return': 'True'} - # sc = saltcheck.SaltCheck() - # mybool = sc.__is_valid_test(my_dict) - # self.assertTrue(mybool) - - #def test_is_valid_module(self): - # sc = saltcheck.SaltCheck() - # returned = sc.is_valid_module('test') - # self.assertTrue(returned) + def test_call_salt_command(self): + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'sys.list_modules': MagicMock(return_value=['module1']) + }): + sc = saltcheck.SaltCheck() + returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) + self.assertEqual(returned, 'hello') + + def test_call_salt_command2(self): + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'sys.list_modules': MagicMock(return_value=['module1']) + }): + #with patch.dict(saltcheck.__salt__, {'sys.list_modules': MagicMock(return_value=['module1'])}): + sc = saltcheck.SaltCheck() + returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) + self.assertNotEqual(returned, 'not-hello') + def test__assert_equal1(self): sc = saltcheck.SaltCheck() @@ -194,3 +198,18 @@ class SaltCheckTestCase(TestCase): b = 100 mybool = sc._SaltCheck__assert_less_equal(a, b) self.assertEqual(mybool, True) + + + 
#################################################### + #def test__is_valid_test(self): + # my_dict = {'module_and_function': 'test.echo', + # 'assertion': 'assertEqual', + # 'expected-return': 'True'} + # sc = saltcheck.SaltCheck() + # mybool = sc.__is_valid_test(my_dict) + # self.assertTrue(mybool) + + #def test_is_valid_module(self): + # sc = saltcheck.SaltCheck() + # returned = sc.is_valid_module('test') + # self.assertTrue(returned) From a5bfa36711d25bc5c83f9ff56f71e04f4ccd3872 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Tue, 25 Jul 2017 16:33:37 -0700 Subject: [PATCH 031/639] nice fixes --- salt/modules/saltcheck.py | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index 141d9cd8a2..cf259c7b04 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -98,6 +98,21 @@ def run_state_tests(state): result = scheck.run_test(value) results_dict[key] = result results[state_name] = results_dict + for state in results: + passed = 0 + failed = 0 + if len(results[state].items()) == 0: + results[state]['state test results'] = {'pass': passed, 'fail': failed} + else: + for dummy, val in results[state].items(): + if val: + passed = passed + 1 + elif val.upper().startswith('False'): + failed = failed + 1 + else: + failed = 0 + passed = 0 + results[state]['state test results'] = {'pass': passed, 'fail': failed} return results @@ -112,12 +127,13 @@ def run_highstate_tests(): for sta in states: log.info("State Name = {}".format(sta)) all_states.update(run_state_tests(sta)) - # result_dict = run_state_tests(sta) - # log.info("result_dict = {}".format(result_dict)) - # key = result_dict.keys()[0] - # val = result_dict.values()[0] - # all_states[key] = val - return {'highstate_test_result': all_states} + passed = 0 + failed = 0 + for state in all_states: + passed = all_states[state]['state test results']['pass'] + passed + failed = all_states[state]['state 
test results']['fail'] + failed + all_states['Total Pass/Fail:'] = {'pass': passed, 'fail': failed} + return all_states def _is_valid_module(module): From 7377c5a8284cad6a5a76218301b1bd93a35ffbf3 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Wed, 26 Jul 2017 15:00:15 -0700 Subject: [PATCH 032/639] added auto check of minion config option --- salt/modules/saltcheck.py | 8 +- tests/unit/modules/test_saltcheck.py | 335 ++++++++++++++++----------- 2 files changed, 211 insertions(+), 132 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index cf259c7b04..b233b5ecc7 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -7,6 +7,7 @@ import logging import os import time import yaml +import collections try: import salt.utils import salt.client @@ -123,7 +124,8 @@ def run_highstate_tests(): salt '*' salt_check.run_highstate_tests ''' states = _get_top_states() - all_states = {} + #all_states = {} + all_states = collections.OrderedDict() for sta in states: log.info("State Name = {}".format(sta)) all_states.update(run_state_tests(sta)) @@ -206,8 +208,8 @@ class SaltCheck(object): # log.info("modules are: {}".format(self.modules)) # self.salt_lc = salt.client.Caller(mopts=__opts__) self.salt_lc = salt.client.Caller() - # if self.auto_update_master_cache: - # update_master_cache() + if self.auto_update_master_cache: + update_master_cache() def __is_valid_test(self, test_dict): '''Determine if a test contains: diff --git a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py index 586b2dc2ea..875c0084f8 100644 --- a/tests/unit/modules/test_saltcheck.py +++ b/tests/unit/modules/test_saltcheck.py @@ -23,7 +23,8 @@ class SaltCheckTestCase(TestCase): def test_call_salt_command(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), - 'sys.list_modules': MagicMock(return_value=['module1']) + 'sys.list_modules': MagicMock(return_value=['module1']), + 'cp.cache_master': 
MagicMock(return_value=[True]) }): sc = saltcheck.SaltCheck() returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) @@ -31,185 +32,261 @@ class SaltCheckTestCase(TestCase): def test_call_salt_command2(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), - 'sys.list_modules': MagicMock(return_value=['module1']) + 'sys.list_modules': MagicMock(return_value=['module1']), + 'cp.cache_master': MagicMock(return_value=[True]) }): - #with patch.dict(saltcheck.__salt__, {'sys.list_modules': MagicMock(return_value=['module1'])}): sc = saltcheck.SaltCheck() returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) self.assertNotEqual(returned, 'not-hello') def test__assert_equal1(self): - sc = saltcheck.SaltCheck() - a = {'a': 1, 'b': 2} - b = {'a': 1, 'b': 2} - mybool = sc._SaltCheck__assert_equal(a, b) - self.assertTrue(mybool) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2} + mybool = sc._SaltCheck__assert_equal(a, b) + self.assertTrue(mybool) def test__assert_equal2(self): - sc = saltcheck.SaltCheck() - a = {'a': 1, 'b': 2} - b = {'a': 1, 'b': 2, 'c': 3} - mybool = sc._SaltCheck__assert_equal(False, True) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2, 'c': 3} + mybool = sc._SaltCheck__assert_equal(False, True) + self.assertNotEqual(mybool, True) def test__assert_not_equal1(self): - sc = saltcheck.SaltCheck() - a = {'a': 1, 'b': 2} - b = {'a': 1, 'b': 2, 'c': 3} - mybool = sc._SaltCheck__assert_not_equal(a, b) - self.assertTrue(mybool) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 
'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2, 'c': 3} + mybool = sc._SaltCheck__assert_not_equal(a, b) + self.assertTrue(mybool) def test__assert_not_equal2(self): - sc = saltcheck.SaltCheck() - a = {'a': 1, 'b': 2} - b = {'a': 1, 'b': 2} - mybool = sc._SaltCheck__assert_not_equal(a, b) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2} + mybool = sc._SaltCheck__assert_not_equal(a, b) + self.assertNotEqual(mybool, True) def test__assert_true1(self): - sc = saltcheck.SaltCheck() - mybool = sc._SaltCheck__assert_equal(True, True) - self.assertTrue(mybool) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_equal(True, True) + self.assertTrue(mybool) def test__assert_true2(self): - sc = saltcheck.SaltCheck() - mybool = sc._SaltCheck__assert_equal(False, True) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_equal(False, True) + self.assertNotEqual(mybool, True) def test__assert_false1(self): - sc = saltcheck.SaltCheck() - mybool = sc._SaltCheck__assert_false(False) - self.assertTrue(mybool) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_false(False) + self.assertTrue(mybool) def test__assert_false2(self): - sc = saltcheck.SaltCheck() - mybool = sc._SaltCheck__assert_false(True) - 
self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_false(True) + self.assertNotEqual(mybool, True) def test__assert_in1(self): - sc = saltcheck.SaltCheck() - a = "bob" - mylist = ['alice', 'bob', 'charles', 'dana'] - mybool = sc._SaltCheck__assert_in(a, mylist) - self.assertTrue(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = "bob" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_in(a, mylist) + self.assertTrue(mybool, True) def test__assert_in2(self): - sc = saltcheck.SaltCheck() - a = "elaine" - mylist = ['alice', 'bob', 'charles', 'dana'] - mybool = sc._SaltCheck__assert_in(a, mylist) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = "elaine" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_in(a, mylist) + self.assertNotEqual(mybool, True) def test__assert_not_in1(self): - sc = saltcheck.SaltCheck() - a = "elaine" - mylist = ['alice', 'bob', 'charles', 'dana'] - mybool = sc._SaltCheck__assert_not_in(a, mylist) - self.assertTrue(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = "elaine" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_not_in(a, mylist) + self.assertTrue(mybool, True) def test__assert_not_in2(self): - sc = saltcheck.SaltCheck() - a = "bob" - mylist = ['alice', 'bob', 'charles', 'dana'] - mybool = sc._SaltCheck__assert_not_in(a, mylist) - 
self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = "bob" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_not_in(a, mylist) + self.assertNotEqual(mybool, True) def test__assert_greater1(self): - sc = saltcheck.SaltCheck() - a = 110 - b = 100 - mybool = sc._SaltCheck__assert_greater(a, b) - self.assertTrue(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 110 + b = 100 + mybool = sc._SaltCheck__assert_greater(a, b) + self.assertTrue(mybool, True) def test__assert_greater2(self): - sc = saltcheck.SaltCheck() - a = 100 - b = 110 - mybool = sc._SaltCheck__assert_greater(a, b) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 100 + b = 110 + mybool = sc._SaltCheck__assert_greater(a, b) + self.assertNotEqual(mybool, True) def test__assert_greater3(self): - sc = saltcheck.SaltCheck() - a = 100 - b = 100 - mybool = sc._SaltCheck__assert_greater(a, b) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_greater(a, b) + self.assertNotEqual(mybool, True) def test__assert_greater_equal_equal1(self): - sc = saltcheck.SaltCheck() - a = 110 - b = 100 - mybool = sc._SaltCheck__assert_greater_equal(a, b) - self.assertTrue(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = 
saltcheck.SaltCheck() + a = 110 + b = 100 + mybool = sc._SaltCheck__assert_greater_equal(a, b) + self.assertTrue(mybool, True) def test__assert_greater_equal2(self): - sc = saltcheck.SaltCheck() - a = 100 - b = 110 - mybool = sc._SaltCheck__assert_greater_equal(a, b) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 100 + b = 110 + mybool = sc._SaltCheck__assert_greater_equal(a, b) + self.assertNotEqual(mybool, True) def test__assert_greater_equal3(self): - sc = saltcheck.SaltCheck() - a = 100 - b = 100 - mybool = sc._SaltCheck__assert_greater_equal(a, b) - self.assertEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_greater_equal(a, b) + self.assertEqual(mybool, True) def test__assert_less1(self): - sc = saltcheck.SaltCheck() - a = 99 - b = 100 - mybool = sc._SaltCheck__assert_less(a, b) - self.assertTrue(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 99 + b = 100 + mybool = sc._SaltCheck__assert_less(a, b) + self.assertTrue(mybool, True) def test__assert_less2(self): - sc = saltcheck.SaltCheck() - a = 110 - b = 99 - mybool = sc._SaltCheck__assert_less(a, b) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 110 + b = 99 + mybool = sc._SaltCheck__assert_less(a, b) + self.assertNotEqual(mybool, True) def test__assert_less3(self): - sc = saltcheck.SaltCheck() - a = 100 - b = 100 - mybool = sc._SaltCheck__assert_less(a, 
b) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_less(a, b) + self.assertNotEqual(mybool, True) def test__assert_less_equal1(self): - sc = saltcheck.SaltCheck() - a = 99 - b = 100 - mybool = sc._SaltCheck__assert_less_equal(a, b) - self.assertTrue(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 99 + b = 100 + mybool = sc._SaltCheck__assert_less_equal(a, b) + self.assertTrue(mybool, True) def test__assert_less_equal2(self): - sc = saltcheck.SaltCheck() - a = 110 - b = 99 - mybool = sc._SaltCheck__assert_less_equal(a, b) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 110 + b = 99 + mybool = sc._SaltCheck__assert_less_equal(a, b) + self.assertNotEqual(mybool, True) def test__assert_less_equal3(self): - sc = saltcheck.SaltCheck() - a = 100 - b = 100 - mybool = sc._SaltCheck__assert_less_equal(a, b) - self.assertEqual(mybool, True) - + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_less_equal(a, b) + self.assertEqual(mybool, True) + + def test_run_test_1(self): + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'sys.list_modules': MagicMock(return_value=['test']), + 'sys.list_functions': MagicMock(return_value=['test.echo']), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + returned = saltcheck.run_test(test={"module_and_function": "test.echo", 
"assertion": "assertEqual", "expected-return": "This works!", "args":["This works!"] }) + self.assertEqual(returned, True) + + # pillar injection not supported yet + #def test_run_test_2(self): + # with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + # 'sys.list_modules': MagicMock(return_value=['pillar']), + # 'sys.list_functions': MagicMock(return_value=['pillar.get']) + # }): + # returned = saltcheck.run_test(test={"module_and_function": "pillar.get", "pillar-data": "mykey:myvalue", "assertion": "assertEqual", "expected-return": "myvalue", "args":["mykey"] }) + # self.assertEqual(returned, True) - #################################################### - #def test__is_valid_test(self): - # my_dict = {'module_and_function': 'test.echo', - # 'assertion': 'assertEqual', - # 'expected-return': 'True'} - # sc = saltcheck.SaltCheck() - # mybool = sc.__is_valid_test(my_dict) - # self.assertTrue(mybool) - - #def test_is_valid_module(self): - # sc = saltcheck.SaltCheck() - # returned = sc.is_valid_module('test') - # self.assertTrue(returned) From 4b502b3c3b7b581144b1cca5607bafb50f5dfc55 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Thu, 20 Jul 2017 18:53:45 -0500 Subject: [PATCH 033/639] pylint fixes, initial unit test --- salt/modules/saltcheck.py | 500 +++++++++++++++++++++++++++ tests/unit/modules/test_saltcheck.py | 36 ++ 2 files changed, 536 insertions(+) create mode 100644 salt/modules/saltcheck.py create mode 100644 tests/unit/modules/test_saltcheck.py diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py new file mode 100644 index 0000000000..a4122cbf7d --- /dev/null +++ b/salt/modules/saltcheck.py @@ -0,0 +1,500 @@ +# -*- coding: utf-8 -*- +''' +This module should be saved as salt/modules/saltcheck.py +''' +from __future__ import absolute_import +import logging +import os +import time +import yaml +try: + import salt.utils + import salt.client + import salt.exceptions +except ImportError: + pass + +log = 
logging.getLogger(__name__) + +__virtualname__ = 'saltcheck' + + +def __virtual__(): + ''' + Check dependencies + ''' + return __virtualname__ + + +def update_master_cache(): + ''' + Updates the master cache onto the minion - to transfer all salt-check-tests + Should be done one time before running tests, and if tests are updated + + CLI Example: + salt '*' salt_check.update_master_cache + ''' + __salt__['cp.cache_master'](args=None, kwargs=None) + return True + + +def run_test(**kwargs): + ''' + Enables running one salt_check test via cli + CLI Example:: + salt '*' salt_check.run_test + test='{"module_and_function": "test.echo", + "assertion": "assertEqual", + "expected-return": "This works!", + "args":["This works!"] }' + ''' + # salt converts the string to a dictionary auto-magically + scheck = SaltCheck() + test = kwargs.get('test', None) + if test and isinstance(test, dict): + return scheck.run_test(test) + else: + return "test must be dictionary" + + +def run_state_tests(state): + ''' + Returns the output of running all salt check test for a state + CLI Example:: + salt '*' salt_check.run_state_tests postfix_ubuntu_16_04 + ''' + scheck = SaltCheck() + paths = scheck.get_state_search_path_list() + stl = StateTestLoader(search_paths=paths) + sls_list = scheck.get_state_sls(state) + sls_paths = stl.convert_sls_to_paths(sls_list) + for mypath in sls_paths: + stl.add_test_files_for_sls(mypath) + stl.load_test_suite() + results_dict = {} + for key, value in stl.test_dict.items(): + result = scheck.run_test(value) + results_dict[key] = result + return {state: results_dict} + + +class SaltCheck(object): + ''' + This class implements the saltcheck + ''' + + def __init__(self): + self.sls_list_top = [] + self.sls_list_state = [] + self.modules = [] + self.results_dict = {} + self.results_dict_summary = {} + self.assertions_list = '''assertEqual assertNotEqual + assertTrue assertFalse + assertIn assertGreater + assertGreaterEqual + assertLess assertLessEqual'''.split() 
+ # self.modules = self.populate_salt_modules_list() + # call when needed self.populate_salt_modules_list() + self.salt_lc = salt.client.Caller(mopts=__opts__) + + @staticmethod + def update_master_cache(): + '''Easy way to update the master files on the minion''' + __salt__['cp.cache_master'](args=None, kwargs=None) + return + + def get_top_sls(self): + ''' equivalent to a salt cli: salt web state.show_lowstate''' + # sls_list = [] + try: + returned = __salt__['state.show_lowstate']() + for i in returned: + if i['__sls__'] not in self.sls_list_top: + self.sls_list_top.append(i['__sls__']) + except Exception: + raise + # self.sls_list = sls_list + return self.sls_list_top + + def get_state_sls(self, state): + ''' equivalent to a salt cli: salt web state.show_low_sls STATE''' + try: + returned = __salt__['state.show_low_sls'](state) + for i in returned: + if i['__sls__'] not in self.sls_list_state: + self.sls_list_state.append(i['__sls__']) + except Exception: + raise + return self.sls_list_state + + def populate_salt_modules_list(self): + '''return a list of all modules available on minion''' + self.modules = __salt__['sys.list_modules']() + return + + def __is_valid_module(self, module_name): + '''Determines if a module is valid on a minion''' + if module_name not in self.modules: + return False + else: + return True + + @staticmethod + def __is_valid_function(module_name, function): + '''Determine if a function is valid for a module''' + try: + functions = __salt__['sys.list_functions'](module_name) + except salt.exceptions.SaltException: + functions = ["unable to look up functions"] + return "{0}.{1}".format(module_name, function) in functions + + def __is_valid_test(self, test_dict): + '''Determine if a test contains: + a test name, + a valid module and function, + a valid assertion, + an expected return value''' + tots = 0 # need 6 to pass test + m_and_f = test_dict.get('module_and_function', None) + assertion = test_dict.get('assertion', None) + 
expected_return = test_dict.get('expected-return', None) + if m_and_f: + tots += 1 + module, function = m_and_f.split('.') + if self.__is_valid_module(module): + tots += 1 + if self.__is_valid_function(module, function): + tots += 1 + if assertion: + tots += 1 + if assertion in self.assertions_list: + tots += 1 + if expected_return: + tots += 1 + return tots >= 6 + + def call_salt_command(self, + fun, + args=None, + kwargs=None): + '''Generic call of salt Caller command''' + value = False + try: + if args and kwargs: + value = self.salt_lc.function(fun, *args, **kwargs) + elif args and not kwargs: + value = self.salt_lc.function(fun, *args) + elif not args and kwargs: + value = self.salt_lc.function(fun, **kwargs) + else: + value = self.salt_lc.function(fun) + except salt.exceptions.SaltException: + raise + except Exception: + raise + return value + + def call_salt_command_test(self, + fun + ): + '''Generic call of salt Caller command''' + value = False + try: + value = self.salt_lc.function(fun) + except salt.exceptions.SaltException: + raise + return value + + def run_test(self, test_dict): + '''Run a single salt_check test''' + if self.__is_valid_test(test_dict): + mod_and_func = test_dict['module_and_function'] + args = test_dict.get('args', None) + assertion = test_dict['assertion'] + expected_return = test_dict['expected-return'] + kwargs = test_dict.get('kwargs', None) + actual_return = self.call_salt_command(mod_and_func, args, kwargs) + # checking for membership in a list does not require a type cast + if assertion != "assertIn": + expected_return = self.cast_expected_to_returned_type(expected_return, actual_return) + # return actual_return + if assertion == "assertEqual": + value = self.__assert_equal(expected_return, actual_return) + elif assertion == "assertNotEqual": + value = self.__assert_not_equal(expected_return, actual_return) + elif assertion == "assertTrue": + value = self.__assert_true(expected_return) + elif assertion == "assertFalse": + value 
= self.__assert_false(expected_return) + elif assertion == "assertIn": + value = self.__assert_in(expected_return, actual_return) + elif assertion == "assertNotIn": + value = self.__assert_not_in(expected_return, actual_return) + elif assertion == "assertGreater": + value = self.__assert_greater(expected_return, actual_return) + elif assertion == "assertGreaterEqual": + value = self.__assert_greater_equal(expected_return, actual_return) + elif assertion == "assertLess": + value = self.__assert_less(expected_return, actual_return) + elif assertion == "assertLessEqual": + value = self.__assert_less_equal(expected_return, actual_return) + else: + value = False + else: + return False + return value + + @staticmethod + def cast_expected_to_returned_type(expected, returned): + ''' + Determine the type of variable returned + Cast the expected to the type of variable returned + ''' + ret_type = type(returned) + new_expected = expected + if expected == "False" and ret_type == bool: + expected = False + try: + new_expected = ret_type(expected) + except ValueError: + log.info("Unable to cast expected into type of returned") + log.info("returned = {}".format(returned)) + log.info("type of returned = {}".format(type(returned))) + log.info("expected = {}".format(expected)) + log.info("type of expected = {}".format(type(expected))) + return new_expected + + @staticmethod + def __assert_equal(expected, returned): + ''' + Test if two objects are equal + ''' + result = True + + try: + assert (expected == returned), "{0} is not equal to {1}".format(expected, returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_not_equal(expected, returned): + ''' + Test if two objects are not equal + ''' + result = (True) + try: + assert (expected != returned), "{0} is equal to {1}".format(expected, returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def 
__assert_true(returned): + ''' + Test if an boolean is True + ''' + result = (True) + try: + assert (returned is True), "{0} not True".format(returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_false(returned): + ''' + Test if an boolean is False + ''' + result = (True) + if isinstance(returned, str): + try: + returned = bool(returned) + except ValueError: + raise + try: + assert (returned is False), "{0} not False".format(returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_in(expected, returned): + ''' + Test if a value is in the list of returned values + ''' + result = (True) + try: + assert (expected in returned), "{0} not False".format(returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_not_in(expected, returned): + ''' + Test if a value is in the list of returned values + ''' + result = (True) + try: + assert (expected not in returned), "{0} not False".format(returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_greater(expected, returned): + ''' + Test if a value is in the list of returned values + ''' + result = (True) + try: + assert (expected > returned), "{0} not False".format(returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_greater_equal(expected, returned): + ''' + Test if a value is in the list of returned values + ''' + result = (True) + try: + assert (expected >= returned), "{0} not False".format(returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_less(expected, returned): + ''' + Test if a value is in the list of returned values + ''' + result = (True) + try: + assert (expected < returned), "{0} not False".format(returned) + 
except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __assert_less_equal(expected, returned): + ''' + Test if a value is in the list of returned values + ''' + result = (True) + try: + assert (expected <= returned), "{0} not False".format(returned) + except AssertionError as err: + result = "False: " + str(err) + return result + + @staticmethod + def __show_minion_options(): + '''gather and return minion config options''' + cachedir = __opts__['cachedir'] + root_dir = __opts__['root_dir'] + states_dirs = __opts__['states_dirs'] + environment = __opts__['environment'] + file_roots = __opts__['file_roots'] + return {'cachedir': cachedir, + 'root_dir': root_dir, + 'states_dirs': states_dirs, + 'environment': environment, + 'file_roots': file_roots} + + @staticmethod + def get_state_search_path_list(): + '''For the state file system, return a + list of paths to search for states''' + # state cache should be updated before running this method + search_list = [] + cachedir = __opts__.get('cachedir', None) + environment = __opts__['environment'] + if environment: + path = cachedir + os.sep + "files" + os.sep + environment + search_list.append(path) + path = cachedir + os.sep + "files" + os.sep + "base" + search_list.append(path) + return search_list + + def __get_state_dir(self): + ''''return the path of the state dir''' + paths = self.get_state_search_path_list() + return paths + + +class StateTestLoader(object): + ''' + Class loads in test files for a state + e.g. 
state_dir/salt-check-tests/[1.tst, 2.tst, 3.tst] + ''' + + def __init__(self, search_paths): + self.search_paths = search_paths + self.path_type = None + self.test_files = [] # list of file paths + self.test_dict = {} + + def load_test_suite(self): + '''load tests either from one file, or a set of files''' + for myfile in self.test_files: + self.load_file(myfile) + + def load_file(self, filepath): + ''' + loads in one test file + ''' + try: + with salt.utils.fopen(filepath, 'r') as myfile: + # myfile = open(filepath, 'r') + contents_yaml = yaml.load(myfile) + for key, value in contents_yaml.items(): + self.test_dict[key] = value + except: + raise + return + + def gather_files(self, filepath): + '''gather files for a test suite''' + log.info("gather_files: {}".format(time.time())) + filepath = filepath + os.sep + 'salt-check-tests' + rootdir = filepath + for dirname, filelist in os.walk(rootdir): + for fname in filelist: + if fname.endswith('.tst'): + start_path = dirname + os.sep + fname + full_path = os.path.abspath(start_path) + self.test_files.append(full_path) + return + + @staticmethod + def convert_sls_to_paths(sls_list): + '''Converting sls to paths''' + new_sls_list = [] + for sls in sls_list: + sls = sls.replace(".", os.sep) + new_sls_list.append(sls) + return new_sls_list + + def add_test_files_for_sls(self, sls_path): + '''Adding test files''' + # state_path = None + for path in self.search_paths: + full_path = path + os.sep + sls_path + rootdir = full_path + if os.path.isdir(full_path): + log.info("searching path= {}".format(full_path)) + for dirname, subdirlist in os.walk(rootdir, topdown=True): + if "salt-check-tests" in subdirlist: + self.gather_files(dirname) + log.info("test_files list: {}".format(self.test_files)) + log.info("found subdir match in = {}".format(dirname)) + else: + log.info("did not find subdir match in = {}".format(dirname)) + del subdirlist[:] + else: + log.info("path is not a directory= {}".format(full_path)) + return diff --git 
a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py new file mode 100644 index 0000000000..cabbae8244 --- /dev/null +++ b/tests/unit/modules/test_saltcheck.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Import python libs +from __future__ import absolute_import, print_function + +# Import Salt Testing libs +from tests.support.unit import skipIf, TestCase +from salt.exceptions import CommandExecutionError + +# Import salt libs +import salt.modules.saltcheck as saltcheck + + +class SaltCheckTestCase(TestCase): + ''' SaltCheckTestCase''' + + def test_ping(self): + #self.assertTrue(True) + self.assertTrue(saltcheck.ping) + + def test_update_master_cache(self): + self.assertTrue(saltcheck.update_master_cache) + + #def test_sc_update_master_cache(self): + # sc = saltcheck.SaltCheck() + # self.assertTrue(sc.update_master_cache) + + def test_get_top_sls(self): + self.assertTrue(saltcheck.get_top_sls) + + def test_sc_add_nums(self): + sc = saltcheck.SaltCheck() + val = sc.add_nums(10, 1) + self.assertEqual(val, 11) + + From 5d088a6359714c5509069e14f7eec09018a823d4 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Fri, 21 Jul 2017 12:11:27 -0500 Subject: [PATCH 034/639] added lots of unit tests --- salt/modules/saltcheck.py | 139 +++++++++++++------- tests/unit/modules/test_saltcheck.py | 184 +++++++++++++++++++++++++-- 2 files changed, 262 insertions(+), 61 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index a4122cbf7d..bce5cd5262 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -21,7 +21,7 @@ __virtualname__ = 'saltcheck' def __virtual__(): ''' - Check dependencies + Check dependencies - may be useful in future ''' return __virtualname__ @@ -78,6 +78,25 @@ def run_state_tests(state): return {state: results_dict} +def run_highstate_tests(): + ''' + Returns the output of running all salt checks of states that would apply for a highstate + CLI Example:: + salt '*' 
salt_check.run_highstate_tests + ''' + scheck = SaltCheck() + states = scheck.get_top_states() + all_states = {} + for sta in states: + log.info("State Name = {}".format(sta)) + result_dict = run_state_tests(sta) + log.info("result_dict = {}".format(result_dict)) + key = result_dict.keys()[0] + val = result_dict.values()[0] + all_states[key] = val + return all_states + + class SaltCheck(object): ''' This class implements the saltcheck @@ -95,27 +114,49 @@ class SaltCheck(object): assertGreaterEqual assertLess assertLessEqual'''.split() # self.modules = self.populate_salt_modules_list() - # call when needed self.populate_salt_modules_list() - self.salt_lc = salt.client.Caller(mopts=__opts__) + # self.salt_lc = salt.client.Caller(mopts=__opts__) + self.salt_lc = salt.client.Caller() + + # @staticmethod + # def update_master_cache(): + # '''Easy way to update the master files on the minion''' + # # currently unused, but might be useful later + # __salt__['cp.cache_master'](args=None, kwargs=None) + # return + + # def get_top_sls(self): + # ''' equivalent to a salt cli: salt web state.show_lowstate''' + # # sls_list = [] + # try: + # returned = __salt__['state.show_lowstate']() + # for i in returned: + # if i['__sls__'] not in self.sls_list_top: + # self.sls_list_top.append(i['__sls__']) + # except Exception: + # raise + # # self.sls_list = sls_list + # return self.sls_list_top @staticmethod - def update_master_cache(): - '''Easy way to update the master files on the minion''' - __salt__['cp.cache_master'](args=None, kwargs=None) - return - - def get_top_sls(self): - ''' equivalent to a salt cli: salt web state.show_lowstate''' - # sls_list = [] + def get_top_states(): + ''' equivalent to a salt cli: salt web state.show_top''' try: - returned = __salt__['state.show_lowstate']() - for i in returned: - if i['__sls__'] not in self.sls_list_top: - self.sls_list_top.append(i['__sls__']) + returned = __salt__['state.show_top']() + # returned = 
self.call_salt_command(fun='state.show_top', + # args=None, + # kwargs=None) + # doing this to handle states with periods + # e.g. apache.vhost_web1 + alt_states = [] + for state in returned['base']: + state_bits = state.split(".") + state_name = state_bits[0] + if state_name not in alt_states: + alt_states.append(state_name) except Exception: raise - # self.sls_list = sls_list - return self.sls_list_top + log.info("top states: {}".format(alt_states)) + return alt_states def get_state_sls(self, state): ''' equivalent to a salt cli: salt web state.show_low_sls STATE''' @@ -195,16 +236,16 @@ class SaltCheck(object): raise return value - def call_salt_command_test(self, - fun - ): - '''Generic call of salt Caller command''' - value = False - try: - value = self.salt_lc.function(fun) - except salt.exceptions.SaltException: - raise - return value + # def call_salt_command_test(self, + # fun + # ): + # '''Generic call of salt Caller command''' + # value = False + # try: + # value = self.salt_lc.function(fun) + # except salt.exceptions.SaltException: + # raise + # return value def run_test(self, test_dict): '''Run a single salt_check test''' @@ -334,7 +375,7 @@ class SaltCheck(object): @staticmethod def __assert_not_in(expected, returned): ''' - Test if a value is in the list of returned values + Test if a value is not in the list of returned values ''' result = (True) try: @@ -346,7 +387,7 @@ class SaltCheck(object): @staticmethod def __assert_greater(expected, returned): ''' - Test if a value is in the list of returned values + Test if a value is greater than the returned value ''' result = (True) try: @@ -358,7 +399,7 @@ class SaltCheck(object): @staticmethod def __assert_greater_equal(expected, returned): ''' - Test if a value is in the list of returned values + Test if a value is greater than or equal to the returned value ''' result = (True) try: @@ -370,7 +411,7 @@ class SaltCheck(object): @staticmethod def __assert_less(expected, returned): ''' - Test if a value 
is in the list of returned values + Test if a value is less than the returned value ''' result = (True) try: @@ -382,7 +423,7 @@ class SaltCheck(object): @staticmethod def __assert_less_equal(expected, returned): ''' - Test if a value is in the list of returned values + Test if a value is less than or equal to the returned value ''' result = (True) try: @@ -391,19 +432,19 @@ class SaltCheck(object): result = "False: " + str(err) return result - @staticmethod - def __show_minion_options(): - '''gather and return minion config options''' - cachedir = __opts__['cachedir'] - root_dir = __opts__['root_dir'] - states_dirs = __opts__['states_dirs'] - environment = __opts__['environment'] - file_roots = __opts__['file_roots'] - return {'cachedir': cachedir, - 'root_dir': root_dir, - 'states_dirs': states_dirs, - 'environment': environment, - 'file_roots': file_roots} + # @staticmethod + # def __show_minion_options(): + # '''gather and return minion config options''' + # cachedir = __opts__['cachedir'] + # root_dir = __opts__['root_dir'] + # states_dirs = __opts__['states_dirs'] + # environment = __opts__['environment'] + # file_roots = __opts__['file_roots'] + # return {'cachedir': cachedir, + # 'root_dir': root_dir, + # 'states_dirs': states_dirs, + # 'environment': environment, + # 'file_roots': file_roots} @staticmethod def get_state_search_path_list(): @@ -420,10 +461,10 @@ class SaltCheck(object): search_list.append(path) return search_list - def __get_state_dir(self): - ''''return the path of the state dir''' - paths = self.get_state_search_path_list() - return paths + # def __get_state_dir(self): + # ''''return the path of the state dir''' + # paths = self.get_state_search_path_list() + # return paths class StateTestLoader(object): diff --git a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py index cabbae8244..e43a40f654 100644 --- a/tests/unit/modules/test_saltcheck.py +++ b/tests/unit/modules/test_saltcheck.py @@ -14,23 +14,183 @@ import 
salt.modules.saltcheck as saltcheck class SaltCheckTestCase(TestCase): ''' SaltCheckTestCase''' - def test_ping(self): - #self.assertTrue(True) - self.assertTrue(saltcheck.ping) - def test_update_master_cache(self): self.assertTrue(saltcheck.update_master_cache) - #def test_sc_update_master_cache(self): + def test_call_salt_command(self): + sc = saltcheck.SaltCheck() + returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) + self.assertEqual(returned, 'hello') + + #def test__is_valid_test(self): + # my_dict = {'module_and_function': 'test.echo', + # 'assertion': 'assertEqual', + # 'expected-return': 'True'} # sc = saltcheck.SaltCheck() - # self.assertTrue(sc.update_master_cache) + # mybool = sc.__is_valid_test(my_dict) + # self.assertTrue(mybool) + + #def test_is_valid_module(self): + # sc = saltcheck.SaltCheck() + # returned = sc.is_valid_module('test') + # self.assertTrue(returned) - def test_get_top_sls(self): - self.assertTrue(saltcheck.get_top_sls) + def test__assert_equal1(self): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2} + mybool = sc._SaltCheck__assert_equal(a, b) + self.assertTrue(mybool) - def test_sc_add_nums(self): - sc = saltcheck.SaltCheck() - val = sc.add_nums(10, 1) - self.assertEqual(val, 11) + def test__assert_equal2(self): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2, 'c': 3} + mybool = sc._SaltCheck__assert_equal(False, True) + self.assertNotEqual(mybool, True) + def test__assert_not_equal1(self): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2, 'c': 3} + mybool = sc._SaltCheck__assert_not_equal(a, b) + self.assertTrue(mybool) + def test__assert_not_equal2(self): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2} + mybool = sc._SaltCheck__assert_not_equal(a, b) + self.assertNotEqual(mybool, True) + + def test__assert_true1(self): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_equal(True, True) + 
self.assertTrue(mybool) + + def test__assert_true2(self): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_equal(False, True) + self.assertNotEqual(mybool, True) + + def test__assert_false1(self): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_false(False) + self.assertTrue(mybool) + + def test__assert_false2(self): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_false(True) + self.assertNotEqual(mybool, True) + + def test__assert_in1(self): + sc = saltcheck.SaltCheck() + a = "bob" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_in(a, mylist) + self.assertTrue(mybool, True) + + def test__assert_in2(self): + sc = saltcheck.SaltCheck() + a = "elaine" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_in(a, mylist) + self.assertNotEqual(mybool, True) + + def test__assert_not_in1(self): + sc = saltcheck.SaltCheck() + a = "elaine" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_not_in(a, mylist) + self.assertTrue(mybool, True) + + def test__assert_not_in2(self): + sc = saltcheck.SaltCheck() + a = "bob" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_not_in(a, mylist) + self.assertNotEqual(mybool, True) + + def test__assert_greater1(self): + sc = saltcheck.SaltCheck() + a = 110 + b = 100 + mybool = sc._SaltCheck__assert_greater(a, b) + self.assertTrue(mybool, True) + + def test__assert_greater2(self): + sc = saltcheck.SaltCheck() + a = 100 + b = 110 + mybool = sc._SaltCheck__assert_greater(a, b) + self.assertNotEqual(mybool, True) + + def test__assert_greater3(self): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_greater(a, b) + self.assertNotEqual(mybool, True) + + def test__assert_greater_equal_equal1(self): + sc = saltcheck.SaltCheck() + a = 110 + b = 100 + mybool = sc._SaltCheck__assert_greater_equal(a, b) + self.assertTrue(mybool, True) + + def 
test__assert_greater_equal2(self): + sc = saltcheck.SaltCheck() + a = 100 + b = 110 + mybool = sc._SaltCheck__assert_greater_equal(a, b) + self.assertNotEqual(mybool, True) + + def test__assert_greater_equal3(self): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_greater_equal(a, b) + self.assertEqual(mybool, True) + + def test__assert_less1(self): + sc = saltcheck.SaltCheck() + a = 99 + b = 100 + mybool = sc._SaltCheck__assert_less(a, b) + self.assertTrue(mybool, True) + + def test__assert_less2(self): + sc = saltcheck.SaltCheck() + a = 110 + b = 99 + mybool = sc._SaltCheck__assert_less(a, b) + self.assertNotEqual(mybool, True) + + def test__assert_less3(self): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_less(a, b) + self.assertNotEqual(mybool, True) + + def test__assert_less_equal1(self): + sc = saltcheck.SaltCheck() + a = 99 + b = 100 + mybool = sc._SaltCheck__assert_less_equal(a, b) + self.assertTrue(mybool, True) + + def test__assert_less_equal2(self): + sc = saltcheck.SaltCheck() + a = 110 + b = 99 + mybool = sc._SaltCheck__assert_less_equal(a, b) + self.assertNotEqual(mybool, True) + + def test__assert_less_equal3(self): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_less_equal(a, b) + self.assertEqual(mybool, True) From 87fbc7496cf73712b62f235842933823935276ba Mon Sep 17 00:00:00 2001 From: William Cannon Date: Fri, 21 Jul 2017 16:00:49 -0500 Subject: [PATCH 035/639] new check for updating master cache based on salt config.get --- salt/modules/saltcheck.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index bce5cd5262..ec935ccde1 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -34,7 +34,7 @@ def update_master_cache(): CLI Example: salt '*' salt_check.update_master_cache ''' - __salt__['cp.cache_master'](args=None, 
kwargs=None) + __salt__['cp.cache_master']() return True @@ -103,26 +103,23 @@ class SaltCheck(object): ''' def __init__(self): - self.sls_list_top = [] + # self.sls_list_top = [] self.sls_list_state = [] self.modules = [] self.results_dict = {} self.results_dict_summary = {} + self.auto_update_master_cache = __salt__['config.get']('auto_update_master_cache', False) self.assertions_list = '''assertEqual assertNotEqual assertTrue assertFalse assertIn assertGreater assertGreaterEqual assertLess assertLessEqual'''.split() - # self.modules = self.populate_salt_modules_list() + self.populate_salt_modules_list() + # log.info("modules are: {}".format(self.modules)) # self.salt_lc = salt.client.Caller(mopts=__opts__) self.salt_lc = salt.client.Caller() - - # @staticmethod - # def update_master_cache(): - # '''Easy way to update the master files on the minion''' - # # currently unused, but might be useful later - # __salt__['cp.cache_master'](args=None, kwargs=None) - # return + if self.auto_update_master_cache: + update_master_cache() # def get_top_sls(self): # ''' equivalent to a salt cli: salt web state.show_lowstate''' @@ -489,8 +486,7 @@ class StateTestLoader(object): loads in one test file ''' try: - with salt.utils.fopen(filepath, 'r') as myfile: - # myfile = open(filepath, 'r') + with salt.utils.files.fopen(filepath, 'r') as myfile: contents_yaml = yaml.load(myfile) for key, value in contents_yaml.items(): self.test_dict[key] = value @@ -503,7 +499,8 @@ class StateTestLoader(object): log.info("gather_files: {}".format(time.time())) filepath = filepath + os.sep + 'salt-check-tests' rootdir = filepath - for dirname, filelist in os.walk(rootdir): + # for dirname, subdirlist, filelist in os.walk(rootdir): + for dirname, dummy, filelist in os.walk(rootdir): for fname in filelist: if fname.endswith('.tst'): start_path = dirname + os.sep + fname @@ -528,7 +525,8 @@ class StateTestLoader(object): rootdir = full_path if os.path.isdir(full_path): log.info("searching path= 
{}".format(full_path)) - for dirname, subdirlist in os.walk(rootdir, topdown=True): + # for dirname, subdirlist, filelist in os.walk(rootdir, topdown=True): + for dirname, subdirlist, dummy in os.walk(rootdir, topdown=True): if "salt-check-tests" in subdirlist: self.gather_files(dirname) log.info("test_files list: {}".format(self.test_files)) From 1088e5eddc3418ba19ca7a1ee7a41844d6896c8b Mon Sep 17 00:00:00 2001 From: William Cannon Date: Tue, 25 Jul 2017 14:11:25 -0700 Subject: [PATCH 036/639] major updates - refactored to make __salt__ work in module with unit tests, squashed deep nesting bug too --- salt/modules/saltcheck.py | 218 +++++++++++++-------------- tests/unit/modules/test_saltcheck.py | 53 ++++--- 2 files changed, 137 insertions(+), 134 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index ec935ccde1..141d9cd8a2 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -57,7 +57,7 @@ def run_test(**kwargs): return "test must be dictionary" -def run_state_tests(state): +def run_state_tests_old(state): ''' Returns the output of running all salt check test for a state CLI Example:: @@ -66,7 +66,7 @@ def run_state_tests(state): scheck = SaltCheck() paths = scheck.get_state_search_path_list() stl = StateTestLoader(search_paths=paths) - sls_list = scheck.get_state_sls(state) + sls_list = _get_state_sls(state) sls_paths = stl.convert_sls_to_paths(sls_list) for mypath in sls_paths: stl.add_test_files_for_sls(mypath) @@ -78,23 +78,96 @@ def run_state_tests(state): return {state: results_dict} +def run_state_tests(state): + ''' + Returns the output of running all salt check test for a state + CLI Example:: + salt '*' salt_check.run_state_tests postfix_ubuntu_16_04 + ''' + scheck = SaltCheck() + paths = scheck.get_state_search_path_list() + stl = StateTestLoader(search_paths=paths) + results = {} + sls_list = _get_state_sls(state) + for state_name in sls_list: + mypath = stl.convert_sls_to_path(state_name) + 
stl.add_test_files_for_sls(mypath) + stl.load_test_suite() + results_dict = {} + for key, value in stl.test_dict.items(): + result = scheck.run_test(value) + results_dict[key] = result + results[state_name] = results_dict + return results + + def run_highstate_tests(): ''' Returns the output of running all salt checks of states that would apply for a highstate CLI Example:: salt '*' salt_check.run_highstate_tests ''' - scheck = SaltCheck() - states = scheck.get_top_states() + states = _get_top_states() all_states = {} for sta in states: log.info("State Name = {}".format(sta)) - result_dict = run_state_tests(sta) - log.info("result_dict = {}".format(result_dict)) - key = result_dict.keys()[0] - val = result_dict.values()[0] - all_states[key] = val - return all_states + all_states.update(run_state_tests(sta)) + # result_dict = run_state_tests(sta) + # log.info("result_dict = {}".format(result_dict)) + # key = result_dict.keys()[0] + # val = result_dict.values()[0] + # all_states[key] = val + return {'highstate_test_result': all_states} + + +def _is_valid_module(module): + '''return a list of all modules available on minion''' + modules = __salt__['sys.list_modules']() + return bool(module in modules) + + +def _get_auto_update_cache_value(): + '''return the config value of auto_update_master_cache''' + __salt__['config.get']('auto_update_master_cache') + return True + + +def _is_valid_function(module_name, function): + '''Determine if a function is valid for a module''' + try: + functions = __salt__['sys.list_functions'](module_name) + except salt.exceptions.SaltException: + functions = ["unable to look up functions"] + return "{0}.{1}".format(module_name, function) in functions + + +def _get_top_states(): + ''' equivalent to a salt cli: salt web state.show_top''' + try: + returned = __salt__['state.show_top']() + alt_states = [] + for state in returned['base']: + state_bits = state.split(".") + state_name = state_bits[0] + if state_name not in alt_states: + 
alt_states.append(state_name) + except Exception: + raise + log.info("top states: {}".format(alt_states)) + return alt_states + + +def _get_state_sls(state): + ''' equivalent to a salt cli: salt web state.show_low_sls STATE''' + sls_list_state = [] + try: + returned = __salt__['state.show_low_sls'](state) + for i in returned: + if i['__sls__'] not in sls_list_state: + sls_list_state.append(i['__sls__']) + except Exception: + raise + return sls_list_state class SaltCheck(object): @@ -108,84 +181,17 @@ class SaltCheck(object): self.modules = [] self.results_dict = {} self.results_dict_summary = {} - self.auto_update_master_cache = __salt__['config.get']('auto_update_master_cache', False) self.assertions_list = '''assertEqual assertNotEqual assertTrue assertFalse assertIn assertGreater assertGreaterEqual assertLess assertLessEqual'''.split() - self.populate_salt_modules_list() + self.auto_update_master_cache = _get_auto_update_cache_value # log.info("modules are: {}".format(self.modules)) # self.salt_lc = salt.client.Caller(mopts=__opts__) self.salt_lc = salt.client.Caller() - if self.auto_update_master_cache: - update_master_cache() - - # def get_top_sls(self): - # ''' equivalent to a salt cli: salt web state.show_lowstate''' - # # sls_list = [] - # try: - # returned = __salt__['state.show_lowstate']() - # for i in returned: - # if i['__sls__'] not in self.sls_list_top: - # self.sls_list_top.append(i['__sls__']) - # except Exception: - # raise - # # self.sls_list = sls_list - # return self.sls_list_top - - @staticmethod - def get_top_states(): - ''' equivalent to a salt cli: salt web state.show_top''' - try: - returned = __salt__['state.show_top']() - # returned = self.call_salt_command(fun='state.show_top', - # args=None, - # kwargs=None) - # doing this to handle states with periods - # e.g. 
apache.vhost_web1 - alt_states = [] - for state in returned['base']: - state_bits = state.split(".") - state_name = state_bits[0] - if state_name not in alt_states: - alt_states.append(state_name) - except Exception: - raise - log.info("top states: {}".format(alt_states)) - return alt_states - - def get_state_sls(self, state): - ''' equivalent to a salt cli: salt web state.show_low_sls STATE''' - try: - returned = __salt__['state.show_low_sls'](state) - for i in returned: - if i['__sls__'] not in self.sls_list_state: - self.sls_list_state.append(i['__sls__']) - except Exception: - raise - return self.sls_list_state - - def populate_salt_modules_list(self): - '''return a list of all modules available on minion''' - self.modules = __salt__['sys.list_modules']() - return - - def __is_valid_module(self, module_name): - '''Determines if a module is valid on a minion''' - if module_name not in self.modules: - return False - else: - return True - - @staticmethod - def __is_valid_function(module_name, function): - '''Determine if a function is valid for a module''' - try: - functions = __salt__['sys.list_functions'](module_name) - except salt.exceptions.SaltException: - functions = ["unable to look up functions"] - return "{0}.{1}".format(module_name, function) in functions + # if self.auto_update_master_cache: + # update_master_cache() def __is_valid_test(self, test_dict): '''Determine if a test contains: @@ -193,16 +199,16 @@ class SaltCheck(object): a valid module and function, a valid assertion, an expected return value''' - tots = 0 # need 6 to pass test + tots = 0 # need total of >= 6 to pass test m_and_f = test_dict.get('module_and_function', None) assertion = test_dict.get('assertion', None) expected_return = test_dict.get('expected-return', None) if m_and_f: tots += 1 module, function = m_and_f.split('.') - if self.__is_valid_module(module): + if _is_valid_module(module): tots += 1 - if self.__is_valid_function(module, function): + if _is_valid_function(module, 
function): tots += 1 if assertion: tots += 1 @@ -233,17 +239,6 @@ class SaltCheck(object): raise return value - # def call_salt_command_test(self, - # fun - # ): - # '''Generic call of salt Caller command''' - # value = False - # try: - # value = self.salt_lc.function(fun) - # except salt.exceptions.SaltException: - # raise - # return value - def run_test(self, test_dict): '''Run a single salt_check test''' if self.__is_valid_test(test_dict): @@ -256,7 +251,6 @@ class SaltCheck(object): # checking for membership in a list does not require a type cast if assertion != "assertIn": expected_return = self.cast_expected_to_returned_type(expected_return, actual_return) - # return actual_return if assertion == "assertEqual": value = self.__assert_equal(expected_return, actual_return) elif assertion == "assertNotEqual": @@ -429,20 +423,6 @@ class SaltCheck(object): result = "False: " + str(err) return result - # @staticmethod - # def __show_minion_options(): - # '''gather and return minion config options''' - # cachedir = __opts__['cachedir'] - # root_dir = __opts__['root_dir'] - # states_dirs = __opts__['states_dirs'] - # environment = __opts__['environment'] - # file_roots = __opts__['file_roots'] - # return {'cachedir': cachedir, - # 'root_dir': root_dir, - # 'states_dirs': states_dirs, - # 'environment': environment, - # 'file_roots': file_roots} - @staticmethod def get_state_search_path_list(): '''For the state file system, return a @@ -458,16 +438,11 @@ class SaltCheck(object): search_list.append(path) return search_list - # def __get_state_dir(self): - # ''''return the path of the state dir''' - # paths = self.get_state_search_path_list() - # return paths - class StateTestLoader(object): ''' Class loads in test files for a state - e.g. state_dir/salt-check-tests/[1.tst, 2.tst, 3.tst] + e.g. 
state_dir/saltcheck-tests/[1.tst, 2.tst, 3.tst] ''' def __init__(self, search_paths): @@ -478,8 +453,10 @@ class StateTestLoader(object): def load_test_suite(self): '''load tests either from one file, or a set of files''' + self.test_dict = {} for myfile in self.test_files: self.load_file(myfile) + self.test_files = [] def load_file(self, filepath): ''' @@ -496,8 +473,9 @@ class StateTestLoader(object): def gather_files(self, filepath): '''gather files for a test suite''' + self.test_files = [] log.info("gather_files: {}".format(time.time())) - filepath = filepath + os.sep + 'salt-check-tests' + filepath = filepath + os.sep + 'saltcheck-tests' rootdir = filepath # for dirname, subdirlist, filelist in os.walk(rootdir): for dirname, dummy, filelist in os.walk(rootdir): @@ -517,6 +495,12 @@ class StateTestLoader(object): new_sls_list.append(sls) return new_sls_list + @staticmethod + def convert_sls_to_path(sls): + '''Converting sls to paths''' + sls = sls.replace(".", os.sep) + return sls + def add_test_files_for_sls(self, sls_path): '''Adding test files''' # state_path = None @@ -527,7 +511,7 @@ class StateTestLoader(object): log.info("searching path= {}".format(full_path)) # for dirname, subdirlist, filelist in os.walk(rootdir, topdown=True): for dirname, subdirlist, dummy in os.walk(rootdir, topdown=True): - if "salt-check-tests" in subdirlist: + if "saltcheck-tests" in subdirlist: self.gather_files(dirname) log.info("test_files list: {}".format(self.test_files)) log.info("found subdir match in = {}".format(dirname)) diff --git a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py index e43a40f654..586b2dc2ea 100644 --- a/tests/unit/modules/test_saltcheck.py +++ b/tests/unit/modules/test_saltcheck.py @@ -4,12 +4,15 @@ from __future__ import absolute_import, print_function # Import Salt Testing libs +from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import skipIf, TestCase -from salt.exceptions import 
CommandExecutionError +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch # Import salt libs +from salt.exceptions import CommandExecutionError import salt.modules.saltcheck as saltcheck +saltcheck.__salt__ = {} class SaltCheckTestCase(TestCase): ''' SaltCheckTestCase''' @@ -17,23 +20,24 @@ class SaltCheckTestCase(TestCase): def test_update_master_cache(self): self.assertTrue(saltcheck.update_master_cache) - def test_call_salt_command(self): - sc = saltcheck.SaltCheck() - returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) - self.assertEqual(returned, 'hello') - #def test__is_valid_test(self): - # my_dict = {'module_and_function': 'test.echo', - # 'assertion': 'assertEqual', - # 'expected-return': 'True'} - # sc = saltcheck.SaltCheck() - # mybool = sc.__is_valid_test(my_dict) - # self.assertTrue(mybool) - - #def test_is_valid_module(self): - # sc = saltcheck.SaltCheck() - # returned = sc.is_valid_module('test') - # self.assertTrue(returned) + def test_call_salt_command(self): + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'sys.list_modules': MagicMock(return_value=['module1']) + }): + sc = saltcheck.SaltCheck() + returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) + self.assertEqual(returned, 'hello') + + def test_call_salt_command2(self): + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'sys.list_modules': MagicMock(return_value=['module1']) + }): + #with patch.dict(saltcheck.__salt__, {'sys.list_modules': MagicMock(return_value=['module1'])}): + sc = saltcheck.SaltCheck() + returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) + self.assertNotEqual(returned, 'not-hello') + def test__assert_equal1(self): sc = saltcheck.SaltCheck() @@ -194,3 +198,18 @@ class SaltCheckTestCase(TestCase): b = 100 mybool = sc._SaltCheck__assert_less_equal(a, b) self.assertEqual(mybool, True) + + + 
#################################################### + #def test__is_valid_test(self): + # my_dict = {'module_and_function': 'test.echo', + # 'assertion': 'assertEqual', + # 'expected-return': 'True'} + # sc = saltcheck.SaltCheck() + # mybool = sc.__is_valid_test(my_dict) + # self.assertTrue(mybool) + + #def test_is_valid_module(self): + # sc = saltcheck.SaltCheck() + # returned = sc.is_valid_module('test') + # self.assertTrue(returned) From f7d859093c14cb9185a934048ce20686c0de25f3 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Tue, 25 Jul 2017 16:33:37 -0700 Subject: [PATCH 037/639] nice fixes --- salt/modules/saltcheck.py | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index 141d9cd8a2..cf259c7b04 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -98,6 +98,21 @@ def run_state_tests(state): result = scheck.run_test(value) results_dict[key] = result results[state_name] = results_dict + for state in results: + passed = 0 + failed = 0 + if len(results[state].items()) == 0: + results[state]['state test results'] = {'pass': passed, 'fail': failed} + else: + for dummy, val in results[state].items(): + if val: + passed = passed + 1 + elif val.upper().startswith('False'): + failed = failed + 1 + else: + failed = 0 + passed = 0 + results[state]['state test results'] = {'pass': passed, 'fail': failed} return results @@ -112,12 +127,13 @@ def run_highstate_tests(): for sta in states: log.info("State Name = {}".format(sta)) all_states.update(run_state_tests(sta)) - # result_dict = run_state_tests(sta) - # log.info("result_dict = {}".format(result_dict)) - # key = result_dict.keys()[0] - # val = result_dict.values()[0] - # all_states[key] = val - return {'highstate_test_result': all_states} + passed = 0 + failed = 0 + for state in all_states: + passed = all_states[state]['state test results']['pass'] + passed + failed = all_states[state]['state 
test results']['fail'] + failed + all_states['Total Pass/Fail:'] = {'pass': passed, 'fail': failed} + return all_states def _is_valid_module(module): From 493d065e21b6f139dd2d275b7c6ae64599a69ffa Mon Sep 17 00:00:00 2001 From: William Cannon Date: Wed, 26 Jul 2017 15:00:15 -0700 Subject: [PATCH 038/639] added auto check of minion config option --- salt/modules/saltcheck.py | 8 +- tests/unit/modules/test_saltcheck.py | 335 ++++++++++++++++----------- 2 files changed, 211 insertions(+), 132 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index cf259c7b04..b233b5ecc7 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -7,6 +7,7 @@ import logging import os import time import yaml +import collections try: import salt.utils import salt.client @@ -123,7 +124,8 @@ def run_highstate_tests(): salt '*' salt_check.run_highstate_tests ''' states = _get_top_states() - all_states = {} + #all_states = {} + all_states = collections.OrderedDict() for sta in states: log.info("State Name = {}".format(sta)) all_states.update(run_state_tests(sta)) @@ -206,8 +208,8 @@ class SaltCheck(object): # log.info("modules are: {}".format(self.modules)) # self.salt_lc = salt.client.Caller(mopts=__opts__) self.salt_lc = salt.client.Caller() - # if self.auto_update_master_cache: - # update_master_cache() + if self.auto_update_master_cache: + update_master_cache() def __is_valid_test(self, test_dict): '''Determine if a test contains: diff --git a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py index 586b2dc2ea..875c0084f8 100644 --- a/tests/unit/modules/test_saltcheck.py +++ b/tests/unit/modules/test_saltcheck.py @@ -23,7 +23,8 @@ class SaltCheckTestCase(TestCase): def test_call_salt_command(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), - 'sys.list_modules': MagicMock(return_value=['module1']) + 'sys.list_modules': MagicMock(return_value=['module1']), + 'cp.cache_master': 
MagicMock(return_value=[True]) }): sc = saltcheck.SaltCheck() returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) @@ -31,185 +32,261 @@ class SaltCheckTestCase(TestCase): def test_call_salt_command2(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), - 'sys.list_modules': MagicMock(return_value=['module1']) + 'sys.list_modules': MagicMock(return_value=['module1']), + 'cp.cache_master': MagicMock(return_value=[True]) }): - #with patch.dict(saltcheck.__salt__, {'sys.list_modules': MagicMock(return_value=['module1'])}): sc = saltcheck.SaltCheck() returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) self.assertNotEqual(returned, 'not-hello') def test__assert_equal1(self): - sc = saltcheck.SaltCheck() - a = {'a': 1, 'b': 2} - b = {'a': 1, 'b': 2} - mybool = sc._SaltCheck__assert_equal(a, b) - self.assertTrue(mybool) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2} + mybool = sc._SaltCheck__assert_equal(a, b) + self.assertTrue(mybool) def test__assert_equal2(self): - sc = saltcheck.SaltCheck() - a = {'a': 1, 'b': 2} - b = {'a': 1, 'b': 2, 'c': 3} - mybool = sc._SaltCheck__assert_equal(False, True) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2, 'c': 3} + mybool = sc._SaltCheck__assert_equal(False, True) + self.assertNotEqual(mybool, True) def test__assert_not_equal1(self): - sc = saltcheck.SaltCheck() - a = {'a': 1, 'b': 2} - b = {'a': 1, 'b': 2, 'c': 3} - mybool = sc._SaltCheck__assert_not_equal(a, b) - self.assertTrue(mybool) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 
'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2, 'c': 3} + mybool = sc._SaltCheck__assert_not_equal(a, b) + self.assertTrue(mybool) def test__assert_not_equal2(self): - sc = saltcheck.SaltCheck() - a = {'a': 1, 'b': 2} - b = {'a': 1, 'b': 2} - mybool = sc._SaltCheck__assert_not_equal(a, b) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = {'a': 1, 'b': 2} + b = {'a': 1, 'b': 2} + mybool = sc._SaltCheck__assert_not_equal(a, b) + self.assertNotEqual(mybool, True) def test__assert_true1(self): - sc = saltcheck.SaltCheck() - mybool = sc._SaltCheck__assert_equal(True, True) - self.assertTrue(mybool) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_equal(True, True) + self.assertTrue(mybool) def test__assert_true2(self): - sc = saltcheck.SaltCheck() - mybool = sc._SaltCheck__assert_equal(False, True) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_equal(False, True) + self.assertNotEqual(mybool, True) def test__assert_false1(self): - sc = saltcheck.SaltCheck() - mybool = sc._SaltCheck__assert_false(False) - self.assertTrue(mybool) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_false(False) + self.assertTrue(mybool) def test__assert_false2(self): - sc = saltcheck.SaltCheck() - mybool = sc._SaltCheck__assert_false(True) - 
self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + mybool = sc._SaltCheck__assert_false(True) + self.assertNotEqual(mybool, True) def test__assert_in1(self): - sc = saltcheck.SaltCheck() - a = "bob" - mylist = ['alice', 'bob', 'charles', 'dana'] - mybool = sc._SaltCheck__assert_in(a, mylist) - self.assertTrue(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = "bob" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_in(a, mylist) + self.assertTrue(mybool, True) def test__assert_in2(self): - sc = saltcheck.SaltCheck() - a = "elaine" - mylist = ['alice', 'bob', 'charles', 'dana'] - mybool = sc._SaltCheck__assert_in(a, mylist) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = "elaine" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_in(a, mylist) + self.assertNotEqual(mybool, True) def test__assert_not_in1(self): - sc = saltcheck.SaltCheck() - a = "elaine" - mylist = ['alice', 'bob', 'charles', 'dana'] - mybool = sc._SaltCheck__assert_not_in(a, mylist) - self.assertTrue(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = "elaine" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_not_in(a, mylist) + self.assertTrue(mybool, True) def test__assert_not_in2(self): - sc = saltcheck.SaltCheck() - a = "bob" - mylist = ['alice', 'bob', 'charles', 'dana'] - mybool = sc._SaltCheck__assert_not_in(a, mylist) - 
self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = "bob" + mylist = ['alice', 'bob', 'charles', 'dana'] + mybool = sc._SaltCheck__assert_not_in(a, mylist) + self.assertNotEqual(mybool, True) def test__assert_greater1(self): - sc = saltcheck.SaltCheck() - a = 110 - b = 100 - mybool = sc._SaltCheck__assert_greater(a, b) - self.assertTrue(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 110 + b = 100 + mybool = sc._SaltCheck__assert_greater(a, b) + self.assertTrue(mybool, True) def test__assert_greater2(self): - sc = saltcheck.SaltCheck() - a = 100 - b = 110 - mybool = sc._SaltCheck__assert_greater(a, b) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 100 + b = 110 + mybool = sc._SaltCheck__assert_greater(a, b) + self.assertNotEqual(mybool, True) def test__assert_greater3(self): - sc = saltcheck.SaltCheck() - a = 100 - b = 100 - mybool = sc._SaltCheck__assert_greater(a, b) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_greater(a, b) + self.assertNotEqual(mybool, True) def test__assert_greater_equal_equal1(self): - sc = saltcheck.SaltCheck() - a = 110 - b = 100 - mybool = sc._SaltCheck__assert_greater_equal(a, b) - self.assertTrue(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = 
saltcheck.SaltCheck() + a = 110 + b = 100 + mybool = sc._SaltCheck__assert_greater_equal(a, b) + self.assertTrue(mybool, True) def test__assert_greater_equal2(self): - sc = saltcheck.SaltCheck() - a = 100 - b = 110 - mybool = sc._SaltCheck__assert_greater_equal(a, b) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 100 + b = 110 + mybool = sc._SaltCheck__assert_greater_equal(a, b) + self.assertNotEqual(mybool, True) def test__assert_greater_equal3(self): - sc = saltcheck.SaltCheck() - a = 100 - b = 100 - mybool = sc._SaltCheck__assert_greater_equal(a, b) - self.assertEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_greater_equal(a, b) + self.assertEqual(mybool, True) def test__assert_less1(self): - sc = saltcheck.SaltCheck() - a = 99 - b = 100 - mybool = sc._SaltCheck__assert_less(a, b) - self.assertTrue(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 99 + b = 100 + mybool = sc._SaltCheck__assert_less(a, b) + self.assertTrue(mybool, True) def test__assert_less2(self): - sc = saltcheck.SaltCheck() - a = 110 - b = 99 - mybool = sc._SaltCheck__assert_less(a, b) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 110 + b = 99 + mybool = sc._SaltCheck__assert_less(a, b) + self.assertNotEqual(mybool, True) def test__assert_less3(self): - sc = saltcheck.SaltCheck() - a = 100 - b = 100 - mybool = sc._SaltCheck__assert_less(a, 
b) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_less(a, b) + self.assertNotEqual(mybool, True) def test__assert_less_equal1(self): - sc = saltcheck.SaltCheck() - a = 99 - b = 100 - mybool = sc._SaltCheck__assert_less_equal(a, b) - self.assertTrue(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 99 + b = 100 + mybool = sc._SaltCheck__assert_less_equal(a, b) + self.assertTrue(mybool, True) def test__assert_less_equal2(self): - sc = saltcheck.SaltCheck() - a = 110 - b = 99 - mybool = sc._SaltCheck__assert_less_equal(a, b) - self.assertNotEqual(mybool, True) + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 110 + b = 99 + mybool = sc._SaltCheck__assert_less_equal(a, b) + self.assertNotEqual(mybool, True) def test__assert_less_equal3(self): - sc = saltcheck.SaltCheck() - a = 100 - b = 100 - mybool = sc._SaltCheck__assert_less_equal(a, b) - self.assertEqual(mybool, True) - + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + sc = saltcheck.SaltCheck() + a = 100 + b = 100 + mybool = sc._SaltCheck__assert_less_equal(a, b) + self.assertEqual(mybool, True) + + def test_run_test_1(self): + with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + 'sys.list_modules': MagicMock(return_value=['test']), + 'sys.list_functions': MagicMock(return_value=['test.echo']), + 'cp.cache_master': MagicMock(return_value=[True]) + }): + returned = saltcheck.run_test(test={"module_and_function": "test.echo", 
"assertion": "assertEqual", "expected-return": "This works!", "args":["This works!"] }) + self.assertEqual(returned, True) + + # pillar injection not supported yet + #def test_run_test_2(self): + # with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), + # 'sys.list_modules': MagicMock(return_value=['pillar']), + # 'sys.list_functions': MagicMock(return_value=['pillar.get']) + # }): + # returned = saltcheck.run_test(test={"module_and_function": "pillar.get", "pillar-data": "mykey:myvalue", "assertion": "assertEqual", "expected-return": "myvalue", "args":["mykey"] }) + # self.assertEqual(returned, True) - #################################################### - #def test__is_valid_test(self): - # my_dict = {'module_and_function': 'test.echo', - # 'assertion': 'assertEqual', - # 'expected-return': 'True'} - # sc = saltcheck.SaltCheck() - # mybool = sc.__is_valid_test(my_dict) - # self.assertTrue(mybool) - - #def test_is_valid_module(self): - # sc = saltcheck.SaltCheck() - # returned = sc.is_valid_module('test') - # self.assertTrue(returned) From 5e4f36a6ed114989a64564c09793a4e01b1a3c74 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Wed, 26 Jul 2017 15:16:54 -0700 Subject: [PATCH 039/639] removing use of salt.utils --- salt/modules/saltcheck.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index b233b5ecc7..02656b0a6d 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -7,9 +7,8 @@ import logging import os import time import yaml -import collections try: - import salt.utils + # import salt.utils import salt.client import salt.exceptions except ImportError: @@ -124,8 +123,7 @@ def run_highstate_tests(): salt '*' salt_check.run_highstate_tests ''' states = _get_top_states() - #all_states = {} - all_states = collections.OrderedDict() + all_states = {} for sta in states: log.info("State Name = {}".format(sta)) 
all_states.update(run_state_tests(sta)) @@ -208,8 +206,8 @@ class SaltCheck(object): # log.info("modules are: {}".format(self.modules)) # self.salt_lc = salt.client.Caller(mopts=__opts__) self.salt_lc = salt.client.Caller() - if self.auto_update_master_cache: - update_master_cache() + # if self.auto_update_master_cache: + # update_master_cache() def __is_valid_test(self, test_dict): '''Determine if a test contains: @@ -481,7 +479,8 @@ class StateTestLoader(object): loads in one test file ''' try: - with salt.utils.files.fopen(filepath, 'r') as myfile: + # with salt.utils.files.fopen(filepath, 'r') as myfile: + with open(filepath, 'r') as myfile: contents_yaml = yaml.load(myfile) for key, value in contents_yaml.items(): self.test_dict[key] = value From 0aa81fd52ea10e4d052002ef6e719045de9038c3 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Thu, 27 Jul 2017 09:28:24 -0700 Subject: [PATCH 040/639] fixed auto cache update, fixed highstate tests - gathering correctly --- salt/modules/saltcheck.py | 142 ++++++++++++++++++++++++-------------- 1 file changed, 89 insertions(+), 53 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index 02656b0a6d..74f5fff88b 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -32,7 +32,7 @@ def update_master_cache(): Should be done one time before running tests, and if tests are updated CLI Example: - salt '*' salt_check.update_master_cache + salt '*' saltcheck.update_master_cache ''' __salt__['cp.cache_master']() return True @@ -40,9 +40,9 @@ def update_master_cache(): def run_test(**kwargs): ''' - Enables running one salt_check test via cli + Enables running one saltcheck test via cli CLI Example:: - salt '*' salt_check.run_test + salt '*' saltcheck.run_test test='{"module_and_function": "test.echo", "assertion": "assertEqual", "expected-return": "This works!", @@ -57,32 +57,11 @@ def run_test(**kwargs): return "test must be dictionary" -def run_state_tests_old(state): - ''' - 
Returns the output of running all salt check test for a state - CLI Example:: - salt '*' salt_check.run_state_tests postfix_ubuntu_16_04 - ''' - scheck = SaltCheck() - paths = scheck.get_state_search_path_list() - stl = StateTestLoader(search_paths=paths) - sls_list = _get_state_sls(state) - sls_paths = stl.convert_sls_to_paths(sls_list) - for mypath in sls_paths: - stl.add_test_files_for_sls(mypath) - stl.load_test_suite() - results_dict = {} - for key, value in stl.test_dict.items(): - result = scheck.run_test(value) - results_dict[key] = result - return {state: results_dict} - - def run_state_tests(state): ''' Returns the output of running all salt check test for a state CLI Example:: - salt '*' salt_check.run_state_tests postfix_ubuntu_16_04 + salt '*' saltcheck.run_state_tests postfix_ubuntu_16_04 ''' scheck = SaltCheck() paths = scheck.get_state_search_path_list() @@ -98,11 +77,13 @@ def run_state_tests(state): result = scheck.run_test(value) results_dict[key] = result results[state_name] = results_dict + passed = 0 + failed = 0 + missing_tests = 0 for state in results: - passed = 0 - failed = 0 if len(results[state].items()) == 0: - results[state]['state test results'] = {'pass': passed, 'fail': failed} + # results[state]['test results'] = {'pass': passed, 'fail': failed} + missing_tests = missing_tests + 1 else: for dummy, val in results[state].items(): if val: @@ -110,29 +91,81 @@ def run_state_tests(state): elif val.upper().startswith('False'): failed = failed + 1 else: - failed = 0 - passed = 0 - results[state]['state test results'] = {'pass': passed, 'fail': failed} + # failed = 0 + # passed = 0 + pass + results['state test results'] = {'passed': passed, 'failed': failed, 'missing_tests': missing_tests} return results - def run_highstate_tests(): ''' - Returns the output of running all salt checks of states that would apply for a highstate + Returns the output of running all salt check test for a state CLI Example:: - salt '*' 
salt_check.run_highstate_tests + salt '*' saltcheck.run_highstate_tests ''' - states = _get_top_states() - all_states = {} - for sta in states: - log.info("State Name = {}".format(sta)) - all_states.update(run_state_tests(sta)) + scheck = SaltCheck() + paths = scheck.get_state_search_path_list() + stl = StateTestLoader(search_paths=paths) + results = {} + sls_list = _get_top_states() + #sls_list = _get_state_sls(state) + for state_name in sls_list: + mypath = stl.convert_sls_to_path(state_name) + stl.add_test_files_for_sls(mypath) + stl.load_test_suite() + results_dict = {} + for key, value in stl.test_dict.items(): + result = scheck.run_test(value) + results_dict[key] = result + results[state_name] = results_dict passed = 0 failed = 0 - for state in all_states: - passed = all_states[state]['state test results']['pass'] + passed - failed = all_states[state]['state test results']['fail'] + failed - all_states['Total Pass/Fail:'] = {'pass': passed, 'fail': failed} + missing_tests = 0 + for state in results: + if len(results[state].items()) == 0: + # results[state]['test results'] = {'pass': passed, 'fail': failed} + missing_tests = missing_tests + 1 + else: + for dummy, val in results[state].items(): + if val: + passed = passed + 1 + elif val.upper().startswith('False'): + failed = failed + 1 + else: + # failed = 0 + # passed = 0 + pass + results['state test results'] = {'passed': passed, 'failed': failed, 'missing_tests': missing_tests} + return results + + +def run_highstate_tests_old(): + ''' + Returns the output of running all salt checks of states that would apply for a highstate + CLI Example:: + salt '*' saltcheck.run_highstate_tests + ''' + # there is a bug here....b/c a state can deeply nest it is easy to get false numbers in terms of tests run + # with results....need to redo this to load in all tests and pass that dict to SaltCheck - similar to run_state_tests + states = _get_top_states() + all_states = {} + passed = 0 + failed = 0 + missing_tests = 0 + 
for sta in states: + out_dict = None + out_dict = run_state_tests(sta) + log.info("state-test-result: {}".format(out_dict)) + passed = out_dict['state test results']['passed'] + passed + failed = out_dict['state test results']['failed'] + failed + missing_tests = out_dict['state test results']['missing_tests'] + missing_tests + r = None + r = dict(out_dict) + del r['state test results'] + log.info("out_dict_after_removing_state_test_results: {}".format(out_dict)) + log.info("r_dict: {}".format(r)) + all_states.update(r) + all_states.update({'test_results': {'passed': passed, 'failed': failed, 'missing_tests': missing_tests} }) return all_states @@ -159,17 +192,20 @@ def _is_valid_function(module_name, function): def _get_top_states(): ''' equivalent to a salt cli: salt web state.show_top''' + alt_states = [] try: returned = __salt__['state.show_top']() - alt_states = [] - for state in returned['base']: - state_bits = state.split(".") - state_name = state_bits[0] - if state_name not in alt_states: - alt_states.append(state_name) + for i in returned['base']: + alt_states.append(i) + #alt_states = [] + #for state in returned['base']: + # state_bits = state.split(".") + # state_name = state_bits[0] + # if state_name not in alt_states: + # alt_states.append(state_name) except Exception: raise - log.info("top states: {}".format(alt_states)) + #log.info("top states: {}".format(alt_states)) return alt_states @@ -206,8 +242,8 @@ class SaltCheck(object): # log.info("modules are: {}".format(self.modules)) # self.salt_lc = salt.client.Caller(mopts=__opts__) self.salt_lc = salt.client.Caller() - # if self.auto_update_master_cache: - # update_master_cache() + if self.auto_update_master_cache: + update_master_cache() def __is_valid_test(self, test_dict): '''Determine if a test contains: @@ -256,7 +292,7 @@ class SaltCheck(object): return value def run_test(self, test_dict): - '''Run a single salt_check test''' + '''Run a single saltcheck test''' if 
self.__is_valid_test(test_dict): mod_and_func = test_dict['module_and_function'] args = test_dict.get('args', None) From e7a33973641aaf3c42bff3251b45f4eb75a68363 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Thu, 27 Jul 2017 10:02:42 -0700 Subject: [PATCH 041/639] casting results to list for easy sorting, and appending overall info with order --- salt/modules/saltcheck.py | 62 +++++++++------------------------------ 1 file changed, 14 insertions(+), 48 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index 74f5fff88b..3fe6cc9d44 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -82,7 +82,6 @@ def run_state_tests(state): missing_tests = 0 for state in results: if len(results[state].items()) == 0: - # results[state]['test results'] = {'pass': passed, 'fail': failed} missing_tests = missing_tests + 1 else: for dummy, val in results[state].items(): @@ -91,11 +90,14 @@ def run_state_tests(state): elif val.upper().startswith('False'): failed = failed + 1 else: - # failed = 0 - # passed = 0 pass - results['state test results'] = {'passed': passed, 'failed': failed, 'missing_tests': missing_tests} - return results + out_list = [] + for key, value in results.items(): + out_list.append({key: value}) + out_list.sort() + out_list.append({"TEST RESULTS": {'passed': passed, 'failed': failed, 'missing_tests': missing_tests}}) + return out_list + def run_highstate_tests(): ''' @@ -108,7 +110,6 @@ def run_highstate_tests(): stl = StateTestLoader(search_paths=paths) results = {} sls_list = _get_top_states() - #sls_list = _get_state_sls(state) for state_name in sls_list: mypath = stl.convert_sls_to_path(state_name) stl.add_test_files_for_sls(mypath) @@ -123,7 +124,6 @@ def run_highstate_tests(): missing_tests = 0 for state in results: if len(results[state].items()) == 0: - # results[state]['test results'] = {'pass': passed, 'fail': failed} missing_tests = missing_tests + 1 else: for dummy, val in results[state].items(): @@ 
-132,41 +132,13 @@ def run_highstate_tests(): elif val.upper().startswith('False'): failed = failed + 1 else: - # failed = 0 - # passed = 0 pass - results['state test results'] = {'passed': passed, 'failed': failed, 'missing_tests': missing_tests} - return results - - -def run_highstate_tests_old(): - ''' - Returns the output of running all salt checks of states that would apply for a highstate - CLI Example:: - salt '*' saltcheck.run_highstate_tests - ''' - # there is a bug here....b/c a state can deeply nest it is easy to get false numbers in terms of tests run - # with results....need to redo this to load in all tests and pass that dict to SaltCheck - similar to run_state_tests - states = _get_top_states() - all_states = {} - passed = 0 - failed = 0 - missing_tests = 0 - for sta in states: - out_dict = None - out_dict = run_state_tests(sta) - log.info("state-test-result: {}".format(out_dict)) - passed = out_dict['state test results']['passed'] + passed - failed = out_dict['state test results']['failed'] + failed - missing_tests = out_dict['state test results']['missing_tests'] + missing_tests - r = None - r = dict(out_dict) - del r['state test results'] - log.info("out_dict_after_removing_state_test_results: {}".format(out_dict)) - log.info("r_dict: {}".format(r)) - all_states.update(r) - all_states.update({'test_results': {'passed': passed, 'failed': failed, 'missing_tests': missing_tests} }) - return all_states + out_list = [] + for key, value in results.items(): + out_list.append({key: value}) + out_list.sort() + out_list.append({"TEST RESULTS": {'passed': passed, 'failed': failed, 'missing_tests': missing_tests}}) + return out_list def _is_valid_module(module): @@ -197,15 +169,9 @@ def _get_top_states(): returned = __salt__['state.show_top']() for i in returned['base']: alt_states.append(i) - #alt_states = [] - #for state in returned['base']: - # state_bits = state.split(".") - # state_name = state_bits[0] - # if state_name not in alt_states: - # 
alt_states.append(state_name) except Exception: raise - #log.info("top states: {}".format(alt_states)) + # log.info("top states: {}".format(alt_states)) return alt_states From e7785bf758fe754d7bc20956a3df0a41524e2db6 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Thu, 27 Jul 2017 14:33:16 -0700 Subject: [PATCH 042/639] changed output of tests from True/False to Pass/Fail, updated unit tests --- salt/modules/saltcheck.py | 67 +++++++++++++--------------- tests/unit/modules/test_saltcheck.py | 6 +-- 2 files changed, 35 insertions(+), 38 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index 3fe6cc9d44..148a543ee6 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -85,12 +85,11 @@ def run_state_tests(state): missing_tests = missing_tests + 1 else: for dummy, val in results[state].items(): - if val: + log.info("dummy={}, val={}".format(dummy, val)) + if val.startswith('Pass'): passed = passed + 1 - elif val.upper().startswith('False'): + if val.startswith('Fail'): failed = failed + 1 - else: - pass out_list = [] for key, value in results.items(): out_list.append({key: value}) @@ -127,12 +126,11 @@ def run_highstate_tests(): missing_tests = missing_tests + 1 else: for dummy, val in results[state].items(): - if val: + log.info("dummy={}, val={}".format(dummy, val)) + if val.startswith('Pass'): passed = passed + 1 - elif val.upper().startswith('False'): + if val.startswith('Fail'): failed = failed + 1 - else: - pass out_list = [] for key, value in results.items(): out_list.append({key: value}) @@ -244,13 +242,13 @@ class SaltCheck(object): value = False try: if args and kwargs: - value = self.salt_lc.function(fun, *args, **kwargs) + value = self.salt_lc.cmd(fun, *args, **kwargs) elif args and not kwargs: - value = self.salt_lc.function(fun, *args) + value = self.salt_lc.cmd(fun, *args) elif not args and kwargs: - value = self.salt_lc.function(fun, **kwargs) + value = self.salt_lc.cmd(fun, **kwargs) else: - value = 
self.salt_lc.function(fun) + value = self.salt_lc.cmd(fun) except salt.exceptions.SaltException: raise except Exception: @@ -266,7 +264,6 @@ class SaltCheck(object): expected_return = test_dict['expected-return'] kwargs = test_dict.get('kwargs', None) actual_return = self.call_salt_command(mod_and_func, args, kwargs) - # checking for membership in a list does not require a type cast if assertion != "assertIn": expected_return = self.cast_expected_to_returned_type(expected_return, actual_return) if assertion == "assertEqual": @@ -290,9 +287,9 @@ class SaltCheck(object): elif assertion == "assertLessEqual": value = self.__assert_less_equal(expected_return, actual_return) else: - value = False + value = "Fail" else: - return False + return "Fail" return value @staticmethod @@ -320,12 +317,12 @@ class SaltCheck(object): ''' Test if two objects are equal ''' - result = True + result = "Pass" try: assert (expected == returned), "{0} is not equal to {1}".format(expected, returned) except AssertionError as err: - result = "False: " + str(err) + result = "Fail: " + str(err) return result @staticmethod @@ -333,11 +330,11 @@ class SaltCheck(object): ''' Test if two objects are not equal ''' - result = (True) + result = "Pass" try: assert (expected != returned), "{0} is equal to {1}".format(expected, returned) except AssertionError as err: - result = "False: " + str(err) + result = "Fail: " + str(err) return result @staticmethod @@ -345,11 +342,11 @@ class SaltCheck(object): ''' Test if an boolean is True ''' - result = (True) + result = "Pass" try: assert (returned is True), "{0} not True".format(returned) except AssertionError as err: - result = "False: " + str(err) + result = "Fail: " + str(err) return result @staticmethod @@ -357,7 +354,7 @@ class SaltCheck(object): ''' Test if an boolean is False ''' - result = (True) + result = "Pass" if isinstance(returned, str): try: returned = bool(returned) @@ -366,7 +363,7 @@ class SaltCheck(object): try: assert (returned is False), 
"{0} not False".format(returned) except AssertionError as err: - result = "False: " + str(err) + result = "Fail: " + str(err) return result @staticmethod @@ -374,11 +371,11 @@ class SaltCheck(object): ''' Test if a value is in the list of returned values ''' - result = (True) + result = "Pass" try: assert (expected in returned), "{0} not False".format(returned) except AssertionError as err: - result = "False: " + str(err) + result = "Fail: " + str(err) return result @staticmethod @@ -386,11 +383,11 @@ class SaltCheck(object): ''' Test if a value is not in the list of returned values ''' - result = (True) + result = "Pass" try: assert (expected not in returned), "{0} not False".format(returned) except AssertionError as err: - result = "False: " + str(err) + result = "Fail: " + str(err) return result @staticmethod @@ -398,11 +395,11 @@ class SaltCheck(object): ''' Test if a value is greater than the returned value ''' - result = (True) + result = "Pass" try: assert (expected > returned), "{0} not False".format(returned) except AssertionError as err: - result = "False: " + str(err) + result = "Fail: " + str(err) return result @staticmethod @@ -410,11 +407,11 @@ class SaltCheck(object): ''' Test if a value is greater than or equal to the returned value ''' - result = (True) + result = "Pass" try: assert (expected >= returned), "{0} not False".format(returned) except AssertionError as err: - result = "False: " + str(err) + result = "Fail: " + str(err) return result @staticmethod @@ -422,11 +419,11 @@ class SaltCheck(object): ''' Test if a value is less than the returned value ''' - result = (True) + result = "Pass" try: assert (expected < returned), "{0} not False".format(returned) except AssertionError as err: - result = "False: " + str(err) + result = "Fail: " + str(err) return result @staticmethod @@ -434,11 +431,11 @@ class SaltCheck(object): ''' Test if a value is less than or equal to the returned value ''' - result = (True) + result = "Pass" try: assert (expected 
<= returned), "{0} not False".format(returned) except AssertionError as err: - result = "False: " + str(err) + result = "Fail: " + str(err) return result @staticmethod diff --git a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py index 875c0084f8..6d27b8523b 100644 --- a/tests/unit/modules/test_saltcheck.py +++ b/tests/unit/modules/test_saltcheck.py @@ -210,7 +210,7 @@ class SaltCheckTestCase(TestCase): a = 100 b = 100 mybool = sc._SaltCheck__assert_greater_equal(a, b) - self.assertEqual(mybool, True) + self.assertEqual(mybool, 'Pass') def test__assert_less1(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), @@ -270,7 +270,7 @@ class SaltCheckTestCase(TestCase): a = 100 b = 100 mybool = sc._SaltCheck__assert_less_equal(a, b) - self.assertEqual(mybool, True) + self.assertEqual(mybool, 'Pass') def test_run_test_1(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), @@ -279,7 +279,7 @@ class SaltCheckTestCase(TestCase): 'cp.cache_master': MagicMock(return_value=[True]) }): returned = saltcheck.run_test(test={"module_and_function": "test.echo", "assertion": "assertEqual", "expected-return": "This works!", "args":["This works!"] }) - self.assertEqual(returned, True) + self.assertEqual(returned, 'Pass') # pillar injection not supported yet #def test_run_test_2(self): From 7ef3950d0cea9aedd0cd5547c00b46168da98825 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Thu, 27 Jul 2017 15:16:36 -0700 Subject: [PATCH 043/639] updated output of tests - state and highstate --- salt/modules/saltcheck.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index 148a543ee6..32f067f5d1 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -94,7 +94,7 @@ def run_state_tests(state): for key, value in results.items(): out_list.append({key: value}) out_list.sort() - out_list.append({"TEST RESULTS": 
{'passed': passed, 'failed': failed, 'missing_tests': missing_tests}}) + out_list.append({"TEST RESULTS": {'Passed': passed, 'Failed': failed, 'Missing Tests': missing_tests}}) return out_list @@ -135,7 +135,7 @@ def run_highstate_tests(): for key, value in results.items(): out_list.append({key: value}) out_list.sort() - out_list.append({"TEST RESULTS": {'passed': passed, 'failed': failed, 'missing_tests': missing_tests}}) + out_list.append({"TEST RESULTS": {'Passed': passed, 'Failed': failed, 'Missing Tests': missing_tests}}) return out_list From e4f7603723a1e54f5c77e38269566b3c6b270468 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Thu, 27 Jul 2017 15:43:12 -0700 Subject: [PATCH 044/639] removed comments, updated docstrings - mostly cosmetic --- salt/modules/saltcheck.py | 22 +++++++++++----------- tests/unit/modules/test_saltcheck.py | 10 ---------- 2 files changed, 11 insertions(+), 21 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index 32f067f5d1..8627a79f70 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- ''' This module should be saved as salt/modules/saltcheck.py +This works in master and masterless configurations ''' from __future__ import absolute_import import logging @@ -28,8 +29,9 @@ def __virtual__(): def update_master_cache(): ''' - Updates the master cache onto the minion - to transfer all salt-check-tests + Updates the master cache onto the minion - transfers all salt-check-tests Should be done one time before running tests, and if tests are updated + Can be automated by setting "auto_update_master_cache: True" in minion config CLI Example: salt '*' saltcheck.update_master_cache @@ -40,13 +42,13 @@ def update_master_cache(): def run_test(**kwargs): ''' - Enables running one saltcheck test via cli + Enables running one saltcheck test via command line CLI Example:: salt '*' saltcheck.run_test - test='{"module_and_function": "test.echo", - "assertion": 
"assertEqual", - "expected-return": "This works!", - "args":["This works!"] }' + test='{"module_and_function": "test.echo", + "assertion": "assertEqual", + "expected-return": "This works!", + "args":["This works!"] }' ''' # salt converts the string to a dictionary auto-magically scheck = SaltCheck() @@ -54,14 +56,14 @@ def run_test(**kwargs): if test and isinstance(test, dict): return scheck.run_test(test) else: - return "test must be dictionary" + return "Test must be a dictionary" def run_state_tests(state): ''' Returns the output of running all salt check test for a state CLI Example:: - salt '*' saltcheck.run_state_tests postfix_ubuntu_16_04 + salt '*' saltcheck.run_state_tests postfix ''' scheck = SaltCheck() paths = scheck.get_state_search_path_list() @@ -203,7 +205,6 @@ class SaltCheck(object): assertGreaterEqual assertLess assertLessEqual'''.split() self.auto_update_master_cache = _get_auto_update_cache_value - # log.info("modules are: {}".format(self.modules)) # self.salt_lc = salt.client.Caller(mopts=__opts__) self.salt_lc = salt.client.Caller() if self.auto_update_master_cache: @@ -215,7 +216,7 @@ class SaltCheck(object): a valid module and function, a valid assertion, an expected return value''' - tots = 0 # need total of >= 6 to pass test + tots = 0 # need total of >= 6 to be a valid test m_and_f = test_dict.get('module_and_function', None) assertion = test_dict.get('assertion', None) expected_return = test_dict.get('expected-return', None) @@ -519,7 +520,6 @@ class StateTestLoader(object): def add_test_files_for_sls(self, sls_path): '''Adding test files''' - # state_path = None for path in self.search_paths: full_path = path + os.sep + sls_path rootdir = full_path diff --git a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py index 6d27b8523b..086b1467b1 100644 --- a/tests/unit/modules/test_saltcheck.py +++ b/tests/unit/modules/test_saltcheck.py @@ -280,13 +280,3 @@ class SaltCheckTestCase(TestCase): }): returned = 
saltcheck.run_test(test={"module_and_function": "test.echo", "assertion": "assertEqual", "expected-return": "This works!", "args":["This works!"] }) self.assertEqual(returned, 'Pass') - - # pillar injection not supported yet - #def test_run_test_2(self): - # with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), - # 'sys.list_modules': MagicMock(return_value=['pillar']), - # 'sys.list_functions': MagicMock(return_value=['pillar.get']) - # }): - # returned = saltcheck.run_test(test={"module_and_function": "pillar.get", "pillar-data": "mykey:myvalue", "assertion": "assertEqual", "expected-return": "myvalue", "args":["mykey"] }) - # self.assertEqual(returned, True) - From 9061b197e084c9043937d1ac4db20cf30654ebac Mon Sep 17 00:00:00 2001 From: William Cannon Date: Thu, 27 Jul 2017 16:34:54 -0700 Subject: [PATCH 045/639] updated documentation to fit with saltstack recommendation --- salt/modules/saltcheck.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index 8627a79f70..c3bd48643b 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -1,7 +1,8 @@ # -*- coding: utf-8 -*- ''' -This module should be saved as salt/modules/saltcheck.py -This works in master and masterless configurations +A module for testing the logic of states and highstates +:codeauthor: William Cannon +:maturity: new ''' from __future__ import absolute_import import logging @@ -42,7 +43,9 @@ def update_master_cache(): def run_test(**kwargs): ''' - Enables running one saltcheck test via command line + Execute one saltcheck test and return result + + :param keyword arg test: CLI Example:: salt '*' saltcheck.run_test test='{"module_and_function": "test.echo", @@ -61,7 +64,11 @@ def run_test(**kwargs): def run_state_tests(state): ''' - Returns the output of running all salt check test for a state + Execute all tests for a salt state and return results + Nested states 
will also be tested + + :param str state: the name of a user defined state + CLI Example:: salt '*' saltcheck.run_state_tests postfix ''' @@ -102,7 +109,8 @@ def run_state_tests(state): def run_highstate_tests(): ''' - Returns the output of running all salt check test for a state + Execute all tests for a salt highstate and return results + CLI Example:: salt '*' saltcheck.run_highstate_tests ''' From e88eaadcd0ad991198d824a4a84daf3ee0a75ade Mon Sep 17 00:00:00 2001 From: William Cannon Date: Thu, 27 Jul 2017 16:51:06 -0700 Subject: [PATCH 046/639] adding more documentation --- salt/modules/saltcheck.py | 42 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index c3bd48643b..68abfa18a8 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -1,6 +1,48 @@ # -*- coding: utf-8 -*- ''' A module for testing the logic of states and highstates + +Saltcheck provides unittest like functionality requiring only the knowledge of salt module execution and yaml. + +In order to run state and highstate saltcheck tests a sub-folder of a state must be creaed and named "saltcheck-tests". + +Tests for a state should be created in files ending in *.tst and placed in the saltcheck-tests folder. + +Multiple tests can be created in a file. +Multiple *.tst files can be created in the saltcheck-tests folder. +The "id" of a test works in the same manner as in salt state files. +They should be unique and descriptive. 
+ +Example file system layout: +/srv/salt/apache/ + init.sls + config.sls + saltcheck-tests/ + pkg_and_mods.tst + config.tst + + +Saltcheck Test Syntax: + +Unique-ID: + module_and_function: + args: + kwargs: + assertion: + expected-return: + + +Example test 1: + +echo-test-hello: + module_and_function: test.echo + args: + - "hello" + kwargs: + assertion: assertEqual + expected-return: 'hello' + + :codeauthor: William Cannon :maturity: new ''' From f13a1dfeab691aa1f589648b2c1d0bebdedab5b1 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Fri, 28 Jul 2017 10:40:55 -0700 Subject: [PATCH 047/639] fixed passing in of keyword arguments --- salt/modules/saltcheck.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index 68abfa18a8..9b5f260b71 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -25,11 +25,11 @@ Example file system layout: Saltcheck Test Syntax: Unique-ID: - module_and_function: + module_and_function: args: kwargs: assertion: - expected-return: + expected-return: Example test 1: @@ -42,7 +42,7 @@ echo-test-hello: assertion: assertEqual expected-return: 'hello' - + :codeauthor: William Cannon :maturity: new ''' @@ -270,6 +270,7 @@ class SaltCheck(object): m_and_f = test_dict.get('module_and_function', None) assertion = test_dict.get('assertion', None) expected_return = test_dict.get('expected-return', None) + log.info("__is_valid_test has test: {}".format(test_dict)) if m_and_f: tots += 1 module, function = m_and_f.split('.') @@ -277,18 +278,22 @@ class SaltCheck(object): tots += 1 if _is_valid_function(module, function): tots += 1 + log.info("__is_valid_test has valid m_and_f") if assertion: tots += 1 if assertion in self.assertions_list: tots += 1 + log.info("__is_valid_test has valid_assertion") if expected_return: tots += 1 + log.info("__is_valid_test has valid_expected_return") + log.info("__is_valid_test score: {}".format(tots)) return 
tots >= 6 def call_salt_command(self, fun, - args=None, - kwargs=None): + args, + kwargs): '''Generic call of salt Caller command''' value = False try: @@ -311,9 +316,9 @@ class SaltCheck(object): if self.__is_valid_test(test_dict): mod_and_func = test_dict['module_and_function'] args = test_dict.get('args', None) + kwargs = test_dict.get('kwargs', None) assertion = test_dict['assertion'] expected_return = test_dict['expected-return'] - kwargs = test_dict.get('kwargs', None) actual_return = self.call_salt_command(mod_and_func, args, kwargs) if assertion != "assertIn": expected_return = self.cast_expected_to_returned_type(expected_return, actual_return) @@ -338,9 +343,9 @@ class SaltCheck(object): elif assertion == "assertLessEqual": value = self.__assert_less_equal(expected_return, actual_return) else: - value = "Fail" + value = "Fail - bas assertion" else: - return "Fail" + return "Fail - invalid test" return value @staticmethod From 1f9a2de4f9b5ca464e9a4c7ec6500966928ecce6 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Fri, 28 Jul 2017 16:10:58 -0700 Subject: [PATCH 048/639] fixed bug in run_highstate_tests - was not handling nested states --- salt/modules/saltcheck.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index 9b5f260b71..39cded6699 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -161,6 +161,13 @@ def run_highstate_tests(): stl = StateTestLoader(search_paths=paths) results = {} sls_list = _get_top_states() + all_states = [] + for top_state in sls_list: + sls_list = _get_state_sls(top_state) + for state in sls_list: + if state not in all_states: + all_states.append(state) + for state_name in sls_list: mypath = stl.convert_sls_to_path(state_name) stl.add_test_files_for_sls(mypath) From f0dfb283fdeddb6bfed0c6652a39c45aeb8dc1e2 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Mon, 31 Jul 2017 16:07:27 -0500 Subject: [PATCH 049/639] omission of correct list in 
iteration --- salt/modules/saltcheck.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index 39cded6699..902ffc8757 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -168,7 +168,7 @@ def run_highstate_tests(): if state not in all_states: all_states.append(state) - for state_name in sls_list: + for state_name in all_states: mypath = stl.convert_sls_to_path(state_name) stl.add_test_files_for_sls(mypath) stl.load_test_suite() From 389c037285190e34ea57374a2464f33cdf0ca894 Mon Sep 17 00:00:00 2001 From: Steven Joseph Date: Mon, 17 Jul 2017 14:07:36 +1000 Subject: [PATCH 050/639] Check remote tags before deciding to do a fetch #42329 --- salt/states/git.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/salt/states/git.py b/salt/states/git.py index 274662e83b..a1bf29cecc 100644 --- a/salt/states/git.py +++ b/salt/states/git.py @@ -1308,6 +1308,19 @@ def latest(name, 'if it does not already exist).', comments ) + if set(all_local_tags) != set([ + x.split('/')[-1] for x in __salt__['git.ls_remote']( + cwd=target, + remote=remote, + opts="--tags", + user=user, + password=password, + identity=identity, + saltenv=__env__, + ignore_retcode=True, + ).keys() + ]): + has_remote_rev = False if not has_remote_rev: try: From 4b1df2f2235d1f2170dd00c15668f6c71f834dd7 Mon Sep 17 00:00:00 2001 From: Steven Joseph Date: Fri, 4 Aug 2017 17:26:18 +1000 Subject: [PATCH 051/639] Exclude annotated tags from checks --- salt/states/git.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/salt/states/git.py b/salt/states/git.py index a1bf29cecc..7e0110b40c 100644 --- a/salt/states/git.py +++ b/salt/states/git.py @@ -1308,7 +1308,7 @@ def latest(name, 'if it does not already exist).', comments ) - if set(all_local_tags) != set([ + remote_tags = set([ x.split('/')[-1] for x in __salt__['git.ls_remote']( cwd=target, remote=remote, @@ -1318,9 +1318,14 @@ def 
latest(name, identity=identity, saltenv=__env__, ignore_retcode=True, - ).keys() - ]): + ).keys() if '^{}' not in x + ]) + if set(all_local_tags) != remote_tags: has_remote_rev = False + ret['changes']['tags'] = { + 'old': all_local_tags, + 'new': list(remote_tags) + } if not has_remote_rev: try: From 2fabc060f88929b5b8f17582049fcdcd8e71306e Mon Sep 17 00:00:00 2001 From: Joseph Nix Date: Fri, 4 Aug 2017 17:09:53 -0500 Subject: [PATCH 052/639] [develop] Adding fingerprint_hash_type to known host example. Since 2017.7.0 the existing example would not have worked without this change. --- salt/states/ssh_known_hosts.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/states/ssh_known_hosts.py b/salt/states/ssh_known_hosts.py index 0fe24cb813..0b9bc6b74a 100644 --- a/salt/states/ssh_known_hosts.py +++ b/salt/states/ssh_known_hosts.py @@ -12,6 +12,7 @@ Manage the information stored in the known_hosts files. - present - user: root - fingerprint: 16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48 + - fingerprint_hash_type: md5 example.com: ssh_known_hosts: From 1488daca0fdfe192168239d2a79f8b7397085762 Mon Sep 17 00:00:00 2001 From: Andy Hibbert Date: Tue, 8 Aug 2017 22:16:58 +0100 Subject: [PATCH 053/639] develop: Allow creation_token to be passed into create_file_system and get_file_system Change-Id: I49734011406ad847290ab3a42beb23f89d2de269 --- salt/modules/boto_efs.py | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/salt/modules/boto_efs.py b/salt/modules/boto_efs.py index 1ef85d9233..a498fe5e8a 100644 --- a/salt/modules/boto_efs.py +++ b/salt/modules/boto_efs.py @@ -131,6 +131,7 @@ def _get_conn(key=None, def create_file_system(name, performance_mode='generalPurpose', + creation_token=None, keyid=None, key=None, profile=None, @@ -146,6 +147,10 @@ def create_file_system(name, (string) - The PerformanceMode of the file system. 
Can be either generalPurpose or maxIO + creation_token + (string) - A unique name to be used as reference when creating an EFS. + This will ensure idempotency. Set to name if not specified otherwise + returns (dict) - A dict of the data for the elastic file system @@ -155,10 +160,11 @@ def create_file_system(name, salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose ''' - import os - import base64 - creation_token = base64.b64encode(os.urandom(46), ['-', '_']) - tags = [{"Key": "Name", "Value": name}] + + if creation_token is None: + creation_token = name + + tags = {"Key": "Name", "Value": name} client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) @@ -355,6 +361,7 @@ def delete_tags(filesystemid, def get_file_systems(filesystemid=None, + creation_token=None, keyid=None, key=None, profile=None, @@ -367,6 +374,12 @@ def get_file_systems(filesystemid=None, filesystemid (string) - ID of the file system to retrieve properties + creation_token + (string) - A unique token that identifies an EFS. + If fileysystem created via create_file_system this would + either be explictitly passed in or set to name. + You can limit your search with this. 
+ returns (list[dict]) - list of all elastic file system properties @@ -380,9 +393,16 @@ def get_file_systems(filesystemid=None, result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) - if filesystemid: + if filesystemid and creation_token: + response = client.describe_file_systems(FileSystemId=filesystemid, + CreationToken=creation_token) + result = response["FileSystems"] + elif filesystemid: response = client.describe_file_systems(FileSystemId=filesystemid) result = response["FileSystems"] + elif creation_token: + response = client.describe_file_systems(CreationToken=creation_token) + result = response["FileSystems"] else: response = client.describe_file_systems() From 71bbf6e473c233b3c07ee99576161844a14e4489 Mon Sep 17 00:00:00 2001 From: ubaumann Date: Wed, 9 Aug 2017 23:07:45 +0200 Subject: [PATCH 054/639] Add custom pillar value support --- salt/modules/state.py | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/salt/modules/state.py b/salt/modules/state.py index 7db21849b4..1d4c8e415d 100644 --- a/salt/modules/state.py +++ b/salt/modules/state.py @@ -1462,6 +1462,17 @@ def show_low_sls(mods, test=None, queue=False, **kwargs): saltenv Specify a salt fileserver environment to be used when applying states + pillar + Custom Pillar values, passed as a dictionary of key-value pairs + + .. code-block:: bash + + salt '*' state.show_low_sls test pillar='{"foo": "bar"}' + + .. note:: + Values passed this way will override Pillar values set via + ``pillar_roots`` or an external Pillar source. + pillarenv Specify a Pillar environment to be used when applying states. This can also be set in the minion config file using the @@ -1496,12 +1507,26 @@ def show_low_sls(mods, test=None, queue=False, **kwargs): # the 'base' saltenv if none is configured and none was passed. 
if opts['environment'] is None: opts['environment'] = 'base' + + pillar_override = kwargs.get('pillar') + pillar_enc = kwargs.get('pillar_enc') + if pillar_enc is None \ + and pillar_override is not None \ + and not isinstance(pillar_override, dict): + raise SaltInvocationError( + 'Pillar data must be formatted as a dictionary, unless pillar_enc ' + 'is specified.' + ) + try: st_ = salt.state.HighState(opts, + pillar_override, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: - st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts)) + st_ = salt.state.HighState(opts, + pillar_override, + initial_pillar=_get_initial_pillar(opts)) if not _check_pillar(kwargs, st_.opts['pillar']): __context__['retcode'] = 5 From 02a3ad818e556cc08c8f163b798dcdc12d8945fc Mon Sep 17 00:00:00 2001 From: Steven Joseph Date: Thu, 10 Aug 2017 13:15:29 +1000 Subject: [PATCH 055/639] Update salt-cloud azurearm to work with latest sdk - allows compatibility with azure-cli --- salt/cloud/clouds/azurearm.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/cloud/clouds/azurearm.py b/salt/cloud/clouds/azurearm.py index 68d1d91847..8d65b9c9c0 100644 --- a/salt/cloud/clouds/azurearm.py +++ b/salt/cloud/clouds/azurearm.py @@ -79,7 +79,6 @@ HAS_LIBS = False try: import salt.utils.msazure from salt.utils.msazure import object_to_dict - import azure.storage from azure.common.credentials import ( UserPassCredentials, ServicePrincipalCredentials, @@ -115,6 +114,7 @@ try: from azure.mgmt.storage import StorageManagementClient from azure.mgmt.web import WebSiteManagementClient from msrestazure.azure_exceptions import CloudError + from azure.multiapi.storage.v2016_05_31 import CloudStorageAccount HAS_LIBS = True except ImportError: pass @@ -1728,7 +1728,7 @@ def list_containers(call=None, kwargs=None): # pylint: disable=unused-argument if not storconn: storconn = get_conn(StorageManagementClient) - storageaccount = 
azure.storage.CloudStorageAccount( + storageaccount = CloudStorageAccount( config.get_cloud_config_value( 'storage_account', get_configured_provider(), __opts__, search_global=False @@ -1769,7 +1769,7 @@ def list_blobs(call=None, kwargs=None): # pylint: disable=unused-argument 'A container must be specified' ) - storageaccount = azure.storage.CloudStorageAccount( + storageaccount = CloudStorageAccount( config.get_cloud_config_value( 'storage_account', get_configured_provider(), __opts__, search_global=False @@ -1809,7 +1809,7 @@ def delete_blob(call=None, kwargs=None): # pylint: disable=unused-argument 'A blob must be specified' ) - storageaccount = azure.storage.CloudStorageAccount( + storageaccount = CloudStorageAccount( config.get_cloud_config_value( 'storage_account', get_configured_provider(), __opts__, search_global=False From 50f1691cd8147666d09a0c216eac473bb37508bf Mon Sep 17 00:00:00 2001 From: Steven Joseph Date: Thu, 10 Aug 2017 13:19:45 +1000 Subject: [PATCH 056/639] Update documentation for azure arm dependencies --- doc/topics/cloud/azurearm.rst | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/doc/topics/cloud/azurearm.rst b/doc/topics/cloud/azurearm.rst index 278cdc314e..f77895475a 100644 --- a/doc/topics/cloud/azurearm.rst +++ b/doc/topics/cloud/azurearm.rst @@ -15,9 +15,7 @@ More information about Azure is located at `http://www.windowsazure.com/ Dependencies ============ -* `Microsoft Azure SDK for Python `_ >= 2.0rc6 -* `Microsoft Azure Storage SDK for Python `_ >= 0.32 -* The python-requests library, for Python < 2.7.9. 
+* Azure Cli ```pip install 'azure-cli>=2.0.12'``` * A Microsoft Azure account * `Salt `_ From 2728e5d9777dd15c6d716d3e2d13707c9ba2d2ec Mon Sep 17 00:00:00 2001 From: Steven Joseph Date: Fri, 11 Aug 2017 14:59:57 +1000 Subject: [PATCH 057/639] Only include new tags in changes --- salt/states/git.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/salt/states/git.py b/salt/states/git.py index 7e0110b40c..4808c0dce3 100644 --- a/salt/states/git.py +++ b/salt/states/git.py @@ -1322,10 +1322,9 @@ def latest(name, ]) if set(all_local_tags) != remote_tags: has_remote_rev = False - ret['changes']['tags'] = { - 'old': all_local_tags, - 'new': list(remote_tags) - } + ret['changes']['new_tags'] = remote_tags.symmetric_difference( + all_local_tags + ) if not has_remote_rev: try: From dd58225a200290dc5b20b0c2874a420ded257de8 Mon Sep 17 00:00:00 2001 From: davidjoliver86 Date: Fri, 30 Jun 2017 19:00:43 -0700 Subject: [PATCH 058/639] ssh config roster for salt-ssh addresses #35727 --- salt/config/__init__.py | 2 + salt/roster/sshconfig.py | 146 +++++++++++++++++++++++++++ salt/syspaths.py | 18 ++-- tests/unit/test_ssh_config_roster.py | 87 ++++++++++++++++ 4 files changed, 246 insertions(+), 7 deletions(-) create mode 100644 salt/roster/sshconfig.py create mode 100644 tests/unit/test_ssh_config_roster.py diff --git a/salt/config/__init__.py b/salt/config/__init__.py index de0964d7a7..4588d2bbde 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -904,6 +904,7 @@ VALID_OPTS = { 'ssh_scan_timeout': float, 'ssh_identities_only': bool, 'ssh_log_file': str, + 'ssh_config_file': str, # Enable ioflo verbose logging. Warning! Very verbose! 
'ioflo_verbose': int, @@ -1601,6 +1602,7 @@ DEFAULT_MASTER_OPTS = { 'ssh_scan_timeout': 0.01, 'ssh_identities_only': False, 'ssh_log_file': os.path.join(salt.syspaths.LOGS_DIR, 'ssh'), + 'ssh_config_file': os.path.join(salt.syspaths.HOME_DIR, '.ssh', 'config'), 'master_floscript': os.path.join(FLO_DIR, 'master.flo'), 'worker_floscript': os.path.join(FLO_DIR, 'worker.flo'), 'maintenance_floscript': os.path.join(FLO_DIR, 'maint.flo'), diff --git a/salt/roster/sshconfig.py b/salt/roster/sshconfig.py new file mode 100644 index 0000000000..29c01ccaf3 --- /dev/null +++ b/salt/roster/sshconfig.py @@ -0,0 +1,146 @@ +# -*- coding: utf-8 -*- +''' +Parses roster entries out of Host directives from SSH config + +.. code-block:: bash + + salt-ssh --roster sshconfig '*' -r "echo hi" +''' +from __future__ import absolute_import + +# Import python libs +import os +import collections +import fnmatch +import re + +# Import Salt libs +import salt.utils +from salt.ext.six import string_types + +import logging +log = logging.getLogger(__name__) + +_SSHConfRegex = collections.namedtuple('_SSHConfRegex', ['target_field', 'pattern']) +_ROSTER_FIELDS = ( + _SSHConfRegex(target_field='user', pattern=r'\s+User (.*)'), + _SSHConfRegex(target_field='port', pattern=r'\s+Port (.*)'), + _SSHConfRegex(target_field='priv', pattern=r'\s+IdentityFile (.*)'), +) + + +def _get_ssh_config_file(opts): + ''' + :return: Path to the .ssh/config file - usually /.ssh/config + ''' + ssh_config_file = opts.get('ssh_config_file') + if not os.path.isfile(ssh_config_file): + raise IOError('Cannot find SSH config file') + if not os.access(ssh_config_file, os.R_OK): + raise IOError('Cannot access SSH config file: {}'.format(ssh_config_file)) + return ssh_config_file + + +def parse_ssh_config(lines): + ''' + Parses lines from the SSH config to create roster targets. 
+ + :param lines: Individual lines from the ssh config file + :return: Dictionary of targets in similar style to the flat roster + ''' + # transform the list of individual lines into a list of sublists where each + # sublist represents a single Host definition + hosts = [] + for line in lines: + if not line or line.startswith('#'): + continue + elif line.startswith('Host '): + hosts.append([]) + hosts[-1].append(line) + + # construct a dictionary of Host names to mapped roster properties + targets = collections.OrderedDict() + for host_data in hosts: + target = collections.OrderedDict() + hostnames = host_data[0].split()[1:] + for line in host_data[1:]: + for field in _ROSTER_FIELDS: + match = re.match(field.pattern, line) + if match: + target[field.target_field] = match.group(1) + for hostname in hostnames: + targets[hostname] = target + + # apply matching for glob hosts + wildcard_targets = [] + non_wildcard_targets = [] + for target in targets.keys(): + if '*' in target or '?' in target: + wildcard_targets.append(target) + else: + non_wildcard_targets.append(target) + for pattern in wildcard_targets: + for candidate in non_wildcard_targets: + if fnmatch.fnmatch(candidate, pattern): + targets[candidate].update(targets[pattern]) + del targets[pattern] + + # finally, update the 'host' to refer to its declaration in the SSH config + # so that its connection parameters can be utilized + for target in targets: + targets[target]['host'] = target + return targets + + +def targets(tgt, tgt_type='glob', **kwargs): + ''' + Return the targets from the flat yaml file, checks opts for location but + defaults to /etc/salt/roster + ''' + ssh_config_file = _get_ssh_config_file(__opts__) + with salt.utils.fopen(ssh_config_file, 'r') as fp: + all_minions = parse_ssh_config([line.rstrip() for line in fp]) + rmatcher = RosterMatcher(all_minions, tgt, tgt_type) + matched = rmatcher.targets() + return matched + + +class RosterMatcher(object): + ''' + Matcher for the roster data 
structure + ''' + def __init__(self, raw, tgt, tgt_type): + self.tgt = tgt + self.tgt_type = tgt_type + self.raw = raw + + def targets(self): + ''' + Execute the correct tgt_type routine and return + ''' + try: + return getattr(self, 'ret_{0}_minions'.format(self.tgt_type))() + except AttributeError: + return {} + + def ret_glob_minions(self): + ''' + Return minions that match via glob + ''' + minions = {} + for minion in self.raw: + if fnmatch.fnmatch(minion, self.tgt): + data = self.get_data(minion) + if data: + minions[minion] = data + return minions + + def get_data(self, minion): + ''' + Return the configured ip + ''' + if isinstance(self.raw[minion], string_types): + return {'host': self.raw[minion]} + if isinstance(self.raw[minion], dict): + return self.raw[minion] + return False diff --git a/salt/syspaths.py b/salt/syspaths.py index 95efc7246b..4d81aa92f7 100644 --- a/salt/syspaths.py +++ b/salt/syspaths.py @@ -34,13 +34,13 @@ try: import salt._syspaths as __generated_syspaths # pylint: disable=no-name-in-module except ImportError: import types - __generated_syspaths = types.ModuleType('salt._syspaths') # future lint: disable=non-unicode-string - for key in (u'ROOT_DIR', u'CONFIG_DIR', u'CACHE_DIR', u'SOCK_DIR', - u'SRV_ROOT_DIR', u'BASE_FILE_ROOTS_DIR', - u'BASE_PILLAR_ROOTS_DIR', u'BASE_THORIUM_ROOTS_DIR', - u'BASE_MASTER_ROOTS_DIR', u'LOGS_DIR', u'PIDFILE_DIR', - u'SPM_FORMULA_PATH', u'SPM_PILLAR_PATH', u'SPM_REACTOR_PATH', - u'SHARE_DIR'): + __generated_syspaths = types.ModuleType('salt._syspaths') + for key in ('ROOT_DIR', 'CONFIG_DIR', 'CACHE_DIR', 'SOCK_DIR', + 'SRV_ROOT_DIR', 'BASE_FILE_ROOTS_DIR', 'HOME_DIR', + 'BASE_PILLAR_ROOTS_DIR', 'BASE_THORIUM_ROOTS_DIR', + 'BASE_MASTER_ROOTS_DIR', 'LOGS_DIR', 'PIDFILE_DIR', + 'SPM_FORMULA_PATH', 'SPM_PILLAR_PATH', 'SPM_REACTOR_PATH', + 'SHARE_DIR'): setattr(__generated_syspaths, key, None) @@ -139,6 +139,10 @@ SPM_REACTOR_PATH = __generated_syspaths.SPM_REACTOR_PATH if SPM_REACTOR_PATH is None: 
SPM_REACTOR_PATH = os.path.join(SRV_ROOT_DIR, u'spm', u'reactor') +HOME_DIR = __generated_syspaths.HOME_DIR +if HOME_DIR is None: + HOME_DIR = os.path.expanduser('~') + __all__ = [ u'ROOT_DIR', diff --git a/tests/unit/test_ssh_config_roster.py b/tests/unit/test_ssh_config_roster.py new file mode 100644 index 0000000000..f3479caf89 --- /dev/null +++ b/tests/unit/test_ssh_config_roster.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- + +# Import Python libs +from __future__ import absolute_import +import collections + +# Import Salt Testing Libs +from tests.support import mock +from tests.support import mixins +from tests.support.unit import skipIf, TestCase + +# Import Salt Libs +import salt.roster.sshconfig as sshconfig + +_SAMPLE_SSH_CONFIG = """ +Host * + User user.mcuserface + +Host abc* + IdentityFile ~/.ssh/id_rsa_abc + +Host def* + IdentityFile ~/.ssh/id_rsa_def + +Host abc.asdfgfdhgjkl.com + HostName 123.123.123.123 + +Host abc123.asdfgfdhgjkl.com + HostName 123.123.123.124 + +Host def.asdfgfdhgjkl.com + HostName 234.234.234.234 +""" + +_TARGET_ABC = collections.OrderedDict([ + ('user', 'user.mcuserface'), + ('priv', '~/.ssh/id_rsa_abc'), + ('host', 'abc.asdfgfdhgjkl.com') +]) + +_TARGET_ABC123 = collections.OrderedDict([ + ('user', 'user.mcuserface'), + ('priv', '~/.ssh/id_rsa_abc'), + ('host', 'abc123.asdfgfdhgjkl.com') +]) + +_TARGET_DEF = collections.OrderedDict([ + ('user', 'user.mcuserface'), + ('priv', '~/.ssh/id_rsa_def'), + ('host', 'def.asdfgfdhgjkl.com') +]) + +_ALL = { + 'abc.asdfgfdhgjkl.com': _TARGET_ABC, + 'abc123.asdfgfdhgjkl.com': _TARGET_ABC123, + 'def.asdfgfdhgjkl.com': _TARGET_DEF +} + +_ABC_GLOB = { + 'abc.asdfgfdhgjkl.com': _TARGET_ABC, + 'abc123.asdfgfdhgjkl.com': _TARGET_ABC123 +} + + +@skipIf(mock.NO_MOCK, mock.NO_MOCK_REASON) +class SSHConfigRosterTestCase(TestCase, mixins.LoaderModuleMockMixin): + + @classmethod + def setUpClass(cls): + cls.mock_fp = mock_fp = mock.mock_open(read_data=_SAMPLE_SSH_CONFIG) + + def 
setup_loader_modules(self): + return {sshconfig: {}} + + def test_all(self): + with mock.patch('salt.utils.fopen', self.mock_fp): + with mock.patch('salt.roster.sshconfig._get_ssh_config_file'): + self.mock_fp.return_value.__iter__.return_value = _SAMPLE_SSH_CONFIG.splitlines() + targets = sshconfig.targets('*') + self.assertEqual(targets, _ALL) + + def test_abc_glob(self): + with mock.patch('salt.utils.fopen', self.mock_fp): + with mock.patch('salt.roster.sshconfig._get_ssh_config_file'): + self.mock_fp.return_value.__iter__.return_value = _SAMPLE_SSH_CONFIG.splitlines() + targets = sshconfig.targets('abc*') + self.assertEqual(targets, _ABC_GLOB) From 0e902f1ec88756ef8dea8e6c4b49bd12a3a2cde3 Mon Sep 17 00:00:00 2001 From: davidjoliver86 Date: Sat, 12 Aug 2017 15:04:27 -0700 Subject: [PATCH 059/639] fix indent --- salt/syspaths.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/syspaths.py b/salt/syspaths.py index 4d81aa92f7..7aeb6e45e4 100644 --- a/salt/syspaths.py +++ b/salt/syspaths.py @@ -34,7 +34,7 @@ try: import salt._syspaths as __generated_syspaths # pylint: disable=no-name-in-module except ImportError: import types - __generated_syspaths = types.ModuleType('salt._syspaths') + __generated_syspaths = types.ModuleType('salt._syspaths') for key in ('ROOT_DIR', 'CONFIG_DIR', 'CACHE_DIR', 'SOCK_DIR', 'SRV_ROOT_DIR', 'BASE_FILE_ROOTS_DIR', 'HOME_DIR', 'BASE_PILLAR_ROOTS_DIR', 'BASE_THORIUM_ROOTS_DIR', From 079f09798555d9eb565d244fc1a652eff8dcba09 Mon Sep 17 00:00:00 2001 From: Jason Unovitch Date: Mon, 7 Aug 2017 14:47:28 -0400 Subject: [PATCH 060/639] Fix 'preserve_minion_cache: True' functionality (fixes #35840) - Drop preserve_minions as a a condition used in the check_minion_cache() logic test. This should be more in line with the intent of the "Optionally, pass in a list of minions which should have their caches preserved. To preserve all caches, set __opts__['preserve_minion_cache']" comment documented in key.py. 
- Add --preserve_minions as an optional CLI option to salt-key. This will allow the user to optionally preserve caches independently of the preserve_minion_cache option. Option does not override config file. - This effectively reverts commit 661f5686bf1090554d571a6ed23f09d511f5a15a which introduced the regression with preserve_minion_cache set to True to fix a regression when preserve_minion_cache is set to False. Prior to that commit, the preserve_minion_cache option was completely ignored when set to True and when set to False cache directories were still preserved. - Functional testing (three minions 'minion1', 'oldminion', and 'minion2') /etc/salt/master - preserve_minion_cache: False # salt-key -d minion1 PASS: deletes 'minion1', deletes stale 'oldminion', preserve active 'minion2' # salt-key -d minion1 --preserve-minions=true PASS: preserves minion1 as requested, deletes oldminion as it was not in the match from the delete_key() comment "To preserve the master caches of minions who are matched", preserves active minion2 /etc/salt/master - preserve_minion_cache: True # salt-key -d minion1 PASS: no directories deleted per config option # salt-key -d minion1 --preserve-minions=false PASS: no directories deleted per config option, does not override config --- salt/key.py | 37 ++++++++++++++++++++----------------- salt/utils/parsers.py | 17 +++++++++++++++++ 2 files changed, 37 insertions(+), 17 deletions(-) diff --git a/salt/key.py b/salt/key.py index 51d7be6a89..f1dd28319f 100644 --- a/salt/key.py +++ b/salt/key.py @@ -489,7 +489,7 @@ class Key(object): minions = [] for key, val in six.iteritems(keys): minions.extend(val) - if not self.opts.get('preserve_minion_cache', False) or not preserve_minions: + if not self.opts.get('preserve_minion_cache', False): m_cache = os.path.join(self.opts['cachedir'], self.ACC) if os.path.isdir(m_cache): for minion in os.listdir(m_cache): @@ -736,7 +736,7 @@ class Key(object): def delete_key(self, match=None, match_dict=None, - 
preserve_minions=False, + preserve_minions=None, revoke_auth=False): ''' Delete public keys. If "match" is passed, it is evaluated as a glob. @@ -774,11 +774,10 @@ class Key(object): salt.utils.event.tagify(prefix='key')) except (OSError, IOError): pass - if preserve_minions: - preserve_minions_list = matches.get('minions', []) + if self.opts.get('preserve_minions') is True: + self.check_minion_cache(preserve_minions=matches.get('minions', [])) else: - preserve_minions_list = [] - self.check_minion_cache(preserve_minions=preserve_minions_list) + self.check_minion_cache() if self.opts.get('rotate_aes_key'): salt.crypt.dropfile(self.opts['cachedir'], self.opts['user']) return ( @@ -969,16 +968,17 @@ class RaetKey(Key): minions.extend(val) m_cache = os.path.join(self.opts['cachedir'], 'minions') - if os.path.isdir(m_cache): - for minion in os.listdir(m_cache): - if minion not in minions: - shutil.rmtree(os.path.join(m_cache, minion)) - cache = salt.cache.factory(self.opts) - clist = cache.ls(self.ACC) - if clist: - for minion in clist: + if not self.opts.get('preserve_minion_cache', False): + if os.path.isdir(m_cache): + for minion in os.listdir(m_cache): if minion not in minions and minion not in preserve_minions: - cache.flush('{0}/{1}'.format(self.ACC, minion)) + shutil.rmtree(os.path.join(m_cache, minion)) + cache = salt.cache.factory(self.opts) + clist = cache.ls(self.ACC) + if clist: + for minion in clist: + if minion not in minions and minion not in preserve_minions: + cache.flush('{0}/{1}'.format(self.ACC, minion)) kind = self.opts.get('__role', '') # application kind if kind not in salt.utils.kinds.APPL_KINDS: @@ -1220,7 +1220,7 @@ class RaetKey(Key): def delete_key(self, match=None, match_dict=None, - preserve_minions=False, + preserve_minions=None, revoke_auth=False): ''' Delete public keys. If "match" is passed, it is evaluated as a glob. 
@@ -1251,7 +1251,10 @@ class RaetKey(Key): os.remove(os.path.join(self.opts['pki_dir'], status, key)) except (OSError, IOError): pass - self.check_minion_cache(preserve_minions=matches.get('minions', [])) + if self.opts.get('preserve_minions') is True: + self.check_minion_cache(preserve_minions=matches.get('minions', [])) + else: + self.check_minion_cache() return ( self.name_match(match) if match is not None else self.dict_match(matches) diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index 34565da0d6..fe8bacaf15 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -2306,6 +2306,16 @@ class SaltKeyOptionParser(six.with_metaclass(OptionParserMeta, 'Default: %default.') ) + self.add_option( + '--preserve-minions', + default=False, + help=('Setting this to True prevents the master from deleting ' + 'the minion cache when keys are deleted, this may have ' + 'security implications if compromised minions auth with ' + 'a previous deleted minion ID. ' + 'Default: %default.') + ) + key_options_group = optparse.OptionGroup( self, 'Key Generation Options' ) @@ -2405,6 +2415,13 @@ class SaltKeyOptionParser(six.with_metaclass(OptionParserMeta, elif self.options.rotate_aes_key.lower() == 'false': self.options.rotate_aes_key = False + def process_preserve_minions(self): + if hasattr(self.options, 'preserve_minions') and isinstance(self.options.preserve_minions, str): + if self.options.preserve_minions.lower() == 'true': + self.options.preserve_minions = True + elif self.options.preserve_minions.lower() == 'false': + self.options.preserve_minions = False + def process_list(self): # Filter accepted list arguments as soon as possible if not self.options.list: From c4cc00b6023421fa86cc92979fd91a35457071a7 Mon Sep 17 00:00:00 2001 From: Todd Wells Date: Sun, 13 Aug 2017 16:24:39 -0700 Subject: [PATCH 061/639] boto_elbv2 state add error message to virtual func --- salt/states/boto_elbv2.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git 
a/salt/states/boto_elbv2.py b/salt/states/boto_elbv2.py index 4bf0e12bac..8f08b7431b 100644 --- a/salt/states/boto_elbv2.py +++ b/salt/states/boto_elbv2.py @@ -50,7 +50,11 @@ def __virtual__(): ''' Only load if boto is available. ''' - return 'boto_elbv2' if 'boto_elbv2.target_group_exists' in __salt__ else False + if 'boto_elbv2.target_group_exists' in __salt__: + return 'boto_elbv2' + else: + return (False, "The boto_elbv2 module cannot be loaded: boto3 library not found") + def targets_registered(name, targets, region=None, key=None, keyid=None, From 37029c1a16945fe407f4562fcf649e980bdf5fe2 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 15 Aug 2017 15:30:13 -0600 Subject: [PATCH 062/639] Fix unit.test_doc test Use findstr instead of grep on Windows Use os.linesep for file paths --- tests/unit/test_doc.py | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/tests/unit/test_doc.py b/tests/unit/test_doc.py index 336833a46c..9e6583a099 100644 --- a/tests/unit/test_doc.py +++ b/tests/unit/test_doc.py @@ -6,6 +6,7 @@ # Import Python libs from __future__ import absolute_import +import os # Import Salt Testing libs from tests.support.unit import TestCase @@ -13,6 +14,7 @@ from tests.support.unit import TestCase # Import Salt libs import tests.integration as integration import salt.modules.cmdmod +import salt.utils class DocTestCase(TestCase): @@ -32,8 +34,15 @@ class DocTestCase(TestCase): https://github.com/saltstack/salt/issues/12788 ''' salt_dir = integration.CODE_DIR - salt_dir += '/' - cmd = 'grep -r :doc: ' + salt_dir + + if salt.utils.is_windows(): + # No grep in Windows, use findstr + # findstr in windows doesn't prepend 'Binary` to binary files, so + # use the '/P' switch to skip files with unprintable characters + cmd = 'findstr /C:":doc:" /S /P {0}\*'.format(salt_dir) + else: + salt_dir += '/' + cmd = 'grep -r :doc: ' + salt_dir grep_call = salt.modules.cmdmod.run_stdout(cmd=cmd).split('\n') @@ -43,25 
+52,32 @@ class DocTestCase(TestCase): if line.startswith('Binary'): continue - key, val = line.split(':', 1) + if salt.utils.is_windows(): + # Need the space after the colon so it doesn't split the drive + # letter + key, val = line.split(': ', 1) + else: + key, val = line.split(':', 1) # Don't test man pages, this file, # the page that documents to not use ":doc:", or # the doc/conf.py file if 'man' in key \ or key.endswith('test_doc.py') \ - or key.endswith('doc/conf.py') \ - or key.endswith('/conventions/documentation.rst') \ - or key.endswith('doc/topics/releases/2016.11.2.rst') \ - or key.endswith('doc/topics/releases/2016.11.3.rst') \ - or key.endswith('doc/topics/releases/2016.3.5.rst'): + or key.endswith(os.sep.join(['doc', 'conf.py'])) \ + or key.endswith(os.sep.join(['conventions', 'documentation.rst'])) \ + or key.endswith(os.sep.join(['doc', 'topics', 'releases', '2016.11.2.rst'])) \ + or key.endswith(os.sep.join(['doc', 'topics', 'releases', '2016.11.3.rst'])) \ + or key.endswith(os.sep.join(['doc', 'topics', 'releases', '2016.3.5.rst'])): continue # Set up test return dict if test_ret.get(key) is None: - test_ret[key] = [val.lstrip()] + test_ret[key] = [val.strip()] else: - test_ret[key].append(val.lstrip()) + test_ret[key].append(val.strip()) + + print('*' * 68) # Allow test results to show files with :doc: ref, rather than truncating self.maxDiff = None From dd02d7170b01901d0f7f84863e0f25c9d6d4fe9c Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Wed, 9 Aug 2017 12:22:29 -0700 Subject: [PATCH 063/639] experimenting with how to handle missing minions --- salt/client/__init__.py | 2 ++ salt/utils/minions.py | 16 +++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/salt/client/__init__.py b/salt/client/__init__.py index 9859a37a11..ce7d1c0cfe 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -325,6 +325,7 @@ class LocalClient(object): arg = salt.utils.args.condition_input(arg, kwarg) + log.debug('==== tgt {} ===='.format(tgt)) try: pub_data = self.pub( tgt, @@ -1578,6 +1579,7 @@ class LocalClient(object): connected_minions = None return_count = 0 + log.debug('==== minions {} ==='.format(minions)) for ret in self.get_iter_returns(jid, minions, timeout=timeout, diff --git a/salt/utils/minions.py b/salt/utils/minions.py index eeeb5ad369..e3c5db04f8 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -207,8 +207,15 @@ class CkMinions(object): ''' Return the minions found by looking via a list ''' + import inspect + curframe = inspect.currentframe() + calframe = inspect.getouterframes(curframe, 2) + log.debug('=== {} called _check_list_minions ==='.format(calframe[1][3])) + + log.debug('== calling _check_list_minions ==') if isinstance(expr, six.string_types): expr = [m for m in expr.split(',') if m] + log.debug('== expr {} =='.format(expr)) minions = self._pki_minions() return [x for x in expr if x in minions] @@ -437,7 +444,7 @@ class CkMinions(object): ''' Return the minions found by looking via compound matcher ''' - log.debug('_check_compound_minions({0}, {1}, {2}, {3})'.format(expr, delimiter, greedy, pillar_exact)) + log.debug('=== _check_compound_minions({0}, {1}, {2}, {3})'.format(expr, delimiter, greedy, pillar_exact)) if not isinstance(expr, six.string_types) and not isinstance(expr, (list, tuple)): log.error('Compound target that is neither string, list nor tuple') return [] @@ -545,6 +552,7 @@ class CkMinions(object): 
engine_args.append(greedy) results.append(str(set(engine(*engine_args)))) + log.debug('== results {} =='.format(results)) if unmatched and unmatched[-1] == '-': results.append(')') unmatched.pop() @@ -562,6 +570,7 @@ class CkMinions(object): results = ' '.join(results) log.debug('Evaluating final compound matching expr: {0}' .format(results)) + log.debug('=== unmatched {} ==='.format(unmatched)) try: return list(eval(results)) # pylint: disable=W0123 except Exception: @@ -633,6 +642,11 @@ class CkMinions(object): match the regex, this will then be used to parse the returns to make sure everyone has checked back in. ''' + import inspect + curframe = inspect.currentframe() + calframe = inspect.getouterframes(curframe, 2) + log.debug('=== {} called check_minions ==='.format(calframe[1][3])) + try: if expr is None: expr = '' From fcf61844d5922d450a6dae72920d90c964858c4a Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 11 Aug 2017 11:39:22 -0700 Subject: [PATCH 064/639] Making all _check functions return missing minions for consistency. General cleanup, removing debugging logs. 
--- salt/client/__init__.py | 9 +++- salt/master.py | 12 +++-- salt/utils/minions.py | 98 +++++++++++++++++++++++++++-------------- 3 files changed, 80 insertions(+), 39 deletions(-) diff --git a/salt/client/__init__.py b/salt/client/__init__.py index ce7d1c0cfe..ed0a9d7244 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -325,7 +325,6 @@ class LocalClient(object): arg = salt.utils.args.condition_input(arg, kwarg) - log.debug('==== tgt {} ===='.format(tgt)) try: pub_data = self.pub( tgt, @@ -1143,6 +1142,7 @@ class LocalClient(object): minion_timeouts = {} found = set() + missing = [] # Check to see if the jid is real, if not return the empty dict try: if self.returners[u'{0}.get_load'.format(self.opts[u'master_job_cache'])](jid) == {}: @@ -1181,6 +1181,8 @@ class LocalClient(object): break if u'minions' in raw.get(u'data', {}): minions.update(raw[u'data'][u'minions']) + if u'missing' in raw.get(u'data', {}): + missing.extend(raw[u'data'][u'missing']) continue if u'return' not in raw[u'data']: continue @@ -1322,6 +1324,10 @@ class LocalClient(object): for minion in list((minions - found)): yield {minion: {u'failed': True}} + if missing: + for minion in missing: + yield {minion: {'failed': True}} + def get_returns( self, jid, @@ -1579,7 +1585,6 @@ class LocalClient(object): connected_minions = None return_count = 0 - log.debug('==== minions {} ==='.format(minions)) for ret in self.get_iter_returns(jid, minions, timeout=timeout, diff --git a/salt/master.py b/salt/master.py index 59388bc0b5..37ac6cbaa0 100644 --- a/salt/master.py +++ b/salt/master.py @@ -1853,11 +1853,13 @@ class ClearFuncs(object): # Retrieve the minions list delimiter = clear_load.get(u'kwargs', {}).get(u'delimiter', DEFAULT_TARGET_DELIM) - minions = self.ckminions.check_minions( + _res = self.ckminions.check_minions( clear_load[u'tgt'], clear_load.get(u'tgt_type', u'glob'), delimiter ) + minions = _res.get('minions', list()) + missing = _res.get('missing', list()) # Check for 
external auth calls if extra.get(u'token', False): @@ -1962,7 +1964,7 @@ class ClearFuncs(object): if jid is None: return {u'enc': u'clear', u'load': {u'error': u'Master failed to assign jid'}} - payload = self._prep_pub(minions, jid, clear_load, extra) + payload = self._prep_pub(minions, jid, clear_load, extra, missing) # Send it! self._send_pub(payload) @@ -1971,7 +1973,8 @@ class ClearFuncs(object): u'enc': u'clear', u'load': { u'jid': clear_load[u'jid'], - u'minions': minions + u'minions': minions, + u'missing': missing } } @@ -2008,7 +2011,7 @@ class ClearFuncs(object): chan = salt.transport.server.PubServerChannel.factory(opts) chan.publish(load) - def _prep_pub(self, minions, jid, clear_load, extra): + def _prep_pub(self, minions, jid, clear_load, extra, missing): ''' Take a given load and perform the necessary steps to prepare a publication. @@ -2029,6 +2032,7 @@ class ClearFuncs(object): u'fun': clear_load[u'fun'], u'arg': clear_load[u'arg'], u'minions': minions, + u'missing': missing, } # Announce the job on the event bus diff --git a/salt/utils/minions.py b/salt/utils/minions.py index e3c5db04f8..8ef12c25ff 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -201,7 +201,8 @@ class CkMinions(object): ''' Return the minions found by looking via globs ''' - return fnmatch.filter(self._pki_minions(), expr) + return {'minions': fnmatch.filter(self._pki_minions(), expr), + 'missing': list()} def _check_list_minions(self, expr, greedy): # pylint: disable=unused-argument ''' @@ -212,19 +213,22 @@ class CkMinions(object): calframe = inspect.getouterframes(curframe, 2) log.debug('=== {} called _check_list_minions ==='.format(calframe[1][3])) - log.debug('== calling _check_list_minions ==') if isinstance(expr, six.string_types): expr = [m for m in expr.split(',') if m] log.debug('== expr {} =='.format(expr)) minions = self._pki_minions() - return [x for x in expr if x in minions] + log.debug('== missing {} =='.format([x for x in expr if x not in 
minions])) + #return [x for x in expr if x in minions] + return {'minions': [x for x in expr if x in minions], + 'missing': [x for x in expr if x not in minions]} def _check_pcre_minions(self, expr, greedy): # pylint: disable=unused-argument ''' Return the minions found by looking via regular expressions ''' reg = re.compile(expr) - return [m for m in self._pki_minions() if reg.match(m)] + return {'minions': [m for m in self._pki_minions() if reg.match(m)], + 'missing': list()} def _pki_minions(self): ''' @@ -272,7 +276,8 @@ class CkMinions(object): elif cache_enabled: minions = list_cached_minions() else: - return [] + return {'minions': list(), + 'missing': list()} if cache_enabled: if greedy: @@ -280,7 +285,8 @@ class CkMinions(object): else: cminions = minions if not cminions: - return minions + return {'minions': minions, + 'missing': list()} minions = set(minions) for id_ in cminions: if greedy and id_ not in minions: @@ -298,7 +304,8 @@ class CkMinions(object): exact_match=exact_match): minions.remove(id_) minions = list(minions) - return minions + return {'minions': minions, + 'missing': list()} def _check_grain_minions(self, expr, delimiter, greedy): ''' @@ -353,7 +360,8 @@ class CkMinions(object): elif cache_enabled: minions = self.cache.ls('minions') else: - return [] + return {'minions': list(), + 'missing': list()} if cache_enabled: if greedy: @@ -361,7 +369,8 @@ class CkMinions(object): else: cminions = minions if cminions is None: - return minions + return {'minions': minions, + 'missing': list()} tgt = expr try: @@ -373,7 +382,8 @@ class CkMinions(object): tgt = ipaddress.ip_network(tgt) except: # pylint: disable=bare-except log.error('Invalid IP/CIDR target: {0}'.format(tgt)) - return [] + return {'minions': list(), + 'missing': list()} proto = 'ipv{0}'.format(tgt.version) minions = set(minions) @@ -394,7 +404,8 @@ class CkMinions(object): if not match and id_ in minions: minions.remove(id_) - return list(minions) + return {'minions': 
list(minions), + 'missing': list()} def _check_range_minions(self, expr, greedy): ''' @@ -419,11 +430,14 @@ class CkMinions(object): for fn_ in salt.utils.isorted(os.listdir(os.path.join(self.opts['pki_dir'], self.acc))): if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_)): mlist.append(fn_) - return mlist + return {'minions': mlist, + 'missing': list()} elif cache_enabled: - return self.cache.ls('minions') + return {'minions': self.cache.ls('minions'), + 'missing': list()} else: - return list() + return {'minions': list(), + 'missing': list()} def _check_compound_pillar_exact_minions(self, expr, delimiter, greedy): ''' @@ -444,10 +458,14 @@ class CkMinions(object): ''' Return the minions found by looking via compound matcher ''' - log.debug('=== _check_compound_minions({0}, {1}, {2}, {3})'.format(expr, delimiter, greedy, pillar_exact)) + import inspect + curframe = inspect.currentframe() + calframe = inspect.getouterframes(curframe, 2) + log.debug('=== {} called _check_compound_minions ==='.format(calframe[1][3])) + if not isinstance(expr, six.string_types) and not isinstance(expr, (list, tuple)): log.error('Compound target that is neither string, list nor tuple') - return [] + return {'minions': list(), 'missing': list()} minions = set(self._pki_minions()) log.debug('minions: {0}'.format(minions)) @@ -468,6 +486,7 @@ class CkMinions(object): results = [] unmatched = [] opers = ['and', 'or', 'not', '(', ')'] + missing = [] if isinstance(expr, six.string_types): words = expr.split() @@ -482,7 +501,7 @@ class CkMinions(object): if results: if results[-1] == '(' and word in ('and', 'or'): log.error('Invalid beginning operator after "(": {0}'.format(word)) - return [] + return {'minions': list(), 'missing': list()} if word == 'not': if not results[-1] in ('&', '|', '('): results.append('&') @@ -502,7 +521,7 @@ class CkMinions(object): log.error('Invalid compound expr (unexpected ' 'right parenthesis): {0}' .format(expr)) - 
return [] + return {'minions': list(), 'missing': list()} results.append(word) unmatched.pop() if unmatched and unmatched[-1] == '-': @@ -511,7 +530,7 @@ class CkMinions(object): else: # Won't get here, unless oper is added log.error('Unhandled oper in compound expr: {0}' .format(expr)) - return [] + return {'minions': list(), 'missing': list()} else: # seq start with oper, fail if word == 'not': @@ -527,13 +546,13 @@ class CkMinions(object): 'Expression may begin with' ' binary operator: {0}'.format(word) ) - return [] + return {'minions': list(), 'missing': list()} elif target_info and target_info['engine']: if 'N' == target_info['engine']: # Nodegroups should already be expanded/resolved to other engines log.error('Detected nodegroup expansion failure of "{0}"'.format(word)) - return [] + return {'minions': list(), 'missing': list()} engine = ref.get(target_info['engine']) if not engine: # If an unknown engine is called at any time, fail out @@ -544,22 +563,31 @@ class CkMinions(object): word, ) ) - return [] + return {'minions': list(), 'missing': list()} engine_args = [target_info['pattern']] if target_info['engine'] in ('G', 'P', 'I', 'J'): engine_args.append(target_info['delimiter'] or ':') engine_args.append(greedy) - results.append(str(set(engine(*engine_args)))) + _results = engine(*engine_args) + log.debug('== _results from engine {} =='.format(_results)) + #results.append(str(set(engine(*engine_args)))) + results.append(str(set(_results['minions']))) + missing.extend(_results['missing']) + log.debug('=== extending missing {} ==='.format(missing)) log.debug('== results {} =='.format(results)) + log.debug('== missing {} =='.format(missing)) if unmatched and unmatched[-1] == '-': results.append(')') unmatched.pop() else: # The match is not explicitly defined, evaluate as a glob - results.append(str(set(self._check_glob_minions(word, True)))) + _results = self._check_glob_minions(word, True) + log.debug('=== running glob here {} ==='.format(_results)) + 
results.append(str(set(_results['minions']))) + log.debug('=== results after glob {} ==='.format(results)) if unmatched and unmatched[-1] == '-': results.append(')') unmatched.pop() @@ -570,14 +598,15 @@ class CkMinions(object): results = ' '.join(results) log.debug('Evaluating final compound matching expr: {0}' .format(results)) - log.debug('=== unmatched {} ==='.format(unmatched)) try: - return list(eval(results)) # pylint: disable=W0123 + minions = list(eval(results)) # pylint: disable=W0123 + return {'minions': minions, 'missing': missing} except Exception: log.error('Invalid compound target: {0}'.format(expr)) - return [] + return {'minions': list(), 'missing': list()} - return list(minions) + return {'minions': list(minions), + 'missing': list()} def connected_ids(self, subset=None, show_ipv4=False, include_localhost=False): ''' @@ -629,7 +658,7 @@ class CkMinions(object): for fn_ in salt.utils.isorted(os.listdir(os.path.join(self.opts['pki_dir'], self.acc))): if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_)): mlist.append(fn_) - return mlist + return {'minions': mlist, 'missing': list()} def check_minions(self, expr, @@ -646,7 +675,9 @@ class CkMinions(object): curframe = inspect.currentframe() calframe = inspect.getouterframes(curframe, 2) log.debug('=== {} called check_minions ==='.format(calframe[1][3])) + log.debug('=== tgt_type {} ==='.format(tgt_type)) + missing = [] try: if expr is None: expr = '' @@ -658,15 +689,16 @@ class CkMinions(object): 'pillar_exact', 'compound', 'compound_pillar_exact'): - minions = check_func(expr, delimiter, greedy) + _res = check_func(expr, delimiter, greedy) else: - minions = check_func(expr, greedy) + _res = check_func(expr, greedy) except Exception: log.exception( 'Failed matching available minions with {0} pattern: {1}' .format(tgt_type, expr)) - minions = [] - return minions + _res = {'minions': list(), 'missing': list()} + log.debug('== inside check_minions, returning 
_res {} =='.format(_res)) + return _res def _expand_matching(self, auth_entry): ref = {'G': 'grain', From 96e8b90d1e4023d81dedc38aa023f7a9f8bb04e6 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Mon, 14 Aug 2017 09:01:08 -0700 Subject: [PATCH 065/639] Updating all instances of check_minions. --- salt/cli/cp.py | 3 +- salt/client/__init__.py | 3 +- salt/daemons/masterapi.py | 9 ++++-- salt/pillar/consul_pillar.py | 3 +- salt/pillar/file_tree.py | 3 +- salt/pillar/nodegroups.py | 3 +- salt/returners/couchbase_return.py | 3 +- salt/returners/local_cache.py | 3 +- salt/roster/cache.py | 3 +- salt/transport/zeromq.py | 5 +-- salt/utils/master.py | 3 +- salt/utils/minions.py | 52 +++++++++++------------------- 12 files changed, 45 insertions(+), 48 deletions(-) diff --git a/salt/cli/cp.py b/salt/cli/cp.py index d4259df45c..cb29c79921 100644 --- a/salt/cli/cp.py +++ b/salt/cli/cp.py @@ -122,9 +122,10 @@ class SaltCP(object): if gzip \ else salt.utils.itertools.read_file - minions = salt.utils.minions.CkMinions(self.opts).check_minions( + _res = salt.utils.minions.CkMinions(self.opts).check_minions( tgt, tgt_type=selected_target_option or 'glob') + minions = _res['minions'] local = salt.client.get_local_client(self.opts['conf_file']) diff --git a/salt/client/__init__.py b/salt/client/__init__.py index ed0a9d7244..abcedd52bf 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -348,7 +348,8 @@ class LocalClient(object): return self._check_pub_data(pub_data) def gather_minions(self, tgt, expr_form): - return salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) + _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) + return _res['minions'] @tornado.gen.coroutine def run_job_async( diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index 4e2e2baf84..ed5594eea9 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -585,11 +585,12 @@ class 
RemoteFuncs(object): if match_type.lower() == 'compound': match_type = 'compound_pillar_exact' checker = salt.utils.minions.CkMinions(self.opts) - minions = checker.check_minions( + _res = checker.check_minions( load['tgt'], match_type, greedy=False ) + minions = _res['minions'] for minion in minions: fdata = self.cache.fetch('minions/{0}'.format(minion), 'mine') if isinstance(fdata, dict): @@ -908,9 +909,10 @@ class RemoteFuncs(object): pub_load['tgt_type'] = load['tgt_type'] ret = {} ret['jid'] = self.local.cmd_async(**pub_load) - ret['minions'] = self.ckminions.check_minions( + _res = self.ckminions.check_minions( load['tgt'], pub_load['tgt_type']) + ret['minions'] = _res['minions'] auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') @@ -1203,11 +1205,12 @@ class LocalFuncs(object): # Retrieve the minions list delimiter = load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM) - minions = self.ckminions.check_minions( + _res = self.ckminions.check_minions( load['tgt'], load.get('tgt_type', 'glob'), delimiter ) + minions = _res['minions'] # Check for external auth calls if extra.get('token', False): diff --git a/salt/pillar/consul_pillar.py b/salt/pillar/consul_pillar.py index 0d10b80c36..d661649f4f 100644 --- a/salt/pillar/consul_pillar.py +++ b/salt/pillar/consul_pillar.py @@ -167,7 +167,8 @@ def ext_pillar(minion_id, opts['target'] = match.group(1) temp = temp.replace(match.group(0), '') checker = salt.utils.minions.CkMinions(__opts__) - minions = checker.check_minions(opts['target'], 'compound') + _res = checker.check_minions(opts['target'], 'compound') + minions = _res['minions'] if minion_id not in minions: return {} diff --git a/salt/pillar/file_tree.py b/salt/pillar/file_tree.py index 59b420e4a4..323958e2f9 100644 --- a/salt/pillar/file_tree.py +++ b/salt/pillar/file_tree.py @@ -336,9 +336,10 @@ def ext_pillar(minion_id, if (os.path.isdir(nodegroups_dir) and nodegroup in master_ngroups): ckminions = 
salt.utils.minions.CkMinions(__opts__) - match = ckminions.check_minions( + _res = ckminions.check_minions( master_ngroups[nodegroup], 'compound') + match = _res['minions'] if minion_id in match: ngroup_dir = os.path.join( nodegroups_dir, str(nodegroup)) diff --git a/salt/pillar/nodegroups.py b/salt/pillar/nodegroups.py index bf74c2f8b6..540213436c 100644 --- a/salt/pillar/nodegroups.py +++ b/salt/pillar/nodegroups.py @@ -64,9 +64,10 @@ def ext_pillar(minion_id, pillar, pillar_name=None): ckminions = None for nodegroup_name in six.iterkeys(all_nodegroups): ckminions = ckminions or CkMinions(__opts__) - match = ckminions.check_minions( + _res = ckminions.check_minions( all_nodegroups[nodegroup_name], 'compound') + match = _res['minions'] if minion_id in match: nodegroups_minion_is_in.append(nodegroup_name) diff --git a/salt/returners/couchbase_return.py b/salt/returners/couchbase_return.py index 24c3a9105a..9e56dec407 100644 --- a/salt/returners/couchbase_return.py +++ b/salt/returners/couchbase_return.py @@ -223,10 +223,11 @@ def save_load(jid, clear_load, minion=None): if 'tgt' in clear_load and clear_load['tgt'] != '': ckminions = salt.utils.minions.CkMinions(__opts__) # Retrieve the minions list - minions = ckminions.check_minions( + _res = ckminions.check_minions( clear_load['tgt'], clear_load.get('tgt_type', 'glob') ) + minions = _res['minions'] save_minions(jid, minions) diff --git a/salt/returners/local_cache.py b/salt/returners/local_cache.py index 85a1c07264..91334e6b11 100644 --- a/salt/returners/local_cache.py +++ b/salt/returners/local_cache.py @@ -225,10 +225,11 @@ def save_load(jid, clear_load, minions=None, recurse_count=0): if minions is None: ckminions = salt.utils.minions.CkMinions(__opts__) # Retrieve the minions list - minions = ckminions.check_minions( + _res = ckminions.check_minions( clear_load['tgt'], clear_load.get('tgt_type', 'glob') ) + minions = _res['minions'] # save the minions to a cache so we can see in the UI save_minions(jid, 
minions) diff --git a/salt/roster/cache.py b/salt/roster/cache.py index cea0377f8a..bdb1cc8171 100644 --- a/salt/roster/cache.py +++ b/salt/roster/cache.py @@ -117,7 +117,8 @@ def targets(tgt, tgt_type='glob', **kwargs): # pylint: disable=W0613 The resulting roster can be configured using ``roster_order`` and ``roster_default``. ''' minions = salt.utils.minions.CkMinions(__opts__) - minions = minions.check_minions(tgt, tgt_type) + _res = minions.check_minions(tgt, tgt_type) + minions = _res['minions'] ret = {} if not minions: diff --git a/salt/transport/zeromq.py b/salt/transport/zeromq.py index 3d35982c31..7bb5409baf 100644 --- a/salt/transport/zeromq.py +++ b/salt/transport/zeromq.py @@ -827,8 +827,9 @@ class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel): match_targets = ["pcre", "glob", "list"] if self.opts['zmq_filtering'] and load['tgt_type'] in match_targets: # Fetch a list of minions that match - match_ids = self.ckminions.check_minions(load['tgt'], - tgt_type=load['tgt_type']) + _res = self.ckminions.check_minions(load['tgt'], + tgt_type=load['tgt_type']) + match_ids = _res['minions'] log.debug("Publish Side Match: {0}".format(match_ids)) # Send list of miions thru so zmq can target them diff --git a/salt/utils/master.py b/salt/utils/master.py index 51c635bb52..dabe226a42 100644 --- a/salt/utils/master.py +++ b/salt/utils/master.py @@ -248,7 +248,8 @@ class MasterPillarUtil(object): # Return a list of minion ids that match the target and tgt_type minion_ids = [] ckminions = salt.utils.minions.CkMinions(self.opts) - minion_ids = ckminions.check_minions(self.tgt, self.tgt_type) + _res = ckminions.check_minions(self.tgt, self.tgt_type) + minion_ids = _res['minions'] if len(minion_ids) == 0: log.debug('No minions matched for tgt="{0}" and tgt_type="{1}"'.format(self.tgt, self.tgt_type)) return {} diff --git a/salt/utils/minions.py b/salt/utils/minions.py index 8ef12c25ff..24d58668df 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py 
@@ -208,17 +208,9 @@ class CkMinions(object): ''' Return the minions found by looking via a list ''' - import inspect - curframe = inspect.currentframe() - calframe = inspect.getouterframes(curframe, 2) - log.debug('=== {} called _check_list_minions ==='.format(calframe[1][3])) - if isinstance(expr, six.string_types): expr = [m for m in expr.split(',') if m] - log.debug('== expr {} =='.format(expr)) minions = self._pki_minions() - log.debug('== missing {} =='.format([x for x in expr if x not in minions])) - #return [x for x in expr if x in minions] return {'minions': [x for x in expr if x in minions], 'missing': [x for x in expr if x not in minions]} @@ -458,11 +450,6 @@ class CkMinions(object): ''' Return the minions found by looking via compound matcher ''' - import inspect - curframe = inspect.currentframe() - calframe = inspect.getouterframes(curframe, 2) - log.debug('=== {} called _check_compound_minions ==='.format(calframe[1][3])) - if not isinstance(expr, six.string_types) and not isinstance(expr, (list, tuple)): log.error('Compound target that is neither string, list nor tuple') return {'minions': list(), 'missing': list()} @@ -571,13 +558,8 @@ class CkMinions(object): engine_args.append(greedy) _results = engine(*engine_args) - log.debug('== _results from engine {} =='.format(_results)) - #results.append(str(set(engine(*engine_args)))) results.append(str(set(_results['minions']))) missing.extend(_results['missing']) - log.debug('=== extending missing {} ==='.format(missing)) - log.debug('== results {} =='.format(results)) - log.debug('== missing {} =='.format(missing)) if unmatched and unmatched[-1] == '-': results.append(')') unmatched.pop() @@ -585,9 +567,7 @@ class CkMinions(object): else: # The match is not explicitly defined, evaluate as a glob _results = self._check_glob_minions(word, True) - log.debug('=== running glob here {} ==='.format(_results)) results.append(str(set(_results['minions']))) - log.debug('=== results after glob {} 
==='.format(results)) if unmatched and unmatched[-1] == '-': results.append(')') unmatched.pop() @@ -671,13 +651,7 @@ class CkMinions(object): match the regex, this will then be used to parse the returns to make sure everyone has checked back in. ''' - import inspect - curframe = inspect.currentframe() - calframe = inspect.getouterframes(curframe, 2) - log.debug('=== {} called check_minions ==='.format(calframe[1][3])) - log.debug('=== tgt_type {} ==='.format(tgt_type)) - missing = [] try: if expr is None: expr = '' @@ -697,7 +671,6 @@ class CkMinions(object): 'Failed matching available minions with {0} pattern: {1}' .format(tgt_type, expr)) _res = {'minions': list(), 'missing': list()} - log.debug('== inside check_minions, returning _res {} =='.format(_res)) return _res def _expand_matching(self, auth_entry): @@ -718,7 +691,8 @@ class CkMinions(object): v_matcher = ref.get(target_info['engine']) v_expr = target_info['pattern'] - return set(self.check_minions(v_expr, v_matcher)) + _res = self.check_minions(v_expr, v_matcher) + return set(_res['minions']) def validate_tgt(self, valid, expr, tgt_type, minions=None, expr_form=None): ''' @@ -738,7 +712,8 @@ class CkMinions(object): v_minions = self._expand_matching(valid) if minions is None: - minions = set(self.check_minions(expr, tgt_type)) + _res = self.check_minions(expr, tgt_type) + minions = set(_res['minions']) else: minions = set(minions) d_bool = not bool(minions.difference(v_minions)) @@ -827,8 +802,12 @@ class CkMinions(object): v_tgt_type = 'pillar_exact' elif tgt_type.lower() == 'compound': v_tgt_type = 'compound_pillar_exact' - v_minions = set(self.check_minions(tgt, v_tgt_type)) - minions = set(self.check_minions(tgt, tgt_type)) + _res = self.check_minions(tgt, v_tgt_type) + v_minions = set(_res['minions']) + + _res = self.check_minions(tgt, tgt_type) + minions = set(_res['minions']) + mismatch = bool(minions.difference(v_minions)) # If the non-exact match gets more minions than the exact match # then 
pillar globbing or PCRE is being used, and we have a @@ -915,8 +894,12 @@ class CkMinions(object): v_tgt_type = 'pillar_exact' elif tgt_type.lower() == 'compound': v_tgt_type = 'compound_pillar_exact' - v_minions = set(self.check_minions(tgt, v_tgt_type)) - minions = set(self.check_minions(tgt, tgt_type)) + _res = self.check_minions(tgt, v_tgt_type) + v_minions = set(_res['minions']) + + _res = self.check_minions(tgt, tgt_type) + minions = set(_res['minions']) + mismatch = bool(minions.difference(v_minions)) # If the non-exact match gets more minions than the exact match # then pillar globbing or PCRE is being used, and we have a @@ -1138,9 +1121,10 @@ def mine_get(tgt, fun, tgt_type='glob', opts=None): ret = {} serial = salt.payload.Serial(opts) checker = CkMinions(opts) - minions = checker.check_minions( + _res = checker.check_minions( tgt, tgt_type) + minions = _res['minions'] cache = salt.cache.factory(opts) for minion in minions: mdata = cache.fetch('minions/{0}'.format(minion), 'mine') From e6b750d6a1f53e5406b034f41abbce8852555ef7 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Mon, 14 Aug 2017 10:49:27 -0700 Subject: [PATCH 066/639] Adding some integration tests. --- tests/integration/client/test_standard.py | 30 +++++++++++++++++++++++ tests/integration/files/conf/master | 1 + 2 files changed, 31 insertions(+) diff --git a/tests/integration/client/test_standard.py b/tests/integration/client/test_standard.py index f58c7272cc..b0e8809da1 100644 --- a/tests/integration/client/test_standard.py +++ b/tests/integration/client/test_standard.py @@ -147,3 +147,33 @@ class StdTest(ModuleCase): finally: os.unlink(key_file) + + def test_missing_minion_list(self): + ''' + test cmd with missing minion in nodegroup + ''' + ret = self.client.cmd( + 'minion,ghostminion', + 'test.ping', + tgt_type='list' + ) + self.assertIn('minion', ret) + self.assertIn('ghostminion', ret) + self.assertEqual(True, ret['minion']) + self.assertEqual(u'Minion did not return. 
[No response]', + ret['ghostminion']) + + def test_missing_minion_nodegroup(self): + ''' + test cmd with missing minion in nodegroup + ''' + ret = self.client.cmd( + 'missing_minion', + 'test.ping', + tgt_type='nodegroup' + ) + self.assertIn('minion', ret) + self.assertIn('ghostminion', ret) + self.assertEqual(True, ret['minion']) + self.assertEqual(u'Minion did not return. [No response]', + ret['ghostminion']) diff --git a/tests/integration/files/conf/master b/tests/integration/files/conf/master index b78ecd9180..10c2eec07b 100644 --- a/tests/integration/files/conf/master +++ b/tests/integration/files/conf/master @@ -78,6 +78,7 @@ nodegroups: redundant_minions: N@min or N@mins nodegroup_loop_a: N@nodegroup_loop_b nodegroup_loop_b: N@nodegroup_loop_a + missing_minion: L@minion,ghostminion mysql.host: localhost From 0c0cdd8aa1052017bd4cf0012e9324e091babefe Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Tue, 15 Aug 2017 08:28:38 -0700 Subject: [PATCH 067/639] Updating the test_auth tests. --- tests/unit/test_auth.py | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py index e8d868e7ff..52114b8187 100644 --- a/tests/unit/test_auth.py +++ b/tests/unit/test_auth.py @@ -151,7 +151,8 @@ class MasterACLTestCase(ModuleCase): Test to ensure a simple name can auth against a given function. This tests to ensure test_user can access test.ping but *not* sys.doc ''' - with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value='some_minions')): + _check_minions_return = {'minions': ['some_minions'], 'missing': []} + with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value=_check_minions_return)): # Can we access test.ping? 
self.clear.publish(self.valid_clear_load) self.assertEqual(self.fire_event_mock.call_args[0][0]['fun'], 'test.ping') @@ -166,7 +167,8 @@ class MasterACLTestCase(ModuleCase): ''' Tests to ensure test_group can access test.echo but *not* sys.doc ''' - with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value='some_minions')): + _check_minions_return = {'minions': ['some_minions'], 'missing': []} + with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value=_check_minions_return)): self.valid_clear_load['kwargs']['user'] = 'new_user' self.valid_clear_load['fun'] = 'test.echo' self.valid_clear_load['arg'] = 'hello' @@ -232,7 +234,8 @@ class MasterACLTestCase(ModuleCase): requested_tgt = 'minion_glob1' self.valid_clear_load['tgt'] = requested_tgt self.valid_clear_load['fun'] = requested_function - with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value=['minion_glob1'])): # Assume that there is a listening minion match + _check_minions_return = {'minions': ['minion_glob1'], 'missing': []} + with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value=_check_minions_return)): # Assume that there is a listening minion match self.clear.publish(self.valid_clear_load) self.assertTrue(self.fire_event_mock.called, 'Did not fire {0} for minion tgt {1}'.format(requested_function, requested_tgt)) self.assertEqual(self.fire_event_mock.call_args[0][0]['fun'], requested_function, 'Did not fire {0} for minion glob'.format(requested_function)) @@ -258,7 +261,8 @@ class MasterACLTestCase(ModuleCase): minion1: - test.empty: ''' - with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value='minion1')): + _check_minions_return = {'minions': ['minion1'], 'missing': []} + with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value=_check_minions_return)): self.valid_clear_load['kwargs'].update({'username': 'test_user_func'}) self.valid_clear_load.update({'user': 
'test_user_func', 'tgt': 'minion1', @@ -278,7 +282,8 @@ class MasterACLTestCase(ModuleCase): - 'TEST' - 'TEST.*' ''' - with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value='minion1')): + _check_minions_return = {'minions': ['minion1'], 'missing': []} + with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value=_check_minions_return)): self.valid_clear_load['kwargs'].update({'username': 'test_user_func'}) self.valid_clear_load.update({'user': 'test_user_func', 'tgt': 'minion1', @@ -298,7 +303,8 @@ class MasterACLTestCase(ModuleCase): - 'TEST' - 'TEST.*' ''' - with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value='minion1')): + _check_minions_return = {'minions': ['minion1'], 'missing': []} + with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value=_check_minions_return)): self.valid_clear_load['kwargs'].update({'username': 'test_user_func'}) self.valid_clear_load.update({'user': 'test_user_func', 'tgt': 'minion1', @@ -323,7 +329,8 @@ class MasterACLTestCase(ModuleCase): - 'TEST' - 'TEST.*' ''' - with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value='minion1')): + _check_minions_return = {'minions': ['minion1'], 'missing': []} + with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value=_check_minions_return)): self.valid_clear_load['kwargs'].update({'username': 'test_user_func'}) # Wrong last arg self.valid_clear_load.update({'user': 'test_user_func', @@ -355,7 +362,8 @@ class MasterACLTestCase(ModuleCase): kwargs: text: 'KWMSG:.*' ''' - with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value='some_minions')): + _check_minions_return = {'minions': ['some_minions'], 'missing': []} + with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value=_check_minions_return)): self.valid_clear_load['kwargs'].update({'username': 'test_user_func'}) self.valid_clear_load.update({'user': 
'test_user_func', 'tgt': '*', @@ -377,7 +385,8 @@ class MasterACLTestCase(ModuleCase): kwargs: text: 'KWMSG:.*' ''' - with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value='some_minions')): + _check_minions_return = {'minions': ['some_minions'], 'missing': []} + with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value=_check_minions_return)): self.valid_clear_load['kwargs'].update({'username': 'test_user_func'}) self.valid_clear_load.update({'user': 'test_user_func', 'tgt': '*', @@ -430,7 +439,8 @@ class MasterACLTestCase(ModuleCase): 'kwa': 'kwa.*' 'kwb': 'kwb' ''' - with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value='some_minions')): + _check_minions_return = {'minions': ['some_minions'], 'missing': []} + with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value=_check_minions_return)): self.valid_clear_load['kwargs'].update({'username': 'test_user_func'}) self.valid_clear_load.update({'user': 'test_user_func', 'tgt': '*', @@ -460,7 +470,8 @@ class MasterACLTestCase(ModuleCase): 'kwa': 'kwa.*' 'kwb': 'kwb' ''' - with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value='some_minions')): + _check_minions_return = {'minions': ['some_minions'], 'missing': []} + with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value=_check_minions_return)): self.valid_clear_load['kwargs'].update({'username': 'test_user_func'}) self.valid_clear_load.update({'user': 'test_user_func', 'tgt': '*', From 869599cb42a99152e1a9dfb8ee4512399bf724d0 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Tue, 15 Aug 2017 20:24:15 -0700 Subject: [PATCH 068/639] Updating another test for nodegroups in pillar. 
--- tests/unit/pillar/test_nodegroups.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/unit/pillar/test_nodegroups.py b/tests/unit/pillar/test_nodegroups.py index 03b4730d95..4b872d79cb 100644 --- a/tests/unit/pillar/test_nodegroups.py +++ b/tests/unit/pillar/test_nodegroups.py @@ -27,8 +27,10 @@ fake_pillar_name = 'fake_pillar_name' def side_effect(group_sel, t): if group_sel.find(fake_minion_id) != -1: - return [fake_minion_id, ] - return ['another_minion_id', ] + return {'minions': [fake_minion_id, ], + 'missing': []} + return {'minions': ['another_minion_id', ], + 'missing': []} class NodegroupsPillarTestCase(TestCase, LoaderModuleMockMixin): From 3f2e96e561947b66ed0ea362bb46cf3e444614dd Mon Sep 17 00:00:00 2001 From: Steven Joseph Date: Wed, 16 Aug 2017 21:04:37 +1000 Subject: [PATCH 069/639] Convert set to list for serializer --- salt/states/git.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/states/git.py b/salt/states/git.py index 4808c0dce3..f12ae29c26 100644 --- a/salt/states/git.py +++ b/salt/states/git.py @@ -1322,9 +1322,9 @@ def latest(name, ]) if set(all_local_tags) != remote_tags: has_remote_rev = False - ret['changes']['new_tags'] = remote_tags.symmetric_difference( + ret['changes']['new_tags'] = list(remote_tags.symmetric_difference( all_local_tags - ) + )) if not has_remote_rev: try: From c8e98c8d8a38b1263d4d7fc4371765bf3a1676ac Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Wed, 16 Aug 2017 14:51:16 +0200 Subject: [PATCH 070/639] Added unit tests for Kubernetes module Added unit tests for: * Node listing, * deployment listing, * service listing, * pod listing, * deployment deletion and * deployment creation. 
--- tests/unit/modules/kubernetes_test.py | 115 ++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) create mode 100644 tests/unit/modules/kubernetes_test.py diff --git a/tests/unit/modules/kubernetes_test.py b/tests/unit/modules/kubernetes_test.py new file mode 100644 index 0000000000..5f2dcdc1dc --- /dev/null +++ b/tests/unit/modules/kubernetes_test.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Jochen Breuer ` +''' + +# Import Python Libs +from __future__ import absolute_import +import os + +# Import Salt Testing Libs +from salttesting import TestCase, skipIf +from salttesting.mock import ( + Mock, + patch, + NO_MOCK, + NO_MOCK_REASON +) + +from salt.modules import kubernetes + +kubernetes.__salt__ = {} +kubernetes.__grains__ = {} +kubernetes.__context__ = {} + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class KubernetesTestCase(TestCase): + ''' + Test cases for salt.modules.kubernetes + ''' + def test_nodes(self): + ''' + Test node listing. + :return: + ''' + with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib: + with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): + mock_kubernetes_lib.client.CoreV1Api.return_value = Mock( + **{"list_node.return_value.to_dict.return_value": + {'items': [{'metadata': {'name': 'mock_node_name'}}]}} + ) + self.assertEqual(kubernetes.nodes(), ['mock_node_name']) + self.assertTrue(kubernetes.kubernetes.client.CoreV1Api().list_node().to_dict.called) + + def test_deployments(self): + ''' + Tests deployment listing. 
+ :return: + ''' + with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib: + with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): + mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock( + **{"list_namespaced_deployment.return_value.to_dict.return_value": + {'items': [{'metadata': {'name': 'mock_deployment_name'}}]}} + ) + self.assertEqual(kubernetes.deployments(), ['mock_deployment_name']) + self.assertTrue( + kubernetes.kubernetes.client.ExtensionsV1beta1Api().list_namespaced_deployment().to_dict.called) + + def test_services(self): + ''' + Tests services listing. + :return: + ''' + with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib: + with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): + mock_kubernetes_lib.client.CoreV1Api.return_value = Mock( + **{"list_namespaced_service.return_value.to_dict.return_value": + {'items': [{'metadata': {'name': 'mock_service_name'}}]}} + ) + self.assertEqual(kubernetes.services(), ['mock_service_name']) + self.assertTrue(kubernetes.kubernetes.client.CoreV1Api().list_namespaced_service().to_dict.called) + + def test_pods(self): + ''' + Tests pods listing. + :return: + ''' + with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib: + with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): + mock_kubernetes_lib.client.CoreV1Api.return_value = Mock( + **{"list_namespaced_pod.return_value.to_dict.return_value": + {'items': [{'metadata': {'name': 'mock_pod_name'}}]}} + ) + self.assertEqual(kubernetes.pods(), ['mock_pod_name']) + self.assertTrue(kubernetes.kubernetes.client.CoreV1Api().list_namespaced_pod().to_dict.called) + + def test_delete_deployments(self): + ''' + Tests deployment creation. 
+ :return: + ''' + with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib: + with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): + mock_kubernetes_lib.client.V1DeleteOptions = Mock(return_value="") + mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock( + **{"delete_namespaced_deployment.return_value.to_dict.return_value": {}} + ) + self.assertEqual(kubernetes.delete_deployment("test"), {}) + self.assertTrue( + kubernetes.kubernetes.client.ExtensionsV1beta1Api().delete_namespaced_deployment().to_dict.called) + + def test_create_deployments(self): + ''' + Tests deployment creation. + :return: + ''' + with patch('salt.modules.kubernetes.kubernetes') as mock_kubernetes_lib: + with patch.dict(kubernetes.__salt__, {'config.option': Mock(return_value="")}): + mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock( + **{"create_namespaced_deployment.return_value.to_dict.return_value": {}} + ) + self.assertEqual(kubernetes.create_deployment("test", "default", {}, {}, None, None, None), {}) + self.assertTrue( + kubernetes.kubernetes.client.ExtensionsV1beta1Api().create_namespaced_deployment().to_dict.called) From 651b1bab096b9f45660b81baec30ebe7258e9874 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Wed, 16 Aug 2017 18:36:09 +0300 Subject: [PATCH 071/639] Properly handle `prereq` having lost requisites. Add the result to `self.pre` instead of to `running`. 
--- salt/state.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/salt/state.py b/salt/state.py index 82c180486b..729e740f5f 100644 --- a/salt/state.py +++ b/salt/state.py @@ -2071,13 +2071,17 @@ class State(object): req_val = lreq[req_key] comment += \ '{0}{1}: {2}\n'.format(' ' * 23, req_key, req_val) - running[tag] = {'changes': {}, - 'result': False, - 'comment': comment, - '__run_num__': self.__run_num, - '__sls__': low['__sls__']} + if low.get('__prereq__'): + run_dict = self.pre + else: + run_dict = running + run_dict[tag] = {'changes': {}, + 'result': False, + 'comment': comment, + '__run_num__': self.__run_num, + '__sls__': low['__sls__']} self.__run_num += 1 - self.event(running[tag], len(chunks), fire_event=low.get('fire_event')) + self.event(run_dict[tag], len(chunks), fire_event=low.get('fire_event')) return running for chunk in reqs: # Check to see if the chunk has been run, only run it if From 201ceae4c45782f0bfbf2ee6d1e341880460a10b Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 16 Aug 2017 10:29:26 -0600 Subject: [PATCH 072/639] Fix lint, remove debug statement --- tests/unit/test_doc.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/unit/test_doc.py b/tests/unit/test_doc.py index 9e6583a099..52311d2f9e 100644 --- a/tests/unit/test_doc.py +++ b/tests/unit/test_doc.py @@ -39,7 +39,7 @@ class DocTestCase(TestCase): # No grep in Windows, use findstr # findstr in windows doesn't prepend 'Binary` to binary files, so # use the '/P' switch to skip files with unprintable characters - cmd = 'findstr /C:":doc:" /S /P {0}\*'.format(salt_dir) + cmd = 'findstr /C:":doc:" /S /P {0}\\*'.format(salt_dir) else: salt_dir += '/' cmd = 'grep -r :doc: ' + salt_dir @@ -77,8 +77,6 @@ class DocTestCase(TestCase): else: test_ret[key].append(val.strip()) - print('*' * 68) - # Allow test results to show files with :doc: ref, rather than truncating self.maxDiff = None From 
25627d99abf1a02d2e8f5fc359ec15f547adb2a8 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 16 Aug 2017 12:36:12 -0500 Subject: [PATCH 073/639] nfs3: fix 3 lint errors --- salt/modules/nfs3.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/modules/nfs3.py b/salt/modules/nfs3.py index 090cd4c87f..61e3389fe9 100644 --- a/salt/modules/nfs3.py +++ b/salt/modules/nfs3.py @@ -89,13 +89,13 @@ def add_export(exports='/etc/exports', path=None, hosts=None, options=None): salt '*' nfs3.add_export path='/srv/test' hosts='127.0.0.1' options=['rw'] ''' - if options == None: + if options is None: options = [] - if not type(hosts) is str: + if type(hosts) is not str: # Lists, etc would silently mangle /etc/exports raise TypeError('hosts argument must be a string') edict = list_exports(exports) - if not path in edict: + if path not in edict: edict[path] = [] new = {'hosts': hosts, 'options': options} edict[path].append(new) From eec0646c79d89a6aab45f2c884955ce6afa336fb Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Wed, 16 Aug 2017 11:22:16 -0700 Subject: [PATCH 074/639] Fixing lint error in test_auth.py --- tests/unit/test_auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py index 52114b8187..761f06a7e2 100644 --- a/tests/unit/test_auth.py +++ b/tests/unit/test_auth.py @@ -235,7 +235,7 @@ class MasterACLTestCase(ModuleCase): self.valid_clear_load['tgt'] = requested_tgt self.valid_clear_load['fun'] = requested_function _check_minions_return = {'minions': ['minion_glob1'], 'missing': []} - with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value=_check_minions_return)): # Assume that there is a listening minion match + with patch('salt.utils.minions.CkMinions.check_minions', MagicMock(return_value=_check_minions_return)): # Assume that there is a listening minion match self.clear.publish(self.valid_clear_load) self.assertTrue(self.fire_event_mock.called, 'Did not fire {0} for minion tgt {1}'.format(requested_function, requested_tgt)) self.assertEqual(self.fire_event_mock.call_args[0][0]['fun'], requested_function, 'Did not fire {0} for minion glob'.format(requested_function)) From 90c46ddefb457bfe39e168ea2c083beadf22a2df Mon Sep 17 00:00:00 2001 From: Lukas Raska Date: Wed, 16 Aug 2017 21:40:33 +0200 Subject: [PATCH 075/639] Allow cmdmod.exec_code_all to utilize cmdmod.run_all parameters --- salt/modules/cmdmod.py | 19 ++++++++++++---- tests/integration/modules/test_cmdmod.py | 28 ++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 4 deletions(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index 862b1d4b70..a311e10e3c 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -2403,33 +2403,39 @@ def has_exec(cmd): return which(cmd) is not None -def exec_code(lang, code, cwd=None): +def exec_code(lang, code, cwd=None, args=None, **kwargs): ''' Pass in two strings, the first naming the executable language, aka - python2, python3, ruby, 
perl, lua, etc. the second string containing the code you wish to execute. The stdout will be returned. + All parameters from :mod:`cmd.run_all ` except python_shell can be used. + CLI Example: .. code-block:: bash salt '*' cmd.exec_code ruby 'puts "cheese"' + salt '*' cmd.exec_code ruby 'puts "cheese"' args='["arg1", "arg2"]' env='{"FOO": "bar"}' ''' - return exec_code_all(lang, code, cwd)['stdout'] + return exec_code_all(lang, code, cwd, args, **kwargs)['stdout'] -def exec_code_all(lang, code, cwd=None): +def exec_code_all(lang, code, cwd=None, args=None, **kwargs): ''' Pass in two strings, the first naming the executable language, aka - python2, python3, ruby, perl, lua, etc. the second string containing the code you wish to execute. All cmd artifacts (stdout, stderr, retcode, pid) will be returned. + All parameters from :mod:`cmd.run_all ` except python_shell can be used. + CLI Example: .. code-block:: bash salt '*' cmd.exec_code_all ruby 'puts "cheese"' + salt '*' cmd.exec_code_all ruby 'puts "cheese"' args='["arg1", "arg2"]' env='{"FOO": "bar"}' ''' powershell = lang.lower().startswith("powershell") @@ -2446,7 +2452,12 @@ def exec_code_all(lang, code, cwd=None): else: cmd = [lang, codefile] - ret = run_all(cmd, cwd=cwd, python_shell=False) + if isinstance(args, str): + cmd.append(args) + elif isinstance(args, list): + cmd += args + + ret = run_all(cmd, cwd=cwd, python_shell=False, **kwargs) os.remove(codefile) return ret diff --git a/tests/integration/modules/test_cmdmod.py b/tests/integration/modules/test_cmdmod.py index 8434361e08..be306cbb48 100644 --- a/tests/integration/modules/test_cmdmod.py +++ b/tests/integration/modules/test_cmdmod.py @@ -187,6 +187,34 @@ class CMDModuleTest(ModuleCase): code]).rstrip(), 'cheese') + def test_exec_code_with_single_arg(self): + ''' + cmd.exec_code + ''' + code = textwrap.dedent('''\ + import sys + sys.stdout.write(sys.argv[1])''') + arg = 'cheese' + self.assertEqual(self.run_function('cmd.exec_code', + 
[AVAILABLE_PYTHON_EXECUTABLE, + code], + args=arg).rstrip(), + arg) + + def test_exec_code_with_multiple_args(self): + ''' + cmd.exec_code + ''' + code = textwrap.dedent('''\ + import sys + sys.stdout.write(sys.argv[1])''') + arg = 'cheese' + self.assertEqual(self.run_function('cmd.exec_code', + [AVAILABLE_PYTHON_EXECUTABLE, + code], + args=[arg, 'test']).rstrip(), + arg) + def test_quotes(self): ''' cmd.run with quoted command From 1b8729b3e786107f8dc95740420eb97189e4d237 Mon Sep 17 00:00:00 2001 From: SuperPommeDeTerre Date: Thu, 17 Aug 2017 11:56:23 +0200 Subject: [PATCH 076/639] Fix for #26995 --- salt/modules/artifactory.py | 77 +++++++++++++++++++------------------ 1 file changed, 40 insertions(+), 37 deletions(-) diff --git a/salt/modules/artifactory.py b/salt/modules/artifactory.py index d521e786f3..169ffaff8b 100644 --- a/salt/modules/artifactory.py +++ b/salt/modules/artifactory.py @@ -202,45 +202,48 @@ def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, versio has_classifier = classifier is not None and classifier != "" if snapshot_version is None: - snapshot_version_metadata = _get_snapshot_version_metadata(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, headers=headers) + try: + snapshot_version_metadata = _get_snapshot_version_metadata(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, headers=headers) + if packaging not in snapshot_version_metadata['snapshot_versions']: + error_message = '''Cannot find requested packaging '{packaging}' in the snapshot version metadata. 
+ artifactory_url: {artifactory_url} + repository: {repository} + group_id: {group_id} + artifact_id: {artifact_id} + packaging: {packaging} + classifier: {classifier} + version: {version}'''.format( + artifactory_url=artifactory_url, + repository=repository, + group_id=group_id, + artifact_id=artifact_id, + packaging=packaging, + classifier=classifier, + version=version) + raise ArtifactoryError(error_message) - if packaging not in snapshot_version_metadata['snapshot_versions']: - error_message = '''Cannot find requested packaging '{packaging}' in the snapshot version metadata. - artifactory_url: {artifactory_url} - repository: {repository} - group_id: {group_id} - artifact_id: {artifact_id} - packaging: {packaging} - classifier: {classifier} - version: {version}'''.format( - artifactory_url=artifactory_url, - repository=repository, - group_id=group_id, - artifact_id=artifact_id, - packaging=packaging, - classifier=classifier, - version=version) - raise ArtifactoryError(error_message) + if has_classifier and classifier not in snapshot_version_metadata['snapshot_versions']: + error_message = '''Cannot find requested classifier '{classifier}' in the snapshot version metadata. + artifactory_url: {artifactory_url} + repository: {repository} + group_id: {group_id} + artifact_id: {artifact_id} + packaging: {packaging} + classifier: {classifier} + version: {version}'''.format( + artifactory_url=artifactory_url, + repository=repository, + group_id=group_id, + artifact_id=artifact_id, + packaging=packaging, + classifier=classifier, + version=version) + raise ArtifactoryError(error_message) - if has_classifier and classifier not in snapshot_version_metadata['snapshot_versions']: - error_message = '''Cannot find requested classifier '{classifier}' in the snapshot version metadata. 
- artifactory_url: {artifactory_url} - repository: {repository} - group_id: {group_id} - artifact_id: {artifact_id} - packaging: {packaging} - classifier: {classifier} - version: {version}'''.format( - artifactory_url=artifactory_url, - repository=repository, - group_id=group_id, - artifact_id=artifact_id, - packaging=packaging, - classifier=classifier, - version=version) - raise ArtifactoryError(error_message) - - snapshot_version = snapshot_version_metadata['snapshot_versions'][packaging] + snapshot_version = snapshot_version_metadata['snapshot_versions'][packaging] + except CommandExecutionError as err: + log.error('Could not fetch maven-metadat.xml. Assuming snapshot_version=%s.', version) + snapshot_version = version group_url = __get_group_id_subpath(group_id) From 0b666e100bda9b71e39a193e609f302f43918d75 Mon Sep 17 00:00:00 2001 From: SuperPommeDeTerre Date: Thu, 17 Aug 2017 13:28:10 +0200 Subject: [PATCH 077/639] Fix typo. --- salt/modules/artifactory.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/artifactory.py b/salt/modules/artifactory.py index 169ffaff8b..26065a8d37 100644 --- a/salt/modules/artifactory.py +++ b/salt/modules/artifactory.py @@ -242,7 +242,7 @@ def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, versio snapshot_version = snapshot_version_metadata['snapshot_versions'][packaging] except CommandExecutionError as err: - log.error('Could not fetch maven-metadat.xml. Assuming snapshot_version=%s.', version) + log.error('Could not fetch maven-metadata.xml. 
Assuming snapshot_version=%s.', version) snapshot_version = version group_url = __get_group_id_subpath(group_id) From 1e8587f56fa92947a71c5b6be44473ef2995fc7d Mon Sep 17 00:00:00 2001 From: Lukas Raska Date: Thu, 17 Aug 2017 18:08:02 +0200 Subject: [PATCH 078/639] Use six.string_types instead of str for Py2/3 compatibility --- salt/modules/cmdmod.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index a311e10e3c..d961b8490c 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -2452,7 +2452,7 @@ def exec_code_all(lang, code, cwd=None, args=None, **kwargs): else: cmd = [lang, codefile] - if isinstance(args, str): + if isinstance(args, six.string_types): cmd.append(args) elif isinstance(args, list): cmd += args From 5c4a5ca59a3131703f5e2633d640573369081bb9 Mon Sep 17 00:00:00 2001 From: Adam Mendlik Date: Tue, 15 Aug 2017 15:11:02 -0600 Subject: [PATCH 079/639] Add kernelpkg.remove function for YUM-based systems --- salt/modules/kernelpkg_linux_yum.py | 59 ++++++++++++++++++- tests/unit/modules/test_kernelpkg.py | 23 ++++++++ .../unit/modules/test_kernelpkg_linux_yum.py | 32 +++++++++- 3 files changed, 110 insertions(+), 4 deletions(-) diff --git a/salt/modules/kernelpkg_linux_yum.py b/salt/modules/kernelpkg_linux_yum.py index b0fabe516b..4ddddc2c18 100644 --- a/salt/modules/kernelpkg_linux_yum.py +++ b/salt/modules/kernelpkg_linux_yum.py @@ -6,11 +6,13 @@ from __future__ import absolute_import import functools import logging -# Import Salt libs -from salt.ext import six - try: + # Import Salt libs + from salt.ext import six from salt.utils.versions import LooseVersion as _LooseVersion + from salt.exceptions import CommandExecutionError + import salt.utils.systemd + import salt.modules.yumpkg HAS_REQUIRED_LIBS = True except ImportError: HAS_REQUIRED_LIBS = False @@ -20,6 +22,9 @@ log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'kernelpkg' +# Import 
functions from yumpkg +_yum = salt.utils.namespaced_function(salt.modules.yumpkg._yum, globals()) + def __virtual__(): ''' @@ -186,6 +191,54 @@ def upgrade_available(): return _LooseVersion(latest_available()) > _LooseVersion(latest_installed()) +def remove(release): + ''' + Remove a specific version of the kernel. + + release + The release number of an installed kernel. This must be the entire release + number as returned by :py:func:`~salt.modules.kernelpkg.list_installed`, + not the package name. + ''' + if release not in list_installed(): + raise CommandExecutionError('Kernel release \'{0}\' is not installed'.format(release)) + + if release == active(): + raise CommandExecutionError('Active kernel cannot be removed') + + target = '{0}-{1}'.format(_package_name(), release) + log.info('Removing kernel package {0}'.format(target)) + old = __salt__['pkg.list_pkgs']() + + # Build the command string + cmd = [] + if salt.utils.systemd.has_scope(__context__) \ + and __salt__['config.get']('systemd.scope', True): + cmd.extend(['systemd-run', '--scope']) + cmd.extend([_yum(), '-y', 'remove', target]) + + # Execute the command + out = __salt__['cmd.run_all']( + cmd, + output_loglevel='trace', + python_shell=False + ) + + # Look for the changes in installed packages + __context__.pop('pkg.list_pkgs', None) + new = __salt__['pkg.list_pkgs']() + ret = salt.utils.compare_dicts(old, new) + + # Look for command execution errors + if out['retcode'] != 0: + raise CommandExecutionError( + 'Error occurred removing package(s)', + info={'errors': [out['stderr']], 'changes': ret} + ) + + return {'removed': [target]} + + def _package_name(): ''' Return static string for the package name diff --git a/tests/unit/modules/test_kernelpkg.py b/tests/unit/modules/test_kernelpkg.py index 1dc2331582..2d30926096 100644 --- a/tests/unit/modules/test_kernelpkg.py +++ b/tests/unit/modules/test_kernelpkg.py @@ -10,6 +10,7 @@ from __future__ import absolute_import # Salt testing libs try: from 
tests.support.mock import MagicMock, patch + from salt.exceptions import CommandExecutionError except ImportError: pass @@ -171,3 +172,25 @@ class KernelPkgTestCase(object): with patch.object(self._kernelpkg, 'latest_available', return_value=self.KERNEL_LIST[0]): with patch.object(self._kernelpkg, 'latest_installed', return_value=self.KERNEL_LIST[-1]): self.assertFalse(self._kernelpkg.upgrade_available()) + + def test_remove_active(self): + ''' + Test - remove kernel package + ''' + mock = MagicMock(return_value={'retcode': 0, 'stderr': []}) + with patch.dict(self._kernelpkg.__salt__, {'cmd.run_all': mock}): + with patch.object(self._kernelpkg, 'active', return_value=self.KERNEL_LIST[-1]): + with patch.object(self._kernelpkg, 'list_installed', return_value=self.KERNEL_LIST): + self.assertRaises(CommandExecutionError, self._kernelpkg.remove, release=self.KERNEL_LIST[-1]) + self._kernelpkg.__salt__['cmd.run_all'].assert_not_called() + + def test_remove_invalid(self): + ''' + Test - remove kernel package + ''' + mock = MagicMock(return_value={'retcode': 0, 'stderr': []}) + with patch.dict(self._kernelpkg.__salt__, {'cmd.run_all': mock}): + with patch.object(self._kernelpkg, 'active', return_value=self.KERNEL_LIST[-1]): + with patch.object(self._kernelpkg, 'list_installed', return_value=self.KERNEL_LIST): + self.assertRaises(CommandExecutionError, self._kernelpkg.remove, release='invalid') + self._kernelpkg.__salt__['cmd.run_all'].assert_not_called() diff --git a/tests/unit/modules/test_kernelpkg_linux_yum.py b/tests/unit/modules/test_kernelpkg_linux_yum.py index 9674c049bf..adc8f79431 100644 --- a/tests/unit/modules/test_kernelpkg_linux_yum.py +++ b/tests/unit/modules/test_kernelpkg_linux_yum.py @@ -19,6 +19,7 @@ try: from tests.unit.modules.test_kernelpkg import KernelPkgTestCase import salt.modules.kernelpkg_linux_yum as kernelpkg import salt.modules.yumpkg as pkg + from salt.exceptions import CommandExecutionError HAS_MODULES = True except ImportError: HAS_MODULES 
= False @@ -32,18 +33,22 @@ class YumKernelPkgTestCase(KernelPkgTestCase, TestCase, LoaderModuleMockMixin): KERNEL_LIST = ['3.10.0-327.el7', '3.11.0-327.el7', '4.9.1-100.el7'] LATEST = KERNEL_LIST[-1] OS_ARCH = 'x86_64' + OS_NAME = 'RedHat' def setup_loader_modules(self): return { kernelpkg: { '__grains__': { + 'os': self.OS_NAME, 'kernelrelease': '{0}.{1}'.format(self.KERNEL_LIST[0], self.OS_ARCH) }, '__salt__': { 'pkg.normalize_name': pkg.normalize_name, 'pkg.upgrade': MagicMock(return_value={}), + 'pkg.list_pkgs': MagicMock(return_value={}), 'pkg.version': MagicMock(return_value=self.KERNEL_LIST), - 'system.reboot': MagicMock(return_value=None) + 'system.reboot': MagicMock(return_value=None), + 'config.get': MagicMock(return_value=True) } }, pkg: { @@ -68,3 +73,28 @@ class YumKernelPkgTestCase(KernelPkgTestCase, TestCase, LoaderModuleMockMixin): mock = MagicMock(return_value=None) with patch.dict(self._kernelpkg.__salt__, {'pkg.version': mock}): self.assertListEqual(self._kernelpkg.list_installed(), []) + + def test_remove_success(self): + ''' + Test - remove kernel package + ''' + mock = MagicMock(return_value={'retcode': 0, 'stderr': []}) + with patch.dict(self._kernelpkg.__salt__, {'cmd.run_all': mock}): + with patch.object(self._kernelpkg, 'active', return_value=self.KERNEL_LIST[-1]): + with patch.object(self._kernelpkg, 'list_installed', return_value=self.KERNEL_LIST): + result = self._kernelpkg.remove(release=self.KERNEL_LIST[0]) + self._kernelpkg.__salt__['cmd.run_all'].assert_called_once() + self.assertIn('removed', result) + target = '{0}-{1}'.format(self._kernelpkg._package_name(), self.KERNEL_LIST[0]) + self.assertListEqual(result['removed'], [target]) + + def test_remove_error(self): + ''' + Test - remove kernel package + ''' + mock = MagicMock(return_value={'retcode': -1, 'stderr': []}) + with patch.dict(self._kernelpkg.__salt__, {'cmd.run_all': mock}): + with patch.object(self._kernelpkg, 'active', return_value=self.KERNEL_LIST[-1]): + with 
patch.object(self._kernelpkg, 'list_installed', return_value=self.KERNEL_LIST): + self.assertRaises(CommandExecutionError, self._kernelpkg.remove, release=self.KERNEL_LIST[0]) + self._kernelpkg.__salt__['cmd.run_all'].assert_called_once() From 8f575ade60707e44af356a4eb3096f3fa90969d2 Mon Sep 17 00:00:00 2001 From: Adam Mendlik Date: Wed, 16 Aug 2017 08:57:53 -0600 Subject: [PATCH 080/639] Add kernelpkg.remove function for APT-based systems --- salt/modules/kernelpkg_linux_apt.py | 29 +++++++++++++++++-- .../unit/modules/test_kernelpkg_linux_apt.py | 25 ++++++++++++++++ 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/salt/modules/kernelpkg_linux_apt.py b/salt/modules/kernelpkg_linux_apt.py index 50182232f7..53412bc8b1 100644 --- a/salt/modules/kernelpkg_linux_apt.py +++ b/salt/modules/kernelpkg_linux_apt.py @@ -7,12 +7,12 @@ import functools import logging import re -# Import Salt libs -from salt.ext import six - try: + # Import Salt libs + from salt.ext import six from salt.utils.versions import LooseVersion as _LooseVersion from salt.ext.six.moves import filter # pylint: disable=import-error,redefined-builtin + from salt.exceptions import CommandExecutionError HAS_REQUIRED_LIBS = True except ImportError: HAS_REQUIRED_LIBS = False @@ -200,6 +200,29 @@ def upgrade_available(): return _LooseVersion(latest_available()) > _LooseVersion(latest_installed()) +def remove(release): + ''' + Remove a specific version of the kernel. + + release + The release number of an installed kernel. This must be the entire release + number as returned by :py:func:`~salt.modules.kernelpkg.list_installed`, + not the package name. 
+ ''' + if release not in list_installed(): + raise CommandExecutionError('Kernel release \'{0}\' is not installed'.format(release)) + + if release == active(): + raise CommandExecutionError('Active kernel cannot be removed') + + target = '{0}-{1}'.format(_package_prefix(), release) + log.info('Removing kernel package {0}'.format(target)) + + __salt__['pkg.purge'](target) + + return {'removed': [target]} + + def _package_prefix(): ''' Return static string for the package prefix diff --git a/tests/unit/modules/test_kernelpkg_linux_apt.py b/tests/unit/modules/test_kernelpkg_linux_apt.py index 38488f4e90..12e3efc2a5 100644 --- a/tests/unit/modules/test_kernelpkg_linux_apt.py +++ b/tests/unit/modules/test_kernelpkg_linux_apt.py @@ -19,6 +19,7 @@ try: # Import Salt Libs from tests.unit.modules.test_kernelpkg import KernelPkgTestCase import salt.modules.kernelpkg_linux_apt as kernelpkg + from salt.exceptions import CommandExecutionError HAS_MODULES = True except ImportError: HAS_MODULES = False @@ -51,6 +52,7 @@ class AptKernelPkgTestCase(KernelPkgTestCase, TestCase, LoaderModuleMockMixin): 'pkg.install': MagicMock(return_value={}), 'pkg.latest_version': MagicMock(return_value=self.LATEST), 'pkg.list_pkgs': MagicMock(return_value=self.PACKAGE_DICT), + 'pkg.purge': MagicMock(return_value=None), 'system.reboot': MagicMock(return_value=None) } } @@ -73,3 +75,26 @@ class AptKernelPkgTestCase(KernelPkgTestCase, TestCase, LoaderModuleMockMixin): mock = MagicMock(return_value=None) with patch.dict(self._kernelpkg.__salt__, {'pkg.list_pkgs': mock}): self.assertListEqual(self._kernelpkg.list_installed(), []) + + def test_remove_success(self): + ''' + Test - remove kernel package + ''' + with patch.object(self._kernelpkg, 'active', return_value=self.KERNEL_LIST[-1]): + with patch.object(self._kernelpkg, 'list_installed', return_value=self.KERNEL_LIST): + result = self._kernelpkg.remove(release=self.KERNEL_LIST[0]) + self._kernelpkg.__salt__['pkg.purge'].assert_called_once() + 
self.assertIn('removed', result) + target = '{0}-{1}'.format(self._kernelpkg._package_prefix(), self.KERNEL_LIST[0]) + self.assertListEqual(result['removed'], [target]) + + def test_remove_error(self): + ''' + Test - remove kernel package + ''' + mock = MagicMock(side_effect=CommandExecutionError()) + with patch.dict(self._kernelpkg.__salt__, {'pkg.purge': mock}): + with patch.object(self._kernelpkg, 'active', return_value=self.KERNEL_LIST[-1]): + with patch.object(self._kernelpkg, 'list_installed', return_value=self.KERNEL_LIST): + self.assertRaises(CommandExecutionError, self._kernelpkg.remove, release=self.KERNEL_LIST[0]) + self._kernelpkg.__salt__['pkg.purge'].assert_called_once() From f87fe54b7b7dea664432d0a3263b3625db9030b7 Mon Sep 17 00:00:00 2001 From: Adam Mendlik Date: Wed, 16 Aug 2017 09:44:15 -0600 Subject: [PATCH 081/639] Fix linter errors for kernelpkg modules and states --- salt/modules/kernelpkg_linux_apt.py | 8 ++++---- salt/modules/kernelpkg_linux_yum.py | 10 +++++----- salt/states/kernelpkg.py | 6 ++++-- tests/unit/modules/test_kernelpkg.py | 4 +++- tests/unit/modules/test_kernelpkg_linux_apt.py | 10 +++++++--- tests/unit/modules/test_kernelpkg_linux_yum.py | 6 +++++- tests/unit/states/test_kernelpkg.py | 1 + 7 files changed, 29 insertions(+), 16 deletions(-) diff --git a/salt/modules/kernelpkg_linux_apt.py b/salt/modules/kernelpkg_linux_apt.py index 53412bc8b1..7b9586386e 100644 --- a/salt/modules/kernelpkg_linux_apt.py +++ b/salt/modules/kernelpkg_linux_apt.py @@ -241,11 +241,11 @@ def _cmp_version(item1, item2): ''' Compare function for package version sorting ''' - v1 = _LooseVersion(item1) - v2 = _LooseVersion(item2) + vers1 = _LooseVersion(item1) + vers2 = _LooseVersion(item2) - if v1 < v2: + if vers1 < vers2: return -1 - if v1 > v2: + if vers1 > vers2: return 1 return 0 diff --git a/salt/modules/kernelpkg_linux_yum.py b/salt/modules/kernelpkg_linux_yum.py index 4ddddc2c18..fb3696aad0 100644 --- a/salt/modules/kernelpkg_linux_yum.py +++ 
b/salt/modules/kernelpkg_linux_yum.py @@ -23,7 +23,7 @@ log = logging.getLogger(__name__) __virtualname__ = 'kernelpkg' # Import functions from yumpkg -_yum = salt.utils.namespaced_function(salt.modules.yumpkg._yum, globals()) +_yum = salt.utils.namespaced_function(salt.modules.yumpkg._yum, globals()) # pylint: disable=invalid-name, protected-access def __virtual__(): @@ -250,11 +250,11 @@ def _cmp_version(item1, item2): ''' Compare function for package version sorting ''' - v1 = _LooseVersion(item1) - v2 = _LooseVersion(item2) + vers1 = _LooseVersion(item1) + vers2 = _LooseVersion(item2) - if v1 < v2: + if vers1 < vers2: return -1 - if v1 > v2: + if vers1 > vers2: return 1 return 0 diff --git a/salt/states/kernelpkg.py b/salt/states/kernelpkg.py index 252c1c084e..f9ceb2a0e7 100644 --- a/salt/states/kernelpkg.py +++ b/salt/states/kernelpkg.py @@ -114,7 +114,8 @@ def latest_active(name, at_time=None, **kwargs): # pylint: disable=unused-argum state will reboot the system. See :mod:`kernelpkg.upgrade ` and - :mod:`kernelpkg.latest_installed ` for ways to install new kernel packages. + :mod:`kernelpkg.latest_installed ` + for ways to install new kernel packages. This module does not attempt to understand or manage boot loader configurations it is possible to have a new kernel installed, but a boot loader configuration @@ -122,7 +123,8 @@ def latest_active(name, at_time=None, **kwargs): # pylint: disable=unused-argum schedule this state to run automatically. Because this state function may cause the system to reboot, it may be preferable - to move it to the very end of the state run. See :mod:`kernelpkg.latest_wait ` + to move it to the very end of the state run. + See :mod:`kernelpkg.latest_wait ` for a waitable state that can be called with the `listen` requesite. 
name diff --git a/tests/unit/modules/test_kernelpkg.py b/tests/unit/modules/test_kernelpkg.py index 2d30926096..52feae061d 100644 --- a/tests/unit/modules/test_kernelpkg.py +++ b/tests/unit/modules/test_kernelpkg.py @@ -5,6 +5,8 @@ :maturity: develop versionadded:: oxygen ''' +# pylint: disable=invalid-name,no-member + from __future__ import absolute_import # Salt testing libs @@ -17,7 +19,7 @@ except ImportError: class KernelPkgTestCase(object): ''' - Test cases for salt.modules.yumkernelpkg + Test cases shared by all kernelpkg virtual modules ''' def test_active(self): diff --git a/tests/unit/modules/test_kernelpkg_linux_apt.py b/tests/unit/modules/test_kernelpkg_linux_apt.py index 12e3efc2a5..a24ba8d245 100644 --- a/tests/unit/modules/test_kernelpkg_linux_apt.py +++ b/tests/unit/modules/test_kernelpkg_linux_apt.py @@ -5,6 +5,7 @@ :maturity: develop versionadded:: oxygen ''' +# pylint: disable=invalid-name,no-member # Import Python Libs from __future__ import absolute_import @@ -28,6 +29,9 @@ except ImportError: @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(not HAS_MODULES, 'Salt modules could not be loaded') class AptKernelPkgTestCase(KernelPkgTestCase, TestCase, LoaderModuleMockMixin): + ''' + Test cases for salt.modules.kernelpkg_linux_apt + ''' _kernelpkg = kernelpkg KERNEL_LIST = ['4.4.0-70-generic', '4.4.0-71-generic', '4.5.1-14-generic'] @@ -39,7 +43,7 @@ class AptKernelPkgTestCase(KernelPkgTestCase, TestCase, LoaderModuleMockMixin): cls.LATEST = '{0}.{1}'.format(version.group(1), version.group(2)) for kernel in cls.KERNEL_LIST: - pkg = '{0}-{1}'.format(kernelpkg._package_prefix(), kernel) + pkg = '{0}-{1}'.format(kernelpkg._package_prefix(), kernel) # pylint: disable=protected-access cls.PACKAGE_DICT[pkg] = pkg def setup_loader_modules(self): @@ -62,7 +66,7 @@ class AptKernelPkgTestCase(KernelPkgTestCase, TestCase, LoaderModuleMockMixin): ''' Test - Return return the latest installed kernel version ''' - PACKAGE_LIST = 
['{0}-{1}'.format(kernelpkg._package_prefix(), kernel) for kernel in self.KERNEL_LIST] + PACKAGE_LIST = ['{0}-{1}'.format(kernelpkg._package_prefix(), kernel) for kernel in self.KERNEL_LIST] # pylint: disable=protected-access mock = MagicMock(return_value=PACKAGE_LIST) with patch.dict(self._kernelpkg.__salt__, {'pkg.list_pkgs': mock}): @@ -85,7 +89,7 @@ class AptKernelPkgTestCase(KernelPkgTestCase, TestCase, LoaderModuleMockMixin): result = self._kernelpkg.remove(release=self.KERNEL_LIST[0]) self._kernelpkg.__salt__['pkg.purge'].assert_called_once() self.assertIn('removed', result) - target = '{0}-{1}'.format(self._kernelpkg._package_prefix(), self.KERNEL_LIST[0]) + target = '{0}-{1}'.format(self._kernelpkg._package_prefix(), self.KERNEL_LIST[0]) # pylint: disable=protected-access self.assertListEqual(result['removed'], [target]) def test_remove_error(self): diff --git a/tests/unit/modules/test_kernelpkg_linux_yum.py b/tests/unit/modules/test_kernelpkg_linux_yum.py index adc8f79431..946689c619 100644 --- a/tests/unit/modules/test_kernelpkg_linux_yum.py +++ b/tests/unit/modules/test_kernelpkg_linux_yum.py @@ -5,6 +5,7 @@ :maturity: develop versionadded:: oxygen ''' +# pylint: disable=invalid-name,no-member # Import Python Libs from __future__ import absolute_import @@ -28,6 +29,9 @@ except ImportError: @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(not HAS_MODULES, 'Salt modules could not be loaded') class YumKernelPkgTestCase(KernelPkgTestCase, TestCase, LoaderModuleMockMixin): + ''' + Test cases for salt.modules.kernelpkg_linux_yum + ''' _kernelpkg = kernelpkg KERNEL_LIST = ['3.10.0-327.el7', '3.11.0-327.el7', '4.9.1-100.el7'] @@ -85,7 +89,7 @@ class YumKernelPkgTestCase(KernelPkgTestCase, TestCase, LoaderModuleMockMixin): result = self._kernelpkg.remove(release=self.KERNEL_LIST[0]) self._kernelpkg.__salt__['cmd.run_all'].assert_called_once() self.assertIn('removed', result) - target = '{0}-{1}'.format(self._kernelpkg._package_name(), self.KERNEL_LIST[0]) + target = 
'{0}-{1}'.format(self._kernelpkg._package_name(), self.KERNEL_LIST[0]) # pylint: disable=protected-access self.assertListEqual(result['removed'], [target]) def test_remove_error(self): diff --git a/tests/unit/states/test_kernelpkg.py b/tests/unit/states/test_kernelpkg.py index c6eff8ed63..dec4b5a9d7 100644 --- a/tests/unit/states/test_kernelpkg.py +++ b/tests/unit/states/test_kernelpkg.py @@ -5,6 +5,7 @@ :maturity: develop versionadded:: oxygen ''' +# pylint: disable=invalid-name,no-member # Import Python libs from __future__ import absolute_import From 9f5682f07613baa1f29166ed9991dd25485cb569 Mon Sep 17 00:00:00 2001 From: Adam Mendlik Date: Wed, 16 Aug 2017 10:00:48 -0600 Subject: [PATCH 082/639] Add kernelpkg.cleanup functions --- salt/modules/kernelpkg_linux_apt.py | 28 ++++++++++++++++++++++++++++ salt/modules/kernelpkg_linux_yum.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/salt/modules/kernelpkg_linux_apt.py b/salt/modules/kernelpkg_linux_apt.py index 7b9586386e..fa9f5ac378 100644 --- a/salt/modules/kernelpkg_linux_apt.py +++ b/salt/modules/kernelpkg_linux_apt.py @@ -223,6 +223,34 @@ def remove(release): return {'removed': [target]} +def cleanup(keep_latest=True): + ''' + Remove all unused kernel packages from the system. + + keep_latest : True + In the event that the active kernel is not the latest one installed, setting this to True + will retain the latest kernel package, in addition to the active one. If False, all kernel + packages other than the active one will be removed. 
+ ''' + removed = [] + + # Loop over all installed kernel packages + for kernel in list_installed(): + + # Keep the active kernel package + if kernel == active(): + continue + + # Optionally keep the latest kernel package + if keep_latest and kernel == latest_installed(): + continue + + # Remove the kernel package + removed.extend(remove(kernel)['removed']) + + return {'removed': removed} + + def _package_prefix(): ''' Return static string for the package prefix diff --git a/salt/modules/kernelpkg_linux_yum.py b/salt/modules/kernelpkg_linux_yum.py index fb3696aad0..973801aeb1 100644 --- a/salt/modules/kernelpkg_linux_yum.py +++ b/salt/modules/kernelpkg_linux_yum.py @@ -239,6 +239,34 @@ def remove(release): return {'removed': [target]} +def cleanup(keep_latest=True): + ''' + Remove all unused kernel packages from the system. + + keep_latest : True + In the event that the active kernel is not the latest one installed, setting this to True + will retain the latest kernel package, in addition to the active one. If False, all kernel + packages other than the active one will be removed. 
+ ''' + removed = [] + + # Loop over all installed kernel packages + for kernel in list_installed(): + + # Keep the active kernel package + if kernel == active(): + continue + + # Optionally keep the latest kernel package + if keep_latest and kernel == latest_installed(): + continue + + # Remove the kernel package + removed.extend(remove(kernel)['removed']) + + return {'removed': removed} + + def _package_name(): ''' Return static string for the package name From 898d69b1e1d48cb460f84a47fc1faaf4bf61ecda Mon Sep 17 00:00:00 2001 From: Adam Mendlik Date: Wed, 16 Aug 2017 17:29:07 -0600 Subject: [PATCH 083/639] Update kernelpkg documentation --- doc/ref/modules/all/salt.modules.kernelpkg.rst | 4 ++-- salt/modules/kernelpkg_linux_apt.py | 18 +++++++++++++++--- salt/modules/kernelpkg_linux_yum.py | 18 +++++++++++++++--- salt/states/kernelpkg.py | 14 +++++++------- 4 files changed, 39 insertions(+), 15 deletions(-) diff --git a/doc/ref/modules/all/salt.modules.kernelpkg.rst b/doc/ref/modules/all/salt.modules.kernelpkg.rst index 6eada67428..9b4d8b2beb 100644 --- a/doc/ref/modules/all/salt.modules.kernelpkg.rst +++ b/doc/ref/modules/all/salt.modules.kernelpkg.rst @@ -13,7 +13,7 @@ salt.modules.kernelpkg Execution Module Used for ============================================ ======================================== :py:mod:`~salt.modules.kernelpkg_linux_apt` Debian/Ubuntu-based distros which use - ``apt-get(8)`` for package management + ``apt-get`` for package management :py:mod:`~salt.modules.kernelpkg_linux_yum` RedHat-based distros and derivatives - using ``yum(8)`` or ``dnf(8)`` + using ``yum`` or ``dnf`` ============================================ ======================================== diff --git a/salt/modules/kernelpkg_linux_apt.py b/salt/modules/kernelpkg_linux_apt.py index fa9f5ac378..34512a5f5c 100644 --- a/salt/modules/kernelpkg_linux_apt.py +++ b/salt/modules/kernelpkg_linux_apt.py @@ -116,9 +116,9 @@ def latest_installed(): .. 
note:: This function may not return the same value as - :py:func:`~salt.modules.kernelpkg.active` if a new kernel + :py:func:`~salt.modules.kernelpkg_linux_apt.active` if a new kernel has been installed and the system has not yet been rebooted. - The :py:func:`~salt.modules.kernelpkg.needs_reboot` function + The :py:func:`~salt.modules.kernelpkg_linux_apt.needs_reboot` function exists to detect this condition. ''' pkgs = list_installed() @@ -206,8 +206,14 @@ def remove(release): release The release number of an installed kernel. This must be the entire release - number as returned by :py:func:`~salt.modules.kernelpkg.list_installed`, + number as returned by :py:func:`~salt.modules.kernelpkg_linux_apt.list_installed`, not the package name. + + CLI Example: + + .. code-block:: bash + + salt '*' kernelpkg.remove 4.4.0-70-generic ''' if release not in list_installed(): raise CommandExecutionError('Kernel release \'{0}\' is not installed'.format(release)) @@ -231,6 +237,12 @@ def cleanup(keep_latest=True): In the event that the active kernel is not the latest one installed, setting this to True will retain the latest kernel package, in addition to the active one. If False, all kernel packages other than the active one will be removed. + + CLI Example: + + .. code-block:: bash + + salt '*' kernelpkg.cleanup ''' removed = [] diff --git a/salt/modules/kernelpkg_linux_yum.py b/salt/modules/kernelpkg_linux_yum.py index 973801aeb1..39110e5a16 100644 --- a/salt/modules/kernelpkg_linux_yum.py +++ b/salt/modules/kernelpkg_linux_yum.py @@ -108,9 +108,9 @@ def latest_installed(): .. note:: This function may not return the same value as - :py:func:`~salt.modules.kernelpkg.active` if a new kernel + :py:func:`~salt.modules.kernelpkg_linux_yum.active` if a new kernel has been installed and the system has not yet been rebooted. 
- The :py:func:`~salt.modules.kernelpkg.needs_reboot` function + The :py:func:`~salt.modules.kernelpkg_linux_yum.needs_reboot` function exists to detect this condition. ''' pkgs = list_installed() @@ -197,8 +197,14 @@ def remove(release): release The release number of an installed kernel. This must be the entire release - number as returned by :py:func:`~salt.modules.kernelpkg.list_installed`, + number as returned by :py:func:`~salt.modules.kernelpkg_linux_yum.list_installed`, not the package name. + + CLI Example: + + .. code-block:: bash + + salt '*' kernelpkg.remove 3.10.0-327.el7 ''' if release not in list_installed(): raise CommandExecutionError('Kernel release \'{0}\' is not installed'.format(release)) @@ -247,6 +253,12 @@ def cleanup(keep_latest=True): In the event that the active kernel is not the latest one installed, setting this to True will retain the latest kernel package, in addition to the active one. If False, all kernel packages other than the active one will be removed. + + CLI Example: + + .. code-block:: bash + + salt '*' kernelpkg.cleanup ''' removed = [] diff --git a/salt/states/kernelpkg.py b/salt/states/kernelpkg.py index f9ceb2a0e7..2e44ec630b 100644 --- a/salt/states/kernelpkg.py +++ b/salt/states/kernelpkg.py @@ -52,7 +52,7 @@ log = logging.getLogger(__name__) def __virtual__(): ''' - Only make these states available if a pkg provider has been detected or + Only make these states available if a kernelpkg provider has been detected or assigned for this minion ''' return 'kernelpkg.upgrade' in __salt__ @@ -67,8 +67,8 @@ def latest_installed(name, **kwargs): # pylint: disable=unused-argument This state only installs the kernel, but does not activate it. The new kernel should become active at the next reboot. 
- See :mod:`kernelpkg.needs_reboot ` for details on - how to detect this condition, :mod:`kernelpkg.latest_active ` + See :py:func:`kernelpkg.needs_reboot ` for details on + how to detect this condition, and :py:func:`~salt.states.kernelpkg.latest_active` to initiale a reboot when needed. name @@ -113,8 +113,8 @@ def latest_active(name, at_time=None, **kwargs): # pylint: disable=unused-argum system. If the running version is not the latest one installed, this state will reboot the system. - See :mod:`kernelpkg.upgrade ` and - :mod:`kernelpkg.latest_installed ` + See :py:func:`kernelpkg.upgrade ` and + :py:func:`~salt.states.kernelpkg.latest_installed` for ways to install new kernel packages. This module does not attempt to understand or manage boot loader configurations @@ -124,7 +124,7 @@ def latest_active(name, at_time=None, **kwargs): # pylint: disable=unused-argum Because this state function may cause the system to reboot, it may be preferable to move it to the very end of the state run. - See :mod:`kernelpkg.latest_wait ` + See :py:func:`~salt.states.kernelpkg.latest_wait` for a waitable state that can be called with the `listen` requesite. name @@ -170,7 +170,7 @@ def latest_active(name, at_time=None, **kwargs): # pylint: disable=unused-argum def latest_wait(name, at_time=None, **kwargs): # pylint: disable=unused-argument ''' Initiate a reboot if the running kernel is not the latest one installed. This is the - waitable version of :mod:`kernelpkg.latest_active ` and + waitable version of :py:func:`~salt.states.kernelpkg.latest_active` and will not take any action unless triggered by a watch or listen requesite. .. 
note:: From 7f5ee55f5746e256e013fe351e59d8d976cf2855 Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 17 Aug 2017 14:20:25 -0600 Subject: [PATCH 084/639] Fix `unit.utils.test_url` for Windows Detect escaped urls in Windows Unescape urls in Windows Fix tests to deal with sanitized Windows paths --- salt/utils/url.py | 16 ++++++++-------- tests/unit/utils/test_url.py | 12 ++++++++++++ 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/salt/utils/url.py b/salt/utils/url.py index 0998ee5a0c..df6e64443b 100644 --- a/salt/utils/url.py +++ b/salt/utils/url.py @@ -60,15 +60,15 @@ def is_escaped(url): ''' test whether `url` is escaped with `|` ''' - if salt.utils.is_windows(): - return False - scheme = urlparse(url).scheme if not scheme: return url.startswith('|') elif scheme == 'salt': path, saltenv = parse(url) - return path.startswith('|') + if salt.utils.is_windows() and '|' in url: + return path.startswith('_') + else: + return path.startswith('|') else: return False @@ -100,15 +100,15 @@ def unescape(url): ''' remove escape character `|` from `url` ''' - if salt.utils.is_windows(): - return url - scheme = urlparse(url).scheme if not scheme: return url.lstrip('|') elif scheme == 'salt': path, saltenv = parse(url) - return create(path.lstrip('|'), saltenv) + if salt.utils.is_windows() and '|' in url: + return create(path.lstrip('_'), saltenv) + else: + return create(path.lstrip('|'), saltenv) else: return url diff --git a/tests/unit/utils/test_url.py b/tests/unit/utils/test_url.py index 95caa0253e..af7efba323 100644 --- a/tests/unit/utils/test_url.py +++ b/tests/unit/utils/test_url.py @@ -38,6 +38,8 @@ class UrlTestCase(TestCase): ''' path = '?funny/path with {interesting|chars}' url = 'salt://' + path + if salt.utils.is_windows(): + path = '_funny/path with {interesting_chars}' self.assertEqual(salt.utils.url.parse(url), (path, None)) @@ -48,6 +50,8 @@ class UrlTestCase(TestCase): saltenv = 'ambience' path = '?funny/path&with {interesting|chars}' url = 
'salt://' + path + '?saltenv=' + saltenv + if salt.utils.is_windows(): + path = '_funny/path&with {interesting_chars}' self.assertEqual(salt.utils.url.parse(url), (path, saltenv)) @@ -59,6 +63,8 @@ class UrlTestCase(TestCase): ''' path = '? interesting/&path.filetype' url = 'salt://' + path + if salt.utils.is_windows(): + url = 'salt://_ interesting/&path.filetype' self.assertEqual(salt.utils.url.create(path), url) @@ -68,6 +74,8 @@ class UrlTestCase(TestCase): ''' saltenv = 'raumklang' path = '? interesting/&path.filetype' + if salt.utils.is_windows(): + path = '_ interesting/&path.filetype' url = 'salt://' + path + '?saltenv=' + saltenv @@ -149,6 +157,8 @@ class UrlTestCase(TestCase): ''' path = 'dir/file.conf' escaped_path = '|' + path + if salt.utils.is_windows(): + escaped_path = path self.assertEqual(salt.utils.url.escape(path), escaped_path) @@ -167,6 +177,8 @@ class UrlTestCase(TestCase): path = 'dir/file.conf' url = 'salt://' + path escaped_url = 'salt://|' + path + if salt.utils.is_windows(): + escaped_url = url self.assertEqual(salt.utils.url.escape(url), escaped_url) From 73315f0cf03f28a8efa30c91d463e4a33fb8fe46 Mon Sep 17 00:00:00 2001 From: matt Date: Thu, 17 Aug 2017 17:06:17 -0400 Subject: [PATCH 085/639] Issue #43036 Bhyve virtual grain in Linux VMs --- salt/grains/core.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/grains/core.py b/salt/grains/core.py index 248984edf3..57937f2035 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -764,6 +764,8 @@ def _virtual(osdata): grains['virtual_subtype'] = 'ovirt' elif 'Google' in output: grains['virtual'] = 'gce' + elif 'BHYVE' in output: + grains['virtual'] = 'bhyve' except IOError: pass elif osdata['kernel'] == 'FreeBSD': From 4ec37326a9077b3a257b50e91ea239a37b691def Mon Sep 17 00:00:00 2001 From: Darren Demicoli Date: Fri, 18 Aug 2017 01:36:51 +0200 Subject: [PATCH 086/639] Do not try to match pillarenv with __env__ --- salt/pillar/git_pillar.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/salt/pillar/git_pillar.py b/salt/pillar/git_pillar.py index 55d8da6897..c7b418d567 100644 --- a/salt/pillar/git_pillar.py +++ b/salt/pillar/git_pillar.py @@ -562,7 +562,7 @@ def ext_pillar(minion_id, repo, pillar_dirs): ) for pillar_dir, env in six.iteritems(pillar.pillar_dirs): # If pillarenv is set, only grab pillars with that match pillarenv - if opts['pillarenv'] and env != opts['pillarenv']: + if opts['pillarenv'] and env != opts['pillarenv'] and env != '__env__': log.debug( 'env \'%s\' for pillar dir \'%s\' does not match ' 'pillarenv \'%s\', skipping', From 2b5af5b59df98f1f1270d68a5afa10a80aa9cff0 Mon Sep 17 00:00:00 2001 From: Steven Joseph Date: Fri, 18 Aug 2017 18:24:31 +1000 Subject: [PATCH 087/639] Remove refs/tags prefix from remote tags --- salt/states/git.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/git.py b/salt/states/git.py index f12ae29c26..d6f8daaa23 100644 --- a/salt/states/git.py +++ b/salt/states/git.py @@ -1309,7 +1309,7 @@ def latest(name, comments ) remote_tags = set([ - x.split('/')[-1] for x in __salt__['git.ls_remote']( + x.replace('refs/tags/', '') for x in __salt__['git.ls_remote']( cwd=target, remote=remote, opts="--tags", From 8048fbcfa7293ddcf190418c14d1f9595de90b05 Mon Sep 17 00:00:00 2001 From: Andrew Bulford Date: Wed, 16 Aug 2017 14:05:31 +0100 Subject: [PATCH 088/639] Remove check_duplicates from create_network call The create_network function in dockermod.py doesn't take a check_duplicates argument so passing it here causes a failure. 
Fixes #42976 --- salt/states/docker_network.py | 3 +-- tests/unit/states/test_docker_network.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/salt/states/docker_network.py b/salt/states/docker_network.py index a58d923aea..b90a36b1dc 100644 --- a/salt/states/docker_network.py +++ b/salt/states/docker_network.py @@ -144,8 +144,7 @@ def present(name, ret['changes']['created'] = __salt__['docker.create_network']( name, driver=driver, - driver_opts=driver_opts, - check_duplicate=True) + driver_opts=driver_opts) except Exception as exc: ret['comment'] = ('Failed to create network \'{0}\': {1}' diff --git a/tests/unit/states/test_docker_network.py b/tests/unit/states/test_docker_network.py index c1d885c976..84fbf3ea1a 100644 --- a/tests/unit/states/test_docker_network.py +++ b/tests/unit/states/test_docker_network.py @@ -64,8 +64,7 @@ class DockerNetworkTestCase(TestCase, LoaderModuleMockMixin): ) docker_create_network.assert_called_with('network_foo', driver=None, - driver_opts=None, - check_duplicate=True) + driver_opts=None) docker_connect_container_to_network.assert_called_with('abcd', 'network_foo') self.assertEqual(ret, {'name': 'network_foo', From ec2a8a82c8d2690806cc9793a9fec56b686a3aa2 Mon Sep 17 00:00:00 2001 From: Andrew Bulford Date: Thu, 17 Aug 2017 07:26:08 +0100 Subject: [PATCH 089/639] Add support for IPAM config in Docker networks Fixes #43047 --- salt/modules/dockermod.py | 35 +++++++++++++++++++++++- salt/states/docker_network.py | 29 +++++++++++++++++++- tests/unit/modules/test_dockermod.py | 15 +++++++++- tests/unit/states/test_docker_network.py | 8 +++++- 4 files changed, 83 insertions(+), 4 deletions(-) diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py index 0f35d34549..69838d8cf9 100644 --- a/salt/modules/dockermod.py +++ b/salt/modules/dockermod.py @@ -4034,7 +4034,10 @@ def networks(names=None, ids=None): def create_network(name, driver=None, - driver_opts=None): + driver_opts=None, + gateway=None, + 
ip_range=None, + subnet=None): ''' Create a new network @@ -4047,16 +4050,46 @@ def create_network(name, driver_opts Options for the network driver. + gateway + IPv4 or IPv6 gateway for the master subnet + + ip_range + Allocate container IP from a sub-range within the subnet + + subnet: + Subnet in CIDR format that represents a network segment + CLI Example: .. code-block:: bash salt myminion docker.create_network web_network driver=bridge + salt myminion docker.create_network macvlan_network \ + driver=macvlan \ + driver_opts="{'parent':'eth0'}" \ + gateway=172.20.0.1 \ + subnet=172.20.0.0/24 ''' + # If any settings which need to be set via the IPAM config are specified, create the IPAM config data structure + # with these values set. + if gateway or ip_range or subnet: + ipam = { + 'Config': [{ + 'Gateway': gateway, + 'IPRange': ip_range, + 'Subnet': subnet + }], + 'Driver': 'default', + 'Options': {} + } + else: + ipam = None + response = _client_wrapper('create_network', name, driver=driver, options=driver_opts, + ipam=ipam, check_duplicate=True) _clear_context() diff --git a/salt/states/docker_network.py b/salt/states/docker_network.py index b90a36b1dc..b6c5f27a3b 100644 --- a/salt/states/docker_network.py +++ b/salt/states/docker_network.py @@ -56,6 +56,9 @@ def __virtual__(): def present(name, driver=None, driver_opts=None, + gateway=None, + ip_range=None, + subnet=None, containers=None): ''' Ensure that a network is present. @@ -69,9 +72,18 @@ def present(name, driver_opts Options for the network driver. + gateway + IPv4 or IPv6 gateway for the master subnet + + ip_range + Allocate container IP from a sub-range within the subnet + containers: List of container names that should be part of this network + subnet: + Subnet in CIDR format that represents a network segment + Usage Examples: .. code-block:: yaml @@ -91,6 +103,18 @@ def present(name, - cont1 - cont2 + + .. 
code-block:: yaml + + network_baz: + docker_network.present + - name: baz + - driver_opts: + - parent: eth0 + - gateway: "172.20.0.1" + - ip_range: "172.20.0.128/25" + - subnet: "172.20.0.0/24" + ''' ret = {'name': name, 'changes': {}, @@ -144,7 +168,10 @@ def present(name, ret['changes']['created'] = __salt__['docker.create_network']( name, driver=driver, - driver_opts=driver_opts) + driver_opts=driver_opts, + gateway=gateway, + ip_range=ip_range, + subnet=subnet) except Exception as exc: ret['comment'] = ('Failed to create network \'{0}\': {1}' diff --git a/tests/unit/modules/test_dockermod.py b/tests/unit/modules/test_dockermod.py index 2746c98ddb..6a9fc28fd7 100644 --- a/tests/unit/modules/test_dockermod.py +++ b/tests/unit/modules/test_dockermod.py @@ -184,10 +184,23 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): with patch.object(docker_mod, '_get_client', get_client_mock): docker_mod.create_network('foo', driver='bridge', - driver_opts={}) + driver_opts={}, + gateway='192.168.0.1', + ip_range='192.168.0.128/25', + subnet='192.168.0.0/24' + ) client.create_network.assert_called_once_with('foo', driver='bridge', options={}, + ipam={ + 'Config': [{ + 'Gateway': '192.168.0.1', + 'IPRange': '192.168.0.128/25', + 'Subnet': '192.168.0.0/24' + }], + 'Driver': 'default', + 'Options': {} + }, check_duplicate=True) @skipIf(docker_version < (1, 5, 0), diff --git a/tests/unit/states/test_docker_network.py b/tests/unit/states/test_docker_network.py index 84fbf3ea1a..b210bf5abb 100644 --- a/tests/unit/states/test_docker_network.py +++ b/tests/unit/states/test_docker_network.py @@ -61,10 +61,16 @@ class DockerNetworkTestCase(TestCase, LoaderModuleMockMixin): ret = docker_state.present( 'network_foo', containers=['container'], + gateway='192.168.0.1', + ip_range='192.168.0.128/25', + subnet='192.168.0.0/24' ) docker_create_network.assert_called_with('network_foo', driver=None, - driver_opts=None) + driver_opts=None, + gateway='192.168.0.1', + 
ip_range='192.168.0.128/25', + subnet='192.168.0.0/24') docker_connect_container_to_network.assert_called_with('abcd', 'network_foo') self.assertEqual(ret, {'name': 'network_foo', From d59d0adfeb36a66f68c3e200707a8b1cfd93aafa Mon Sep 17 00:00:00 2001 From: Andrew Bulford Date: Fri, 18 Aug 2017 12:52:23 +0100 Subject: [PATCH 090/639] Add ability to re-configure docker networks Previously any changes to network states wouldn't result in any change to the network when the state was applied. This was less of an issue the only attributes you could set for a network were its name and driver, but with the recent support for driver options and now IPAM config being added, it's useful for the state to be able to modify the network if it needs to. The Docker API doesn't provide any facility to modify an existing network, so the only option is to recreate it with the new options, which means disconnecting and reconnecting all containers. Fixes #43047 --- salt/states/docker_network.py | 196 ++++++++++++++++++++--- tests/unit/states/test_docker_network.py | 80 +++++++++ 2 files changed, 255 insertions(+), 21 deletions(-) diff --git a/salt/states/docker_network.py b/salt/states/docker_network.py index b6c5f27a3b..a99421f6c6 100644 --- a/salt/states/docker_network.py +++ b/salt/states/docker_network.py @@ -124,10 +124,10 @@ def present(name, if salt.utils.is_dictlist(driver_opts): driver_opts = salt.utils.repack_dictlist(driver_opts) - if containers is None: - containers = [] - # map containers to container's Ids. 
- containers = [__salt__['docker.inspect_container'](c)['Id'] for c in containers] + # If any containers are specified, get details of each one, we need the Id and Name fields later + if containers is not None: + containers = [__salt__['docker.inspect_container'](c) for c in containers] + networks = __salt__['docker.networks'](names=[name]) log.trace( 'docker_network.present: current networks: {0}'.format(networks) @@ -142,24 +142,162 @@ def present(name, network = network_iter break + # We might disconnect containers in the process of recreating the network, we'll need to keep track these containers + # so we can reconnect them later. + containers_disconnected = {} + + # If the network already exists if network is not None: - if all(c in network['Containers'] for c in containers): - ret['result'] = True - ret['comment'] = 'Network \'{0}\' already exists.'.format(name) + log.debug('Network \'{0}\' already exists'.format(name)) + + # Set the comment now to say that it already exists, if we need to recreate the network with new config we'll + # update the comment later. + ret['comment'] = 'Network \'{0}\' already exists'.format(name) + + # Update network details with result from network inspect, which will contain details of any containers + # attached to the network. + network = __salt__['docker.inspect_network'](network_id=network['Id']) + + log.trace('Details of \'{0}\' network: {1}'.format(name, network)) + + # For the IPAM and driver config options which can be passed, check that if they are passed, they match the + # current configuration. 
+ original_config = {} + new_config = {} + + if driver and driver != network['Driver']: + new_config['driver'] = driver + original_config['driver'] = network['Driver'] + + if driver_opts and driver_opts != network['Options']: + new_config['driver_opts'] = driver_opts + original_config['driver_opts'] = network['Options'] + + # Multiple IPAM configs is probably not that common so for now we'll only worry about the simple case where + # there's a single IPAM config. If there's more than one (or none at all) then we'll bail out. + if len(network['IPAM']['Config']) != 1: + ret['comment'] = ('docker_network.present does only supports Docker networks with a single IPAM config,' + 'network \'{0}\' has {1}'.format(name, len(network['IPAM']['Config']))) return ret + + ipam = network['IPAM']['Config'][0] + + if gateway and gateway != ipam['Gateway']: + new_config['gateway'] = gateway + original_config['gateway'] = ipam['Gateway'] + + if subnet and subnet != ipam['Subnet']: + new_config['subnet'] = subnet + original_config['subnet'] = ipam['Subnet'] + + if ip_range: + # IPRange isn't always configured so check it's even set before attempting to compare it. + if 'IPRange' in ipam and ip_range != ipam['IPRange']: + new_config['ip_range'] = ip_range + original_config['ip_range'] = ipam['IPRange'] + elif 'IPRange' not in ipam: + new_config['ip_range'] = ip_range + original_config['ip_range'] = '' + + if new_config != original_config: + log.debug('New config is different to current;\nnew: {0}\ncurrent: {1}'.format(new_config, original_config)) + + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'Network {0} will be recreated with new config'.format(name) + return ret + + remove_result = _remove_network(name, network['Containers']) + if not remove_result['result']: + return remove_result + + # We've removed the network, so there are now no containers attached to it. 
+ if network['Containers']: + containers_disconnected = network['Containers'] + network['Containers'] = [] + + try: + __salt__['docker.create_network']( + name, + driver=driver, + driver_opts=driver_opts, + gateway=gateway, + ip_range=ip_range, + subnet=subnet) + except Exception as exc: + ret['comment'] = ('Failed to replace network \'{0}\': {1}' + .format(name, exc)) + return ret + + ret['changes']['updated'] = {name: {'old': original_config, 'new': new_config}} + ret['comment'] = 'Network \'{0}\' was replaced with updated config'.format(name) + + # Figure out the list of containers should now now be connected. + containers_to_connect = {} + # If no containers were specified in the state but we have disconnected some in the process of recreating the + # network, we should reconnect those containers. + if containers is None and containers_disconnected: + containers_to_connect = containers_disconnected + # If containers were specified in the state, regardless of what we've disconnected, we should now just connect + # the containers specified. + elif containers: + for container in containers: + containers_to_connect[container['Id']] = container + + # At this point, if all the containers we want connected are already connected to the network, we can set our + # result and finish. + if all(c in network['Containers'] for c in containers_to_connect): + ret['result'] = True + return ret + + # If we've not exited by this point it's because we have containers which we need to connect to the network. 
result = True - for container in containers: - if container not in network['Containers']: + reconnected_containers = [] + connected_containers = [] + for container_id, container in containers_to_connect.iteritems(): + if container_id not in network['Containers']: try: - ret['changes']['connected'] = __salt__['docker.connect_container_to_network']( - container, name) + connect_result = __salt__['docker.connect_container_to_network'](container_id, name) + log.trace( + 'docker.connect_container_to_network({0}, {1}) result: {2}'. + format(container, name, connect_result) + ) + # If this container was one we disconnected earlier, add it to the reconnected list. + if container_id in containers_disconnected: + reconnected_containers.append(container['Name']) + # Otherwise add it to the connected list. + else: + connected_containers.append(container['Name']) + except Exception as exc: ret['comment'] = ('Failed to connect container \'{0}\' to network \'{1}\' {2}'.format( - container, name, exc)) + container['Name'], name, exc)) result = False - ret['result'] = result + # If we populated any of our container lists then add them to our list of changes. + if connected_containers: + ret['changes']['connected'] = connected_containers + if reconnected_containers: + ret['changes']['reconnected'] = reconnected_containers + + # Figure out if we removed any containers as a result of replacing the network and then not re-connecting the + # containers, because they weren't specified in the state. 
+ disconnected_containers = [] + for container_id, container in containers_disconnected.iteritems(): + if container_id not in containers_to_connect: + disconnected_containers.append(container['Name']) + + if disconnected_containers: + ret['changes']['disconnected'] = disconnected_containers + + ret['result'] = result + + # If the network does not yet exist, we create it else: + if containers is None: + containers = {} + + log.debug('The network \'{0}\' will be created'.format(name)) if __opts__['test']: ret['result'] = None ret['comment'] = ('The network \'{0}\' will be created'.format(name)) @@ -181,10 +319,10 @@ def present(name, for container in containers: try: ret['changes']['connected'] = __salt__['docker.connect_container_to_network']( - container, name) + container['Id'], name) except Exception as exc: ret['comment'] = ('Failed to connect container \'{0}\' to network \'{1}\' {2}'.format( - container, name, exc)) + container['Id'], name, exc)) result = False ret['result'] = result return ret @@ -234,16 +372,32 @@ def absent(name, driver=None): ret['comment'] = ('The network \'{0}\' will be removed'.format(name)) return ret - for container in networks[0]['Containers']: + return _remove_network(network=name, containers=networks[0]['Containers']) + + +def _remove_network(network, containers=None): + ''' + Remove network, removing any specified containers from it beforehand + ''' + + ret = {'name': network, + 'changes': {}, + 'result': False, + 'comment': ''} + + if containers is None: + containers = [] + for container in containers: try: - ret['changes']['disconnected'] = __salt__['docker.disconnect_container_from_network'](container, name) + ret['changes']['disconnected'] = __salt__['docker.disconnect_container_from_network'](container, network) except Exception as exc: - ret['comment'] = ('Failed to disconnect container \'{0}\' to network \'{1}\' {2}'.format( - container, name, exc)) + ret['comment'] = ('Failed to disconnect container \'{0}\' from network 
\'{1}\' {2}'.format( + container, network, exc)) try: - ret['changes']['removed'] = __salt__['docker.remove_network'](name) + ret['changes']['removed'] = __salt__['docker.remove_network'](network) ret['result'] = True except Exception as exc: ret['comment'] = ('Failed to remove network \'{0}\': {1}' - .format(name, exc)) + .format(network, exc)) + return ret diff --git a/tests/unit/states/test_docker_network.py b/tests/unit/states/test_docker_network.py index b210bf5abb..959363492d 100644 --- a/tests/unit/states/test_docker_network.py +++ b/tests/unit/states/test_docker_network.py @@ -79,6 +79,86 @@ class DockerNetworkTestCase(TestCase, LoaderModuleMockMixin): 'created': 'created'}, 'result': True}) + def test_present_with_change(self): + ''' + Test docker_network.present when the specified network has properties differing from the already present network + ''' + network_details = { + 'Id': 'abcd', + 'Name': 'network_foo', + 'Driver': 'macvlan', + 'Containers': { + 'abcd': {} + }, + 'Options': { + 'parent': 'eth0' + }, + 'IPAM': { + 'Config': [ + { + 'Subnet': '192.168.0.0/24', + 'Gateway': '192.168.0.1' + } + ] + } + } + docker_networks = Mock(return_value=[network_details]) + network_details['Containers'] = {'abcd': {'Id': 'abcd', 'Name': 'container_bar'}} + docker_inspect_network = Mock(return_value=network_details) + docker_inspect_container = Mock(return_value={'Id': 'abcd', 'Name': 'container_bar'}) + docker_disconnect_container_from_network = Mock(return_value='disconnected') + docker_remove_network = Mock(return_value='removed') + docker_create_network = Mock(return_value='created') + docker_connect_container_to_network = Mock(return_value='connected') + + __salt__ = {'docker.networks': docker_networks, + 'docker.inspect_network': docker_inspect_network, + 'docker.inspect_container': docker_inspect_container, + 'docker.disconnect_container_from_network': docker_disconnect_container_from_network, + 'docker.remove_network': docker_remove_network, + 
'docker.create_network': docker_create_network, + 'docker.connect_container_to_network': docker_connect_container_to_network, + } + with patch.dict(docker_state.__dict__, + {'__salt__': __salt__}): + ret = docker_state.present( + 'network_foo', + driver='macvlan', + gateway='192.168.1.1', + subnet='192.168.1.0/24', + driver_opts={'parent': 'eth1'}, + containers=['abcd'] + ) + + docker_disconnect_container_from_network.assert_called_with('abcd', 'network_foo') + docker_remove_network.assert_called_with('network_foo') + docker_create_network.assert_called_with('network_foo', + driver='macvlan', + driver_opts={'parent': 'eth1'}, + gateway='192.168.1.1', + ip_range=None, + subnet='192.168.1.0/24') + docker_connect_container_to_network.assert_called_with('abcd', 'network_foo') + + self.assertEqual(ret, {'name': 'network_foo', + 'comment': 'Network \'network_foo\' was replaced with updated config', + 'changes': { + 'updated': {'network_foo': { + 'old': { + 'driver_opts': {'parent': 'eth0'}, + 'gateway': '192.168.0.1', + 'subnet': '192.168.0.0/24' + }, + 'new': { + 'driver_opts': {'parent': 'eth1'}, + 'gateway': '192.168.1.1', + 'subnet': '192.168.1.0/24' + } + }}, + 'reconnected': ['container_bar'] + }, + 'result': True}) + def test_absent(self): ''' Test docker_network.absent From ab20377d27edb3f147509585312ae792f279b540 Mon Sep 17 00:00:00 2001 From: Andrew Bulford Date: Fri, 18 Aug 2017 12:59:56 +0100 Subject: [PATCH 091/639] Remove superfluous 'else' I'm not sure what this was for or how it wasn't a syntax error..? But then I am new to Python. Unit tests and quick run of state.apply seems to be fine without it. 
--- salt/states/docker_network.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/salt/states/docker_network.py b/salt/states/docker_network.py index a99421f6c6..7a0adb5a63 100644 --- a/salt/states/docker_network.py +++ b/salt/states/docker_network.py @@ -314,17 +314,16 @@ def present(name, except Exception as exc: ret['comment'] = ('Failed to create network \'{0}\': {1}' .format(name, exc)) - else: - result = True - for container in containers: - try: - ret['changes']['connected'] = __salt__['docker.connect_container_to_network']( - container['Id'], name) - except Exception as exc: - ret['comment'] = ('Failed to connect container \'{0}\' to network \'{1}\' {2}'.format( - container['Id'], name, exc)) - result = False - ret['result'] = result + result = True + for container in containers: + try: + ret['changes']['connected'] = __salt__['docker.connect_container_to_network']( + container['Id'], name) + except Exception as exc: + ret['comment'] = ('Failed to connect container \'{0}\' to network \'{1}\' {2}'.format( + container['Id'], name, exc)) + result = False + ret['result'] = result return ret From d4507f1a085d60751a7479909dcd6f00fdaa13aa Mon Sep 17 00:00:00 2001 From: Andrew Bulford Date: Fri, 18 Aug 2017 13:20:16 +0100 Subject: [PATCH 092/639] Re-use container connection logic on net creation Re-factoring a little following the previous commit, so that the same container connection logic can be used in both places, rather than it being repeated. 
--- salt/states/docker_network.py | 126 +++++++++++------------ tests/unit/states/test_docker_network.py | 4 +- 2 files changed, 60 insertions(+), 70 deletions(-) diff --git a/salt/states/docker_network.py b/salt/states/docker_network.py index 7a0adb5a63..9c4b65e259 100644 --- a/salt/states/docker_network.py +++ b/salt/states/docker_network.py @@ -232,71 +232,8 @@ def present(name, ret['changes']['updated'] = {name: {'old': original_config, 'new': new_config}} ret['comment'] = 'Network \'{0}\' was replaced with updated config'.format(name) - # Figure out the list of containers should now now be connected. - containers_to_connect = {} - # If no containers were specified in the state but we have disconnected some in the process of recreating the - # network, we should reconnect those containers. - if containers is None and containers_disconnected: - containers_to_connect = containers_disconnected - # If containers were specified in the state, regardless of what we've disconnected, we should now just connect - # the containers specified. - elif containers: - for container in containers: - containers_to_connect[container['Id']] = container - - # At this point, if all the containers we want connected are already connected to the network, we can set our - # result and finish. - if all(c in network['Containers'] for c in containers_to_connect): - ret['result'] = True - return ret - - # If we've not exited by this point it's because we have containers which we need to connect to the network. - result = True - reconnected_containers = [] - connected_containers = [] - for container_id, container in containers_to_connect.iteritems(): - if container_id not in network['Containers']: - try: - connect_result = __salt__['docker.connect_container_to_network'](container_id, name) - log.trace( - 'docker.connect_container_to_network({0}, {1}) result: {2}'. 
- format(container, name, connect_result) - ) - # If this container was one we disconnected earlier, add it to the reconnected list. - if container_id in containers_disconnected: - reconnected_containers.append(container['Name']) - # Otherwise add it to the connected list. - else: - connected_containers.append(container['Name']) - - except Exception as exc: - ret['comment'] = ('Failed to connect container \'{0}\' to network \'{1}\' {2}'.format( - container['Name'], name, exc)) - result = False - - # If we populated any of our container lists then add them to our list of changes. - if connected_containers: - ret['changes']['connected'] = connected_containers - if reconnected_containers: - ret['changes']['reconnected'] = reconnected_containers - - # Figure out if we removed any containers as a result of replacing the network and then not re-connecting the - # containers, because they weren't specified in the state. - disconnected_containers = [] - for container_id, container in containers_disconnected.iteritems(): - if container_id not in containers_to_connect: - disconnected_containers.append(container['Name']) - - if disconnected_containers: - ret['changes']['disconnected'] = disconnected_containers - - ret['result'] = result - # If the network does not yet exist, we create it else: - if containers is None: - containers = {} - log.debug('The network \'{0}\' will be created'.format(name)) if __opts__['test']: ret['result'] = None @@ -314,16 +251,69 @@ def present(name, except Exception as exc: ret['comment'] = ('Failed to create network \'{0}\': {1}' .format(name, exc)) - result = True + + # Finally, figure out the list of containers which should now be connected. + containers_to_connect = {} + # If no containers were specified in the state but we have disconnected some in the process of recreating the + # network, we should reconnect those containers. 
+ if containers is None and containers_disconnected: + containers_to_connect = containers_disconnected + # If containers were specified in the state, regardless of what we've disconnected, we should now just connect + # the containers specified. + elif containers: for container in containers: + containers_to_connect[container['Id']] = container + + if network is None: + network = {'Containers': {}} + + # At this point, if all the containers we want connected are already connected to the network, we can set our + # result and finish. + if all(c in network['Containers'] for c in containers_to_connect): + ret['result'] = True + return ret + + # If we've not exited by this point it's because we have containers which we need to connect to the network. + result = True + reconnected_containers = [] + connected_containers = [] + for container_id, container in containers_to_connect.iteritems(): + if container_id not in network['Containers']: try: - ret['changes']['connected'] = __salt__['docker.connect_container_to_network']( - container['Id'], name) + connect_result = __salt__['docker.connect_container_to_network'](container_id, name) + log.trace( + 'docker.connect_container_to_network({0}, {1}) result: {2}'. + format(container, name, connect_result) + ) + # If this container was one we disconnected earlier, add it to the reconnected list. + if container_id in containers_disconnected: + reconnected_containers.append(container['Name']) + # Otherwise add it to the connected list. + else: + connected_containers.append(container['Name']) + except Exception as exc: ret['comment'] = ('Failed to connect container \'{0}\' to network \'{1}\' {2}'.format( - container['Id'], name, exc)) + container['Name'], name, exc)) result = False - ret['result'] = result + + # If we populated any of our container lists then add them to our list of changes. 
+ if connected_containers: + ret['changes']['connected'] = connected_containers + if reconnected_containers: + ret['changes']['reconnected'] = reconnected_containers + + # Figure out if we removed any containers as a result of replacing the network and then not re-connecting the + # containers, because they weren't specified in the state. + disconnected_containers = [] + for container_id, container in containers_disconnected.iteritems(): + if container_id not in containers_to_connect: + disconnected_containers.append(container['Name']) + + if disconnected_containers: + ret['changes']['disconnected'] = disconnected_containers + + ret['result'] = result return ret diff --git a/tests/unit/states/test_docker_network.py b/tests/unit/states/test_docker_network.py index 959363492d..178fa4d9c6 100644 --- a/tests/unit/states/test_docker_network.py +++ b/tests/unit/states/test_docker_network.py @@ -42,7 +42,7 @@ class DockerNetworkTestCase(TestCase, LoaderModuleMockMixin): ''' docker_create_network = Mock(return_value='created') docker_connect_container_to_network = Mock(return_value='connected') - docker_inspect_container = Mock(return_value={'Id': 'abcd'}) + docker_inspect_container = Mock(return_value={'Id': 'abcd', 'Name': 'container_bar'}) # Get docker.networks to return a network with a name which is a superset of the name of # the network which is to be created, despite this network existing we should still expect # that the new network will be created. @@ -75,7 +75,7 @@ class DockerNetworkTestCase(TestCase, LoaderModuleMockMixin): 'network_foo') self.assertEqual(ret, {'name': 'network_foo', 'comment': '', - 'changes': {'connected': 'connected', + 'changes': {'connected': ['container_bar'], 'created': 'created'}, 'result': True}) From caf78d206d5b58c8f2a5c4cc15af6c165cef0f94 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Fri, 18 Aug 2017 15:15:20 +0200 Subject: [PATCH 093/639] Fixed imports for pytest Imports now match the new test-suite introduced in 2017.7. 
--- tests/unit/modules/kubernetes_test.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/unit/modules/kubernetes_test.py b/tests/unit/modules/kubernetes_test.py index 5f2dcdc1dc..6efc4d790d 100644 --- a/tests/unit/modules/kubernetes_test.py +++ b/tests/unit/modules/kubernetes_test.py @@ -5,11 +5,10 @@ # Import Python Libs from __future__ import absolute_import -import os # Import Salt Testing Libs -from salttesting import TestCase, skipIf -from salttesting.mock import ( +from tests.support.unit import TestCase, skipIf +from tests.support.mock import ( Mock, patch, NO_MOCK, From 3c99e61637f91f5e4210277a81e95be6904fe92f Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Fri, 18 Aug 2017 15:19:34 +0200 Subject: [PATCH 094/639] Renamed test to match new convention --- tests/unit/modules/{kubernetes_test.py => test_kubernetes.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/unit/modules/{kubernetes_test.py => test_kubernetes.py} (100%) diff --git a/tests/unit/modules/kubernetes_test.py b/tests/unit/modules/test_kubernetes.py similarity index 100% rename from tests/unit/modules/kubernetes_test.py rename to tests/unit/modules/test_kubernetes.py From 964cebd954d341d091008fbb960f18cdd49a3fd7 Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Sat, 19 Aug 2017 02:23:26 +1000 Subject: [PATCH 095/639] safe_filename_leaf(file_basename) and safe_filepath(file_path_name) fe_filename_leaf input the basename of a file, without the directory tree, and returns a safe name to use i.e. only the required characters are converted by urllib.quote If the input is a PY2 String, output a PY2 String. If input is Unicode output Unicode. For consistency all platforms are treated the same. Hard coded to utf8 as its ascii compatible windows is \ / : * ? " < > | posix is / safe_filepath input the full path and filename, splits on directory separator and calls safe_filename_leaf for each part of the path. 
--- salt/utils/__init__.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index ec018f1ad7..04fcf017e5 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -163,6 +163,37 @@ def is_empty(filename): return False +def safe_filename_leaf(file_basename): + ''' + input the basename of a file, without the directory tree, and returns a safe name to use + i.e. only the required characters are converted by urllib.quote + If the input is a PY2 String, output a PY2 String. If input is Unicode output Unicode. + For consistency all platforms are treated the same. Hard coded to utf8 as its ascii compatible + windows is \ / : * ? " < > | posix is / + ''' + def _replace(re_obj): + return urllib.quote(re_obj.group(0), safe=u'') + if not isinstance(file_basename, six.text_type): + # the following string is not prefixed with u + return re.sub('[\\\/:*?"<>|]', + _replace,six.text_type(file_basename, 'utf8').encode('ascii', 'backslashreplace')) + # the following string is prefixed with u + return re.sub(u'[\\\/:*?"<>|]', _replace, file_basename, flags=re.UNICODE) + + +def safe_filepath(file_path_name): + ''' + input the full path and filename, splits on directory separator and calls safe_filename_leaf for + each part of the path. 
+ ''' + (drive,path) = os.path.splitdrive(file_path_name) + path = os.sep.join([safe_filename_leaf(file_section) for file_section in file_path_name.rsplit(os.sep)]) + if drive: + return os.sep.join([drive, path]) + else: + return path + + def is_hex(value): ''' Returns True if value is a hexidecimal string, otherwise returns False From 8c864f02c7f7ef72abf1a754c07be27d69832e8c Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Sat, 19 Aug 2017 02:35:37 +1000 Subject: [PATCH 096/639] fix missing imports --- salt/utils/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 04fcf017e5..cbdf4ae138 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -16,6 +16,7 @@ import json import logging import numbers import os +import os.path import posixpath import random import re @@ -32,6 +33,7 @@ import warnings import string import subprocess import getpass +import urllib # Import 3rd-party libs from salt.ext import six From ebdca3a0f54b2897fb1e0fca2eb222c346633a51 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 18 Aug 2017 11:25:32 -0600 Subject: [PATCH 097/639] Update pkg-scripts Improves logging Removes /opt/salt/bin directory before install Removes symlink to salt-config before install --- pkg/osx/pkg-scripts/postinstall | 124 ++++++++++++++++++++++++-------- pkg/osx/pkg-scripts/preinstall | 90 ++++++++++++++++++++--- 2 files changed, 173 insertions(+), 41 deletions(-) diff --git a/pkg/osx/pkg-scripts/postinstall b/pkg/osx/pkg-scripts/postinstall index ed8ee7c142..f521666a6f 100755 --- a/pkg/osx/pkg-scripts/postinstall +++ b/pkg/osx/pkg-scripts/postinstall @@ -15,66 +15,130 @@ # This script is run as a part of the macOS Salt Installation # ############################################################################### -echo "Post install started on:" > /tmp/postinstall.txt -date >> /tmp/postinstall.txt + +############################################################################### +# Define 
Variables +############################################################################### +# Get Minor Version +OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]') +MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.) +# Path Variables +INSTALL_DIR="/opt/salt" +BIN_DIR="$INSTALL_DIR/bin" +CONFIG_DIR="/etc/salt" +TEMP_DIR="/tmp" +SBIN_DIR="/usr/local/sbin" + +############################################################################### +# Set up logging and error handling +############################################################################### +echo "Post install script started on:" > "$TEMP_DIR/postinstall.txt" +date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/postinstall.txt" trap 'quit_on_error $LINENO $BASH_COMMAND' ERR quit_on_error() { - echo "$(basename $0) caught error on line : $1 command was: $2" >> /tmp/postinstall.txt + echo "$(basename $0) caught error on line : $1 command was: $2" >> "$TEMP_DIR/postinstall.txt" exit -1 } ############################################################################### # Check for existing minion config, copy if it doesn't exist ############################################################################### -if [ ! -f /etc/salt/minion ]; then - echo "Config copy: Started..." >> /tmp/postinstall.txt - cp /etc/salt/minion.dist /etc/salt/minion - echo "Config copy: Successful" >> /tmp/postinstall.txt +if [ ! -f "$CONFIG_DIR/minion" ]; then + echo "Config: Copy Started..." >> "$TEMP_DIR/postinstall.txt" + cp "$CONFIG_DIR/minion.dist" "$CONFIG_DIR/minion" + echo "Config: Copied Successfully" >> "$TEMP_DIR/postinstall.txt" fi ############################################################################### # Create symlink to salt-config.sh ############################################################################### -# echo "Symlink: Creating symlink for salt-config..." >> /tmp/postinstall.txt -if [ ! -d "/usr/local/sbin" ]; then - mkdir /usr/local/sbin +if [ ! 
-d "$SBIN_DIR" ]; then + echo "Symlink: Creating $SBIN_DIR..." >> "$TEMP_DIR/postinstall.txt" + mkdir "$SBIN_DIR" + echo "Symlink: Created Successfully" >> "$TEMP_DIR/postinstall.txt" fi -ln -sf /opt/salt/bin/salt-config.sh /usr/local/sbin/salt-config +echo "Symlink: Creating symlink for salt-config..." >> "$TEMP_DIR/postinstall.txt" +ln -sf "$BIN_DIR/salt-config.sh" "$SBIN_DIR/salt-config" +echo "Symlink: Created Successfully" >> "$TEMP_DIR/postinstall.txt" ############################################################################### # Add salt to paths.d ############################################################################### -# echo "Path: Adding salt to the path..." >> /tmp/postinstall.txt if [ ! -d "/etc/paths.d" ]; then + echo "Path: Creating paths.d directory..." >> "$TEMP_DIR/postinstall.txt" mkdir /etc/paths.d + echo "Path: Created Successfully" >> "$TEMP_DIR/postinstall.txt" fi -sh -c 'echo "/opt/salt/bin" > /etc/paths.d/salt' -sh -c 'echo "/usr/local/sbin" >> /etc/paths.d/salt' +echo "Path: Adding salt to the path..." >> "$TEMP_DIR/postinstall.txt" +sh -c "echo \"$BIN_DIR\" > /etc/paths.d/salt" +sh -c "echo \"$SBIN_DIR\" >> /etc/paths.d/salt" +echo "Path: Added Successfully" >> "$TEMP_DIR/postinstall.txt" ############################################################################### # Register Salt as a service ############################################################################### -echo "Service start: Enabling service..." >> /tmp/postinstall.txt -launchctl enable system/com.saltstack.salt.minion -echo "Service start: Bootstrapping service..." >> /tmp/postinstall.txt -launchctl bootstrap system /Library/LaunchDaemons/com.saltstack.salt.minion.plist +setup_services_maverick() { + echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/postinstall.txt" + if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then + echo "Service: Stopping salt-minion..." 
>> "$TEMP_DIR/postinstall.txt" + launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist + echo "Service: Stopped Successfully" >> "$TEMP_DIR/postinstall.txt" + fi; + echo "Service: Starting salt-minion..." >> "$TEMP_DIR/postinstall.txt" + launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 + echo "Service: Started Successfully" >> "$TEMP_DIR/postinstall.txt" -if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then - echo "Service is running" >> /tmp/postinstall.txt -else - echo "Service start: Kickstarting service..." >> /tmp/postinstall.txt - launchctl kickstart -kp system/com.saltstack.salt.minion -fi + echo "Service: Disabling Master, Syndic, and API services..." >> "$TEMP_DIR/postinstall.txt" + launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist + launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist + launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist + echo "Service: Disabled Successfully" >> "$TEMP_DIR/postinstall.txt" -echo "Service start: Successful" >> /tmp/postinstall.txt + return 0 +} -echo "Service disable: Disabling Master, Syndic, and API" >> /tmp/postinstall.txt +setup_services_yosemite_and_later() { + echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/postinstall.txt" + echo "Service: Enabling salt-minion..." >> "$TEMP_DIR/postinstall.txt" + launchctl enable system/com.saltstack.salt.minion + echo "Service: Enabled Successfully" >> "$TEMP_DIR/postinstall.txt" -launchctl disable system/com.saltstack.salt.master -launchctl disable system/com.saltstack.salt.syndic -launchctl disable system/com.saltstack.salt.api + echo "Service: Bootstrapping salt-minion..." 
>> "$TEMP_DIR/postinstall.txt" + launchctl bootstrap system /Library/LaunchDaemons/com.saltstack.salt.minion.plist + echo "Service: Bootstrapped Successfully" >> "$TEMP_DIR/postinstall.txt" -echo "Post install completed successfully" >> /tmp/postinstall.txt + if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then + echo "Service: Service Running" >> "$TEMP_DIR/postinstall.txt" + else + echo "Service: Kickstarting Service..." >> "$TEMP_DIR/postinstall.txt" + launchctl kickstart -kp system/com.saltstack.salt.minion + echo "Service: Kickstarted Successfully" >> "$TEMP_DIR/postinstall.txt" + fi + + echo "Service: Started Successfully" >> "$TEMP_DIR/postinstall.txt" + + echo "Service: Disabling Master, Syndic, and API services" >> "$TEMP_DIR/postinstall.txt" + launchctl disable system/com.saltstack.salt.master + launchctl disable system/com.saltstack.salt.syndic + launchctl disable system/com.saltstack.salt.api + echo "Service: Disabled Successfully" >> "$TEMP_DIR/postinstall.txt" + + return 0 +} + +echo "Service: Configuring..." >> "$TEMP_DIR/postinstall.txt" +case $MINOR in + 9 ) + setup_services_maverick; + ;; + * ) + setup_services_yosemite_and_later; + ;; +esac +echo "Service: Configured Successfully" >> "$TEMP_DIR/postinstall.txt" + +echo "Post install completed successfully on:" >> "$TEMP_DIR/postinstall.txt" +date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/postinstall.txt" exit 0 diff --git a/pkg/osx/pkg-scripts/preinstall b/pkg/osx/pkg-scripts/preinstall index 4112e07f5f..c29d07c6b7 100755 --- a/pkg/osx/pkg-scripts/preinstall +++ b/pkg/osx/pkg-scripts/preinstall @@ -6,7 +6,8 @@ # Date: December 2015 # # Description: This script stops the salt minion service before attempting to -# install Salt on macOS +# install Salt on macOS. It also removes the /opt/salt/bin +# directory. 
# # Requirements: # - None @@ -15,26 +16,93 @@ # This script is run as a part of the macOS Salt Installation # ############################################################################### -echo "Preinstall started on:" > /tmp/preinstall.txt -date >> /tmp/preinstall.txt + +############################################################################### +# Define Variables +############################################################################### +# Get Minor Version +OSX_VERSION=$(sw_vers | grep ProductVersion | cut -f 2 -d: | tr -d '[:space:]') +MINOR=$(echo ${OSX_VERSION} | cut -f 2 -d.) +# Path Variables +INSTALL_DIR="/opt/salt" +BIN_DIR="$INSTALL_DIR/bin" +CONFIG_DIR="/etc/salt" +TEMP_DIR="/tmp" +SBIN_DIR="/usr/local/sbin" + +############################################################################### +# Set up logging and error handling +############################################################################### +echo "Preinstall started on:" > "$TEMP_DIR/preinstall.txt" +date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt" trap 'quit_on_error $LINENO $BASH_COMMAND' ERR quit_on_error() { - echo "$(basename $0) caught error on line : $1 command was: $2" >> /tmp/preinstall.txt + echo "$(basename $0) caught error on line : $1 command was: $2" >> "$TEMP_DIR/preinstall.txt" exit -1 } ############################################################################### # Stop the service ############################################################################### -if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then - echo "Stop service: Started..." 
>> /tmp/preinstall.txt -# /bin/launchctl unload "/Library/LaunchDaemons/com.saltstack.salt.minion.plist" - launchctl disable system/com.saltstack.salt.minion - launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.minion.plist - echo "Stop service: Successful" >> /tmp/preinstall.txt +stop_service_maverick() { + echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt" + if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then + echo "Service: Unloading..." >> "$TEMP_DIR/preinstall.txt" + launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist + launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist + launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist + launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist + echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt" + fi +} + +stop_service_yosemite_and_later() { + echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt" + if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then + echo "Service: Stopping..." >> "$TEMP_DIR/preinstall.txt" + launchctl disable system/com.saltstack.salt.minion + launchctl disable system/com.saltstack.salt.master + launchctl disable system/com.saltstack.salt.syndic + launchctl disable system/com.saltstack.salt.api + launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.minion.plist + launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.master.plist + launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.syndic.plist + launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.api.plist + echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt" + fi +} + +echo "Service: Configuring..." 
>> "$TEMP_DIR/preinstall.txt" +case $MINOR in + 9 ) + stop_service_maverick; + ;; + * ) + stop_service_yosemite_and_later; + ;; +esac +echo "Service: Configured Successfully" >> "$TEMP_DIR/preinstall.txt" + +############################################################################### +# Remove the Symlink to salt-config.sh +############################################################################### +if [ -L "$SBIN_DIR/salt-config" ]; then + echo "Cleanup: Removing Symlink $BIN_DIR/salt-config" >> "$TEMP_DIR/preinstall.txt" + rm "$SBIN_DIR/salt-config" + echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt" fi -echo "Preinstall Completed Successfully" >> /tmp/preinstall.txt +############################################################################### +# Remove the $BIN_DIR directory +############################################################################### +if [ -d "$BIN_DIR" ]; then + echo "Cleanup: Removing $BIN_DIR" >> "$TEMP_DIR/preinstall.txt" + rm -rf "$BIN_DIR" + echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt" +fi + +echo "Preinstall Completed Successfully on:" >> "$TEMP_DIR/preinstall.txt" +date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt" exit 0 From 3b62bf953cff57ac83db27eb2bb4c5e9329f4322 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 18 Aug 2017 11:44:35 -0600 Subject: [PATCH 098/639] Remove salt from the path --- pkg/osx/pkg-scripts/preinstall | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pkg/osx/pkg-scripts/preinstall b/pkg/osx/pkg-scripts/preinstall index c29d07c6b7..e4ae38baa5 100755 --- a/pkg/osx/pkg-scripts/preinstall +++ b/pkg/osx/pkg-scripts/preinstall @@ -102,6 +102,15 @@ if [ -d "$BIN_DIR" ]; then echo "Cleanup: Removed Successfully" >> "$TEMP_DIR/preinstall.txt" fi +############################################################################### +# Remove the salt from the paths.d +############################################################################### +if [ ! 
-f "/etc/paths.d/salt" ]; then + echo "Path: Removing salt from the path..." >> "$TEMP_DIR/preinstall.txt" + rm "/etc/paths.d/salt" + echo "Path: Removed Successfully" >> "$TEMP_DIR/preinstall.txt" +fi + echo "Preinstall Completed Successfully on:" >> "$TEMP_DIR/preinstall.txt" date "+%Y/%m/%d %H:%m:%S" >> "$TEMP_DIR/preinstall.txt" From f44f5b70dc5955552f60cb2234f8cd06873f0c9b Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 18 Aug 2017 11:52:34 -0600 Subject: [PATCH 099/639] Only stop services if they are running Otherwise it will cause an error and the installation will fail --- pkg/osx/pkg-scripts/preinstall | 36 ++++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/pkg/osx/pkg-scripts/preinstall b/pkg/osx/pkg-scripts/preinstall index e4ae38baa5..bdd9a27341 100755 --- a/pkg/osx/pkg-scripts/preinstall +++ b/pkg/osx/pkg-scripts/preinstall @@ -48,26 +48,50 @@ quit_on_error() { stop_service_maverick() { echo "Service: Using old (< 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt" if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then - echo "Service: Unloading..." >> "$TEMP_DIR/preinstall.txt" + echo "Service: Unloading minion..." >> "$TEMP_DIR/preinstall.txt" launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist - launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist + echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt" + fi + if /bin/launchctl list "com.saltstack.salt.master" &> /dev/null; then + echo "Service: Unloading master..." >> "$TEMP_DIR/preinstall.txt" launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist + echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt" + fi + if /bin/launchctl list "com.saltstack.salt.syndic" &> /dev/null; then + echo "Service: Unloading syndic..." 
>> "$TEMP_DIR/preinstall.txt" launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.syndic.plist echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt" fi + if /bin/launchctl list "com.saltstack.salt.api" &> /dev/null; then + echo "Service: Unloading api..." >> "$TEMP_DIR/preinstall.txt" + launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.api.plist + echo "Service: Unloaded Successfully" >> "$TEMP_DIR/preinstall.txt" + fi } stop_service_yosemite_and_later() { echo "Service: Using new (>= 10.10) launchctl interface" >> "$TEMP_DIR/preinstall.txt" if /bin/launchctl list "com.saltstack.salt.minion" &> /dev/null; then - echo "Service: Stopping..." >> "$TEMP_DIR/preinstall.txt" + echo "Service: Stopping minion..." >> "$TEMP_DIR/preinstall.txt" launchctl disable system/com.saltstack.salt.minion - launchctl disable system/com.saltstack.salt.master - launchctl disable system/com.saltstack.salt.syndic - launchctl disable system/com.saltstack.salt.api launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.minion.plist + echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt" + fi + if /bin/launchctl list "com.saltstack.salt.master" &> /dev/null; then + echo "Service: Stopping master..." >> "$TEMP_DIR/preinstall.txt" + launchctl disable system/com.saltstack.salt.master launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.master.plist + echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt" + fi + if /bin/launchctl list "com.saltstack.salt.syndic" &> /dev/null; then + echo "Service: Stopping syndic..." >> "$TEMP_DIR/preinstall.txt" + launchctl disable system/com.saltstack.salt.syndic launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.syndic.plist + echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt" + fi + if /bin/launchctl list "com.saltstack.salt.api" &> /dev/null; then + echo "Service: Stopping api..." 
>> "$TEMP_DIR/preinstall.txt" + launchctl disable system/com.saltstack.salt.api launchctl bootout system /Library/LaunchDaemons/com.saltstack.salt.api.plist echo "Service: Stopped Successfully" >> "$TEMP_DIR/preinstall.txt" fi From 2dd62aa1daef85be8046170f3ad4ae7feac436cd Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 18 Aug 2017 12:09:08 -0600 Subject: [PATCH 100/639] Add more information to the description --- pkg/osx/pkg-scripts/preinstall | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/osx/pkg-scripts/preinstall b/pkg/osx/pkg-scripts/preinstall index bdd9a27341..c28a5e9e43 100755 --- a/pkg/osx/pkg-scripts/preinstall +++ b/pkg/osx/pkg-scripts/preinstall @@ -7,7 +7,7 @@ # # Description: This script stops the salt minion service before attempting to # install Salt on macOS. It also removes the /opt/salt/bin -# directory. +# directory, symlink to salt-config, and salt from paths.d. # # Requirements: # - None From ef8a14cdf93de26e272e4e4e73a01e14a1aca2b7 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 18 Aug 2017 12:19:04 -0600 Subject: [PATCH 101/639] Remove /opt/salt instead of /opt/salt/bin --- pkg/osx/pkg-scripts/preinstall | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/osx/pkg-scripts/preinstall b/pkg/osx/pkg-scripts/preinstall index c28a5e9e43..c919cafcb1 100755 --- a/pkg/osx/pkg-scripts/preinstall +++ b/pkg/osx/pkg-scripts/preinstall @@ -118,11 +118,11 @@ if [ -L "$SBIN_DIR/salt-config" ]; then fi ############################################################################### -# Remove the $BIN_DIR directory +# Remove the $INSTALL_DIR directory ############################################################################### -if [ -d "$BIN_DIR" ]; then - echo "Cleanup: Removing $BIN_DIR" >> "$TEMP_DIR/preinstall.txt" - rm -rf "$BIN_DIR" +if [ -d "$INSTALL_DIR" ]; then + echo "Cleanup: Removing $INSTALL_DIR" >> "$TEMP_DIR/preinstall.txt" + rm -rf "$INSTALL_DIR" echo "Cleanup: Removed Successfully" >> 
"$TEMP_DIR/preinstall.txt" fi From 0ffc57d1df97b5c6de5d6cdd5257d022e56bf6e0 Mon Sep 17 00:00:00 2001 From: Pablo Hernandez Date: Fri, 18 Aug 2017 14:29:19 -0400 Subject: [PATCH 102/639] Have docker.save use the image name when valid if not use image id, issue when loading and image is savid with id issue #43043 --- salt/modules/dockermod.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py index d796e49913..ef33254f9f 100644 --- a/salt/modules/dockermod.py +++ b/salt/modules/dockermod.py @@ -3880,8 +3880,9 @@ def save(name, saved_path = salt.utils.files.mkstemp() else: saved_path = path - - cmd = ['docker', 'save', '-o', saved_path, inspect_image(name)['Id']] + # use the image name if its valid if not use the image id + image_to_save = name if name in inspect_image(name)['RepoTags'] else inspect_image(name)['Id'] + cmd = ['docker', 'save', '-o', saved_path, image_to_save] time_started = time.time() result = __salt__['cmd.run_all'](cmd, python_shell=False) if result['retcode'] != 0: From ca1b1bb6334da6597b97ae411d65b98844c8f661 Mon Sep 17 00:00:00 2001 From: Arount Date: Fri, 30 Jun 2017 14:54:54 +0200 Subject: [PATCH 103/639] use configparser to parse yum repo file --- salt/modules/yumpkg.py | 46 +++++++++++++----------------------------- 1 file changed, 14 insertions(+), 32 deletions(-) diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index b0a183d70a..9d98322cc8 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -2524,41 +2524,23 @@ def _parse_repo_file(filename): ''' Turn a single repo file into a dict ''' - repos = {} - header = '' - repo = '' - with salt.utils.fopen(filename, 'r') as rfile: - for line in rfile: - if line.startswith('['): - repo = line.strip().replace('[', '').replace(']', '') - repos[repo] = {} + parsed = configparser.ConfigParser() + parsed.read(filename) + config = {} - # Even though these are essentially uselss, I want to allow the - # 
user to maintain their own comments, etc - if not line: - if not repo: - header += line - if line.startswith('#'): - if not repo: - header += line - else: - if 'comments' not in repos[repo]: - repos[repo]['comments'] = [] - repos[repo]['comments'].append(line.strip()) - continue + for section in parsed._sections: + section_dict = dict(parsed._sections[section]) + section_dict.pop('__name__') + config[section] = section_dict - # These are the actual configuration lines that matter - if '=' in line: - try: - comps = line.strip().split('=') - repos[repo][comps[0].strip()] = '='.join(comps[1:]) - except KeyError: - log.error( - 'Failed to parse line in %s, offending line was ' - '\'%s\'', filename, line.rstrip() - ) + # Try to extract leading comments + headers = '' + with salt.utils.fopen(filename, 'r') as rawfile: + for line in rawfile: + if line.strip().startswith('#'): + headers += '{0}\n'.format(line.strip()) - return (header, repos) + return (headers, config) def file_list(*packages): From d7f65dc7a7447a7aa7db401430dafef6511d2783 Mon Sep 17 00:00:00 2001 From: Arount Date: Fri, 30 Jun 2017 15:28:52 +0200 Subject: [PATCH 104/639] fix configparser import & log if error was raised --- salt/modules/yumpkg.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index 9d98322cc8..40cde4196e 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -35,8 +35,10 @@ try: import yum HAS_YUM = True except ImportError: - from salt.ext.six.moves import configparser HAS_YUM = False + +from salt.ext.six.moves import configparser + # pylint: enable=import-error,redefined-builtin # Import salt libs @@ -2525,9 +2527,16 @@ def _parse_repo_file(filename): Turn a single repo file into a dict ''' parsed = configparser.ConfigParser() - parsed.read(filename) config = {} + try: + parsed.read(filename) + except configparser.MissingSectionHeaderError as err: + log.error( + 'Failed to parser file {0}, 
error: {1}'.format(filename, err.message) + ) + return ('', {}) + for section in parsed._sections: section_dict = dict(parsed._sections[section]) section_dict.pop('__name__') From 38add0e4a25218b78ae13ce0bee659122c54f2ed Mon Sep 17 00:00:00 2001 From: Arount Date: Sat, 1 Jul 2017 00:02:16 +0200 Subject: [PATCH 105/639] break if leading comments are all fetched --- salt/modules/yumpkg.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index 40cde4196e..1df90f04bf 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -2548,6 +2548,8 @@ def _parse_repo_file(filename): for line in rawfile: if line.strip().startswith('#'): headers += '{0}\n'.format(line.strip()) + else: + break return (headers, config) From 3b2cb81a72b9e89e6536b5f58abaef83495f00fc Mon Sep 17 00:00:00 2001 From: Arount Date: Sat, 1 Jul 2017 01:11:18 +0200 Subject: [PATCH 106/639] fix typo in salt.modules.yumpkg --- salt/modules/yumpkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index 1df90f04bf..14cdf0d899 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -2533,7 +2533,7 @@ def _parse_repo_file(filename): parsed.read(filename) except configparser.MissingSectionHeaderError as err: log.error( - 'Failed to parser file {0}, error: {1}'.format(filename, err.message) + 'Failed to parse file {0}, error: {1}'.format(filename, err.message) ) return ('', {}) From 9768341da7d4e97c93815c7a183edafa29a02e41 Mon Sep 17 00:00:00 2001 From: Nathan Embery Date: Fri, 18 Aug 2017 14:43:45 -0400 Subject: [PATCH 107/639] add lightweight connection check to ping and alive in junos proxy module --- salt/proxy/junos.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/salt/proxy/junos.py b/salt/proxy/junos.py index c8278fdc71..f6d8baff66 100644 --- a/salt/proxy/junos.py +++ b/salt/proxy/junos.py @@ -37,7 +37,6 @@ Run the salt proxy via 
the following command: ''' from __future__ import absolute_import -# Import python libs import logging # Import 3rd-party libs @@ -47,6 +46,10 @@ try: import jnpr.junos.utils import jnpr.junos.utils.config import jnpr.junos.utils.sw + from jnpr.junos.exception import RpcTimeoutError + from jnpr.junos.exception import ConnectClosedError + from jnpr.junos.exception import RpcError + from jnpr.junos.exception import ConnectError except ImportError: HAS_JUNOS = False @@ -118,10 +121,13 @@ def conn(): def alive(opts): ''' - Return the connection status with the remote device. + Validate and return the connection status with the remote device. .. versionadded:: Oxygen ''' + + thisproxy['conn'].connected = ping() + return thisproxy['conn'].connected @@ -150,6 +156,16 @@ def ping(): ''' Ping? Pong! ''' + + try: + thisproxy['conn'].rpc.file_list(path='/dev/null', dev_timeout=2) + + except RpcTimeoutError: + try: + thisproxy['conn'].close() + except (RpcError, ConnectError): + pass + return thisproxy['conn'].connected From c15048cec9e6e7c35a9f20ef69f6f7b91091b8a1 Mon Sep 17 00:00:00 2001 From: Andrew Bulford Date: Fri, 18 Aug 2017 22:07:25 +0100 Subject: [PATCH 108/639] Replace dict.iteritems() with six.iteritems(dict) To fix Python 3 compatibility breakage. 
--- salt/states/docker_network.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/states/docker_network.py b/salt/states/docker_network.py index 9c4b65e259..932d497fa1 100644 --- a/salt/states/docker_network.py +++ b/salt/states/docker_network.py @@ -34,6 +34,7 @@ from __future__ import absolute_import import logging # Import salt libs +from salt.ext import six import salt.utils # Enable proper logging @@ -277,7 +278,7 @@ def present(name, result = True reconnected_containers = [] connected_containers = [] - for container_id, container in containers_to_connect.iteritems(): + for container_id, container in six.iteritems(containers_to_connect): if container_id not in network['Containers']: try: connect_result = __salt__['docker.connect_container_to_network'](container_id, name) @@ -306,7 +307,7 @@ def present(name, # Figure out if we removed any containers as a result of replacing the network and then not re-connecting the # containers, because they weren't specified in the state. disconnected_containers = [] - for container_id, container in containers_disconnected.iteritems(): + for container_id, container in six.iteritems(containers_disconnected): if container_id not in containers_to_connect: disconnected_containers.append(container['Name']) From 093c0c2f7767b8f9bdd66671a2407a9e53b858b3 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 18 Aug 2017 16:19:40 -0500 Subject: [PATCH 109/639] Fix race condition in git.latest The git.latest state runs a `git ls-remote` on the remote repo to discover which SHA it should end up at, and whether or not it needs to fetch from the repo to get the commit it needs. However, since we fast-forward using a `git merge` to the branch specified in the `rev` argument, this leaves the state susceptible to a race condition when someone pushes to the remote repo between when we run the `git ls-remote` and when we fetch the remote repo. 
We will successfully fast-forward to the head of the branch, but that branch will be pointing to a different commit than the one identified in the `git ls-remote`, so we will report the state as having failed. This commit fixes that race condition by fast-forwarding to the commit identified in the `git ls-remote`, rather than to the branch named by `rev`. NOTE: This means that in these edge cases, we will report a `True` result despite the head of the branch having advanced past the commit to which we fast-forwarded, but that seems like a reasonable trade-off for avoiding a race condition. If we repeated the `git ls-remote` after fetching, we'd just be opening up a window (albeit a smaller one) for another race condition. --- salt/states/git.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/salt/states/git.py b/salt/states/git.py index caf75399de..0654d46d19 100644 --- a/salt/states/git.py +++ b/salt/states/git.py @@ -1472,8 +1472,6 @@ def latest(name, user=user, password=password, ignore_retcode=True): - merge_rev = remote_rev if rev == 'HEAD' \ - else desired_upstream if git_ver >= _LooseVersion('1.8.1.6'): # --ff-only added in version 1.8.1.6. 
It's not @@ -1490,7 +1488,7 @@ def latest(name, __salt__['git.merge']( target, - rev=merge_rev, + rev=remote_rev, opts=merge_opts, user=user, password=password) From 3f490f63f58c323ade5406c4e4ac266e0d3bd946 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Fri, 18 Aug 2017 19:24:30 -0500 Subject: [PATCH 110/639] fixes for pylint --- salt/modules/saltcheck.py | 4 +- tests/unit/modules/test_saltcheck.py | 78 +++++++++++++++------------- 2 files changed, 45 insertions(+), 37 deletions(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index 902ffc8757..43c45cf79c 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -541,8 +541,8 @@ class StateTestLoader(object): loads in one test file ''' try: - # with salt.utils.files.fopen(filepath, 'r') as myfile: - with open(filepath, 'r') as myfile: + with salt.utils.files.fopen(filepath, 'r') as myfile: + # with open(filepath, 'r') as myfile: contents_yaml = yaml.load(myfile) for key, value in contents_yaml.items(): self.test_dict[key] = value diff --git a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py index 086b1467b1..9b2d3a40f4 100644 --- a/tests/unit/modules/test_saltcheck.py +++ b/tests/unit/modules/test_saltcheck.py @@ -1,49 +1,54 @@ # -*- coding: utf-8 -*- +'''Unit test for saltcheck execution module''' # Import python libs from __future__ import absolute_import, print_function # Import Salt Testing libs -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.unit import skipIf, TestCase -from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch +# from tests.support.mixins import LoaderModuleMockMixin +# from tests.support.unit import skipIf, TestCase +from tests.support.unit import TestCase +# from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch +from tests.support.mock import MagicMock, patch # Import salt libs -from salt.exceptions import CommandExecutionError +# from salt.exceptions import 
CommandExecutionError import salt.modules.saltcheck as saltcheck saltcheck.__salt__ = {} + class SaltCheckTestCase(TestCase): ''' SaltCheckTestCase''' def test_update_master_cache(self): + '''test master cache''' self.assertTrue(saltcheck.update_master_cache) - def test_call_salt_command(self): + '''test simple test.echo module''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'sys.list_modules': MagicMock(return_value=['module1']), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) self.assertEqual(returned, 'hello') def test_call_salt_command2(self): + '''test simple test.echo module again''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'sys.list_modules': MagicMock(return_value=['module1']), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) self.assertNotEqual(returned, 'not-hello') - def test__assert_equal1(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = {'a': 1, 'b': 2} b = {'a': 1, 'b': 2} @@ -53,7 +58,7 @@ class SaltCheckTestCase(TestCase): def test__assert_equal2(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = {'a': 1, 'b': 2} b = {'a': 1, 'b': 2, 'c': 3} @@ -63,7 +68,7 @@ class SaltCheckTestCase(TestCase): def test__assert_not_equal1(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = {'a': 1, 'b': 2} b = {'a': 1, 'b': 2, 'c': 3} @@ -73,7 +78,7 @@ class 
SaltCheckTestCase(TestCase): def test__assert_not_equal2(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = {'a': 1, 'b': 2} b = {'a': 1, 'b': 2} @@ -83,7 +88,7 @@ class SaltCheckTestCase(TestCase): def test__assert_true1(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() mybool = sc._SaltCheck__assert_equal(True, True) self.assertTrue(mybool) @@ -91,7 +96,7 @@ class SaltCheckTestCase(TestCase): def test__assert_true2(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() mybool = sc._SaltCheck__assert_equal(False, True) self.assertNotEqual(mybool, True) @@ -99,7 +104,7 @@ class SaltCheckTestCase(TestCase): def test__assert_false1(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() mybool = sc._SaltCheck__assert_false(False) self.assertTrue(mybool) @@ -107,7 +112,7 @@ class SaltCheckTestCase(TestCase): def test__assert_false2(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() mybool = sc._SaltCheck__assert_false(True) self.assertNotEqual(mybool, True) @@ -115,7 +120,7 @@ class SaltCheckTestCase(TestCase): def test__assert_in1(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = "bob" mylist = ['alice', 'bob', 'charles', 'dana'] @@ -125,7 +130,7 @@ class SaltCheckTestCase(TestCase): def test__assert_in2(self): with 
patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = "elaine" mylist = ['alice', 'bob', 'charles', 'dana'] @@ -135,7 +140,7 @@ class SaltCheckTestCase(TestCase): def test__assert_not_in1(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = "elaine" mylist = ['alice', 'bob', 'charles', 'dana'] @@ -145,7 +150,7 @@ class SaltCheckTestCase(TestCase): def test__assert_not_in2(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = "bob" mylist = ['alice', 'bob', 'charles', 'dana'] @@ -155,7 +160,7 @@ class SaltCheckTestCase(TestCase): def test__assert_greater1(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = 110 b = 100 @@ -165,7 +170,7 @@ class SaltCheckTestCase(TestCase): def test__assert_greater2(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = 100 b = 110 @@ -175,7 +180,7 @@ class SaltCheckTestCase(TestCase): def test__assert_greater3(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = 100 b = 100 @@ -185,7 +190,7 @@ class SaltCheckTestCase(TestCase): def test__assert_greater_equal_equal1(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = 110 b = 100 @@ -195,7 +200,7 @@ class 
SaltCheckTestCase(TestCase): def test__assert_greater_equal2(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = 100 b = 110 @@ -205,7 +210,7 @@ class SaltCheckTestCase(TestCase): def test__assert_greater_equal3(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = 100 b = 100 @@ -215,7 +220,7 @@ class SaltCheckTestCase(TestCase): def test__assert_less1(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = 99 b = 100 @@ -225,7 +230,7 @@ class SaltCheckTestCase(TestCase): def test__assert_less2(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = 110 b = 99 @@ -235,7 +240,7 @@ class SaltCheckTestCase(TestCase): def test__assert_less3(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = 100 b = 100 @@ -245,7 +250,7 @@ class SaltCheckTestCase(TestCase): def test__assert_less_equal1(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = 99 b = 100 @@ -255,7 +260,7 @@ class SaltCheckTestCase(TestCase): def test__assert_less_equal2(self): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = 110 b = 99 @@ -265,7 +270,7 @@ class SaltCheckTestCase(TestCase): def test__assert_less_equal3(self): with 
patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) - }): + }): sc = saltcheck.SaltCheck() a = 100 b = 100 @@ -276,7 +281,10 @@ class SaltCheckTestCase(TestCase): with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'sys.list_modules': MagicMock(return_value=['test']), 'sys.list_functions': MagicMock(return_value=['test.echo']), - 'cp.cache_master': MagicMock(return_value=[True]) - }): - returned = saltcheck.run_test(test={"module_and_function": "test.echo", "assertion": "assertEqual", "expected-return": "This works!", "args":["This works!"] }) + 'cp.cache_master': MagicMock(return_value=[True])}): + returned = saltcheck.run_test(test={"module_and_function": "test.echo", + "assertion": "assertEqual", + "expected-return": "This works!", + "args": ["This works!"] + }) self.assertEqual(returned, 'Pass') From e9cdbe72667dae9b059ebf8b25d40fc72356fe71 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Fri, 18 Aug 2017 19:45:17 -0500 Subject: [PATCH 111/639] fixing pylint issues --- tests/unit/modules/test_saltcheck.py | 214 +++++++++++++++------------ 1 file changed, 118 insertions(+), 96 deletions(-) diff --git a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py index 9b2d3a40f4..7bc45f4318 100644 --- a/tests/unit/modules/test_saltcheck.py +++ b/tests/unit/modules/test_saltcheck.py @@ -7,13 +7,12 @@ from __future__ import absolute_import, print_function # Import Salt Testing libs # from tests.support.mixins import LoaderModuleMockMixin # from tests.support.unit import skipIf, TestCase -from tests.support.unit import TestCase -# from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch -from tests.support.mock import MagicMock, patch - -# Import salt libs -# from salt.exceptions import CommandExecutionError -import salt.modules.saltcheck as saltcheck +try: + from tests.support.unit import TestCase + from tests.support.mock import 
MagicMock, patch + import salt.modules.saltcheck as saltcheck +except ImportError as error: + raise ImportError('Unable to import modules: {}'.format(error)) saltcheck.__salt__ = {} @@ -31,8 +30,8 @@ class SaltCheckTestCase(TestCase): 'sys.list_modules': MagicMock(return_value=['module1']), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) + sc_instance = saltcheck.SaltCheck() + returned = sc_instance.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) self.assertEqual(returned, 'hello') def test_call_salt_command2(self): @@ -41,243 +40,266 @@ class SaltCheckTestCase(TestCase): 'sys.list_modules': MagicMock(return_value=['module1']), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - returned = sc.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) + sc_instance = saltcheck.SaltCheck() + returned = sc_instance.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) self.assertNotEqual(returned, 'not-hello') def test__assert_equal1(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = {'a': 1, 'b': 2} - b = {'a': 1, 'b': 2} - mybool = sc._SaltCheck__assert_equal(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = {'a': 1, 'b': 2} + bbb = {'a': 1, 'b': 2} + mybool = sc_instance._SaltCheck__assert_equal(aaa, bbb) self.assertTrue(mybool) def test__assert_equal2(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = {'a': 1, 'b': 2} - b = {'a': 1, 'b': 2, 'c': 3} - mybool = sc._SaltCheck__assert_equal(False, True) + sc_instance = saltcheck.SaltCheck() + mybool = sc_instance._SaltCheck__assert_equal(False, True) 
self.assertNotEqual(mybool, True) def test__assert_not_equal1(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = {'a': 1, 'b': 2} - b = {'a': 1, 'b': 2, 'c': 3} - mybool = sc._SaltCheck__assert_not_equal(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = {'a': 1, 'b': 2} + bbb = {'a': 1, 'b': 2, 'c': 3} + mybool = sc_instance._SaltCheck__assert_not_equal(aaa, bbb) self.assertTrue(mybool) def test__assert_not_equal2(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = {'a': 1, 'b': 2} - b = {'a': 1, 'b': 2} - mybool = sc._SaltCheck__assert_not_equal(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = {'a': 1, 'b': 2} + bbb = {'a': 1, 'b': 2} + mybool = sc_instance._SaltCheck__assert_not_equal(aaa, bbb) self.assertNotEqual(mybool, True) def test__assert_true1(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - mybool = sc._SaltCheck__assert_equal(True, True) + sc_instance = saltcheck.SaltCheck() + mybool = sc_instance._SaltCheck__assert_equal(True, True) self.assertTrue(mybool) def test__assert_true2(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - mybool = sc._SaltCheck__assert_equal(False, True) + sc_instance = saltcheck.SaltCheck() + mybool = sc_instance._SaltCheck__assert_equal(False, True) self.assertNotEqual(mybool, True) def test__assert_false1(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = 
saltcheck.SaltCheck() - mybool = sc._SaltCheck__assert_false(False) + sc_instance = saltcheck.SaltCheck() + mybool = sc_instance._SaltCheck__assert_false(False) self.assertTrue(mybool) def test__assert_false2(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - mybool = sc._SaltCheck__assert_false(True) + sc_instance = saltcheck.SaltCheck() + mybool = sc_instance._SaltCheck__assert_false(True) self.assertNotEqual(mybool, True) def test__assert_in1(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = "bob" + sc_instance = saltcheck.SaltCheck() + aaa = "bob" mylist = ['alice', 'bob', 'charles', 'dana'] - mybool = sc._SaltCheck__assert_in(a, mylist) + mybool = sc_instance._SaltCheck__assert_in(aaa, mylist) self.assertTrue(mybool, True) def test__assert_in2(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = "elaine" + sc_instance = saltcheck.SaltCheck() + aaa = "elaine" mylist = ['alice', 'bob', 'charles', 'dana'] - mybool = sc._SaltCheck__assert_in(a, mylist) + mybool = sc_instance._SaltCheck__assert_in(aaa, mylist) self.assertNotEqual(mybool, True) def test__assert_not_in1(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = "elaine" + sc_instance = saltcheck.SaltCheck() + aaa = "elaine" mylist = ['alice', 'bob', 'charles', 'dana'] - mybool = sc._SaltCheck__assert_not_in(a, mylist) + mybool = sc_instance._SaltCheck__assert_not_in(aaa, mylist) self.assertTrue(mybool, True) def test__assert_not_in2(self): + '''test''' with 
patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = "bob" + sc_instance = saltcheck.SaltCheck() + aaa = "bob" mylist = ['alice', 'bob', 'charles', 'dana'] - mybool = sc._SaltCheck__assert_not_in(a, mylist) + mybool = sc_instance._SaltCheck__assert_not_in(aaa, mylist) self.assertNotEqual(mybool, True) def test__assert_greater1(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = 110 - b = 100 - mybool = sc._SaltCheck__assert_greater(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = 110 + bbb = 100 + mybool = sc_instance._SaltCheck__assert_greater(aaa, bbb) self.assertTrue(mybool, True) def test__assert_greater2(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = 100 - b = 110 - mybool = sc._SaltCheck__assert_greater(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = 100 + bbb = 110 + mybool = sc_instance._SaltCheck__assert_greater(aaa, bbb) self.assertNotEqual(mybool, True) def test__assert_greater3(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = 100 - b = 100 - mybool = sc._SaltCheck__assert_greater(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = 100 + bbb = 100 + mybool = sc_instance._SaltCheck__assert_greater(aaa, bbb) self.assertNotEqual(mybool, True) - def test__assert_greater_equal_equal1(self): + def test__assert_greater_equal1(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = 110 - b = 100 - 
mybool = sc._SaltCheck__assert_greater_equal(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = 110 + bbb = 100 + mybool = sc_instance._SaltCheck__assert_greater_equal(aaa, bbb) self.assertTrue(mybool, True) def test__assert_greater_equal2(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = 100 - b = 110 - mybool = sc._SaltCheck__assert_greater_equal(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = 100 + bbb = 110 + mybool = sc_instance._SaltCheck__assert_greater_equal(aaa, bbb) self.assertNotEqual(mybool, True) def test__assert_greater_equal3(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = 100 - b = 100 - mybool = sc._SaltCheck__assert_greater_equal(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = 100 + bbb = 100 + mybool = sc_instance._SaltCheck__assert_greater_equal(aaa, bbb) self.assertEqual(mybool, 'Pass') def test__assert_less1(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = 99 - b = 100 - mybool = sc._SaltCheck__assert_less(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = 99 + bbb = 100 + mybool = sc_instance._SaltCheck__assert_less(aaa, bbb) self.assertTrue(mybool, True) def test__assert_less2(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = 110 - b = 99 - mybool = sc._SaltCheck__assert_less(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = 110 + bbb = 99 + mybool = sc_instance._SaltCheck__assert_less(aaa, bbb) self.assertNotEqual(mybool, True) def test__assert_less3(self): + '''test''' with 
patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = 100 - b = 100 - mybool = sc._SaltCheck__assert_less(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = 100 + bbb = 100 + mybool = sc_instance._SaltCheck__assert_less(aaa, bbb) self.assertNotEqual(mybool, True) def test__assert_less_equal1(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = 99 - b = 100 - mybool = sc._SaltCheck__assert_less_equal(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = 99 + bbb = 100 + mybool = sc_instance._SaltCheck__assert_less_equal(aaa, bbb) self.assertTrue(mybool, True) def test__assert_less_equal2(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = 110 - b = 99 - mybool = sc._SaltCheck__assert_less_equal(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = 110 + bbb = 99 + mybool = sc_instance._SaltCheck__assert_less_equal(aaa, bbb) self.assertNotEqual(mybool, True) def test__assert_less_equal3(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'cp.cache_master': MagicMock(return_value=[True]) }): - sc = saltcheck.SaltCheck() - a = 100 - b = 100 - mybool = sc._SaltCheck__assert_less_equal(a, b) + sc_instance = saltcheck.SaltCheck() + aaa = 100 + bbb = 100 + mybool = sc_instance._SaltCheck__assert_less_equal(aaa, bbb) self.assertEqual(mybool, 'Pass') def test_run_test_1(self): + '''test''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), 'sys.list_modules': MagicMock(return_value=['test']), 'sys.list_functions': MagicMock(return_value=['test.echo']), From ee41171c9f6a517652028ee2cc77fefe908185c3 Mon Sep 17 00:00:00 2001 
From: Damon Atkins Date: Sat, 19 Aug 2017 11:39:41 +1000 Subject: [PATCH 112/639] lint fixes --- salt/utils/__init__.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index cbdf4ae138..1cceb88d52 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -177,8 +177,9 @@ def safe_filename_leaf(file_basename): return urllib.quote(re_obj.group(0), safe=u'') if not isinstance(file_basename, six.text_type): # the following string is not prefixed with u - return re.sub('[\\\/:*?"<>|]', - _replace,six.text_type(file_basename, 'utf8').encode('ascii', 'backslashreplace')) + return re.sub('[\\\/:*?"<>|]', + _replace, + six.text_type(file_basename, 'utf8').encode('ascii', 'backslashreplace')) # the following string is prefixed with u return re.sub(u'[\\\/:*?"<>|]', _replace, file_basename, flags=re.UNICODE) @@ -188,7 +189,7 @@ def safe_filepath(file_path_name): input the full path and filename, splits on directory separator and calls safe_filename_leaf for each part of the path. 
''' - (drive,path) = os.path.splitdrive(file_path_name) + (drive, path) = os.path.splitdrive(file_path_name) path = os.sep.join([safe_filename_leaf(file_section) for file_section in file_path_name.rsplit(os.sep)]) if drive: return os.sep.join([drive, path]) From 6e9c0957fbb90a986d17210c7fb7dc2ce52d9915 Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Sat, 19 Aug 2017 15:21:48 +1000 Subject: [PATCH 113/639] fix typo --- salt/utils/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 1cceb88d52..4a9938dc9f 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -177,11 +177,11 @@ def safe_filename_leaf(file_basename): return urllib.quote(re_obj.group(0), safe=u'') if not isinstance(file_basename, six.text_type): # the following string is not prefixed with u - return re.sub('[\\\/:*?"<>|]', + return re.sub('[\\\\:/*?"<>|]', _replace, six.text_type(file_basename, 'utf8').encode('ascii', 'backslashreplace')) # the following string is prefixed with u - return re.sub(u'[\\\/:*?"<>|]', _replace, file_basename, flags=re.UNICODE) + return re.sub(u'[\\\\:/*?"<>|]', _replace, file_basename, flags=re.UNICODE) def safe_filepath(file_path_name): From 08ded1546e01da3a31a7e0717a8fbcf8296ea699 Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Sat, 19 Aug 2017 17:07:28 +1000 Subject: [PATCH 114/639] more lint --- salt/utils/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 4a9938dc9f..c03e0a6d39 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -171,7 +171,7 @@ def safe_filename_leaf(file_basename): i.e. only the required characters are converted by urllib.quote If the input is a PY2 String, output a PY2 String. If input is Unicode output Unicode. For consistency all platforms are treated the same. Hard coded to utf8 as its ascii compatible - windows is \ / : * ? 
" < > | posix is / + windows is \\ / : * ? " < > | posix is / ''' def _replace(re_obj): return urllib.quote(re_obj.group(0), safe=u'') From d675223f3be84101d65caf6a7d11cb6bec1672d7 Mon Sep 17 00:00:00 2001 From: darcoli Date: Sun, 20 Aug 2017 00:00:16 +0200 Subject: [PATCH 115/639] Consider saltenv from kwargs with pillarenv_from_saltenv --- salt/modules/state.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/modules/state.py b/salt/modules/state.py index fa5b997ef7..2ddee4f06c 100644 --- a/salt/modules/state.py +++ b/salt/modules/state.py @@ -269,12 +269,12 @@ def _get_opts(**kwargs): else: opts['environment'] = kwargs['saltenv'] - if 'pillarenv' in kwargs: - pillarenv = kwargs['pillarenv'] + if 'pillarenv' in kwargs or opts.get('pillarenv_from_saltenv', False): + pillarenv = kwargs.get('pillarenv') or kwargs.get('saltenv') if pillarenv is not None and not isinstance(pillarenv, six.string_types): - opts['pillarenv'] = str(kwargs['pillarenv']) + opts['pillarenv'] = str(pillarenv) else: - opts['pillarenv'] = kwargs['pillarenv'] + opts['pillarenv'] = pillarenv return opts From 13404a47b551f1cc1b16f662d5d50b5843ffcbdb Mon Sep 17 00:00:00 2001 From: Mapel88 Date: Sun, 20 Aug 2017 18:03:16 +0300 Subject: [PATCH 116/639] Fix bug #42936 - win_iis module Fix set_container_setting by adding map from string to numeric and vice versa. 
--- salt/modules/win_iis.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/salt/modules/win_iis.py b/salt/modules/win_iis.py index 9309873ac4..bc8abbbbe7 100644 --- a/salt/modules/win_iis.py +++ b/salt/modules/win_iis.py @@ -1255,6 +1255,9 @@ def set_container_setting(name, container, settings): salt '*' win_iis.set_container_setting name='MyTestPool' container='AppPools' settings="{'managedPipeLineMode': 'Integrated'}" ''' + + identityType_map2string = {'0': 'LocalSystem', '1': 'LocalService', '2': 'NetworkService', '3': 'SpecificUser', '4': 'ApplicationPoolIdentity'} + identityType_map2numeric = {'LocalSystem': '0', 'LocalService': '1', 'NetworkService': '2', 'SpecificUser': '3', 'ApplicationPoolIdentity': '4'} ps_cmd = list() container_path = r"IIS:\{0}\{1}".format(container, name) @@ -1281,6 +1284,10 @@ def set_container_setting(name, container, settings): except ValueError: value = "'{0}'".format(settings[setting]) + # Map to numeric to support server 2008 + if (setting == 'processModel.identityType' and settings[setting] in identityType_map2numeric.keys()): + value = identityType_map2numeric[settings[setting]] + ps_cmd.extend(['Set-ItemProperty', '-Path', "'{0}'".format(container_path), '-Name', "'{0}'".format(setting), @@ -1300,6 +1307,10 @@ def set_container_setting(name, container, settings): failed_settings = dict() for setting in settings: + # map identity type from numeric to string for comparing + if (setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys()): + settings[setting] = identityType_map2string[settings[setting]] + if str(settings[setting]) != str(new_settings[setting]): failed_settings[setting] = settings[setting] From dc793f9a05596c526eeb22fa7b18eb10383a4410 Mon Sep 17 00:00:00 2001 From: Mapel88 Date: Sun, 20 Aug 2017 18:06:19 +0300 Subject: [PATCH 117/639] Fix bug #42936 - win_iis state Fix container_setting by adding map from string to numeric to support server 2008 --- 
salt/states/win_iis.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/salt/states/win_iis.py b/salt/states/win_iis.py index 69d35e5c4a..b9940b5dd7 100644 --- a/salt/states/win_iis.py +++ b/salt/states/win_iis.py @@ -481,7 +481,7 @@ def container_setting(name, container, settings=None): :param str container: The type of IIS container. The container types are: AppPools, Sites, SslBindings :param str settings: A dictionary of the setting names and their values. - + Example of usage for the ``AppPools`` container: .. code-block:: yaml @@ -496,7 +496,8 @@ def container_setting(name, container, settings=None): processModel.userName: TestUser processModel.password: TestPassword processModel.identityType: SpecificUser - + + Example of usage for the ``Sites`` container: .. code-block:: yaml @@ -510,6 +511,9 @@ def container_setting(name, container, settings=None): logFile.period: Daily limits.maxUrlSegments: 32 ''' + + identityType_map2string = {'0': 'LocalSystem', '1': 'LocalService', '2': 'NetworkService', '3': 'SpecificUser', '4': 'ApplicationPoolIdentity'} + ret = {'name': name, 'changes': {}, 'comment': str(), @@ -529,6 +533,10 @@ def container_setting(name, container, settings=None): container=container, settings=settings.keys()) for setting in settings: + # map identity type from numeric to string for comparing + if (setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys()): + settings[setting] = identityType_map2string[settings[setting]] + if str(settings[setting]) != str(current_settings[setting]): ret_settings['changes'][setting] = {'old': current_settings[setting], 'new': settings[setting]} @@ -541,8 +549,8 @@ def container_setting(name, container, settings=None): ret['changes'] = ret_settings return ret - __salt__['win_iis.set_container_setting'](name=name, container=container, - settings=settings) + __salt__['win_iis.set_container_setting'](name=name, container=container, 
settings=settings) + new_settings = __salt__['win_iis.get_container_setting'](name=name, container=container, settings=settings.keys()) From 2c40a2383aff27bc6add437386c486d3ee2caddb Mon Sep 17 00:00:00 2001 From: Todd Wells Date: Sun, 20 Aug 2017 08:23:21 -0700 Subject: [PATCH 118/639] boto_elbv2 add create, delete target group states --- salt/modules/boto_elbv2.py | 20 ++++-- salt/states/boto_elbv2.py | 142 ++++++++++++++++++++++++++++++++++++- 2 files changed, 156 insertions(+), 6 deletions(-) diff --git a/salt/modules/boto_elbv2.py b/salt/modules/boto_elbv2.py index 11121b1960..4487ad1e10 100644 --- a/salt/modules/boto_elbv2.py +++ b/salt/modules/boto_elbv2.py @@ -154,7 +154,7 @@ def delete_target_group(name, region=None, key=None, keyid=None, profile=None): Delete target group. name - (string) - The Amazon Resource Name (ARN) of the resource. + (string) - Target Group Name or Amazon Resource Name (ARN). returns (bool) - True on success, False on failure. @@ -166,10 +166,22 @@ def delete_target_group(name, region=None, key=None, keyid=None, profile=None): salt myminion boto_elbv2.delete_target_group arn:aws:elasticloadbalancing:us-west-2:644138682826:targetgroup/learn1give1-api/414788a16b5cf163 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + tg = target_group_exists(name, region, key, keyid, profile) + + if not tg: + return True try: - conn.delete_target_group(TargetGroupArn=name) - log.info('Deleted target group {0}'.format(name)) + if name.startswith('arn:aws:elasticloadbalancing'): + conn.delete_target_group(TargetGroupArn=name) + log.info('Deleted target group {0}'.format(name)) + else: + tg_info = conn.describe_target_groups(Names=[name]) + if len(tg_info['TargetGroups']) != 1: + return False + arn = tg_info['TargetGroups'][0]['TargetGroupArn'] + conn.delete_target_group(TargetGroupArn=arn) + log.info('Deleted target group {0} ARN {1}'.format(name, arn)) return True except ClientError as error: log.debug(error) @@ -200,7 +212,7 
@@ def target_group_exists(name, region=None, key=None, keyid=None, profile=None): log.warning('The target group does not exist in region {0}'.format(region)) return False except ClientError as error: - log.warning(error) + log.warning('target_group_exists check for {0} returned: {1}'.format(name,error)) return False diff --git a/salt/states/boto_elbv2.py b/salt/states/boto_elbv2.py index 8f08b7431b..51b2c80d2d 100644 --- a/salt/states/boto_elbv2.py +++ b/salt/states/boto_elbv2.py @@ -52,10 +52,148 @@ def __virtual__(): ''' if 'boto_elbv2.target_group_exists' in __salt__: return 'boto_elbv2' - else - return (False, "The boto_elbv2 module cannot be loaded: boto3 library not found") + return (False, "The boto_elbv2 module cannot be loaded: boto3 library not found") + +def create_target_group(name, protocol, port, vpc_id, + region=None, key=None, keyid=None, profile=None, + health_check_protocol='HTTP', health_check_port='traffic-port', + health_check_path='/', health_check_interval_seconds=30, + health_check_timeout_seconds=5, healthy_threshold_count=5, + unhealthy_threshold_count=2, **kwargs): + + ''' + .. versionadded:: 2017.11.0 + + Create target group if not present. + + name + (string) - The name of the target group. + protocol + (string) - The protocol to use for routing traffic to the targets + port + (int) - The port on which the targets receive traffic. This port is used unless + you specify a port override when registering the traffic. + vpc_id + (string) - The identifier of the virtual private cloud (VPC). + health_check_protocol + (string) - The protocol the load balancer uses when performing health check on + targets. The default is the HTTP protocol. + health_check_port + (string) - The port the load balancer uses when performing health checks on + targets. The default is 'traffic-port', which indicates the port on which each + target receives traffic from the load balancer. 
+ health_check_path + (string) - The ping path that is the destination on the targets for health + checks. The default is /. + health_check_interval_seconds + (integer) - The approximate amount of time, in seconds, between health checks + of an individual target. The default is 30 seconds. + health_check_timeout_seconds + (integer) - The amount of time, in seconds, during which no response from a + target means a failed health check. The default is 5 seconds. + healthy_threshold_count + (integer) - The number of consecutive health checks successes required before + considering an unhealthy target healthy. The default is 5. + unhealthy_threshold_count + (integer) - The number of consecutive health check failures required before + considering a target unhealthy. The default is 2. + + returns + (bool) - True on success, False on failure. + + CLI example: + .. code-block:: yaml + + create-target: + boto_elb2.create_targets_group: + - name: myALB + - protocol: https + - port: 443 + - vpc_id: myVPC + ''' + ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} + + tg = __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile) + + if tg: + ret['result'] = True + ret['comment'] = 'Target Group {0} already exists'.format(name) + return ret + + if __opts__['test']: + ret['comment'] = 'Target Group {0} will be created'.format(name) + return ret + + state = __salt__['boto_elbv2.create_target_group'](name, + protocol, + port, + vpc_id, + region, + key, + keyid, + profile) + + if state: + changes = True + ret['changes']['target_group'] = name + ret['result'] = True + ret['comment'] = 'Target Group {0} created'.format(name) + else: + ret['result'] = False + ret['comment'] = 'Target Group {0} creation failed'.format(name) + failure = True + return ret +def delete_target_group(name, region=None, key=None, keyid=None, profile=None): + ''' + Delete target group. + + name + (string) - The Amazon Resource Name (ARN) of the resource. 
+ + returns + (bool) - True on success, False on failure. + + CLI example: + + .. code-block:: bash + + check-target: + boto_elb2.delete_targets_group: + - name: myALB + - protocol: https + - port: 443 + - vpc_id: myVPC + ''' + ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} + + tg = __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile) + + if not tg: + ret['result'] = True + ret['comment'] = 'Target Group {0} does not exists'.format(name) + return ret + + if __opts__['test']: + ret['comment'] = 'Target Group {0} will be deleted'.format(name) + return ret + + state = __salt__['boto_elbv2.delete_target_group'](name, + region, + key, + keyid, + profile) + + if state: + ret['result'] = True + ret['changes']['target_group'] = name + ret['comment'] = 'Target Group {0} deleted'.format(name) + else: + ret['result'] = False + ret['comment'] = 'Target Group {0} deletion failed'.format(name) + failure = True + return ret def targets_registered(name, targets, region=None, key=None, keyid=None, profile=None, **kwargs): From 94179c44f5a341b383b11bd0699c1c4478be7e2a Mon Sep 17 00:00:00 2001 From: Todd Wells Date: Sun, 13 Aug 2017 16:24:39 -0700 Subject: [PATCH 119/639] boto_elbv2 state add error message to virtual func --- salt/states/boto_elbv2.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/states/boto_elbv2.py b/salt/states/boto_elbv2.py index 4bf0e12bac..8f08b7431b 100644 --- a/salt/states/boto_elbv2.py +++ b/salt/states/boto_elbv2.py @@ -50,7 +50,11 @@ def __virtual__(): ''' Only load if boto is available. 
''' - return 'boto_elbv2' if 'boto_elbv2.target_group_exists' in __salt__ else False + if 'boto_elbv2.target_group_exists' in __salt__: + return 'boto_elbv2' + else + return (False, "The boto_elbv2 module cannot be loaded: boto3 library not found") + def targets_registered(name, targets, region=None, key=None, keyid=None, From f3a9b833ecd4086ceb43bb215fda0a37aa2308ee Mon Sep 17 00:00:00 2001 From: Todd Wells Date: Sun, 20 Aug 2017 08:23:21 -0700 Subject: [PATCH 120/639] boto_elbv2 add create, delete target group states --- salt/modules/boto_elbv2.py | 20 ++++-- salt/states/boto_elbv2.py | 142 ++++++++++++++++++++++++++++++++++++- 2 files changed, 156 insertions(+), 6 deletions(-) diff --git a/salt/modules/boto_elbv2.py b/salt/modules/boto_elbv2.py index 11121b1960..4487ad1e10 100644 --- a/salt/modules/boto_elbv2.py +++ b/salt/modules/boto_elbv2.py @@ -154,7 +154,7 @@ def delete_target_group(name, region=None, key=None, keyid=None, profile=None): Delete target group. name - (string) - The Amazon Resource Name (ARN) of the resource. + (string) - Target Group Name or Amazon Resource Name (ARN). returns (bool) - True on success, False on failure. 
@@ -166,10 +166,22 @@ def delete_target_group(name, region=None, key=None, keyid=None, profile=None): salt myminion boto_elbv2.delete_target_group arn:aws:elasticloadbalancing:us-west-2:644138682826:targetgroup/learn1give1-api/414788a16b5cf163 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + tg = target_group_exists(name, region, key, keyid, profile) + + if not tg: + return True try: - conn.delete_target_group(TargetGroupArn=name) - log.info('Deleted target group {0}'.format(name)) + if name.startswith('arn:aws:elasticloadbalancing'): + conn.delete_target_group(TargetGroupArn=name) + log.info('Deleted target group {0}'.format(name)) + else: + tg_info = conn.describe_target_groups(Names=[name]) + if len(tg_info['TargetGroups']) != 1: + return False + arn = tg_info['TargetGroups'][0]['TargetGroupArn'] + conn.delete_target_group(TargetGroupArn=arn) + log.info('Deleted target group {0} ARN {1}'.format(name, arn)) return True except ClientError as error: log.debug(error) @@ -200,7 +212,7 @@ def target_group_exists(name, region=None, key=None, keyid=None, profile=None): log.warning('The target group does not exist in region {0}'.format(region)) return False except ClientError as error: - log.warning(error) + log.warning('target_group_exists check for {0} returned: {1}'.format(name,error)) return False diff --git a/salt/states/boto_elbv2.py b/salt/states/boto_elbv2.py index 8f08b7431b..51b2c80d2d 100644 --- a/salt/states/boto_elbv2.py +++ b/salt/states/boto_elbv2.py @@ -52,10 +52,148 @@ def __virtual__(): ''' if 'boto_elbv2.target_group_exists' in __salt__: return 'boto_elbv2' - else - return (False, "The boto_elbv2 module cannot be loaded: boto3 library not found") + return (False, "The boto_elbv2 module cannot be loaded: boto3 library not found") + +def create_target_group(name, protocol, port, vpc_id, + region=None, key=None, keyid=None, profile=None, + health_check_protocol='HTTP', health_check_port='traffic-port', + health_check_path='/', 
health_check_interval_seconds=30, + health_check_timeout_seconds=5, healthy_threshold_count=5, + unhealthy_threshold_count=2, **kwargs): + + ''' + .. versionadded:: 2017.11.0 + + Create target group if not present. + + name + (string) - The name of the target group. + protocol + (string) - The protocol to use for routing traffic to the targets + port + (int) - The port on which the targets receive traffic. This port is used unless + you specify a port override when registering the traffic. + vpc_id + (string) - The identifier of the virtual private cloud (VPC). + health_check_protocol + (string) - The protocol the load balancer uses when performing health check on + targets. The default is the HTTP protocol. + health_check_port + (string) - The port the load balancer uses when performing health checks on + targets. The default is 'traffic-port', which indicates the port on which each + target receives traffic from the load balancer. + health_check_path + (string) - The ping path that is the destination on the targets for health + checks. The default is /. + health_check_interval_seconds + (integer) - The approximate amount of time, in seconds, between health checks + of an individual target. The default is 30 seconds. + health_check_timeout_seconds + (integer) - The amount of time, in seconds, during which no response from a + target means a failed health check. The default is 5 seconds. + healthy_threshold_count + (integer) - The number of consecutive health checks successes required before + considering an unhealthy target healthy. The default is 5. + unhealthy_threshold_count + (integer) - The number of consecutive health check failures required before + considering a target unhealthy. The default is 2. + + returns + (bool) - True on success, False on failure. + + CLI example: + .. 
code-block:: yaml + + create-target: + boto_elb2.create_targets_group: + - name: myALB + - protocol: https + - port: 443 + - vpc_id: myVPC + ''' + ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} + + tg = __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile) + + if tg: + ret['result'] = True + ret['comment'] = 'Target Group {0} already exists'.format(name) + return ret + + if __opts__['test']: + ret['comment'] = 'Target Group {0} will be created'.format(name) + return ret + + state = __salt__['boto_elbv2.create_target_group'](name, + protocol, + port, + vpc_id, + region, + key, + keyid, + profile) + + if state: + changes = True + ret['changes']['target_group'] = name + ret['result'] = True + ret['comment'] = 'Target Group {0} created'.format(name) + else: + ret['result'] = False + ret['comment'] = 'Target Group {0} creation failed'.format(name) + failure = True + return ret +def delete_target_group(name, region=None, key=None, keyid=None, profile=None): + ''' + Delete target group. + + name + (string) - The Amazon Resource Name (ARN) of the resource. + + returns + (bool) - True on success, False on failure. + + CLI example: + + .. 
code-block:: bash + + check-target: + boto_elb2.delete_targets_group: + - name: myALB + - protocol: https + - port: 443 + - vpc_id: myVPC + ''' + ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} + + tg = __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile) + + if not tg: + ret['result'] = True + ret['comment'] = 'Target Group {0} does not exists'.format(name) + return ret + + if __opts__['test']: + ret['comment'] = 'Target Group {0} will be deleted'.format(name) + return ret + + state = __salt__['boto_elbv2.delete_target_group'](name, + region, + key, + keyid, + profile) + + if state: + ret['result'] = True + ret['changes']['target_group'] = name + ret['comment'] = 'Target Group {0} deleted'.format(name) + else: + ret['result'] = False + ret['comment'] = 'Target Group {0} deletion failed'.format(name) + failure = True + return ret def targets_registered(name, targets, region=None, key=None, keyid=None, profile=None, **kwargs): From 3a27e8ab3d1127582e0db5a7aeabd5ba1370bd5f Mon Sep 17 00:00:00 2001 From: Todd Wells Date: Sun, 20 Aug 2017 09:19:56 -0700 Subject: [PATCH 121/639] boto_elbv2 module Linter fixes Style fixes based on results of linter --- salt/modules/boto_elbv2.py | 109 +++++++++++++++++++++++-------------- 1 file changed, 68 insertions(+), 41 deletions(-) diff --git a/salt/modules/boto_elbv2.py b/salt/modules/boto_elbv2.py index 4487ad1e10..7fd1833a8b 100644 --- a/salt/modules/boto_elbv2.py +++ b/salt/modules/boto_elbv2.py @@ -72,11 +72,20 @@ def __virtual__(): return True -def create_target_group(name, protocol, port, vpc_id, - region=None, key=None, keyid=None, profile=None, - health_check_protocol='HTTP', health_check_port='traffic-port', - health_check_path='/', health_check_interval_seconds=30, - health_check_timeout_seconds=5, healthy_threshold_count=5, +def create_target_group(name, + protocol, + port, + vpc_id, + region=None, + key=None, + keyid=None, + profile=None, + health_check_protocol='HTTP', + 
health_check_port='traffic-port', + health_check_path='/', + health_check_interval_seconds=30, + health_check_timeout_seconds=5, + healthy_threshold_count=5, unhealthy_threshold_count=2): ''' Create target group if not present. @@ -125,31 +134,35 @@ def create_target_group(name, protocol, port, vpc_id, conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if target_group_exists(name, region, key, keyid, profile): return True - else: - try: - lb = conn.create_target_group(Name=name, Protocol=protocol, Port=port, - VpcId=vpc_id, HealthCheckProtocol=health_check_protocol, - HealthCheckPort=health_check_port, - HealthCheckPath=health_check_path, - HealthCheckIntervalSeconds=health_check_interval_seconds, - HealthCheckTimeoutSeconds=health_check_timeout_seconds, - HealthyThresholdCount=healthy_threshold_count, - UnhealthyThresholdCount=unhealthy_threshold_count) - if lb: - log.info('Created ALB {0}: {1}'.format(name, - lb['TargetGroups'][0]['TargetGroupArn'])) - return True - else: - log.error('Failed to create ALB {0}'.format(name)) - return False - except ClientError as error: - log.debug(error) - log.error('Failed to create ALB {0}: {1}: {2}'.format(name, - error.response['Error']['Code'], - error.response['Error']['Message'])) + + try: + alb = conn.create_target_group(Name=name, Protocol=protocol, Port=port, + VpcId=vpc_id, HealthCheckProtocol=health_check_protocol, + HealthCheckPort=health_check_port, + HealthCheckPath=health_check_path, + HealthCheckIntervalSeconds=health_check_interval_seconds, + HealthCheckTimeoutSeconds=health_check_timeout_seconds, + HealthyThresholdCount=healthy_threshold_count, + UnhealthyThresholdCount=unhealthy_threshold_count) + if alb: + log.info('Created ALB {0}: {1}'.format(name, + alb['TargetGroups'][0]['TargetGroupArn'])) + return True + else: + log.error('Failed to create ALB {0}'.format(name)) + return False + except ClientError as error: + log.debug(error) + log.error('Failed to create ALB {0}: {1}: 
{2}'.format(name, + error.response['Error']['Code'], + error.response['Error']['Message'])) -def delete_target_group(name, region=None, key=None, keyid=None, profile=None): +def delete_target_group(name, + region=None, + key=None, + keyid=None, + profile=None): ''' Delete target group. @@ -166,9 +179,8 @@ def delete_target_group(name, region=None, key=None, keyid=None, profile=None): salt myminion boto_elbv2.delete_target_group arn:aws:elasticloadbalancing:us-west-2:644138682826:targetgroup/learn1give1-api/414788a16b5cf163 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) - tg = target_group_exists(name, region, key, keyid, profile) - if not tg: + if not target_group_exists(name, region, key, keyid, profile): return True try: @@ -189,7 +201,11 @@ def delete_target_group(name, region=None, key=None, keyid=None, profile=None): return False -def target_group_exists(name, region=None, key=None, keyid=None, profile=None): +def target_group_exists(name, + region=None, + key=None, + keyid=None, + profile=None): ''' Check to see if an target group exists. @@ -212,11 +228,16 @@ def target_group_exists(name, region=None, key=None, keyid=None, profile=None): log.warning('The target group does not exist in region {0}'.format(region)) return False except ClientError as error: - log.warning('target_group_exists check for {0} returned: {1}'.format(name,error)) + log.warning('target_group_exists check for {0} returned: {1}'.format(name, error)) return False -def describe_target_health(name, targets=None, region=None, key=None, keyid=None, profile=None): +def describe_target_health(name, + targets=None, + region=None, + key=None, + keyid=None, + profile=None): ''' Get the curret health check status for targets in a target group. 
@@ -246,8 +267,12 @@ def describe_target_health(name, targets=None, region=None, key=None, keyid=None return {} -def register_targets(name, targets, region=None, key=None, keyid=None, - profile=None): +def register_targets(name, + targets, + region=None, + key=None, + keyid=None, + profile=None): ''' Register targets to a target froup of an ALB. ``targets`` is either a instance id string or a list of instance id's. @@ -276,15 +301,18 @@ def register_targets(name, targets, region=None, key=None, keyid=None, registered_targets = conn.register_targets(TargetGroupArn=name, Targets=targetsdict) if registered_targets: return True - else: - return False + return False except ClientError as error: log.warning(error) return False -def deregister_targets(name, targets, region=None, key=None, keyid=None, - profile=None): +def deregister_targets(name, + targets, + region=None, + key=None, + keyid=None, + profile=None): ''' Deregister targets to a target froup of an ALB. ``targets`` is either a instance id string or a list of instance id's. @@ -313,8 +341,7 @@ def deregister_targets(name, targets, region=None, key=None, keyid=None, registered_targets = conn.deregister_targets(TargetGroupArn=name, Targets=targetsdict) if registered_targets: return True - else: - return False + return False except ClientError as error: log.warning(error) return False From 4ebfaa407093d900dfc23b19209c7e8b79a7b494 Mon Sep 17 00:00:00 2001 From: Todd Wells Date: Sun, 20 Aug 2017 10:21:22 -0700 Subject: [PATCH 122/639] boto_elbv2 state Linter fixes Some of these changes also make the states more consistent. 
--- salt/states/boto_elbv2.py | 85 +++++++++++++++++++++++---------------- 1 file changed, 50 insertions(+), 35 deletions(-) diff --git a/salt/states/boto_elbv2.py b/salt/states/boto_elbv2.py index 51b2c80d2d..e5ef2b1397 100644 --- a/salt/states/boto_elbv2.py +++ b/salt/states/boto_elbv2.py @@ -113,9 +113,7 @@ def create_target_group(name, protocol, port, vpc_id, ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} - tg = __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile) - - if tg: + if __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile): ret['result'] = True ret['comment'] = 'Target Group {0} already exists'.format(name) return ret @@ -128,20 +126,26 @@ def create_target_group(name, protocol, port, vpc_id, protocol, port, vpc_id, - region, - key, - keyid, - profile) + region=region, + key=key, + keyid=keyid, + profile=profile, + health_check_protocol=health_check_protocol, + health_check_port=health_check_port, + health_check_path=health_check_path, + health_check_interval_seconds=health_check_interval_seconds, + health_check_timeout_seconds=health_check_timeout_seconds, + healthy_threshold_count=healthy_threshold_count, + unhealthy_threshold_count=unhealthy_threshold_count, + **kwargs) if state: - changes = True ret['changes']['target_group'] = name ret['result'] = True ret['comment'] = 'Target Group {0} created'.format(name) else: ret['result'] = False ret['comment'] = 'Target Group {0} creation failed'.format(name) - failure = True return ret @@ -168,9 +172,7 @@ def delete_target_group(name, region=None, key=None, keyid=None, profile=None): ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} - tg = __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile) - - if not tg: + if not __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile): ret['result'] = True ret['comment'] = 'Target Group {0} does not exists'.format(name) return ret 
@@ -180,10 +182,10 @@ def delete_target_group(name, region=None, key=None, keyid=None, profile=None): return ret state = __salt__['boto_elbv2.delete_target_group'](name, - region, - key, - keyid, - profile) + region=region, + key=key, + keyid=keyid, + profile=profile) if state: ret['result'] = True @@ -192,7 +194,6 @@ def delete_target_group(name, region=None, key=None, keyid=None, profile=None): else: ret['result'] = False ret['comment'] = 'Target Group {0} deletion failed'.format(name) - failure = True return ret def targets_registered(name, targets, region=None, key=None, keyid=None, @@ -219,10 +220,13 @@ def targets_registered(name, targets, region=None, key=None, keyid=None, - instance-id2 ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} - tg = __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile) - if tg: - health = __salt__['boto_elbv2.describe_target_health'](name, region=region, key=key, keyid=keyid, profile=profile) + if __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile): + health = __salt__['boto_elbv2.describe_target_health'](name, + region=region, + key=key, + keyid=keyid, + profile=profile) failure = False changes = False newhealth_mock = copy.copy(health) @@ -241,10 +245,10 @@ def targets_registered(name, targets, region=None, key=None, keyid=None, else: state = __salt__['boto_elbv2.register_targets'](name, targets, - region, - key, - keyid, - profile) + region=region, + key=key, + keyid=keyid, + profile=profile) if state: changes = True ret['result'] = True @@ -261,7 +265,11 @@ def targets_registered(name, targets, region=None, key=None, keyid=None, ret['changes']['new'] = newhealth_mock else: ret['comment'] = 'Target Group {0} has been changed'.format(name) - newhealth = __salt__['boto_elbv2.describe_target_health'](name, region=region, key=key, keyid=keyid, profile=profile) + newhealth = __salt__['boto_elbv2.describe_target_health'](name, + region=region, + key=key, + 
keyid=keyid, + profile=profile) ret['changes']['new'] = newhealth return ret else: @@ -270,7 +278,7 @@ def targets_registered(name, targets, region=None, key=None, keyid=None, def targets_deregistered(name, targets, region=None, key=None, keyid=None, - profile=None, **kwargs): + profile=None, **kwargs): ''' Remove targets to an Application Load Balancer target group. @@ -292,9 +300,12 @@ def targets_deregistered(name, targets, region=None, key=None, keyid=None, - instance-id2 ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} - tg = __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile) - if tg: - health = __salt__['boto_elbv2.describe_target_health'](name, region=region, key=key, keyid=keyid, profile=profile) + if __salt__['boto_elbv2.target_group_exists'](name, region, key, keyid, profile): + health = __salt__['boto_elbv2.describe_target_health'](name, + region=region, + key=key, + keyid=keyid, + profile=profile) failure = False changes = False newhealth_mock = copy.copy(health) @@ -310,11 +321,11 @@ def targets_deregistered(name, targets, region=None, key=None, keyid=None, newhealth_mock.update({target: "draining"}) else: state = __salt__['boto_elbv2.deregister_targets'](name, - targets, - region, - key, - keyid, - profile) + targets, + region=region, + key=key, + keyid=keyid, + profile=profile) if state: changes = True ret['result'] = True @@ -331,7 +342,11 @@ def targets_deregistered(name, targets, region=None, key=None, keyid=None, ret['changes']['new'] = newhealth_mock else: ret['comment'] = 'Target Group {0} has been changed'.format(name) - newhealth = __salt__['boto_elbv2.describe_target_health'](name, region, key, keyid, profile) + newhealth = __salt__['boto_elbv2.describe_target_health'](name, + region, + key, + keyid, + profile) ret['changes']['new'] = newhealth return ret else: From adc98e08e96670bb95571dd4f870506a120673ff Mon Sep 17 00:00:00 2001 From: Todd Wells Date: Sun, 20 Aug 2017 10:51:29 -0700 Subject: 
[PATCH 123/639] One more style/linter change --- salt/states/boto_elbv2.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/states/boto_elbv2.py b/salt/states/boto_elbv2.py index e5ef2b1397..0fa22f99a5 100644 --- a/salt/states/boto_elbv2.py +++ b/salt/states/boto_elbv2.py @@ -343,10 +343,10 @@ def targets_deregistered(name, targets, region=None, key=None, keyid=None, else: ret['comment'] = 'Target Group {0} has been changed'.format(name) newhealth = __salt__['boto_elbv2.describe_target_health'](name, - region, - key, - keyid, - profile) + region=region, + key=key, + keyid=keyid, + profile=profile) ret['changes']['new'] = newhealth return ret else: From d4b113acdf8378d71106d0f669735a56062a70b0 Mon Sep 17 00:00:00 2001 From: Tobias Macey Date: Mon, 21 Aug 2017 13:29:00 -0400 Subject: [PATCH 124/639] Fixed issue with silently passing all tests in Testinfra module The Testinfra module had a line where the collection of passed arguments was silently overwritten so that it would not actually perform any assertions. Updated the variable names to address the issue of the parameters being clobbered so that asertions are performed properly. 
--- salt/modules/testinframod.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/salt/modules/testinframod.py b/salt/modules/testinframod.py index d705a3a2c9..265f7ad9b0 100644 --- a/salt/modules/testinframod.py +++ b/salt/modules/testinframod.py @@ -242,6 +242,8 @@ def _copy_function(module_name, name=None): elif hasattr(mod, '__call__'): mod_sig = inspect.getargspec(mod.__call__) parameters = mod_sig.args + log.debug('Parameters accepted by module {0}: {1}'.format(module_name, + parameters)) additional_args = {} for arg in set(parameters).intersection(set(methods.keys())): additional_args[arg] = methods.pop(arg) @@ -251,12 +253,15 @@ def _copy_function(module_name, name=None): else: modinstance = mod() except TypeError: - modinstance = None - methods = {} + log.exception('Module failed to instantiate') + raise + valid_methods = {} + log.debug('Called methods are: {0}'.format(methods)) for meth_name in methods: if not meth_name.startswith('_'): - methods[meth_name] = methods[meth_name] - for meth, arg in methods.items(): + valid_methods[meth_name] = methods[meth_name] + log.debug('Valid methods are: {0}'.format(valid_methods)) + for meth, arg in valid_methods.items(): result = _get_method_result(mod, modinstance, meth, arg) assertion_result = _apply_assertion(arg, result) if not assertion_result: From 93390de88b71471731e9e8237f4c62e30d98f4a2 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 16 Aug 2017 15:34:37 -0600 Subject: [PATCH 125/639] Fix malformed requisite for Windows --- salt/state.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/salt/state.py b/salt/state.py index 8d1ce6dae0..b1bbf4d74b 100644 --- a/salt/state.py +++ b/salt/state.py @@ -2122,11 +2122,14 @@ class State(object): reqs[r_state].append(chunk) continue try: - if (fnmatch.fnmatch(chunk['name'], req_val) or - fnmatch.fnmatch(chunk['__id__'], req_val)): - if req_key == 'id' or chunk['state'] == req_key: - found = True - 
reqs[r_state].append(chunk) + if isinstance(req_val, six.string_types): + if (fnmatch.fnmatch(chunk['name'], req_val) or + fnmatch.fnmatch(chunk['__id__'], req_val)): + if req_key == 'id' or chunk['state'] == req_key: + found = True + reqs[r_state].append(chunk) + else: + raise KeyError except KeyError as exc: raise SaltRenderError( 'Could not locate requisite of [{0}] present in state with name [{1}]'.format( From 4f4e34c79f25a1153d4620c2af74e2be22e19ff7 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 21 Aug 2017 18:39:09 -0600 Subject: [PATCH 126/639] Fix group state for Windows group no longer fails when domain is not specified group.present now accepts group names without domains local groups are assumed if domain is not specified documentation improved moved fix_local_user function to salt.utils.win_functions now called get_sam_name --- salt/modules/win_groupadd.py | 125 ++++++++++++++++++++++++++--------- salt/states/group.py | 99 +++++++++++++++++++++------ salt/utils/win_functions.py | 19 ++++++ 3 files changed, 190 insertions(+), 53 deletions(-) diff --git a/salt/modules/win_groupadd.py b/salt/modules/win_groupadd.py index d466380d70..4368bb0201 100644 --- a/salt/modules/win_groupadd.py +++ b/salt/modules/win_groupadd.py @@ -12,6 +12,7 @@ from __future__ import absolute_import # Import salt libs import salt.utils +import salt.utils.win_functions try: @@ -35,10 +36,18 @@ def __virtual__(): return (False, "Module win_groupadd: module only works on Windows systems") -def add(name, gid=None, system=False): +def add(name, **kwargs): ''' Add the specified group + Args: + + name (str): + The name of the group to add + + Returns: + dict: A dictionary of results + CLI Example: .. 
code-block:: bash @@ -57,21 +66,16 @@ def add(name, gid=None, system=False): compObj = nt.GetObject('', 'WinNT://.,computer') newGroup = compObj.Create('group', name) newGroup.SetInfo() - ret['changes'].append(( - 'Successfully created group {0}' - ).format(name)) + ret['changes'].append('Successfully created group {0}'.format(name)) except pywintypes.com_error as com_err: ret['result'] = False if len(com_err.excepinfo) >= 2: friendly_error = com_err.excepinfo[2].rstrip('\r\n') - ret['comment'] = ( - 'Failed to create group {0}. {1}' - ).format(name, friendly_error) + ret['comment'] = 'Failed to create group {0}. {1}' \ + ''.format(name, friendly_error) else: ret['result'] = None - ret['comment'] = ( - 'The group {0} already exists.' - ).format(name) + ret['comment'] = 'The group {0} already exists.'.format(name) return ret @@ -80,6 +84,14 @@ def delete(name): ''' Remove the named group + Args: + + name (str): + The name of the group to remove + + Returns: + dict: A dictionary of results + CLI Example: .. code-block:: bash @@ -118,6 +130,14 @@ def info(name): ''' Return information about a group + Args: + + name (str): + The name of the group for which to get information + + Returns: + dict: A dictionary of information about the group + CLI Example: .. code-block:: bash @@ -151,6 +171,17 @@ def getent(refresh=False): ''' Return info on all groups + Args: + + refresh (bool): + Refresh the info for all groups in ``__context__``. If False only + the groups in ``__context__`` wil be returned. If True the + ``__context__`` will be refreshed with current data and returned. + Default is False + + Returns: + A list of groups and their information + CLI Example: .. 
code-block:: bash @@ -184,14 +215,24 @@ def getent(refresh=False): def adduser(name, username): ''' - add a user to a group + Add a user to a group + + Args: + + name (str): + The name of the group to modify + + username (str): + The name of the user to add to the group + + Returns: + dict: A dictionary of results CLI Example: .. code-block:: bash salt '*' group.adduser foo username - ''' ret = {'name': name, @@ -209,7 +250,7 @@ def adduser(name, username): '/', '\\').encode('ascii', 'backslashreplace').lower()) try: - if __fixlocaluser(username.lower()) not in existingMembers: + if salt.utils.win_functions.get_sam_name(username) not in existingMembers: if not __opts__['test']: groupObj.Add('WinNT://' + username.replace('\\', '/')) @@ -233,14 +274,24 @@ def adduser(name, username): def deluser(name, username): ''' - remove a user from a group + Remove a user from a group + + Args: + + name (str): + The name of the group to modify + + username (str): + The name of the user to remove from the group + + Returns: + dict: A dictionary of results CLI Example: .. code-block:: bash salt '*' group.deluser foo username - ''' ret = {'name': name, @@ -258,7 +309,7 @@ def deluser(name, username): '/', '\\').encode('ascii', 'backslashreplace').lower()) try: - if __fixlocaluser(username.lower()) in existingMembers: + if salt.utils.win_functions.get_sam_name(username) in existingMembers: if not __opts__['test']: groupObj.Remove('WinNT://' + username.replace('\\', '/')) @@ -282,14 +333,25 @@ def deluser(name, username): def members(name, members_list): ''' - remove a user from a group + Ensure a group contains only the members in the list + + Args: + + name (str): + The name of the group to modify + + members_list (str): + A single user or a comma separated list of users. The group will + contain only the users specified in this list. + + Returns: + dict: A dictionary of results CLI Example: .. 
code-block:: bash salt '*' group.members foo 'user1,user2,user3' - ''' ret = {'name': name, @@ -297,7 +359,7 @@ def members(name, members_list): 'changes': {'Users Added': [], 'Users Removed': []}, 'comment': []} - members_list = [__fixlocaluser(thisMember) for thisMember in members_list.lower().split(",")] + members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list.split(",")] if not isinstance(members_list, list): ret['result'] = False ret['comment'].append('Members is not a list object') @@ -364,27 +426,26 @@ def members(name, members_list): return ret -def __fixlocaluser(username): - ''' - prefixes a username w/o a backslash with the computername - - i.e. __fixlocaluser('Administrator') would return 'computername\administrator' - ''' - if '\\' not in username: - username = ('{0}\\{1}').format(__salt__['grains.get']('host'), username) - - return username.lower() - - def list_groups(refresh=False): ''' Return a list of groups + Args: + + refresh (bool): + Refresh the info for all groups in ``__context__``. If False only + the groups in ``__context__`` wil be returned. If True, the + ``__context__`` will be refreshed with current data and returned. + Default is False + + Returns: + list: A list of groups on the machine + CLI Example: .. code-block:: bash - salt '*' group.getent + salt '*' group.list_groups ''' if 'group.list_groups' in __context__ and not refresh: return __context__['group.getent'] diff --git a/salt/states/group.py b/salt/states/group.py index 8218e415d6..78f3568c74 100644 --- a/salt/states/group.py +++ b/salt/states/group.py @@ -3,8 +3,13 @@ Management of user groups ========================= -The group module is used to create and manage unix group settings, groups -can be either present or absent: +The group module is used to create and manage group settings, groups can be +either present or absent. User/Group names can be passed to the ``adduser``, +``deluser``, and ``members`` parameters. 
``adduser`` and ``deluser`` can be used +together but not with ``members``. + +In Windows, if no domain is specified in the user or group name (ie: +`DOMAIN\username``) the module will assume a local user or group. .. code-block:: yaml @@ -36,6 +41,10 @@ import sys # Import 3rd-party libs import salt.ext.six as six +# Import Salt libs +import salt.utils +import salt.utils.win_functions + def _changes(name, gid=None, @@ -50,6 +59,18 @@ def _changes(name, if not lgrp: return False + # User and Domain names are not case sensitive in Windows. Let's make them + # all lower case so we can compare properly + if salt.utils.is_windows(): + if lgrp['members']: + lgrp['members'] = [user.lower() for user in lgrp['members']] + if members: + members = [salt.utils.win_functions.get_sam_name(user) for user in members] + if addusers: + addusers = [salt.utils.win_functions.get_sam_name(user) for user in addusers] + if delusers: + delusers = [salt.utils.win_functions.get_sam_name(user) for user in delusers] + change = {} if gid: if lgrp['gid'] != gid: @@ -57,7 +78,7 @@ def _changes(name, if members: # -- if new member list if different than the current - if set(lgrp['members']) ^ set(members): + if set(lgrp['members']).symmetric_difference(members): change['members'] = members if addusers: @@ -82,28 +103,55 @@ def present(name, ''' Ensure that a group is present - name - The name of the group to manage + Args: - gid - The group id to assign to the named group; if left empty, then the next - available group id will be assigned + name (str): + The name of the group to manage - system - Whether or not the named group is a system group. This is essentially - the '-r' option of 'groupadd'. + gid (str): + The group id to assign to the named group; if left empty, then the + next available group id will be assigned. Ignored on Windows - addusers - List of additional users to be added as a group members. + system (bool): + Whether or not the named group is a system group. 
This is essentially + the '-r' option of 'groupadd'. Ignored on Windows - delusers - Ensure these user are removed from the group membership. + addusers (list): + List of additional users to be added as a group members. Cannot + conflict with names in delusers. Cannot be used in conjunction with + members. - members - Replace existing group members with a list of new members. + delusers (list): + Ensure these user are removed from the group membership. Cannot + conflict with names in addusers. Cannot be used in conjunction with + members. - Note: Options 'members' and 'addusers/delusers' are mutually exclusive and - can not be used together. + members (list): + Replace existing group members with a list of new members. Cannot be + used in conjunction with addusers or delusers. + + Example: + + .. code-block:: yaml + + # Adds DOMAIN\db_admins and Administrators to the local db_admin group + # Removes Users + db_admin: + group.present: + - addusers: + - DOMAIN\db_admins + - Administrators + - delusers: + - Users + + # Ensures only DOMAIN\domain_admins and the local Administrator are + # members of the local Administrators group. All other users are + # removed + Administrators: + group.present: + - members: + - DOMAIN\domain_admins + - Administrator ''' ret = {'name': name, 'changes': {}, @@ -233,8 +281,17 @@ def absent(name): ''' Ensure that the named group is absent - name - The name of the group to remove + Args: + name (str): + The name of the group to remove + + Example: + + .. 
code-block:: yaml + + # Removes the local group `db_admin` + db_admin: + group.absent ''' ret = {'name': name, 'changes': {}, diff --git a/salt/utils/win_functions.py b/salt/utils/win_functions.py index 23ee3edf04..4e3ec9663c 100644 --- a/salt/utils/win_functions.py +++ b/salt/utils/win_functions.py @@ -4,6 +4,9 @@ Various functions to be used by windows during start up and to monkey patch missing functions in other modules ''' from __future__ import absolute_import +import platform + +# Import Salt Libs from salt.exceptions import CommandExecutionError # Import 3rd Party Libs @@ -138,3 +141,19 @@ def get_current_user(): return False return user_name + + +def get_sam_name(username): + ''' + Gets the SAM name for a user. It basically prefixes a username without a + backslash with the computer name. If the username contains a backslash, it + is returned as is. + + Everything is returned lower case + + i.e. salt.utils.fix_local_user('Administrator') would return 'computername\administrator' + ''' + if '\\' not in username: + username = '{0}\\{1}'.format(platform.node(), username) + + return username.lower() From 9ffe315d7d12ad853b4404681526e8c59770d859 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 21 Aug 2017 18:45:11 -0600 Subject: [PATCH 127/639] Add kwargs --- salt/modules/win_groupadd.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/modules/win_groupadd.py b/salt/modules/win_groupadd.py index 4368bb0201..f584a0ab94 100644 --- a/salt/modules/win_groupadd.py +++ b/salt/modules/win_groupadd.py @@ -80,7 +80,7 @@ def add(name, **kwargs): return ret -def delete(name): +def delete(name, **kwargs): ''' Remove the named group @@ -213,7 +213,7 @@ def getent(refresh=False): return ret -def adduser(name, username): +def adduser(name, username, **kwargs): ''' Add a user to a group @@ -272,7 +272,7 @@ def adduser(name, username): return ret -def deluser(name, username): +def deluser(name, username, **kwargs): ''' Remove a user from a group 
@@ -331,7 +331,7 @@ def deluser(name, username): return ret -def members(name, members_list): +def members(name, members_list, **kwargs): ''' Ensure a group contains only the members in the list From c525a06070f6c8867da81de70871de5ad2d12347 Mon Sep 17 00:00:00 2001 From: vnitinv Date: Tue, 22 Aug 2017 11:14:23 +0530 Subject: [PATCH 128/639] check queue before calling rpc for ping/keep-alive --- salt/proxy/junos.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/salt/proxy/junos.py b/salt/proxy/junos.py index f6d8baff66..2f70750b0b 100644 --- a/salt/proxy/junos.py +++ b/salt/proxy/junos.py @@ -157,16 +157,18 @@ def ping(): Ping? Pong! ''' - try: - thisproxy['conn'].rpc.file_list(path='/dev/null', dev_timeout=2) - - except RpcTimeoutError: + dev = conn() + # call rpc only if ncclient queue is empty. If not empty that means other + # rpc call is going on. + if hasattr(dev, '_session') and not dev._session._q.empty(): try: - thisproxy['conn'].close() - except (RpcError, ConnectError): - pass - - return thisproxy['conn'].connected + dev.rpc.file_list(path='/dev/null', dev_timeout=2) + except RpcTimeoutError: + try: + dev.close() + except (RpcError, ConnectError): + pass + return dev.connected def shutdown(opts): From af743ff6c34b9e03e8865d1fa065af218b0dfb77 Mon Sep 17 00:00:00 2001 From: Denys Havrysh Date: Tue, 22 Aug 2017 12:10:20 +0300 Subject: [PATCH 129/639] [DOCS] Add missing `utils` sub-dir listed for `extension_modules` --- conf/master | 9 ++++----- doc/ref/configuration/master.rst | 4 ++-- doc/topics/utils/index.rst | 4 ++-- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/conf/master b/conf/master index 1fc76cb89a..8c1db0ae67 100644 --- a/conf/master +++ b/conf/master @@ -59,15 +59,14 @@ # Directory for custom modules. This directory can contain subdirectories for # each of Salt's module types such as "runners", "output", "wheel", "modules", -# "states", "returners", etc. 
-#extension_modules: +# "states", "returners", "engines", "utils", etc. +#extension_modules: /var/cache/salt/master/extmods # Directory for custom modules. This directory can contain subdirectories for # each of Salt's module types such as "runners", "output", "wheel", "modules", -# "states", "returners", "engines", etc. +# "states", "returners", "engines", "utils", etc. # Like 'extension_modules' but can take an array of paths -#module_dirs: -# - /var/cache/salt/minion/extmods +#module_dirs: [] # Verify and set permissions on configuration directories at startup: #verify_env: True diff --git a/doc/ref/configuration/master.rst b/doc/ref/configuration/master.rst index fd310773bf..815b8a6d97 100644 --- a/doc/ref/configuration/master.rst +++ b/doc/ref/configuration/master.rst @@ -180,8 +180,8 @@ The directory to store the pki authentication keys. Directory for custom modules. This directory can contain subdirectories for each of Salt's module types such as ``runners``, ``output``, ``wheel``, -``modules``, ``states``, ``returners``, ``engines``, etc. This path is appended to -:conf_master:`root_dir`. +``modules``, ``states``, ``returners``, ``engines``, ``utils``, etc. +This path is appended to :conf_master:`root_dir`. .. code-block:: yaml diff --git a/doc/topics/utils/index.rst b/doc/topics/utils/index.rst index 44380f3541..19a0974d29 100644 --- a/doc/topics/utils/index.rst +++ b/doc/topics/utils/index.rst @@ -87,8 +87,8 @@ Also you could even write your utility modules in object oriented fashion: # -*- coding: utf-8 -*- ''' - My utils module - --------------- + My OOP-style utils module + ------------------------- This module contains common functions for use in my other custom types. 
''' From f1765472dddcbbe319da2d376ec2001e6776d51b Mon Sep 17 00:00:00 2001 From: Johannes Renner Date: Wed, 16 Aug 2017 16:54:41 +0200 Subject: [PATCH 130/639] Notify systemd synchronously (via NOTIFY_SOCKET) Forking the systemd-notify command is known to be unreliable at least with older versions of the kernel and/or systemd. When systemd receives the notification the systemd-notify process may have already exited causing an error in the logs while waiting for a (90 seconds) timeout. This patch instead notifies the systemd NOTIFY_SOCKET synchronously in case the systemd.daemon python library is not available. --- salt/utils/process.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/salt/utils/process.py b/salt/utils/process.py index 1b83eb7d42..51681ec364 100644 --- a/salt/utils/process.py +++ b/salt/utils/process.py @@ -15,6 +15,7 @@ import contextlib import subprocess import multiprocessing import multiprocessing.util +import socket # Import salt libs @@ -55,7 +56,17 @@ def notify_systemd(): import systemd.daemon except ImportError: if salt.utils.which('systemd-notify') and systemd_notify_call('--booted'): - return systemd_notify_call('--ready') + # Notify systemd synchronously + notify_socket = os.getenv('NOTIFY_SOCKET') + if notify_socket: + # Handle abstract namespace socket + if notify_socket.startswith('@'): + notify_socket = '\0{0}'.format(notify_socket[1:]) + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + sock.connect(notify_socket) + sock.sendall('READY=1'.encode()) + sock.close() + return True return False if systemd.daemon.booted(): From 1a987cb9481da365e6ad1a49c6526c970bdfe605 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tarjei=20Hus=C3=B8y?= Date: Wed, 16 Aug 2017 10:30:45 -0700 Subject: [PATCH 131/639] Fix broken negation in iptables Introduced in 7c6ff77c and released with 2017.7. 
--- salt/modules/iptables.py | 5 ++++- tests/unit/modules/test_iptables.py | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/modules/iptables.py b/salt/modules/iptables.py index 37d5842eea..a96c975da7 100644 --- a/salt/modules/iptables.py +++ b/salt/modules/iptables.py @@ -493,8 +493,11 @@ def build_rule(table='filter', chain=None, command=None, position='', full=None, after_jump.append('--{0} {1}'.format(after_jump_argument, value)) del kwargs[after_jump_argument] - for key, value in kwargs.items(): + for key in kwargs: negation = maybe_add_negation(key) + # don't use .items() since maybe_add_negation removes the prefix from + # the value in the kwargs, thus we need to fetch it after that has run + value = kwargs[key] flag = '-' if len(key) == 1 else '--' value = '' if value in (None, '') else ' {0}'.format(value) rule.append('{0}{1}{2}{3}'.format(negation, flag, key, value)) diff --git a/tests/unit/modules/test_iptables.py b/tests/unit/modules/test_iptables.py index 1c4f34118f..6fe9e91285 100644 --- a/tests/unit/modules/test_iptables.py +++ b/tests/unit/modules/test_iptables.py @@ -60,6 +60,9 @@ class IptablesTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(iptables.build_rule(**{'if': 'not eth0'}), '! -i eth0') + self.assertEqual(iptables.build_rule(**{'proto': 'tcp', 'syn': '!'}), + '-p tcp ! 
--syn') + self.assertEqual(iptables.build_rule(dports=[80, 443], proto='tcp'), '-p tcp -m multiport --dports 80,443') From 79c53f3f8193c22566978ce0e627818bfed2f4fa Mon Sep 17 00:00:00 2001 From: Johannes Renner Date: Tue, 22 Aug 2017 15:56:17 +0200 Subject: [PATCH 132/639] Fallback to systemd_notify_call() in case of socket.error --- salt/utils/process.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/salt/utils/process.py b/salt/utils/process.py index 51681ec364..e63c888100 100644 --- a/salt/utils/process.py +++ b/salt/utils/process.py @@ -62,10 +62,13 @@ def notify_systemd(): # Handle abstract namespace socket if notify_socket.startswith('@'): notify_socket = '\0{0}'.format(notify_socket[1:]) - sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) - sock.connect(notify_socket) - sock.sendall('READY=1'.encode()) - sock.close() + try: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + sock.connect(notify_socket) + sock.sendall('READY=1'.encode()) + sock.close() + except socket.error: + return systemd_notify_call('--ready') return True return False From e560a92e583034ccb17dcd00ed6809a090f14485 Mon Sep 17 00:00:00 2001 From: Andrew Bulford Date: Tue, 22 Aug 2017 15:11:23 +0100 Subject: [PATCH 133/639] Ensure error is returned if create_network fails --- salt/states/docker_network.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/states/docker_network.py b/salt/states/docker_network.py index 932d497fa1..d5d57afc6b 100644 --- a/salt/states/docker_network.py +++ b/salt/states/docker_network.py @@ -252,6 +252,7 @@ def present(name, except Exception as exc: ret['comment'] = ('Failed to create network \'{0}\': {1}' .format(name, exc)) + return ret # Finally, figure out the list of containers which should now be connected. 
containers_to_connect = {} From 30bde1d7f3f2180e491b9b7980257bd6abeb3959 Mon Sep 17 00:00:00 2001 From: Steven Joseph Date: Wed, 23 Aug 2017 00:21:03 +1000 Subject: [PATCH 134/639] Updated as per PR comments, - add version checking - add doco --- doc/topics/releases/oxygen.rst | 5 +++++ salt/cloud/clouds/azurearm.py | 4 +++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index 4dda3bab84..0b75a9fcb8 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -113,6 +113,11 @@ Profitbricks Cloud Updated Dependency The minimum version of the `profitbrick` python package for the `profitbricks` cloud driver has changed from 3.0.0 to 3.1.0. +Azure Cloud Updated Dependency +------------------------------ + +The azure sdk used for the ``azurearm`` cloud driver now depends on ``azure-cli>=2.0.12`` + Module Deprecations ------------------- diff --git a/salt/cloud/clouds/azurearm.py b/salt/cloud/clouds/azurearm.py index 8d65b9c9c0..c3f5777a64 100644 --- a/salt/cloud/clouds/azurearm.py +++ b/salt/cloud/clouds/azurearm.py @@ -73,6 +73,7 @@ from salt.exceptions import ( SaltCloudExecutionTimeout, ) from salt.ext.six.moves import filter +from distutils.version import LooseVersion # Import 3rd-party libs HAS_LIBS = False @@ -115,7 +116,8 @@ try: from azure.mgmt.web import WebSiteManagementClient from msrestazure.azure_exceptions import CloudError from azure.multiapi.storage.v2016_05_31 import CloudStorageAccount - HAS_LIBS = True + from azure.cli import core + HAS_LIBS = LooseVersion(core.__version__) >= LooseVersion("2.0.12") except ImportError: pass # pylint: enable=wrong-import-position,wrong-import-order From 42a118ff56bbfeeb3ca605aba70ca954d131ed61 Mon Sep 17 00:00:00 2001 From: Alessandro -oggei- Ogier Date: Tue, 22 Aug 2017 14:01:24 +0200 Subject: [PATCH 135/639] fixed cmd composition and unified his making across module --- salt/modules/groupadd.py | 42 
++++++++++++++--------------- tests/unit/modules/groupadd_test.py | 24 ++++++++--------- 2 files changed, 33 insertions(+), 33 deletions(-) diff --git a/salt/modules/groupadd.py b/salt/modules/groupadd.py index 2f79c47dd9..f02a5811a7 100644 --- a/salt/modules/groupadd.py +++ b/salt/modules/groupadd.py @@ -31,7 +31,7 @@ def __virtual__(): if __grains__['kernel'] in ('Linux', 'OpenBSD', 'NetBSD'): return __virtualname__ return (False, 'The groupadd execution module cannot be loaded: ' - ' only available on Linux, OpenBSD and NetBSD') + ' only available on Linux, OpenBSD and NetBSD') def add(name, gid=None, system=False, root=None): @@ -44,12 +44,12 @@ def add(name, gid=None, system=False, root=None): salt '*' group.add foo 3456 ''' - cmd = 'groupadd ' + cmd = ['groupadd'] if gid: - cmd += '-g {0} '.format(gid) + cmd.append('-g {0}'.format(gid)) if system and __grains__['kernel'] != 'OpenBSD': - cmd += '-r ' - cmd += name + cmd.append('-r') + cmd.append(name) if root is not None: cmd.extend(('-R', root)) @@ -69,7 +69,7 @@ def delete(name, root=None): salt '*' group.delete foo ''' - cmd = ('groupdel', name) + cmd = ['groupdel', name] if root is not None: cmd.extend(('-R', root)) @@ -140,7 +140,7 @@ def chgid(name, gid, root=None): pre_gid = __salt__['file.group_to_gid'](name) if gid == pre_gid: return True - cmd = ('groupmod', '-g', gid, name) + cmd = ['groupmod', '-g', gid, name] if root is not None: cmd.extend(('-R', root)) @@ -170,15 +170,15 @@ def adduser(name, username, root=None): if __grains__['kernel'] == 'Linux': if on_redhat_5: - cmd = ('gpasswd', '-a', username, name) + cmd = ['gpasswd', '-a', username, name] elif on_suse_11: - cmd = ('usermod', '-A', name, username) + cmd = ['usermod', '-A', name, username] else: - cmd = ('gpasswd', '--add', username, name) + cmd = ['gpasswd', '--add', username, name] if root is not None: cmd.extend(('-Q', root)) else: - cmd = ('usermod', '-G', name, username) + cmd = ['usermod', '-G', name, username] if root is not None: 
cmd.extend(('-R', root)) @@ -208,20 +208,20 @@ def deluser(name, username, root=None): if username in grp_info['members']: if __grains__['kernel'] == 'Linux': if on_redhat_5: - cmd = ('gpasswd', '-d', username, name) + cmd = ['gpasswd', '-d', username, name] elif on_suse_11: - cmd = ('usermod', '-R', name, username) + cmd = ['usermod', '-R', name, username] else: - cmd = ('gpasswd', '--del', username, name) + cmd = ['gpasswd', '--del', username, name] if root is not None: cmd.extend(('-R', root)) retcode = __salt__['cmd.retcode'](cmd, python_shell=False) elif __grains__['kernel'] == 'OpenBSD': out = __salt__['cmd.run_stdout']('id -Gn {0}'.format(username), python_shell=False) - cmd = 'usermod -S ' - cmd += ','.join([g for g in out.split() if g != str(name)]) - cmd += ' {0}'.format(username) + cmd = ['usermod', '-S'] + cmd.append(','.join([g for g in out.split() if g != str(name)])) + cmd.append('{0}'.format(username)) retcode = __salt__['cmd.retcode'](cmd, python_shell=False) else: log.error('group.deluser is not yet supported on this platform') @@ -249,13 +249,13 @@ def members(name, members_list, root=None): if __grains__['kernel'] == 'Linux': if on_redhat_5: - cmd = ('gpasswd', '-M', members_list, name) + cmd = ['gpasswd', '-M', members_list, name] elif on_suse_11: for old_member in __salt__['group.info'](name).get('members'): __salt__['cmd.run']('groupmod -R {0} {1}'.format(old_member, name), python_shell=False) - cmd = ('groupmod', '-A', members_list, name) + cmd = ['groupmod', '-A', members_list, name] else: - cmd = ('gpasswd', '--members', members_list, name) + cmd = ['gpasswd', '--members', members_list, name] if root is not None: cmd.extend(('-R', root)) retcode = __salt__['cmd.retcode'](cmd, python_shell=False) @@ -270,7 +270,7 @@ def members(name, members_list, root=None): for user in members_list.split(","): if user: retcode = __salt__['cmd.retcode']( - 'usermod -G {0} {1}'.format(name, user), + ['usermod', '-G', name, user], python_shell=False) if not 
retcode == 0: break diff --git a/tests/unit/modules/groupadd_test.py b/tests/unit/modules/groupadd_test.py index b03fa622d6..bab0c80764 100644 --- a/tests/unit/modules/groupadd_test.py +++ b/tests/unit/modules/groupadd_test.py @@ -114,16 +114,16 @@ class GroupAddTestCase(TestCase): ''' os_version_list = [ {'grains': {'kernel': 'Linux', 'os_family': 'RedHat', 'osmajorrelease': '5'}, - 'cmd': ('gpasswd', '-a', 'root', 'test')}, + 'cmd': ['gpasswd', '-a', 'root', 'test']}, {'grains': {'kernel': 'Linux', 'os_family': 'Suse', 'osmajorrelease': '11'}, - 'cmd': ('usermod', '-A', 'test', 'root')}, + 'cmd': ['usermod', '-A', 'test', 'root']}, {'grains': {'kernel': 'Linux'}, - 'cmd': ('gpasswd', '--add', 'root', 'test')}, + 'cmd': ['gpasswd', '--add', 'root', 'test']}, {'grains': {'kernel': 'OTHERKERNEL'}, - 'cmd': ('usermod', '-G', 'test', 'root')}, + 'cmd': ['usermod', '-G', 'test', 'root']}, ] for os_version in os_version_list: @@ -141,16 +141,16 @@ class GroupAddTestCase(TestCase): ''' os_version_list = [ {'grains': {'kernel': 'Linux', 'os_family': 'RedHat', 'osmajorrelease': '5'}, - 'cmd': ('gpasswd', '-d', 'root', 'test')}, + 'cmd': ['gpasswd', '-d', 'root', 'test']}, {'grains': {'kernel': 'Linux', 'os_family': 'Suse', 'osmajorrelease': '11'}, - 'cmd': ('usermod', '-R', 'test', 'root')}, + 'cmd': ['usermod', '-R', 'test', 'root']}, {'grains': {'kernel': 'Linux'}, - 'cmd': ('gpasswd', '--del', 'root', 'test')}, + 'cmd': ['gpasswd', '--del', 'root', 'test']}, {'grains': {'kernel': 'OpenBSD'}, - 'cmd': 'usermod -S foo root'}, + 'cmd': ['usermod', '-S', 'foo', 'root']}, ] for os_version in os_version_list: @@ -176,16 +176,16 @@ class GroupAddTestCase(TestCase): ''' os_version_list = [ {'grains': {'kernel': 'Linux', 'os_family': 'RedHat', 'osmajorrelease': '5'}, - 'cmd': ('gpasswd', '-M', 'foo', 'test')}, + 'cmd': ['gpasswd', '-M', 'foo', 'test']}, {'grains': {'kernel': 'Linux', 'os_family': 'Suse', 'osmajorrelease': '11'}, - 'cmd': ('groupmod', '-A', 'foo', 'test')}, + 
'cmd': ['groupmod', '-A', 'foo', 'test']}, {'grains': {'kernel': 'Linux'}, - 'cmd': ('gpasswd', '--members', 'foo', 'test')}, + 'cmd': ['gpasswd', '--members', 'foo', 'test']}, {'grains': {'kernel': 'OpenBSD'}, - 'cmd': 'usermod -G test foo'}, + 'cmd': ['usermod', '-G', 'test', 'foo']}, ] for os_version in os_version_list: From 009ef6686b49cd523f846eb7ec9282c9bf03e694 Mon Sep 17 00:00:00 2001 From: Mapel88 Date: Tue, 22 Aug 2017 17:27:55 +0300 Subject: [PATCH 136/639] Fix dictionary keys from string to int identity type values are int. --- salt/states/win_iis.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/salt/states/win_iis.py b/salt/states/win_iis.py index b9940b5dd7..74507c683c 100644 --- a/salt/states/win_iis.py +++ b/salt/states/win_iis.py @@ -495,8 +495,7 @@ def container_setting(name, container, settings=None): processModel.maxProcesses: 1 processModel.userName: TestUser processModel.password: TestPassword - processModel.identityType: SpecificUser - + processModel.identityType: SpecificUser Example of usage for the ``Sites`` container: @@ -511,9 +510,9 @@ def container_setting(name, container, settings=None): logFile.period: Daily limits.maxUrlSegments: 32 ''' - - identityType_map2string = {'0': 'LocalSystem', '1': 'LocalService', '2': 'NetworkService', '3': 'SpecificUser', '4': 'ApplicationPoolIdentity'} - + + identityType_map2string = {0: 'LocalSystem', 1: 'LocalService', 2: 'NetworkService', 3: 'SpecificUser', 4: 'ApplicationPoolIdentity'} + ret = {'name': name, 'changes': {}, 'comment': str(), From ec20e9a19a2cf08c7d005a5ca1fe8d7c380a0c99 Mon Sep 17 00:00:00 2001 From: Mapel88 Date: Tue, 22 Aug 2017 17:52:20 +0300 Subject: [PATCH 137/639] Fix bug #43110 - win_iis module Fix func create_cert_binding. win 2008 uses the following format: ip!port and not ip!port! 
--- salt/modules/win_iis.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/modules/win_iis.py b/salt/modules/win_iis.py index bc8abbbbe7..80bba6c438 100644 --- a/salt/modules/win_iis.py +++ b/salt/modules/win_iis.py @@ -837,6 +837,11 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443, # IIS 7.5 and earlier have different syntax for associating a certificate with a site # Modify IP spec to IIS 7.5 format iis7path = binding_path.replace(r"\*!", "\\0.0.0.0!") + + # win 2008 uses the following format: ip!port and not ip!port! + if iis7path.endswith("!"): + iis7path = iis7path[:-1] + ps_cmd = ['New-Item', '-Path', "'{0}'".format(iis7path), From cf6864aef7fd6bb2748762373ca47e1e9364484d Mon Sep 17 00:00:00 2001 From: Mike Place Date: Tue, 22 Aug 2017 09:54:58 -0600 Subject: [PATCH 138/639] Add newlines to satisfy linter --- salt/states/boto_elbv2.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/states/boto_elbv2.py b/salt/states/boto_elbv2.py index 0fa22f99a5..cd66b18d9a 100644 --- a/salt/states/boto_elbv2.py +++ b/salt/states/boto_elbv2.py @@ -54,6 +54,7 @@ def __virtual__(): return 'boto_elbv2' return (False, "The boto_elbv2 module cannot be loaded: boto3 library not found") + def create_target_group(name, protocol, port, vpc_id, region=None, key=None, keyid=None, profile=None, health_check_protocol='HTTP', health_check_port='traffic-port', @@ -196,6 +197,7 @@ def delete_target_group(name, region=None, key=None, keyid=None, profile=None): ret['comment'] = 'Target Group {0} deletion failed'.format(name) return ret + def targets_registered(name, targets, region=None, key=None, keyid=None, profile=None, **kwargs): ''' From 43b03607639d586da1d7de368f70d61015330185 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 22 Aug 2017 10:54:01 -0600 Subject: [PATCH 139/639] Fix lint --- salt/states/group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/states/group.py b/salt/states/group.py index 
78f3568c74..d280243e08 100644 --- a/salt/states/group.py +++ b/salt/states/group.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -''' +r''' Management of user groups ========================= @@ -100,7 +100,7 @@ def present(name, addusers=None, delusers=None, members=None): - ''' + r''' Ensure that a group is present Args: From 9a5ae2bba14256537a03eed19557737878a14a01 Mon Sep 17 00:00:00 2001 From: Vitaliy Fuks Date: Sat, 1 Jul 2017 02:07:00 +0000 Subject: [PATCH 140/639] Removed several uses of name.split('.')[0] in SoftLayer driver. This code is breaking when server names with multiple periods are used - for example, name=server1.prod.dc1 domain=example.com. Simply "split and use first one isn't the reverse operation of "'.'.join([name, domain])" which is done when VM is created here: https://github.com/saltstack/salt/commit/9dcd11c1551975faf32de54bed913b566a7d12d0#diff-ac8112a3f8d2ebde0edf104797fe64d7 We've been running with these changes for a while and don't have any issues creating or destroying instances. The caveat that some operations (such as destroying) through SoftLayer API require hostname (without domain) is already documented in Salt and still applies. --- salt/cloud/__init__.py | 9 --------- salt/cloud/clouds/softlayer.py | 5 +---- salt/cloud/clouds/softlayer_hw.py | 3 --- 3 files changed, 1 insertion(+), 16 deletions(-) diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py index d7f1be2fd4..bb9530f8e4 100644 --- a/salt/cloud/__init__.py +++ b/salt/cloud/__init__.py @@ -730,15 +730,6 @@ class Cloud(object): continue for vm_name, details in six.iteritems(vms): - # If VM was created with use_fqdn with either of the softlayer drivers, - # we need to strip the VM name and only search for the short hostname. 
- if driver == 'softlayer' or driver == 'softlayer_hw': - ret = [] - for name in names: - name = name.split('.')[0] - ret.append(name) - if vm_name not in ret: - continue # XXX: The logic below can be removed once the aws driver # is removed elif vm_name not in names: diff --git a/salt/cloud/clouds/softlayer.py b/salt/cloud/clouds/softlayer.py index 457447b431..9b9343a1e0 100644 --- a/salt/cloud/clouds/softlayer.py +++ b/salt/cloud/clouds/softlayer.py @@ -508,7 +508,7 @@ def list_nodes_full(mask='mask[id]', call=None): conn = get_conn(service='SoftLayer_Account') response = conn.getVirtualGuests() for node_id in response: - hostname = node_id['hostname'].split('.')[0] + hostname = node_id['hostname'] ret[hostname] = node_id __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__) return ret @@ -594,9 +594,6 @@ def destroy(name, call=None): transport=__opts__['transport'] ) - # If the VM was created with use_fqdn, the short hostname will be used instead. - name = name.split('.')[0] - node = show_instance(name, call='action') conn = get_conn() response = conn.deleteObject(id=node['id']) diff --git a/salt/cloud/clouds/softlayer_hw.py b/salt/cloud/clouds/softlayer_hw.py index 34dae95cca..030391fe6d 100644 --- a/salt/cloud/clouds/softlayer_hw.py +++ b/salt/cloud/clouds/softlayer_hw.py @@ -526,9 +526,6 @@ def destroy(name, call=None): transport=__opts__['transport'] ) - # If the VM was created with use_fqdn, the short hostname will be used instead. - name = name.split('.')[0] - node = show_instance(name, call='action') conn = get_conn(service='SoftLayer_Ticket') response = conn.createCancelServerTicket( From 8a6ad0a9cfa8cddd9f828c83983ee9be25ac3208 Mon Sep 17 00:00:00 2001 From: Vitaliy Fuks Date: Sat, 1 Jul 2017 12:47:52 +0000 Subject: [PATCH 141/639] Fixed typo. 
--- salt/cloud/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py index bb9530f8e4..73327724ef 100644 --- a/salt/cloud/__init__.py +++ b/salt/cloud/__init__.py @@ -732,7 +732,7 @@ class Cloud(object): for vm_name, details in six.iteritems(vms): # XXX: The logic below can be removed once the aws driver # is removed - elif vm_name not in names: + if vm_name not in names: continue elif driver == 'ec2' and 'aws' in handled_drivers and \ From 017eb849f1bfafe2e857bfd357f9564c7e9039a3 Mon Sep 17 00:00:00 2001 From: Adam Mendlik Date: Tue, 22 Aug 2017 13:05:32 -0600 Subject: [PATCH 142/639] Remove kernelpkg test assertions added in Python 3.6 --- tests/unit/modules/test_kernelpkg_linux_apt.py | 2 -- tests/unit/modules/test_kernelpkg_linux_yum.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/tests/unit/modules/test_kernelpkg_linux_apt.py b/tests/unit/modules/test_kernelpkg_linux_apt.py index a24ba8d245..0a20ada986 100644 --- a/tests/unit/modules/test_kernelpkg_linux_apt.py +++ b/tests/unit/modules/test_kernelpkg_linux_apt.py @@ -87,7 +87,6 @@ class AptKernelPkgTestCase(KernelPkgTestCase, TestCase, LoaderModuleMockMixin): with patch.object(self._kernelpkg, 'active', return_value=self.KERNEL_LIST[-1]): with patch.object(self._kernelpkg, 'list_installed', return_value=self.KERNEL_LIST): result = self._kernelpkg.remove(release=self.KERNEL_LIST[0]) - self._kernelpkg.__salt__['pkg.purge'].assert_called_once() self.assertIn('removed', result) target = '{0}-{1}'.format(self._kernelpkg._package_prefix(), self.KERNEL_LIST[0]) # pylint: disable=protected-access self.assertListEqual(result['removed'], [target]) @@ -101,4 +100,3 @@ class AptKernelPkgTestCase(KernelPkgTestCase, TestCase, LoaderModuleMockMixin): with patch.object(self._kernelpkg, 'active', return_value=self.KERNEL_LIST[-1]): with patch.object(self._kernelpkg, 'list_installed', return_value=self.KERNEL_LIST): 
self.assertRaises(CommandExecutionError, self._kernelpkg.remove, release=self.KERNEL_LIST[0]) - self._kernelpkg.__salt__['pkg.purge'].assert_called_once() diff --git a/tests/unit/modules/test_kernelpkg_linux_yum.py b/tests/unit/modules/test_kernelpkg_linux_yum.py index 946689c619..95753337e0 100644 --- a/tests/unit/modules/test_kernelpkg_linux_yum.py +++ b/tests/unit/modules/test_kernelpkg_linux_yum.py @@ -87,7 +87,6 @@ class YumKernelPkgTestCase(KernelPkgTestCase, TestCase, LoaderModuleMockMixin): with patch.object(self._kernelpkg, 'active', return_value=self.KERNEL_LIST[-1]): with patch.object(self._kernelpkg, 'list_installed', return_value=self.KERNEL_LIST): result = self._kernelpkg.remove(release=self.KERNEL_LIST[0]) - self._kernelpkg.__salt__['cmd.run_all'].assert_called_once() self.assertIn('removed', result) target = '{0}-{1}'.format(self._kernelpkg._package_name(), self.KERNEL_LIST[0]) # pylint: disable=protected-access self.assertListEqual(result['removed'], [target]) @@ -101,4 +100,3 @@ class YumKernelPkgTestCase(KernelPkgTestCase, TestCase, LoaderModuleMockMixin): with patch.object(self._kernelpkg, 'active', return_value=self.KERNEL_LIST[-1]): with patch.object(self._kernelpkg, 'list_installed', return_value=self.KERNEL_LIST): self.assertRaises(CommandExecutionError, self._kernelpkg.remove, release=self.KERNEL_LIST[0]) - self._kernelpkg.__salt__['cmd.run_all'].assert_called_once() From 9f3047c42023acecd662fc329450c5f43c3c07f3 Mon Sep 17 00:00:00 2001 From: lomeroe Date: Mon, 17 Jul 2017 14:55:30 -0500 Subject: [PATCH 143/639] add additional checks for ADM policies that have the same ADMX policy ID (#42279) --- salt/modules/win_lgpo.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index 347ca742f9..e55403adbc 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -4590,11 +4590,35 @@ def _lookup_admin_template(policy_name, if 
adml_search_results: multiple_adml_entries = False suggested_policies = '' + adml_to_remove = [] if len(adml_search_results) > 1: multiple_adml_entries = True for adml_search_result in adml_search_results: if not getattr(adml_search_result, 'text', '').strip() == policy_name: - adml_search_results.remove(adml_search_result) + adml_to_remove.append(adml_search_result) + if hierarchy: + display_name_searchval = '$({0}.{1})'.format( + adml_search_result.tag.split('}')[1], + adml_search_result.attrib['id']) + policy_search_string = '//{0}:policy[@*[local-name() = "displayName"] = "{1}" and (@*[local-name() = "class"] = "Both" or @*[local-name() = "class"] = "{2}") ]'.format( + adml_search_result.prefix, + display_name_searchval, + policy_class) + # this should only be 1 result + admx_search_results = admx_policy_definitions.xpath(policy_search_string, namespaces=adml_search_result.nsmap) + for search_result in admx_search_results: + this_hierarchy = _build_parent_list(search_result, + admx_policy_definitions, + True, + adml_policy_resources) + this_hierarchy.reverse() + if hierarchy != this_hierarchy: + adml_to_remove.append(adml_search_result) + for adml in adml_to_remove: + if adml in adml_search_results: + adml_search_results.remove(adml) + if len(adml_search_results) == 1 and multiple_adml_entries: + multiple_adml_entries = False for adml_search_result in adml_search_results: dmsg = 'found an ADML entry matching the string! 
{0} -- {1}' log.debug(dmsg.format(adml_search_result.tag, From ecd446fd55f65be7c9f29689bf650eb6a8bc4143 Mon Sep 17 00:00:00 2001 From: lomeroe Date: Tue, 22 Aug 2017 09:04:31 -0500 Subject: [PATCH 144/639] track xml namespace to ensure policies w/duplicate IDs or Names do not conflict --- salt/modules/win_lgpo.py | 827 +++++++++++++++++++++------------------ 1 file changed, 446 insertions(+), 381 deletions(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index e55403adbc..ee0dbbad6d 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -3503,22 +3503,31 @@ def _checkAllAdmxPolicies(policy_class, not_configured_policies.remove(policy_item) for not_configured_policy in not_configured_policies: - policy_vals[not_configured_policy.attrib['name']] = 'Not Configured' + not_configured_policy_namespace = not_configured_policy.nsmap[not_configured_policy.prefix] + if not_configured_policy_namespace not in policy_vals: + policy_vals[not_configured_policy_namespace] = {} + policy_vals[not_configured_policy_namespace][not_configured_policy.attrib['name']] = 'Not Configured' if return_full_policy_names: - full_names[not_configured_policy.attrib['name']] = _getFullPolicyName( + if not_configured_policy_namespace not in full_names: + full_names[not_configured_policy_namespace] = {} + full_names[not_configured_policy_namespace][not_configured_policy.attrib['name']] = _getFullPolicyName( not_configured_policy, not_configured_policy.attrib['name'], return_full_policy_names, adml_policy_resources) log.debug('building hierarchy for non-configured item {0}'.format(not_configured_policy.attrib['name'])) - hierarchy[not_configured_policy.attrib['name']] = _build_parent_list(not_configured_policy, - admx_policy_definitions, - return_full_policy_names, - adml_policy_resources) + if not_configured_policy_namespace not in hierarchy: + hierarchy[not_configured_policy_namespace] = {} + 
hierarchy[not_configured_policy_namespace][not_configured_policy.attrib['name']] = _build_parent_list( + not_configured_policy, + admx_policy_definitions, + return_full_policy_names, + adml_policy_resources) for admx_policy in admx_policies: this_key = None this_valuename = None this_policyname = None + this_policynamespace = None this_policy_setting = 'Not Configured' element_only_enabled_disabled = True explicit_enable_disable_value_setting = False @@ -3537,6 +3546,7 @@ def _checkAllAdmxPolicies(policy_class, log.error('policy item {0} does not have the required "name" ' 'attribute'.format(admx_policy.attrib)) break + this_policynamespace = admx_policy.nsmap[admx_policy.prefix] if ENABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured': element_only_enabled_disabled = False explicit_enable_disable_value_setting = True @@ -3548,7 +3558,9 @@ def _checkAllAdmxPolicies(policy_class, policy_filedata): this_policy_setting = 'Enabled' log.debug('{0} is enabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting if DISABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured': element_only_enabled_disabled = False explicit_enable_disable_value_setting = True @@ -3560,21 +3572,27 @@ def _checkAllAdmxPolicies(policy_class, policy_filedata): this_policy_setting = 'Disabled' log.debug('{0} is disabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting if ENABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured': element_only_enabled_disabled = False explicit_enable_disable_value_setting = True if _checkListItem(admx_policy, this_policyname, 
this_key, ENABLED_LIST_XPATH, policy_filedata): this_policy_setting = 'Enabled' log.debug('{0} is enabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting if DISABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured': element_only_enabled_disabled = False explicit_enable_disable_value_setting = True if _checkListItem(admx_policy, this_policyname, this_key, DISABLED_LIST_XPATH, policy_filedata): this_policy_setting = 'Disabled' log.debug('{0} is disabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting if not explicit_enable_disable_value_setting and this_valuename: # the policy has a key/valuename but no explicit enabled/Disabled @@ -3587,7 +3605,9 @@ def _checkAllAdmxPolicies(policy_class, policy_filedata): this_policy_setting = 'Enabled' log.debug('{0} is enabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting elif _regexSearchRegPolData(re.escape(_buildKnownDataSearchString(this_key, this_valuename, 'REG_DWORD', @@ -3596,7 +3616,9 @@ def _checkAllAdmxPolicies(policy_class, policy_filedata): this_policy_setting = 'Disabled' log.debug('{0} is disabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting if ELEMENTS_XPATH(admx_policy): if element_only_enabled_disabled or this_policy_setting == 
'Enabled': @@ -3794,65 +3816,84 @@ def _checkAllAdmxPolicies(policy_class, and len(configured_elements.keys()) == len(required_elements.keys()): if policy_disabled_elements == len(required_elements.keys()): log.debug('{0} is disabled by all enum elements'.format(this_policyname)) - policy_vals[this_policyname] = 'Disabled' + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = 'Disabled' else: - policy_vals[this_policyname] = configured_elements + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = configured_elements log.debug('{0} is enabled by enum elements'.format(this_policyname)) else: if this_policy_setting == 'Enabled': - policy_vals[this_policyname] = configured_elements - if return_full_policy_names and this_policyname in policy_vals: - full_names[this_policyname] = _getFullPolicyName( + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = configured_elements + if return_full_policy_names and this_policynamespace in policy_vals and this_policyname in policy_vals[this_policynamespace]: + if this_policynamespace not in full_names: + full_names[this_policynamespace] = {} + full_names[this_policynamespace][this_policyname] = _getFullPolicyName( admx_policy, admx_policy.attrib['name'], return_full_policy_names, adml_policy_resources) - if this_policyname in policy_vals: - hierarchy[this_policyname] = _build_parent_list(admx_policy, + if this_policynamespace in policy_vals and this_policyname in policy_vals[this_policynamespace]: + if this_policynamespace not in hierarchy: + hierarchy[this_policynamespace] = {} + hierarchy[this_policynamespace][this_policyname] = _build_parent_list(admx_policy, admx_policy_definitions, return_full_policy_names, adml_policy_resources) if policy_vals and 
return_full_policy_names and not hierarchical_return: unpathed_dict = {} pathed_dict = {} - for policy_item in list(policy_vals): - if full_names[policy_item] in policy_vals: - # add this item with the path'd full name - full_path_list = hierarchy[policy_item] + for policy_namespace in list(policy_vals): + for policy_item in list(policy_vals[policy_namespace]): + if full_names[policy_namespace][policy_item] in policy_vals[policy_namespace]: + # add this item with the path'd full name + full_path_list = hierarchy[policy_namespace][policy_item] + full_path_list.reverse() + full_path_list.append(full_names[policy_namespace][policy_item]) + policy_vals['\\'.join(full_path_list)] = policy_vals[policy_namespace].pop(policy_item) + pathed_dict[full_names[policy_namespace][policy_item]] = True + else: + policy_vals[policy_namespace][full_names[policy_namespace][policy_item]] = policy_vals[policy_namespace].pop(policy_item) + if policy_namespace not in unpathed_dict: + unpathed_dict[policy_namespace] = {} + unpathed_dict[policy_namespace][full_names[policy_namespace][policy_item]] = policy_item + # go back and remove any "unpathed" policies that need a full path + for path_needed in unpathed_dict[policy_namespace]: + # remove the item with the same full name and re-add it w/a path'd version + full_path_list = hierarchy[policy_namespace][unpathed_dict[policy_namespace][path_needed]] full_path_list.reverse() - full_path_list.append(full_names[policy_item]) - policy_vals['\\'.join(full_path_list)] = policy_vals.pop(policy_item) - pathed_dict[full_names[policy_item]] = True - else: - policy_vals[full_names[policy_item]] = policy_vals.pop(policy_item) - unpathed_dict[full_names[policy_item]] = policy_item - # go back and remove any "unpathed" policies that need a full path - for path_needed in unpathed_dict: - # remove the item with the same full name and re-add it w/a path'd version - full_path_list = hierarchy[unpathed_dict[path_needed]] - full_path_list.reverse() - 
full_path_list.append(path_needed) - log.debug('full_path_list == {0}'.format(full_path_list)) - policy_vals['\\'.join(full_path_list)] = policy_vals.pop(path_needed) + full_path_list.append(path_needed) + log.debug('full_path_list == {0}'.format(full_path_list)) + policy_vals['\\'.join(full_path_list)] = policy_vals[policy_namespace].pop(path_needed) + for policy_namespace in list(policy_vals): + if policy_vals[policy_namespace] == {}: + policy_vals.pop(policy_namespace) if policy_vals and hierarchical_return: if hierarchy: - for hierarchy_item in hierarchy: - if hierarchy_item in policy_vals: - tdict = {} - first_item = True - for item in hierarchy[hierarchy_item]: - newdict = {} - if first_item: - h_policy_name = hierarchy_item - if return_full_policy_names: - h_policy_name = full_names[hierarchy_item] - newdict[item] = {h_policy_name: policy_vals.pop(hierarchy_item)} - first_item = False - else: - newdict[item] = tdict - tdict = newdict - if tdict: - policy_vals = dictupdate.update(policy_vals, tdict) + for policy_namespace in hierarchy: + for hierarchy_item in hierarchy[policy_namespace]: + if hierarchy_item in policy_vals[policy_namespace]: + tdict = {} + first_item = True + for item in hierarchy[policy_namespace][hierarchy_item]: + newdict = {} + if first_item: + h_policy_name = hierarchy_item + if return_full_policy_names: + h_policy_name = full_names[policy_namespace][hierarchy_item] + newdict[item] = {h_policy_name: policy_vals[policy_namespace].pop(hierarchy_item)} + first_item = False + else: + newdict[item] = tdict + tdict = newdict + if tdict: + policy_vals = dictupdate.update(policy_vals, tdict) + if policy_namespace in policy_vals and policy_vals[policy_namespace] == {}: + policy_vals.pop(policy_namespace) policy_vals = { module_policy_data.admx_registry_classes[policy_class]['lgpo_section']: { 'Administrative Templates': policy_vals @@ -4116,6 +4157,7 @@ def _policyFileReplaceOrAppend(this_string, policy_data, append_only=False): def 
_writeAdminTemplateRegPolFile(admtemplate_data, + admtemplate_namespace_data, admx_policy_definitions=None, adml_policy_resources=None, display_language='en-US', @@ -4132,7 +4174,9 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, existing_data = '' base_policy_settings = {} policy_data = _policy_info() - policySearchXpath = etree.XPath('//*[@*[local-name() = "id"] = $id or @*[local-name() = "name"] = $id]') + #//{0}:policy[@displayName = "{1}" and (@class = "Both" or @class = "{2}") ] + #policySearchXpath = etree.XPath('//*[@ns1:id = $id or @ns1:name = $id]') + policySearchXpath = '//ns1:*[@id = "{0}" or @name = "{0}"]' try: if admx_policy_definitions is None or adml_policy_resources is None: admx_policy_definitions, adml_policy_resources = _processPolicyDefinitions( @@ -4144,298 +4188,305 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, hierarchical_return=False, return_not_configured=False) log.debug('preparing to loop through policies requested to be configured') - for adm_policy in admtemplate_data: - if str(admtemplate_data[adm_policy]).lower() == 'not configured': - if adm_policy in base_policy_settings: - base_policy_settings.pop(adm_policy) - else: - log.debug('adding {0} to base_policy_settings'.format(adm_policy)) - base_policy_settings[adm_policy] = admtemplate_data[adm_policy] - for admPolicy in base_policy_settings: - log.debug('working on admPolicy {0}'.format(admPolicy)) - explicit_enable_disable_value_setting = False - this_key = None - this_valuename = None - if str(base_policy_settings[admPolicy]).lower() == 'disabled': - log.debug('time to disable {0}'.format(admPolicy)) - this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy) - if this_policy: - this_policy = this_policy[0] - if 'class' in this_policy.attrib: - if this_policy.attrib['class'] == registry_class or this_policy.attrib['class'] == 'Both': - if 'key' in this_policy.attrib: - this_key = this_policy.attrib['key'] - else: - msg = 'policy item {0} does not have 
the required "key" attribute' - log.error(msg.format(this_policy.attrib)) - break - if 'valueName' in this_policy.attrib: - this_valuename = this_policy.attrib['valueName'] - if DISABLED_VALUE_XPATH(this_policy): - # set the disabled value in the registry.pol file - explicit_enable_disable_value_setting = True - disabled_value_string = _checkValueItemParent(this_policy, - admPolicy, - this_key, - this_valuename, - DISABLED_VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - existing_data = _policyFileReplaceOrAppend(disabled_value_string, - existing_data) - if DISABLED_LIST_XPATH(this_policy): - explicit_enable_disable_value_setting = True - disabled_list_strings = _checkListItem(this_policy, - admPolicy, - this_key, - DISABLED_LIST_XPATH, - None, - test_items=False) - log.debug('working with disabledList portion of {0}'.format(admPolicy)) - existing_data = _policyFileReplaceOrAppendList(disabled_list_strings, + for adm_namespace in admtemplate_data: + for adm_policy in admtemplate_data[adm_namespace]: + if str(admtemplate_data[adm_namespace][adm_policy]).lower() == 'not configured': + if adm_policy in base_policy_settings[adm_namespace]: + base_policy_settings[adm_namespace].pop(adm_policy) + else: + log.debug('adding {0} to base_policy_settings'.format(adm_policy)) + if adm_namespace not in base_policy_settings: + base_policy_settings[adm_namespace] = {} + base_policy_settings[adm_namespace][adm_policy] = admtemplate_data[adm_namespace][adm_policy] + for adm_namespace in base_policy_settings: + for admPolicy in base_policy_settings[adm_namespace]: + log.debug('working on admPolicy {0}'.format(admPolicy)) + explicit_enable_disable_value_setting = False + this_key = None + this_valuename = None + if str(base_policy_settings[adm_namespace][admPolicy]).lower() == 'disabled': + log.debug('time to disable {0}'.format(admPolicy)) + #this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy, namespaces={'ns1': adm_namespace}) + this_policy = 
admx_policy_definitions.xpath(policySearchXpath.format('ns1', admPolicy), namespaces={'ns1': adm_namespace}) + if this_policy: + this_policy = this_policy[0] + if 'class' in this_policy.attrib: + if this_policy.attrib['class'] == registry_class or this_policy.attrib['class'] == 'Both': + if 'key' in this_policy.attrib: + this_key = this_policy.attrib['key'] + else: + msg = 'policy item {0} does not have the required "key" attribute' + log.error(msg.format(this_policy.attrib)) + break + if 'valueName' in this_policy.attrib: + this_valuename = this_policy.attrib['valueName'] + if DISABLED_VALUE_XPATH(this_policy): + # set the disabled value in the registry.pol file + explicit_enable_disable_value_setting = True + disabled_value_string = _checkValueItemParent(this_policy, + admPolicy, + this_key, + this_valuename, + DISABLED_VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + existing_data = _policyFileReplaceOrAppend(disabled_value_string, existing_data) - if not explicit_enable_disable_value_setting and this_valuename: - disabled_value_string = _buildKnownDataSearchString(this_key, - this_valuename, - 'REG_DWORD', - None, - check_deleted=True) - existing_data = _policyFileReplaceOrAppend(disabled_value_string, - existing_data) - if ELEMENTS_XPATH(this_policy): - log.debug('checking elements of {0}'.format(admPolicy)) - for elements_item in ELEMENTS_XPATH(this_policy): - for child_item in elements_item.getchildren(): - child_key = this_key - child_valuename = this_valuename - if 'key' in child_item.attrib: - child_key = child_item.attrib['key'] - if 'valueName' in child_item.attrib: - child_valuename = child_item.attrib['valueName'] - if etree.QName(child_item).localname == 'boolean' \ - and (TRUE_LIST_XPATH(child_item) or FALSE_LIST_XPATH(child_item)): - # WARNING: no OOB adm files use true/falseList items - # this has not been fully vetted - temp_dict = {'trueList': TRUE_LIST_XPATH, 'falseList': FALSE_LIST_XPATH} - for this_list in temp_dict: - 
disabled_list_strings = _checkListItem( - child_item, - admPolicy, - child_key, - temp_dict[this_list], - None, - test_items=False) - log.debug('working with {1} portion of {0}'.format( - admPolicy, - this_list)) - existing_data = _policyFileReplaceOrAppendList( - disabled_list_strings, - existing_data) - elif etree.QName(child_item).localname == 'boolean' \ - or etree.QName(child_item).localname == 'decimal' \ - or etree.QName(child_item).localname == 'text' \ - or etree.QName(child_item).localname == 'longDecimal' \ - or etree.QName(child_item).localname == 'multiText' \ - or etree.QName(child_item).localname == 'enum': - disabled_value_string = _processValueItem(child_item, - child_key, - child_valuename, - this_policy, - elements_item, - check_deleted=True) - msg = 'I have disabled value string of {0}' - log.debug(msg.format(disabled_value_string)) - existing_data = _policyFileReplaceOrAppend( - disabled_value_string, - existing_data) - elif etree.QName(child_item).localname == 'list': - disabled_value_string = _processValueItem(child_item, - child_key, - child_valuename, - this_policy, - elements_item, - check_deleted=True) - msg = 'I have disabled value string of {0}' - log.debug(msg.format(disabled_value_string)) - existing_data = _policyFileReplaceOrAppend( - disabled_value_string, - existing_data) - else: - msg = 'policy {0} was found but it does not appear to be valid for the class {1}' - log.error(msg.format(admPolicy, registry_class)) - else: - msg = 'policy item {0} does not have the requried "class" attribute' - log.error(msg.format(this_policy.attrib)) - else: - log.debug('time to enable and set the policy "{0}"'.format(admPolicy)) - this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy) - if this_policy: - this_policy = this_policy[0] - if 'class' in this_policy.attrib: - if this_policy.attrib['class'] == registry_class or this_policy.attrib['class'] == 'Both': - if 'key' in this_policy.attrib: - this_key = this_policy.attrib['key'] 
- else: - msg = 'policy item {0} does not have the required "key" attribute' - log.error(msg.format(this_policy.attrib)) - break - if 'valueName' in this_policy.attrib: - this_valuename = this_policy.attrib['valueName'] - - if ENABLED_VALUE_XPATH(this_policy): - explicit_enable_disable_value_setting = True - enabled_value_string = _checkValueItemParent(this_policy, - admPolicy, - this_key, - this_valuename, - ENABLED_VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - existing_data = _policyFileReplaceOrAppend( - enabled_value_string, - existing_data) - if ENABLED_LIST_XPATH(this_policy): - explicit_enable_disable_value_setting = True - enabled_list_strings = _checkListItem(this_policy, - admPolicy, - this_key, - ENABLED_LIST_XPATH, - None, - test_items=False) - log.debug('working with enabledList portion of {0}'.format(admPolicy)) - existing_data = _policyFileReplaceOrAppendList( - enabled_list_strings, - existing_data) - if not explicit_enable_disable_value_setting and this_valuename: - enabled_value_string = _buildKnownDataSearchString(this_key, - this_valuename, - 'REG_DWORD', - '1', - check_deleted=False) - existing_data = _policyFileReplaceOrAppend( - enabled_value_string, - existing_data) - if ELEMENTS_XPATH(this_policy): - for elements_item in ELEMENTS_XPATH(this_policy): - for child_item in elements_item.getchildren(): - child_key = this_key - child_valuename = this_valuename - if 'key' in child_item.attrib: - child_key = child_item.attrib['key'] - if 'valueName' in child_item.attrib: - child_valuename = child_item.attrib['valueName'] - if child_item.attrib['id'] in base_policy_settings[admPolicy]: - if etree.QName(child_item).localname == 'boolean' and ( - TRUE_LIST_XPATH(child_item) or FALSE_LIST_XPATH(child_item)): - list_strings = [] - if base_policy_settings[admPolicy][child_item.attrib['id']]: - list_strings = _checkListItem(child_item, - admPolicy, - child_key, - TRUE_LIST_XPATH, - None, - test_items=False) - log.debug('working with 
trueList portion of {0}'.format(admPolicy)) - else: - list_strings = _checkListItem(child_item, - admPolicy, - child_key, - FALSE_LIST_XPATH, - None, - test_items=False) - existing_data = _policyFileReplaceOrAppendList( - list_strings, - existing_data) - if etree.QName(child_item).localname == 'boolean' and ( - TRUE_VALUE_XPATH(child_item) or FALSE_VALUE_XPATH(child_item)): - value_string = '' - if base_policy_settings[admPolicy][child_item.attrib['id']]: - value_string = _checkValueItemParent(child_item, - admPolicy, - child_key, - child_valuename, - TRUE_VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - else: - value_string = _checkValueItemParent(child_item, - admPolicy, - child_key, - child_valuename, - FALSE_VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - existing_data = _policyFileReplaceOrAppend( - value_string, - existing_data) + if DISABLED_LIST_XPATH(this_policy): + explicit_enable_disable_value_setting = True + disabled_list_strings = _checkListItem(this_policy, + admPolicy, + this_key, + DISABLED_LIST_XPATH, + None, + test_items=False) + log.debug('working with disabledList portion of {0}'.format(admPolicy)) + existing_data = _policyFileReplaceOrAppendList(disabled_list_strings, + existing_data) + if not explicit_enable_disable_value_setting and this_valuename: + disabled_value_string = _buildKnownDataSearchString(this_key, + this_valuename, + 'REG_DWORD', + None, + check_deleted=True) + existing_data = _policyFileReplaceOrAppend(disabled_value_string, + existing_data) + if ELEMENTS_XPATH(this_policy): + log.debug('checking elements of {0}'.format(admPolicy)) + for elements_item in ELEMENTS_XPATH(this_policy): + for child_item in elements_item.getchildren(): + child_key = this_key + child_valuename = this_valuename + if 'key' in child_item.attrib: + child_key = child_item.attrib['key'] + if 'valueName' in child_item.attrib: + child_valuename = child_item.attrib['valueName'] if etree.QName(child_item).localname == 
'boolean' \ + and (TRUE_LIST_XPATH(child_item) or FALSE_LIST_XPATH(child_item)): + # WARNING: no OOB adm files use true/falseList items + # this has not been fully vetted + temp_dict = {'trueList': TRUE_LIST_XPATH, 'falseList': FALSE_LIST_XPATH} + for this_list in temp_dict: + disabled_list_strings = _checkListItem( + child_item, + admPolicy, + child_key, + temp_dict[this_list], + None, + test_items=False) + log.debug('working with {1} portion of {0}'.format( + admPolicy, + this_list)) + existing_data = _policyFileReplaceOrAppendList( + disabled_list_strings, + existing_data) + elif etree.QName(child_item).localname == 'boolean' \ or etree.QName(child_item).localname == 'decimal' \ or etree.QName(child_item).localname == 'text' \ or etree.QName(child_item).localname == 'longDecimal' \ - or etree.QName(child_item).localname == 'multiText': - enabled_value_string = _processValueItem( - child_item, - child_key, - child_valuename, - this_policy, - elements_item, - check_deleted=False, - this_element_value=base_policy_settings[admPolicy][child_item.attrib['id']]) - msg = 'I have enabled value string of {0}' - log.debug(msg.format([enabled_value_string])) + or etree.QName(child_item).localname == 'multiText' \ + or etree.QName(child_item).localname == 'enum': + disabled_value_string = _processValueItem(child_item, + child_key, + child_valuename, + this_policy, + elements_item, + check_deleted=True) + msg = 'I have disabled value string of {0}' + log.debug(msg.format(disabled_value_string)) existing_data = _policyFileReplaceOrAppend( - enabled_value_string, + disabled_value_string, existing_data) - elif etree.QName(child_item).localname == 'enum': - for enum_item in child_item.getchildren(): - if base_policy_settings[admPolicy][child_item.attrib['id']] == \ - _getAdmlDisplayName(adml_policy_resources, - enum_item.attrib['displayName'] - ).strip(): - enabled_value_string = _checkValueItemParent( - enum_item, - child_item.attrib['id'], - child_key, - child_valuename, - 
VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - existing_data = _policyFileReplaceOrAppend( - enabled_value_string, - existing_data) - if VALUE_LIST_XPATH(enum_item): - enabled_list_strings = _checkListItem(enum_item, - admPolicy, - child_key, - VALUE_LIST_XPATH, - None, - test_items=False) - msg = 'working with valueList portion of {0}' - log.debug(msg.format(child_item.attrib['id'])) - existing_data = _policyFileReplaceOrAppendList( - enabled_list_strings, - existing_data) - break elif etree.QName(child_item).localname == 'list': - enabled_value_string = _processValueItem( - child_item, - child_key, - child_valuename, - this_policy, - elements_item, - check_deleted=False, - this_element_value=base_policy_settings[admPolicy][child_item.attrib['id']]) - msg = 'I have enabled value string of {0}' - log.debug(msg.format([enabled_value_string])) + disabled_value_string = _processValueItem(child_item, + child_key, + child_valuename, + this_policy, + elements_item, + check_deleted=True) + msg = 'I have disabled value string of {0}' + log.debug(msg.format(disabled_value_string)) existing_data = _policyFileReplaceOrAppend( - enabled_value_string, - existing_data, - append_only=True) + disabled_value_string, + existing_data) + else: + msg = 'policy {0} was found but it does not appear to be valid for the class {1}' + log.error(msg.format(admPolicy, registry_class)) + else: + msg = 'policy item {0} does not have the requried "class" attribute' + log.error(msg.format(this_policy.attrib)) + else: + log.debug('time to enable and set the policy "{0}"'.format(admPolicy)) + #this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy, namespaces={'ns1': adm_namespace}) + this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace}) + log.debug('found this_policy == {0}'.format(this_policy)) + if this_policy: + this_policy = this_policy[0] + if 'class' in this_policy.attrib: + if 
this_policy.attrib['class'] == registry_class or this_policy.attrib['class'] == 'Both': + if 'key' in this_policy.attrib: + this_key = this_policy.attrib['key'] + else: + msg = 'policy item {0} does not have the required "key" attribute' + log.error(msg.format(this_policy.attrib)) + break + if 'valueName' in this_policy.attrib: + this_valuename = this_policy.attrib['valueName'] + + if ENABLED_VALUE_XPATH(this_policy): + explicit_enable_disable_value_setting = True + enabled_value_string = _checkValueItemParent(this_policy, + admPolicy, + this_key, + this_valuename, + ENABLED_VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + existing_data = _policyFileReplaceOrAppend( + enabled_value_string, + existing_data) + if ENABLED_LIST_XPATH(this_policy): + explicit_enable_disable_value_setting = True + enabled_list_strings = _checkListItem(this_policy, + admPolicy, + this_key, + ENABLED_LIST_XPATH, + None, + test_items=False) + log.debug('working with enabledList portion of {0}'.format(admPolicy)) + existing_data = _policyFileReplaceOrAppendList( + enabled_list_strings, + existing_data) + if not explicit_enable_disable_value_setting and this_valuename: + enabled_value_string = _buildKnownDataSearchString(this_key, + this_valuename, + 'REG_DWORD', + '1', + check_deleted=False) + existing_data = _policyFileReplaceOrAppend( + enabled_value_string, + existing_data) + if ELEMENTS_XPATH(this_policy): + for elements_item in ELEMENTS_XPATH(this_policy): + for child_item in elements_item.getchildren(): + child_key = this_key + child_valuename = this_valuename + if 'key' in child_item.attrib: + child_key = child_item.attrib['key'] + if 'valueName' in child_item.attrib: + child_valuename = child_item.attrib['valueName'] + if child_item.attrib['id'] in base_policy_settings[adm_namespace][admPolicy]: + if etree.QName(child_item).localname == 'boolean' and ( + TRUE_LIST_XPATH(child_item) or FALSE_LIST_XPATH(child_item)): + list_strings = [] + if 
base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']]: + list_strings = _checkListItem(child_item, + admPolicy, + child_key, + TRUE_LIST_XPATH, + None, + test_items=False) + log.debug('working with trueList portion of {0}'.format(admPolicy)) + else: + list_strings = _checkListItem(child_item, + admPolicy, + child_key, + FALSE_LIST_XPATH, + None, + test_items=False) + existing_data = _policyFileReplaceOrAppendList( + list_strings, + existing_data) + elif etree.QName(child_item).localname == 'boolean' and ( + TRUE_VALUE_XPATH(child_item) or FALSE_VALUE_XPATH(child_item)): + value_string = '' + if base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']]: + value_string = _checkValueItemParent(child_item, + admPolicy, + child_key, + child_valuename, + TRUE_VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + else: + value_string = _checkValueItemParent(child_item, + admPolicy, + child_key, + child_valuename, + FALSE_VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + existing_data = _policyFileReplaceOrAppend( + value_string, + existing_data) + elif etree.QName(child_item).localname == 'boolean' \ + or etree.QName(child_item).localname == 'decimal' \ + or etree.QName(child_item).localname == 'text' \ + or etree.QName(child_item).localname == 'longDecimal' \ + or etree.QName(child_item).localname == 'multiText': + enabled_value_string = _processValueItem( + child_item, + child_key, + child_valuename, + this_policy, + elements_item, + check_deleted=False, + this_element_value=base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']]) + msg = 'I have enabled value string of {0}' + log.debug(msg.format([enabled_value_string])) + existing_data = _policyFileReplaceOrAppend( + enabled_value_string, + existing_data) + elif etree.QName(child_item).localname == 'enum': + for enum_item in child_item.getchildren(): + if base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']] == \ + 
_getAdmlDisplayName(adml_policy_resources, + enum_item.attrib['displayName'] + ).strip(): + enabled_value_string = _checkValueItemParent( + enum_item, + child_item.attrib['id'], + child_key, + child_valuename, + VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + existing_data = _policyFileReplaceOrAppend( + enabled_value_string, + existing_data) + if VALUE_LIST_XPATH(enum_item): + enabled_list_strings = _checkListItem(enum_item, + admPolicy, + child_key, + VALUE_LIST_XPATH, + None, + test_items=False) + msg = 'working with valueList portion of {0}' + log.debug(msg.format(child_item.attrib['id'])) + existing_data = _policyFileReplaceOrAppendList( + enabled_list_strings, + existing_data) + break + elif etree.QName(child_item).localname == 'list': + enabled_value_string = _processValueItem( + child_item, + child_key, + child_valuename, + this_policy, + elements_item, + check_deleted=False, + this_element_value=base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']]) + msg = 'I have enabled value string of {0}' + log.debug(msg.format([enabled_value_string])) + existing_data = _policyFileReplaceOrAppend( + enabled_value_string, + existing_data, + append_only=True) _write_regpol_data(existing_data, policy_data.admx_registry_classes[registry_class]['policy_path'], policy_data.gpt_ini_path, @@ -4551,6 +4602,7 @@ def _lookup_admin_template(policy_name, if admx_policy_definitions is None or adml_policy_resources is None: admx_policy_definitions, adml_policy_resources = _processPolicyDefinitions( display_language=adml_language) + admx_search_results = [] admx_search_results = ADMX_SEARCH_XPATH(admx_policy_definitions, policy_name=policy_name, registry_class=policy_class) @@ -4596,24 +4648,31 @@ def _lookup_admin_template(policy_name, for adml_search_result in adml_search_results: if not getattr(adml_search_result, 'text', '').strip() == policy_name: adml_to_remove.append(adml_search_result) - if hierarchy: - display_name_searchval = 
'$({0}.{1})'.format( - adml_search_result.tag.split('}')[1], - adml_search_result.attrib['id']) - policy_search_string = '//{0}:policy[@*[local-name() = "displayName"] = "{1}" and (@*[local-name() = "class"] = "Both" or @*[local-name() = "class"] = "{2}") ]'.format( - adml_search_result.prefix, - display_name_searchval, - policy_class) - # this should only be 1 result - admx_search_results = admx_policy_definitions.xpath(policy_search_string, namespaces=adml_search_result.nsmap) - for search_result in admx_search_results: - this_hierarchy = _build_parent_list(search_result, - admx_policy_definitions, - True, - adml_policy_resources) - this_hierarchy.reverse() - if hierarchy != this_hierarchy: - adml_to_remove.append(adml_search_result) + else: + if hierarchy: + display_name_searchval = '$({0}.{1})'.format( + adml_search_result.tag.split('}')[1], + adml_search_result.attrib['id']) + #policy_search_string = '//{0}:policy[@*[local-name() = "displayName"] = "{1}" and (@*[local-name() = "class"] = "Both" or @*[local-name() = "class"] = "{2}") ]'.format( + policy_search_string = '//{0}:policy[@displayName = "{1}" and (@class = "Both" or @class = "{2}") ]'.format( + adml_search_result.prefix, + display_name_searchval, + policy_class) + admx_results = [] + admx_search_results = admx_policy_definitions.xpath(policy_search_string, namespaces=adml_search_result.nsmap) + for search_result in admx_search_results: + log.debug('policy_name == {0}'.format(policy_name)) + this_hierarchy = _build_parent_list(search_result, + admx_policy_definitions, + True, + adml_policy_resources) + this_hierarchy.reverse() + if hierarchy != this_hierarchy: + adml_to_remove.append(adml_search_result) + else: + admx_results.append(search_result) + if len(admx_results) == 1: + admx_search_results = admx_results for adml in adml_to_remove: if adml in adml_search_results: adml_search_results.remove(adml) @@ -4627,10 +4686,11 @@ def _lookup_admin_template(policy_name, 
adml_search_result.tag.split('}')[1], adml_search_result.attrib['id']) log.debug('searching for displayName == {0}'.format(display_name_searchval)) - admx_search_results = ADMX_DISPLAYNAME_SEARCH_XPATH( - admx_policy_definitions, - display_name=display_name_searchval, - registry_class=policy_class) + if not admx_search_results: + admx_search_results = ADMX_DISPLAYNAME_SEARCH_XPATH( + admx_policy_definitions, + display_name=display_name_searchval, + registry_class=policy_class) if admx_search_results: if len(admx_search_results) == 1 or hierarchy and not multiple_adml_entries: found = False @@ -4642,6 +4702,7 @@ def _lookup_admin_template(policy_name, True, adml_policy_resources) this_hierarchy.reverse() + log.debug('testing {0} == {1}'.format(hierarchy, this_hierarchy)) if hierarchy == this_hierarchy: found = True else: @@ -5100,6 +5161,7 @@ def set_(computer_policy=None, user_policy=None, if policies[p_class]: for policy_name in policies[p_class]: _pol = None + policy_namespace = None policy_key_name = policy_name if policy_name in _policydata.policies[p_class]['policies']: _pol = _policydata.policies[p_class]['policies'][policy_name] @@ -5149,16 +5211,19 @@ def set_(computer_policy=None, user_policy=None, adml_policy_resources=admlPolicyResources) if success: policy_name = the_policy.attrib['name'] - _admTemplateData[policy_name] = _value + policy_namespace = the_policy.nsmap[the_policy.prefix] + if policy_namespace not in _admTemplateData: + _admTemplateData[policy_namespace] = {} + _admTemplateData[policy_namespace][policy_name] = _value else: raise SaltInvocationError(msg) - if policy_name in _admTemplateData and the_policy is not None: - log.debug('setting == {0}'.format(_admTemplateData[policy_name]).lower()) - log.debug('{0}'.format(str(_admTemplateData[policy_name]).lower())) - if str(_admTemplateData[policy_name]).lower() != 'disabled' \ - and str(_admTemplateData[policy_name]).lower() != 'not configured': + if policy_namespace and policy_name in 
_admTemplateData[policy_namespace] and the_policy is not None: + log.debug('setting == {0}'.format(_admTemplateData[policy_namespace][policy_name]).lower()) + log.debug('{0}'.format(str(_admTemplateData[policy_namespace][policy_name]).lower())) + if str(_admTemplateData[policy_namespace][policy_name]).lower() != 'disabled' \ + and str(_admTemplateData[policy_namespace][policy_name]).lower() != 'not configured': if ELEMENTS_XPATH(the_policy): - if isinstance(_admTemplateData[policy_name], dict): + if isinstance(_admTemplateData[policy_namespace][policy_name], dict): for elements_item in ELEMENTS_XPATH(the_policy): for child_item in elements_item.getchildren(): # check each element @@ -5169,9 +5234,9 @@ def set_(computer_policy=None, user_policy=None, True, admlPolicyResources) log.debug('id attribute == "{0}" this_element_name == "{1}"'.format(child_item.attrib['id'], this_element_name)) - if this_element_name in _admTemplateData[policy_name]: + if this_element_name in _admTemplateData[policy_namespace][policy_name]: temp_element_name = this_element_name - elif child_item.attrib['id'] in _admTemplateData[policy_name]: + elif child_item.attrib['id'] in _admTemplateData[policy_namespace][policy_name]: temp_element_name = child_item.attrib['id'] else: msg = ('Element "{0}" must be included' @@ -5179,12 +5244,12 @@ def set_(computer_policy=None, user_policy=None, raise SaltInvocationError(msg.format(this_element_name, policy_name)) if 'required' in child_item.attrib \ and child_item.attrib['required'].lower() == 'true': - if not _admTemplateData[policy_name][temp_element_name]: + if not _admTemplateData[policy_namespace][policy_name][temp_element_name]: msg = 'Element "{0}" requires a value to be specified' raise SaltInvocationError(msg.format(temp_element_name)) if etree.QName(child_item).localname == 'boolean': if not isinstance( - _admTemplateData[policy_name][temp_element_name], + _admTemplateData[policy_namespace][policy_name][temp_element_name], bool): msg = 
'Element {0} requires a boolean True or False' raise SaltInvocationError(msg.format(temp_element_name)) @@ -5196,9 +5261,9 @@ def set_(computer_policy=None, user_policy=None, min_val = int(child_item.attrib['minValue']) if 'maxValue' in child_item.attrib: max_val = int(child_item.attrib['maxValue']) - if int(_admTemplateData[policy_name][temp_element_name]) \ + if int(_admTemplateData[policy_namespace][policy_name][temp_element_name]) \ < min_val or \ - int(_admTemplateData[policy_name][temp_element_name]) \ + int(_admTemplateData[policy_namespace][policy_name][temp_element_name]) \ > max_val: msg = 'Element "{0}" value must be between {1} and {2}' raise SaltInvocationError(msg.format(temp_element_name, @@ -5208,7 +5273,7 @@ def set_(computer_policy=None, user_policy=None, # make sure the value is in the enumeration found = False for enum_item in child_item.getchildren(): - if _admTemplateData[policy_name][temp_element_name] == \ + if _admTemplateData[policy_namespace][policy_name][temp_element_name] == \ _getAdmlDisplayName( admlPolicyResources, enum_item.attrib['displayName']).strip(): @@ -5222,33 +5287,33 @@ def set_(computer_policy=None, user_policy=None, and child_item.attrib['explicitValue'].lower() == \ 'true': if not isinstance( - _admTemplateData[policy_name][temp_element_name], + _admTemplateData[policy_namespace][policy_name][temp_element_name], dict): msg = ('Each list item of element "{0}" ' 'requires a dict value') msg = msg.format(temp_element_name) raise SaltInvocationError(msg) elif not isinstance( - _admTemplateData[policy_name][temp_element_name], + _admTemplateData[policy_namespace][policy_name][temp_element_name], list): msg = 'Element "{0}" requires a list value' msg = msg.format(temp_element_name) raise SaltInvocationError(msg) elif etree.QName(child_item).localname == 'multiText': if not isinstance( - _admTemplateData[policy_name][temp_element_name], + _admTemplateData[policy_namespace][policy_name][temp_element_name], list): msg = 'Element 
"{0}" requires a list value' msg = msg.format(temp_element_name) raise SaltInvocationError(msg) - _admTemplateData[policy_name][child_item.attrib['id']] = \ - _admTemplateData[policy_name].pop(temp_element_name) + _admTemplateData[policy_namespace][policy_name][child_item.attrib['id']] = \ + _admTemplateData[policy_namespace][policy_name].pop(temp_element_name) else: msg = 'The policy "{0}" has elements which must be configured' msg = msg.format(policy_name) raise SaltInvocationError(msg) else: - if str(_admTemplateData[policy_name]).lower() != 'enabled': + if str(_admTemplateData[policy_namespace][policy_name]).lower() != 'enabled': msg = ('The policy {0} must either be "Enabled", ' '"Disabled", or "Not Configured"') msg = msg.format(policy_name) @@ -5336,4 +5401,4 @@ def set_(computer_policy=None, user_policy=None, return True else: msg = 'You have to specify something!' - raise SaltInvocationError(msg) + raise SaltInvocationError(msg) \ No newline at end of file From f74480f11e7907e3af85dc110b01c120769c5924 Mon Sep 17 00:00:00 2001 From: lomeroe Date: Tue, 22 Aug 2017 09:53:07 -0500 Subject: [PATCH 145/639] lint fix --- salt/modules/win_lgpo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index ee0dbbad6d..766f9162be 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -5401,4 +5401,4 @@ def set_(computer_policy=None, user_policy=None, return True else: msg = 'You have to specify something!' - raise SaltInvocationError(msg) \ No newline at end of file + raise SaltInvocationError(msg) From ed97cff5f6b787c0895e71f842df1da9017f098d Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 22 Aug 2017 16:59:22 -0600 Subject: [PATCH 146/639] Fix `unit.utils.test_which` for Windows This test wasn't really written with Windows in mind. Uses PATHEXT that actually resembles a Windows environment. The test value has the correct path seperator for Windows. 
--- tests/unit/utils/test_which.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/unit/utils/test_which.py b/tests/unit/utils/test_which.py index 9ab674791d..6bb4cf6e1a 100644 --- a/tests/unit/utils/test_which.py +++ b/tests/unit/utils/test_which.py @@ -44,18 +44,21 @@ class TestWhich(TestCase): # The second, iterating through $PATH, should also return False, # still checking for Linux False, + # We will now also return False once so we get a .EXE back from + # the function, see PATHEXT below. + False, # Lastly return True, this is the windows check. True ] # Let's patch os.environ to provide a custom PATH variable - with patch.dict(os.environ, {'PATH': '/bin'}): + with patch.dict(os.environ, {'PATH': '/bin', + 'PATHEXT': '.COM;.EXE;.BAT;.CMD'}): # Let's also patch is_windows to return True with patch('salt.utils.is_windows', lambda: True): with patch('os.path.isfile', lambda x: True): self.assertEqual( salt.utils.which('this-binary-exists-under-windows'), - # The returned path should return the .exe suffix - '/bin/this-binary-exists-under-windows.EXE' + os.path.join('/bin', 'this-binary-exists-under-windows.EXE') ) def test_missing_binary_in_windows(self): @@ -106,6 +109,5 @@ class TestWhich(TestCase): with patch('os.path.isfile', lambda x: True): self.assertEqual( salt.utils.which('this-binary-exists-under-windows'), - # The returned path should return the .exe suffix - '/bin/this-binary-exists-under-windows.CMD' + os.path.join('/bin', 'this-binary-exists-under-windows.CMD') ) From a6b379f542fdbc6a3aeca6b1be3148db91b7e08b Mon Sep 17 00:00:00 2001 From: "John (JJ) Jawed" Date: Tue, 22 Aug 2017 23:11:39 -0700 Subject: [PATCH 147/639] Add distributed as a master selection strategy --- salt/config/__init__.py | 7 ++++--- salt/daemons/flo/core.py | 2 +- salt/minion.py | 26 ++++++++++++++++++++++---- 3 files changed, 27 insertions(+), 8 deletions(-) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 
a3c070a7e6..ce0183cec5 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -111,9 +111,10 @@ VALID_OPTS = { 'master_port': (six.string_types, int), # The behaviour of the minion when connecting to a master. Can specify 'failover', - # 'disable' or 'func'. If 'func' is specified, the 'master' option should be set to an - # exec module function to run to determine the master hostname. If 'disable' is specified - # the minion will run, but will not try to connect to a master. + # 'disable', 'distributed', or 'func'. If 'func' is specified, the 'master' option should be + # set to an exec module function to run to determine the master hostname. If 'disable' is + # specified the minion will run, but will not try to connect to a master. If 'distributed' + # is specified the minion will try to deterministically pick a master based on its' id. 'master_type': str, # Specify the format in which the master address will be specified. Can diff --git a/salt/daemons/flo/core.py b/salt/daemons/flo/core.py index 91f1e1b6b4..1a11e08aed 100644 --- a/salt/daemons/flo/core.py +++ b/salt/daemons/flo/core.py @@ -400,7 +400,7 @@ class SaltRaetRoadStackJoiner(ioflo.base.deeding.Deed): kind=kinds.applKinds.master)) except gaierror as ex: log.warning("Unable to connect to master {0}: {1}".format(mha, ex)) - if self.opts.value.get('master_type') != 'failover': + if self.opts.value.get(u'master_type') not in (u'failover', u'distributed'): raise ex if not stack.remotes: raise ex diff --git a/salt/minion.py b/salt/minion.py index 5713a0edb6..5894873ba0 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -21,6 +21,7 @@ import multiprocessing from random import randint, shuffle from stat import S_IMODE import salt.serializers.msgpack +from binascii import crc32 # Import Salt Libs # pylint: disable=import-error,no-name-in-module,redefined-builtin @@ -443,13 +444,30 @@ class MinionBase(object): if opts[u'master_type'] == u'func': eval_master_func(opts) - # if failover is set, 
master has to be of type list - elif opts[u'master_type'] == u'failover': + # if failover or distributed is set, master has to be of type list + elif opts[u'master_type'] in (u'failover', u'distributed'): if isinstance(opts[u'master'], list): log.info( u'Got list of available master addresses: %s', opts[u'master'] ) + + if opts[u'master_type'] == u'distributed': + master_len = len(opts[u'master']) + if master_len > 1: + secondary_masters = opts[u'master'][1:] + master_idx = crc32(opts[u'id']) % master_len + try: + preferred_masters = opts[u'master'] + preferred_masters[0] = opts[u'master'][master_idx] + preferred_masters[1:] = [m for m in opts[u'master'] if m != preferred_masters[0]] + opts[u'master'] = preferred_masters + log.info(u'Distributed to the master at \'{0}\'.'.format(opts[u'master'][0])) + except (KeyError, AttributeError, TypeError): + log.warning(u'Failed to distribute to a specific master.') + else: + log.warning(u'master_type = distributed needs more than 1 master.') + if opts[u'master_shuffle']: if opts[u'master_failback']: secondary_masters = opts[u'master'][1:] @@ -497,7 +515,7 @@ class MinionBase(object): sys.exit(salt.defaults.exitcodes.EX_GENERIC) # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve. # See issue 21082 for details - if opts[u'retry_dns']: + if opts[u'retry_dns'] and opts[u'master_type'] == u'failover': msg = (u'\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. 
' u'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.') log.critical(msg) @@ -845,7 +863,7 @@ class MinionManager(MinionBase): Spawn all the coroutines which will sign in to masters ''' masters = self.opts[u'master'] - if self.opts[u'master_type'] == u'failover' or not isinstance(self.opts[u'master'], list): + if (self.opts[u'master_type'] in (u'failover', u'distributed')) or not isinstance(self.opts[u'master'], list): masters = [masters] for master in masters: From f4f32421abd86fcc1b21c36dcd3d0f9d92abf4b2 Mon Sep 17 00:00:00 2001 From: Mapel88 Date: Wed, 23 Aug 2017 10:03:44 +0300 Subject: [PATCH 148/639] Fix plint errors Fix plint errors --- salt/states/win_iis.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/salt/states/win_iis.py b/salt/states/win_iis.py index 74507c683c..38d2ed9ae4 100644 --- a/salt/states/win_iis.py +++ b/salt/states/win_iis.py @@ -481,7 +481,6 @@ def container_setting(name, container, settings=None): :param str container: The type of IIS container. The container types are: AppPools, Sites, SslBindings :param str settings: A dictionary of the setting names and their values. - Example of usage for the ``AppPools`` container: .. 
code-block:: yaml @@ -533,7 +532,7 @@ def container_setting(name, container, settings=None): settings=settings.keys()) for setting in settings: # map identity type from numeric to string for comparing - if (setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys()): + if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys(): settings[setting] = identityType_map2string[settings[setting]] if str(settings[setting]) != str(current_settings[setting]): From 290d7b54af1926f8fb977d4613897ed3e7081ce4 Mon Sep 17 00:00:00 2001 From: Mapel88 Date: Wed, 23 Aug 2017 10:05:17 +0300 Subject: [PATCH 149/639] Fix plint errors Fix plint errors --- salt/modules/win_iis.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/win_iis.py b/salt/modules/win_iis.py index 80bba6c438..38217b07a5 100644 --- a/salt/modules/win_iis.py +++ b/salt/modules/win_iis.py @@ -1290,7 +1290,7 @@ def set_container_setting(name, container, settings): value = "'{0}'".format(settings[setting]) # Map to numeric to support server 2008 - if (setting == 'processModel.identityType' and settings[setting] in identityType_map2numeric.keys()): + if setting == 'processModel.identityType' and settings[setting] in identityType_map2numeric.keys(): value = identityType_map2numeric[settings[setting]] ps_cmd.extend(['Set-ItemProperty', @@ -1313,7 +1313,7 @@ def set_container_setting(name, container, settings): for setting in settings: # map identity type from numeric to string for comparing - if (setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys()): + if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys(): settings[setting] = identityType_map2string[settings[setting]] if str(settings[setting]) != str(new_settings[setting]): From 330d989b462d1b311f9ca1c485bdc5b5e2a0e60c Mon Sep 17 00:00:00 2001 From: Denys Havrysh Date: Wed, 23 Aug 
2017 11:28:02 +0300 Subject: [PATCH 150/639] Import util modules on Master like it works on Minion --- conf/master | 3 +++ doc/ref/configuration/master.rst | 16 ++++++++++++++++ doc/topics/utils/index.rst | 4 ++++ salt/config/__init__.py | 18 +++++++++++++++--- 4 files changed, 38 insertions(+), 3 deletions(-) diff --git a/conf/master b/conf/master index 3aa9742651..3eb7e1357f 100644 --- a/conf/master +++ b/conf/master @@ -533,6 +533,9 @@ # Add any additional locations to look for master runners: #runner_dirs: [] +# Add any additional locations to look for master utils: +#utils_dirs: [] + # Enable Cython for master side modules: #cython_enable: False diff --git a/doc/ref/configuration/master.rst b/doc/ref/configuration/master.rst index d78d0ef5ec..655eb74e56 100644 --- a/doc/ref/configuration/master.rst +++ b/doc/ref/configuration/master.rst @@ -1731,6 +1731,22 @@ Set additional directories to search for runner modules. runner_dirs: - /var/lib/salt/runners +.. conf_master:: utils_dirs + +``utils_dirs`` +--------------- + +.. versionadded:: Oxygen + +Default: ``[]`` + +Set additional directories to search for util modules. + +.. code-block:: yaml + + utils_dirs: + - /var/lib/salt/utils + .. conf_master:: cython_enable ``cython_enable`` diff --git a/doc/topics/utils/index.rst b/doc/topics/utils/index.rst index 44380f3541..7f08325f94 100644 --- a/doc/topics/utils/index.rst +++ b/doc/topics/utils/index.rst @@ -81,6 +81,10 @@ the ``foo`` utility module with a ``__virtual__`` function. def bar(): return 'baz' +.. versionadded:: Oxygen + Instantiating objects from classes declared in util modules works with + Master side modules, such as Runners, Outputters, etc. + Also you could even write your utility modules in object oriented fashion: .. 
code-block:: python diff --git a/salt/config/__init__.py b/salt/config/__init__.py index ee4649455f..593828af58 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -1473,8 +1473,9 @@ DEFAULT_MASTER_OPTS = { 'syndic_forward_all_events': False, 'syndic_log_file': os.path.join(salt.syspaths.LOGS_DIR, 'syndic'), 'syndic_pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-syndic.pid'), - 'runner_dirs': [], 'outputter_dirs': [], + 'runner_dirs': [], + 'utils_dirs': [], 'client_acl_verify': True, 'publisher_acl': {}, 'publisher_acl_blacklist': {}, @@ -3601,12 +3602,23 @@ def apply_master_config(overrides=None, defaults=None): if len(opts['sock_dir']) > len(opts['cachedir']) + 10: opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix') + opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens') + opts['syndic_dir'] = os.path.join(opts['cachedir'], 'syndics') + # Make sure ext_mods gets set if it is an untrue value + # (here to catch older bad configs) opts['extension_modules'] = ( opts.get('extension_modules') or os.path.join(opts['cachedir'], 'extmods') ) - opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens') - opts['syndic_dir'] = os.path.join(opts['cachedir'], 'syndics') + # Set up the utils_dirs location from the extension_modules location + opts['utils_dirs'] = ( + opts.get('utils_dirs') or + [os.path.join(opts['extension_modules'], 'utils')] + ) + + # Insert all 'utils_dirs' directories to the system path + insert_system_path(opts, opts['utils_dirs']) + if (overrides or {}).get('ipc_write_buffer', '') == 'dynamic': opts['ipc_write_buffer'] = _DFLT_IPC_WBUFFER if 'ipc_write_buffer' not in overrides: From efc1c8c506c47e9240acc4d928cd590e3d14f4f0 Mon Sep 17 00:00:00 2001 From: Andreas Thienemann Date: Sat, 19 Aug 2017 00:19:25 +0200 Subject: [PATCH 151/639] Mark selinux._filetype_id_to_string as public function _filetype_id_to_string is a private function in the selinux module. 
The selinux state module calls the function as filetype_id_to_string which fails of course. Rename the function from the private one to a public one to make the state call work. Resolves #42505. --- salt/modules/selinux.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/selinux.py b/salt/modules/selinux.py index 8a060e418e..d227b12eb4 100644 --- a/salt/modules/selinux.py +++ b/salt/modules/selinux.py @@ -403,7 +403,7 @@ def _context_string_to_dict(context): return ret -def _filetype_id_to_string(filetype='a'): +def filetype_id_to_string(filetype='a'): ''' Translates SELinux filetype single-letter representation to a more human-readable version (which is also used in `semanage fcontext -l`). @@ -444,7 +444,7 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l 'sel_role': '[^:]+', # se_role for file context is always object_r 'sel_type': sel_type or '[^:]+', 'sel_level': sel_level or '[^:]+'} - cmd_kwargs['filetype'] = '[[:alpha:] ]+' if filetype is None else _filetype_id_to_string(filetype) + cmd_kwargs['filetype'] = '[[:alpha:] ]+' if filetype is None else filetype_id_to_string(filetype) cmd = 'semanage fcontext -l | egrep ' + \ "'^{filespec}{spacer}{filetype}{spacer}{sel_user}:{sel_role}:{sel_type}:{sel_level}$'".format(**cmd_kwargs) current_entry_text = __salt__['cmd.shell'](cmd) From 484512ebe9532ddfda587b4ae3b542b3f2e700ce Mon Sep 17 00:00:00 2001 From: rallytime Date: Wed, 23 Aug 2017 10:37:18 -0400 Subject: [PATCH 152/639] Fix versionadded reference for new eauth token modularity The version should be `Oxygen`, not `2017.7.2`. 
Refs #42720 --- salt/runners/saltutil.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/runners/saltutil.py b/salt/runners/saltutil.py index 810abfc667..b691f827e1 100644 --- a/salt/runners/saltutil.py +++ b/salt/runners/saltutil.py @@ -529,7 +529,7 @@ def sync_roster(saltenv='base', extmod_whitelist=None, extmod_blacklist=None): def sync_eauth_tokens(saltenv='base', extmod_whitelist=None, extmod_blacklist=None): ''' - .. versionadded:: 2017.7.2 + .. versionadded:: Oxygen Sync eauth token modules from ``salt://_tokens`` to the master From c5841e2ade59c3c24bf8f9a8310f0c544373b9ac Mon Sep 17 00:00:00 2001 From: Ushmodin Nikolay Date: Wed, 23 Aug 2017 22:43:04 +0700 Subject: [PATCH 153/639] state.sls hangs on file.recurse with clean: True on windows --- salt/states/file.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/states/file.py b/salt/states/file.py index 55cec4604e..128fc7165a 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -446,7 +446,11 @@ def _clean_dir(root, keep, exclude_pat): while True: fn_ = os.path.dirname(fn_) real_keep.add(fn_) - if fn_ in ['/', ''.join([os.path.splitdrive(fn_)[0], '\\\\'])]: + if fn_ in [ + os.sep, + ''.join([os.path.splitdrive(fn_)[0], os.sep]), + ''.join([os.path.splitdrive(fn_)[0], os.sep, os.sep]) + ]: break def _delete_not_kept(nfn): From f232bed9f98cd9c9623f8be687f6824fade9e027 Mon Sep 17 00:00:00 2001 From: lomeroe Date: Mon, 17 Jul 2017 14:55:30 -0500 Subject: [PATCH 154/639] add additional checks for ADM policies that have the same ADMX policy ID (#42279) --- salt/modules/win_lgpo.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index 21f855e3c2..cbb92e99cc 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -4597,11 +4597,35 @@ def _lookup_admin_template(policy_name, if adml_search_results: multiple_adml_entries = False suggested_policies = 
'' + adml_to_remove = [] if len(adml_search_results) > 1: multiple_adml_entries = True for adml_search_result in adml_search_results: if not getattr(adml_search_result, 'text', '').strip() == policy_name: - adml_search_results.remove(adml_search_result) + adml_to_remove.append(adml_search_result) + if hierarchy: + display_name_searchval = '$({0}.{1})'.format( + adml_search_result.tag.split('}')[1], + adml_search_result.attrib['id']) + policy_search_string = '//{0}:policy[@*[local-name() = "displayName"] = "{1}" and (@*[local-name() = "class"] = "Both" or @*[local-name() = "class"] = "{2}") ]'.format( + adml_search_result.prefix, + display_name_searchval, + policy_class) + # this should only be 1 result + admx_search_results = admx_policy_definitions.xpath(policy_search_string, namespaces=adml_search_result.nsmap) + for search_result in admx_search_results: + this_hierarchy = _build_parent_list(search_result, + admx_policy_definitions, + True, + adml_policy_resources) + this_hierarchy.reverse() + if hierarchy != this_hierarchy: + adml_to_remove.append(adml_search_result) + for adml in adml_to_remove: + if adml in adml_search_results: + adml_search_results.remove(adml) + if len(adml_search_results) == 1 and multiple_adml_entries: + multiple_adml_entries = False for adml_search_result in adml_search_results: dmsg = 'found an ADML entry matching the string! 
{0} -- {1}' log.debug(dmsg.format(adml_search_result.tag, From 61bd12c0de5bba5b506c716d03927f820c3b7dad Mon Sep 17 00:00:00 2001 From: lomeroe Date: Tue, 22 Aug 2017 09:04:31 -0500 Subject: [PATCH 155/639] track xml namespace to ensure policies w/duplicate IDs or Names do not conflict --- salt/modules/win_lgpo.py | 827 +++++++++++++++++++++------------------ 1 file changed, 446 insertions(+), 381 deletions(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index cbb92e99cc..ed556c4fc5 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -3510,22 +3510,31 @@ def _checkAllAdmxPolicies(policy_class, not_configured_policies.remove(policy_item) for not_configured_policy in not_configured_policies: - policy_vals[not_configured_policy.attrib['name']] = 'Not Configured' + not_configured_policy_namespace = not_configured_policy.nsmap[not_configured_policy.prefix] + if not_configured_policy_namespace not in policy_vals: + policy_vals[not_configured_policy_namespace] = {} + policy_vals[not_configured_policy_namespace][not_configured_policy.attrib['name']] = 'Not Configured' if return_full_policy_names: - full_names[not_configured_policy.attrib['name']] = _getFullPolicyName( + if not_configured_policy_namespace not in full_names: + full_names[not_configured_policy_namespace] = {} + full_names[not_configured_policy_namespace][not_configured_policy.attrib['name']] = _getFullPolicyName( not_configured_policy, not_configured_policy.attrib['name'], return_full_policy_names, adml_policy_resources) log.debug('building hierarchy for non-configured item {0}'.format(not_configured_policy.attrib['name'])) - hierarchy[not_configured_policy.attrib['name']] = _build_parent_list(not_configured_policy, - admx_policy_definitions, - return_full_policy_names, - adml_policy_resources) + if not_configured_policy_namespace not in hierarchy: + hierarchy[not_configured_policy_namespace] = {} + 
hierarchy[not_configured_policy_namespace][not_configured_policy.attrib['name']] = _build_parent_list( + not_configured_policy, + admx_policy_definitions, + return_full_policy_names, + adml_policy_resources) for admx_policy in admx_policies: this_key = None this_valuename = None this_policyname = None + this_policynamespace = None this_policy_setting = 'Not Configured' element_only_enabled_disabled = True explicit_enable_disable_value_setting = False @@ -3544,6 +3553,7 @@ def _checkAllAdmxPolicies(policy_class, log.error('policy item {0} does not have the required "name" ' 'attribute'.format(admx_policy.attrib)) break + this_policynamespace = admx_policy.nsmap[admx_policy.prefix] if ENABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured': element_only_enabled_disabled = False explicit_enable_disable_value_setting = True @@ -3555,7 +3565,9 @@ def _checkAllAdmxPolicies(policy_class, policy_filedata): this_policy_setting = 'Enabled' log.debug('{0} is enabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting if DISABLED_VALUE_XPATH(admx_policy) and this_policy_setting == 'Not Configured': element_only_enabled_disabled = False explicit_enable_disable_value_setting = True @@ -3567,21 +3579,27 @@ def _checkAllAdmxPolicies(policy_class, policy_filedata): this_policy_setting = 'Disabled' log.debug('{0} is disabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting if ENABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured': element_only_enabled_disabled = False explicit_enable_disable_value_setting = True if _checkListItem(admx_policy, this_policyname, 
this_key, ENABLED_LIST_XPATH, policy_filedata): this_policy_setting = 'Enabled' log.debug('{0} is enabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting if DISABLED_LIST_XPATH(admx_policy) and this_policy_setting == 'Not Configured': element_only_enabled_disabled = False explicit_enable_disable_value_setting = True if _checkListItem(admx_policy, this_policyname, this_key, DISABLED_LIST_XPATH, policy_filedata): this_policy_setting = 'Disabled' log.debug('{0} is disabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting if not explicit_enable_disable_value_setting and this_valuename: # the policy has a key/valuename but no explicit enabled/Disabled @@ -3594,7 +3612,9 @@ def _checkAllAdmxPolicies(policy_class, policy_filedata): this_policy_setting = 'Enabled' log.debug('{0} is enabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting elif _regexSearchRegPolData(re.escape(_buildKnownDataSearchString(this_key, this_valuename, 'REG_DWORD', @@ -3603,7 +3623,9 @@ def _checkAllAdmxPolicies(policy_class, policy_filedata): this_policy_setting = 'Disabled' log.debug('{0} is disabled'.format(this_policyname)) - policy_vals[this_policyname] = this_policy_setting + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = this_policy_setting if ELEMENTS_XPATH(admx_policy): if element_only_enabled_disabled or this_policy_setting == 
'Enabled': @@ -3801,65 +3823,84 @@ def _checkAllAdmxPolicies(policy_class, and len(configured_elements.keys()) == len(required_elements.keys()): if policy_disabled_elements == len(required_elements.keys()): log.debug('{0} is disabled by all enum elements'.format(this_policyname)) - policy_vals[this_policyname] = 'Disabled' + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = 'Disabled' else: - policy_vals[this_policyname] = configured_elements + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = configured_elements log.debug('{0} is enabled by enum elements'.format(this_policyname)) else: if this_policy_setting == 'Enabled': - policy_vals[this_policyname] = configured_elements - if return_full_policy_names and this_policyname in policy_vals: - full_names[this_policyname] = _getFullPolicyName( + if this_policynamespace not in policy_vals: + policy_vals[this_policynamespace] = {} + policy_vals[this_policynamespace][this_policyname] = configured_elements + if return_full_policy_names and this_policynamespace in policy_vals and this_policyname in policy_vals[this_policynamespace]: + if this_policynamespace not in full_names: + full_names[this_policynamespace] = {} + full_names[this_policynamespace][this_policyname] = _getFullPolicyName( admx_policy, admx_policy.attrib['name'], return_full_policy_names, adml_policy_resources) - if this_policyname in policy_vals: - hierarchy[this_policyname] = _build_parent_list(admx_policy, + if this_policynamespace in policy_vals and this_policyname in policy_vals[this_policynamespace]: + if this_policynamespace not in hierarchy: + hierarchy[this_policynamespace] = {} + hierarchy[this_policynamespace][this_policyname] = _build_parent_list(admx_policy, admx_policy_definitions, return_full_policy_names, adml_policy_resources) if policy_vals and 
return_full_policy_names and not hierarchical_return: unpathed_dict = {} pathed_dict = {} - for policy_item in policy_vals.keys(): - if full_names[policy_item] in policy_vals: - # add this item with the path'd full name - full_path_list = hierarchy[policy_item] + for policy_namespace in list(policy_vals): + for policy_item in list(policy_vals[policy_namespace]): + if full_names[policy_namespace][policy_item] in policy_vals[policy_namespace]: + # add this item with the path'd full name + full_path_list = hierarchy[policy_namespace][policy_item] + full_path_list.reverse() + full_path_list.append(full_names[policy_namespace][policy_item]) + policy_vals['\\'.join(full_path_list)] = policy_vals[policy_namespace].pop(policy_item) + pathed_dict[full_names[policy_namespace][policy_item]] = True + else: + policy_vals[policy_namespace][full_names[policy_namespace][policy_item]] = policy_vals[policy_namespace].pop(policy_item) + if policy_namespace not in unpathed_dict: + unpathed_dict[policy_namespace] = {} + unpathed_dict[policy_namespace][full_names[policy_namespace][policy_item]] = policy_item + # go back and remove any "unpathed" policies that need a full path + for path_needed in unpathed_dict[policy_namespace]: + # remove the item with the same full name and re-add it w/a path'd version + full_path_list = hierarchy[policy_namespace][unpathed_dict[policy_namespace][path_needed]] full_path_list.reverse() - full_path_list.append(full_names[policy_item]) - policy_vals['\\'.join(full_path_list)] = policy_vals.pop(policy_item) - pathed_dict[full_names[policy_item]] = True - else: - policy_vals[full_names[policy_item]] = policy_vals.pop(policy_item) - unpathed_dict[full_names[policy_item]] = policy_item - # go back and remove any "unpathed" policies that need a full path - for path_needed in unpathed_dict.keys(): - # remove the item with the same full name and re-add it w/a path'd version - full_path_list = hierarchy[unpathed_dict[path_needed]] - full_path_list.reverse() - 
full_path_list.append(path_needed) - log.debug('full_path_list == {0}'.format(full_path_list)) - policy_vals['\\'.join(full_path_list)] = policy_vals.pop(path_needed) + full_path_list.append(path_needed) + log.debug('full_path_list == {0}'.format(full_path_list)) + policy_vals['\\'.join(full_path_list)] = policy_vals[policy_namespace].pop(path_needed) + for policy_namespace in list(policy_vals): + if policy_vals[policy_namespace] == {}: + policy_vals.pop(policy_namespace) if policy_vals and hierarchical_return: if hierarchy: - for hierarchy_item in hierarchy.keys(): - if hierarchy_item in policy_vals: - tdict = {} - first_item = True - for item in hierarchy[hierarchy_item]: - newdict = {} - if first_item: - h_policy_name = hierarchy_item - if return_full_policy_names: - h_policy_name = full_names[hierarchy_item] - newdict[item] = {h_policy_name: policy_vals.pop(hierarchy_item)} - first_item = False - else: - newdict[item] = tdict - tdict = newdict - if tdict: - policy_vals = dictupdate.update(policy_vals, tdict) + for policy_namespace in hierarchy: + for hierarchy_item in hierarchy[policy_namespace]: + if hierarchy_item in policy_vals[policy_namespace]: + tdict = {} + first_item = True + for item in hierarchy[policy_namespace][hierarchy_item]: + newdict = {} + if first_item: + h_policy_name = hierarchy_item + if return_full_policy_names: + h_policy_name = full_names[policy_namespace][hierarchy_item] + newdict[item] = {h_policy_name: policy_vals[policy_namespace].pop(hierarchy_item)} + first_item = False + else: + newdict[item] = tdict + tdict = newdict + if tdict: + policy_vals = dictupdate.update(policy_vals, tdict) + if policy_namespace in policy_vals and policy_vals[policy_namespace] == {}: + policy_vals.pop(policy_namespace) policy_vals = { module_policy_data.admx_registry_classes[policy_class]['lgpo_section']: { 'Administrative Templates': policy_vals @@ -4123,6 +4164,7 @@ def _policyFileReplaceOrAppend(this_string, policy_data, append_only=False): def 
_writeAdminTemplateRegPolFile(admtemplate_data, + admtemplate_namespace_data, admx_policy_definitions=None, adml_policy_resources=None, display_language='en-US', @@ -4139,7 +4181,9 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, existing_data = '' base_policy_settings = {} policy_data = _policy_info() - policySearchXpath = etree.XPath('//*[@*[local-name() = "id"] = $id or @*[local-name() = "name"] = $id]') + #//{0}:policy[@displayName = "{1}" and (@class = "Both" or @class = "{2}") ] + #policySearchXpath = etree.XPath('//*[@ns1:id = $id or @ns1:name = $id]') + policySearchXpath = '//ns1:*[@id = "{0}" or @name = "{0}"]' try: if admx_policy_definitions is None or adml_policy_resources is None: admx_policy_definitions, adml_policy_resources = _processPolicyDefinitions( @@ -4151,298 +4195,305 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, hierarchical_return=False, return_not_configured=False) log.debug('preparing to loop through policies requested to be configured') - for adm_policy in admtemplate_data.keys(): - if str(admtemplate_data[adm_policy]).lower() == 'not configured': - if adm_policy in base_policy_settings: - base_policy_settings.pop(adm_policy) - else: - log.debug('adding {0} to base_policy_settings'.format(adm_policy)) - base_policy_settings[adm_policy] = admtemplate_data[adm_policy] - for admPolicy in base_policy_settings.keys(): - log.debug('working on admPolicy {0}'.format(admPolicy)) - explicit_enable_disable_value_setting = False - this_key = None - this_valuename = None - if str(base_policy_settings[admPolicy]).lower() == 'disabled': - log.debug('time to disable {0}'.format(admPolicy)) - this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy) - if this_policy: - this_policy = this_policy[0] - if 'class' in this_policy.attrib: - if this_policy.attrib['class'] == registry_class or this_policy.attrib['class'] == 'Both': - if 'key' in this_policy.attrib: - this_key = this_policy.attrib['key'] - else: - msg = 'policy item {0} 
does not have the required "key" attribute' - log.error(msg.format(this_policy.attrib)) - break - if 'valueName' in this_policy.attrib: - this_valuename = this_policy.attrib['valueName'] - if DISABLED_VALUE_XPATH(this_policy): - # set the disabled value in the registry.pol file - explicit_enable_disable_value_setting = True - disabled_value_string = _checkValueItemParent(this_policy, - admPolicy, - this_key, - this_valuename, - DISABLED_VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - existing_data = _policyFileReplaceOrAppend(disabled_value_string, - existing_data) - if DISABLED_LIST_XPATH(this_policy): - explicit_enable_disable_value_setting = True - disabled_list_strings = _checkListItem(this_policy, - admPolicy, - this_key, - DISABLED_LIST_XPATH, - None, - test_items=False) - log.debug('working with disabledList portion of {0}'.format(admPolicy)) - existing_data = _policyFileReplaceOrAppendList(disabled_list_strings, + for adm_namespace in admtemplate_data: + for adm_policy in admtemplate_data[adm_namespace]: + if str(admtemplate_data[adm_namespace][adm_policy]).lower() == 'not configured': + if adm_policy in base_policy_settings[adm_namespace]: + base_policy_settings[adm_namespace].pop(adm_policy) + else: + log.debug('adding {0} to base_policy_settings'.format(adm_policy)) + if adm_namespace not in base_policy_settings: + base_policy_settings[adm_namespace] = {} + base_policy_settings[adm_namespace][adm_policy] = admtemplate_data[adm_namespace][adm_policy] + for adm_namespace in base_policy_settings: + for admPolicy in base_policy_settings[adm_namespace]: + log.debug('working on admPolicy {0}'.format(admPolicy)) + explicit_enable_disable_value_setting = False + this_key = None + this_valuename = None + if str(base_policy_settings[adm_namespace][admPolicy]).lower() == 'disabled': + log.debug('time to disable {0}'.format(admPolicy)) + #this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy, namespaces={'ns1': adm_namespace}) + 
this_policy = admx_policy_definitions.xpath(policySearchXpath.format('ns1', admPolicy), namespaces={'ns1': adm_namespace}) + if this_policy: + this_policy = this_policy[0] + if 'class' in this_policy.attrib: + if this_policy.attrib['class'] == registry_class or this_policy.attrib['class'] == 'Both': + if 'key' in this_policy.attrib: + this_key = this_policy.attrib['key'] + else: + msg = 'policy item {0} does not have the required "key" attribute' + log.error(msg.format(this_policy.attrib)) + break + if 'valueName' in this_policy.attrib: + this_valuename = this_policy.attrib['valueName'] + if DISABLED_VALUE_XPATH(this_policy): + # set the disabled value in the registry.pol file + explicit_enable_disable_value_setting = True + disabled_value_string = _checkValueItemParent(this_policy, + admPolicy, + this_key, + this_valuename, + DISABLED_VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + existing_data = _policyFileReplaceOrAppend(disabled_value_string, existing_data) - if not explicit_enable_disable_value_setting and this_valuename: - disabled_value_string = _buildKnownDataSearchString(this_key, - this_valuename, - 'REG_DWORD', - None, - check_deleted=True) - existing_data = _policyFileReplaceOrAppend(disabled_value_string, - existing_data) - if ELEMENTS_XPATH(this_policy): - log.debug('checking elements of {0}'.format(admPolicy)) - for elements_item in ELEMENTS_XPATH(this_policy): - for child_item in elements_item.getchildren(): - child_key = this_key - child_valuename = this_valuename - if 'key' in child_item.attrib: - child_key = child_item.attrib['key'] - if 'valueName' in child_item.attrib: - child_valuename = child_item.attrib['valueName'] - if etree.QName(child_item).localname == 'boolean' \ - and (TRUE_LIST_XPATH(child_item) or FALSE_LIST_XPATH(child_item)): - # WARNING: no OOB adm files use true/falseList items - # this has not been fully vetted - temp_dict = {'trueList': TRUE_LIST_XPATH, 'falseList': FALSE_LIST_XPATH} - for this_list in 
temp_dict.keys(): - disabled_list_strings = _checkListItem( - child_item, - admPolicy, - child_key, - temp_dict[this_list], - None, - test_items=False) - log.debug('working with {1} portion of {0}'.format( - admPolicy, - this_list)) - existing_data = _policyFileReplaceOrAppendList( - disabled_list_strings, - existing_data) - elif etree.QName(child_item).localname == 'boolean' \ - or etree.QName(child_item).localname == 'decimal' \ - or etree.QName(child_item).localname == 'text' \ - or etree.QName(child_item).localname == 'longDecimal' \ - or etree.QName(child_item).localname == 'multiText' \ - or etree.QName(child_item).localname == 'enum': - disabled_value_string = _processValueItem(child_item, - child_key, - child_valuename, - this_policy, - elements_item, - check_deleted=True) - msg = 'I have disabled value string of {0}' - log.debug(msg.format(disabled_value_string)) - existing_data = _policyFileReplaceOrAppend( - disabled_value_string, - existing_data) - elif etree.QName(child_item).localname == 'list': - disabled_value_string = _processValueItem(child_item, - child_key, - child_valuename, - this_policy, - elements_item, - check_deleted=True) - msg = 'I have disabled value string of {0}' - log.debug(msg.format(disabled_value_string)) - existing_data = _policyFileReplaceOrAppend( - disabled_value_string, - existing_data) - else: - msg = 'policy {0} was found but it does not appear to be valid for the class {1}' - log.error(msg.format(admPolicy, registry_class)) - else: - msg = 'policy item {0} does not have the requried "class" attribute' - log.error(msg.format(this_policy.attrib)) - else: - log.debug('time to enable and set the policy "{0}"'.format(admPolicy)) - this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy) - if this_policy: - this_policy = this_policy[0] - if 'class' in this_policy.attrib: - if this_policy.attrib['class'] == registry_class or this_policy.attrib['class'] == 'Both': - if 'key' in this_policy.attrib: - this_key = 
this_policy.attrib['key'] - else: - msg = 'policy item {0} does not have the required "key" attribute' - log.error(msg.format(this_policy.attrib)) - break - if 'valueName' in this_policy.attrib: - this_valuename = this_policy.attrib['valueName'] - - if ENABLED_VALUE_XPATH(this_policy): - explicit_enable_disable_value_setting = True - enabled_value_string = _checkValueItemParent(this_policy, - admPolicy, - this_key, - this_valuename, - ENABLED_VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - existing_data = _policyFileReplaceOrAppend( - enabled_value_string, - existing_data) - if ENABLED_LIST_XPATH(this_policy): - explicit_enable_disable_value_setting = True - enabled_list_strings = _checkListItem(this_policy, - admPolicy, - this_key, - ENABLED_LIST_XPATH, - None, - test_items=False) - log.debug('working with enabledList portion of {0}'.format(admPolicy)) - existing_data = _policyFileReplaceOrAppendList( - enabled_list_strings, - existing_data) - if not explicit_enable_disable_value_setting and this_valuename: - enabled_value_string = _buildKnownDataSearchString(this_key, - this_valuename, - 'REG_DWORD', - '1', - check_deleted=False) - existing_data = _policyFileReplaceOrAppend( - enabled_value_string, - existing_data) - if ELEMENTS_XPATH(this_policy): - for elements_item in ELEMENTS_XPATH(this_policy): - for child_item in elements_item.getchildren(): - child_key = this_key - child_valuename = this_valuename - if 'key' in child_item.attrib: - child_key = child_item.attrib['key'] - if 'valueName' in child_item.attrib: - child_valuename = child_item.attrib['valueName'] - if child_item.attrib['id'] in base_policy_settings[admPolicy]: - if etree.QName(child_item).localname == 'boolean' and ( - TRUE_LIST_XPATH(child_item) or FALSE_LIST_XPATH(child_item)): - list_strings = [] - if base_policy_settings[admPolicy][child_item.attrib['id']]: - list_strings = _checkListItem(child_item, - admPolicy, - child_key, - TRUE_LIST_XPATH, - None, - test_items=False) - 
log.debug('working with trueList portion of {0}'.format(admPolicy)) - else: - list_strings = _checkListItem(child_item, - admPolicy, - child_key, - FALSE_LIST_XPATH, - None, - test_items=False) - existing_data = _policyFileReplaceOrAppendList( - list_strings, - existing_data) - if etree.QName(child_item).localname == 'boolean' and ( - TRUE_VALUE_XPATH(child_item) or FALSE_VALUE_XPATH(child_item)): - value_string = '' - if base_policy_settings[admPolicy][child_item.attrib['id']]: - value_string = _checkValueItemParent(child_item, - admPolicy, - child_key, - child_valuename, - TRUE_VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - else: - value_string = _checkValueItemParent(child_item, - admPolicy, - child_key, - child_valuename, - FALSE_VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - existing_data = _policyFileReplaceOrAppend( - value_string, - existing_data) + if DISABLED_LIST_XPATH(this_policy): + explicit_enable_disable_value_setting = True + disabled_list_strings = _checkListItem(this_policy, + admPolicy, + this_key, + DISABLED_LIST_XPATH, + None, + test_items=False) + log.debug('working with disabledList portion of {0}'.format(admPolicy)) + existing_data = _policyFileReplaceOrAppendList(disabled_list_strings, + existing_data) + if not explicit_enable_disable_value_setting and this_valuename: + disabled_value_string = _buildKnownDataSearchString(this_key, + this_valuename, + 'REG_DWORD', + None, + check_deleted=True) + existing_data = _policyFileReplaceOrAppend(disabled_value_string, + existing_data) + if ELEMENTS_XPATH(this_policy): + log.debug('checking elements of {0}'.format(admPolicy)) + for elements_item in ELEMENTS_XPATH(this_policy): + for child_item in elements_item.getchildren(): + child_key = this_key + child_valuename = this_valuename + if 'key' in child_item.attrib: + child_key = child_item.attrib['key'] + if 'valueName' in child_item.attrib: + child_valuename = child_item.attrib['valueName'] if 
etree.QName(child_item).localname == 'boolean' \ + and (TRUE_LIST_XPATH(child_item) or FALSE_LIST_XPATH(child_item)): + # WARNING: no OOB adm files use true/falseList items + # this has not been fully vetted + temp_dict = {'trueList': TRUE_LIST_XPATH, 'falseList': FALSE_LIST_XPATH} + for this_list in temp_dict: + disabled_list_strings = _checkListItem( + child_item, + admPolicy, + child_key, + temp_dict[this_list], + None, + test_items=False) + log.debug('working with {1} portion of {0}'.format( + admPolicy, + this_list)) + existing_data = _policyFileReplaceOrAppendList( + disabled_list_strings, + existing_data) + elif etree.QName(child_item).localname == 'boolean' \ or etree.QName(child_item).localname == 'decimal' \ or etree.QName(child_item).localname == 'text' \ or etree.QName(child_item).localname == 'longDecimal' \ - or etree.QName(child_item).localname == 'multiText': - enabled_value_string = _processValueItem( - child_item, - child_key, - child_valuename, - this_policy, - elements_item, - check_deleted=False, - this_element_value=base_policy_settings[admPolicy][child_item.attrib['id']]) - msg = 'I have enabled value string of {0}' - log.debug(msg.format([enabled_value_string])) + or etree.QName(child_item).localname == 'multiText' \ + or etree.QName(child_item).localname == 'enum': + disabled_value_string = _processValueItem(child_item, + child_key, + child_valuename, + this_policy, + elements_item, + check_deleted=True) + msg = 'I have disabled value string of {0}' + log.debug(msg.format(disabled_value_string)) existing_data = _policyFileReplaceOrAppend( - enabled_value_string, + disabled_value_string, existing_data) - elif etree.QName(child_item).localname == 'enum': - for enum_item in child_item.getchildren(): - if base_policy_settings[admPolicy][child_item.attrib['id']] == \ - _getAdmlDisplayName(adml_policy_resources, - enum_item.attrib['displayName'] - ).strip(): - enabled_value_string = _checkValueItemParent( - enum_item, - child_item.attrib['id'], - 
child_key, - child_valuename, - VALUE_XPATH, - None, - check_deleted=False, - test_item=False) - existing_data = _policyFileReplaceOrAppend( - enabled_value_string, - existing_data) - if VALUE_LIST_XPATH(enum_item): - enabled_list_strings = _checkListItem(enum_item, - admPolicy, - child_key, - VALUE_LIST_XPATH, - None, - test_items=False) - msg = 'working with valueList portion of {0}' - log.debug(msg.format(child_item.attrib['id'])) - existing_data = _policyFileReplaceOrAppendList( - enabled_list_strings, - existing_data) - break elif etree.QName(child_item).localname == 'list': - enabled_value_string = _processValueItem( - child_item, - child_key, - child_valuename, - this_policy, - elements_item, - check_deleted=False, - this_element_value=base_policy_settings[admPolicy][child_item.attrib['id']]) - msg = 'I have enabled value string of {0}' - log.debug(msg.format([enabled_value_string])) + disabled_value_string = _processValueItem(child_item, + child_key, + child_valuename, + this_policy, + elements_item, + check_deleted=True) + msg = 'I have disabled value string of {0}' + log.debug(msg.format(disabled_value_string)) existing_data = _policyFileReplaceOrAppend( - enabled_value_string, - existing_data, - append_only=True) + disabled_value_string, + existing_data) + else: + msg = 'policy {0} was found but it does not appear to be valid for the class {1}' + log.error(msg.format(admPolicy, registry_class)) + else: + msg = 'policy item {0} does not have the requried "class" attribute' + log.error(msg.format(this_policy.attrib)) + else: + log.debug('time to enable and set the policy "{0}"'.format(admPolicy)) + #this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy, namespaces={'ns1': adm_namespace}) + this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace}) + log.debug('found this_policy == {0}'.format(this_policy)) + if this_policy: + this_policy = this_policy[0] + if 'class' in 
this_policy.attrib: + if this_policy.attrib['class'] == registry_class or this_policy.attrib['class'] == 'Both': + if 'key' in this_policy.attrib: + this_key = this_policy.attrib['key'] + else: + msg = 'policy item {0} does not have the required "key" attribute' + log.error(msg.format(this_policy.attrib)) + break + if 'valueName' in this_policy.attrib: + this_valuename = this_policy.attrib['valueName'] + + if ENABLED_VALUE_XPATH(this_policy): + explicit_enable_disable_value_setting = True + enabled_value_string = _checkValueItemParent(this_policy, + admPolicy, + this_key, + this_valuename, + ENABLED_VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + existing_data = _policyFileReplaceOrAppend( + enabled_value_string, + existing_data) + if ENABLED_LIST_XPATH(this_policy): + explicit_enable_disable_value_setting = True + enabled_list_strings = _checkListItem(this_policy, + admPolicy, + this_key, + ENABLED_LIST_XPATH, + None, + test_items=False) + log.debug('working with enabledList portion of {0}'.format(admPolicy)) + existing_data = _policyFileReplaceOrAppendList( + enabled_list_strings, + existing_data) + if not explicit_enable_disable_value_setting and this_valuename: + enabled_value_string = _buildKnownDataSearchString(this_key, + this_valuename, + 'REG_DWORD', + '1', + check_deleted=False) + existing_data = _policyFileReplaceOrAppend( + enabled_value_string, + existing_data) + if ELEMENTS_XPATH(this_policy): + for elements_item in ELEMENTS_XPATH(this_policy): + for child_item in elements_item.getchildren(): + child_key = this_key + child_valuename = this_valuename + if 'key' in child_item.attrib: + child_key = child_item.attrib['key'] + if 'valueName' in child_item.attrib: + child_valuename = child_item.attrib['valueName'] + if child_item.attrib['id'] in base_policy_settings[adm_namespace][admPolicy]: + if etree.QName(child_item).localname == 'boolean' and ( + TRUE_LIST_XPATH(child_item) or FALSE_LIST_XPATH(child_item)): + list_strings = [] + if 
base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']]: + list_strings = _checkListItem(child_item, + admPolicy, + child_key, + TRUE_LIST_XPATH, + None, + test_items=False) + log.debug('working with trueList portion of {0}'.format(admPolicy)) + else: + list_strings = _checkListItem(child_item, + admPolicy, + child_key, + FALSE_LIST_XPATH, + None, + test_items=False) + existing_data = _policyFileReplaceOrAppendList( + list_strings, + existing_data) + elif etree.QName(child_item).localname == 'boolean' and ( + TRUE_VALUE_XPATH(child_item) or FALSE_VALUE_XPATH(child_item)): + value_string = '' + if base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']]: + value_string = _checkValueItemParent(child_item, + admPolicy, + child_key, + child_valuename, + TRUE_VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + else: + value_string = _checkValueItemParent(child_item, + admPolicy, + child_key, + child_valuename, + FALSE_VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + existing_data = _policyFileReplaceOrAppend( + value_string, + existing_data) + elif etree.QName(child_item).localname == 'boolean' \ + or etree.QName(child_item).localname == 'decimal' \ + or etree.QName(child_item).localname == 'text' \ + or etree.QName(child_item).localname == 'longDecimal' \ + or etree.QName(child_item).localname == 'multiText': + enabled_value_string = _processValueItem( + child_item, + child_key, + child_valuename, + this_policy, + elements_item, + check_deleted=False, + this_element_value=base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']]) + msg = 'I have enabled value string of {0}' + log.debug(msg.format([enabled_value_string])) + existing_data = _policyFileReplaceOrAppend( + enabled_value_string, + existing_data) + elif etree.QName(child_item).localname == 'enum': + for enum_item in child_item.getchildren(): + if base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']] == \ + 
_getAdmlDisplayName(adml_policy_resources, + enum_item.attrib['displayName'] + ).strip(): + enabled_value_string = _checkValueItemParent( + enum_item, + child_item.attrib['id'], + child_key, + child_valuename, + VALUE_XPATH, + None, + check_deleted=False, + test_item=False) + existing_data = _policyFileReplaceOrAppend( + enabled_value_string, + existing_data) + if VALUE_LIST_XPATH(enum_item): + enabled_list_strings = _checkListItem(enum_item, + admPolicy, + child_key, + VALUE_LIST_XPATH, + None, + test_items=False) + msg = 'working with valueList portion of {0}' + log.debug(msg.format(child_item.attrib['id'])) + existing_data = _policyFileReplaceOrAppendList( + enabled_list_strings, + existing_data) + break + elif etree.QName(child_item).localname == 'list': + enabled_value_string = _processValueItem( + child_item, + child_key, + child_valuename, + this_policy, + elements_item, + check_deleted=False, + this_element_value=base_policy_settings[adm_namespace][admPolicy][child_item.attrib['id']]) + msg = 'I have enabled value string of {0}' + log.debug(msg.format([enabled_value_string])) + existing_data = _policyFileReplaceOrAppend( + enabled_value_string, + existing_data, + append_only=True) _write_regpol_data(existing_data, policy_data.admx_registry_classes[registry_class]['policy_path'], policy_data.gpt_ini_path, @@ -4558,6 +4609,7 @@ def _lookup_admin_template(policy_name, if admx_policy_definitions is None or adml_policy_resources is None: admx_policy_definitions, adml_policy_resources = _processPolicyDefinitions( display_language=adml_language) + admx_search_results = [] admx_search_results = ADMX_SEARCH_XPATH(admx_policy_definitions, policy_name=policy_name, registry_class=policy_class) @@ -4603,24 +4655,31 @@ def _lookup_admin_template(policy_name, for adml_search_result in adml_search_results: if not getattr(adml_search_result, 'text', '').strip() == policy_name: adml_to_remove.append(adml_search_result) - if hierarchy: - display_name_searchval = 
'$({0}.{1})'.format( - adml_search_result.tag.split('}')[1], - adml_search_result.attrib['id']) - policy_search_string = '//{0}:policy[@*[local-name() = "displayName"] = "{1}" and (@*[local-name() = "class"] = "Both" or @*[local-name() = "class"] = "{2}") ]'.format( - adml_search_result.prefix, - display_name_searchval, - policy_class) - # this should only be 1 result - admx_search_results = admx_policy_definitions.xpath(policy_search_string, namespaces=adml_search_result.nsmap) - for search_result in admx_search_results: - this_hierarchy = _build_parent_list(search_result, - admx_policy_definitions, - True, - adml_policy_resources) - this_hierarchy.reverse() - if hierarchy != this_hierarchy: - adml_to_remove.append(adml_search_result) + else: + if hierarchy: + display_name_searchval = '$({0}.{1})'.format( + adml_search_result.tag.split('}')[1], + adml_search_result.attrib['id']) + #policy_search_string = '//{0}:policy[@*[local-name() = "displayName"] = "{1}" and (@*[local-name() = "class"] = "Both" or @*[local-name() = "class"] = "{2}") ]'.format( + policy_search_string = '//{0}:policy[@displayName = "{1}" and (@class = "Both" or @class = "{2}") ]'.format( + adml_search_result.prefix, + display_name_searchval, + policy_class) + admx_results = [] + admx_search_results = admx_policy_definitions.xpath(policy_search_string, namespaces=adml_search_result.nsmap) + for search_result in admx_search_results: + log.debug('policy_name == {0}'.format(policy_name)) + this_hierarchy = _build_parent_list(search_result, + admx_policy_definitions, + True, + adml_policy_resources) + this_hierarchy.reverse() + if hierarchy != this_hierarchy: + adml_to_remove.append(adml_search_result) + else: + admx_results.append(search_result) + if len(admx_results) == 1: + admx_search_results = admx_results for adml in adml_to_remove: if adml in adml_search_results: adml_search_results.remove(adml) @@ -4634,10 +4693,11 @@ def _lookup_admin_template(policy_name, 
adml_search_result.tag.split('}')[1], adml_search_result.attrib['id']) log.debug('searching for displayName == {0}'.format(display_name_searchval)) - admx_search_results = ADMX_DISPLAYNAME_SEARCH_XPATH( - admx_policy_definitions, - display_name=display_name_searchval, - registry_class=policy_class) + if not admx_search_results: + admx_search_results = ADMX_DISPLAYNAME_SEARCH_XPATH( + admx_policy_definitions, + display_name=display_name_searchval, + registry_class=policy_class) if admx_search_results: if len(admx_search_results) == 1 or hierarchy and not multiple_adml_entries: found = False @@ -4649,6 +4709,7 @@ def _lookup_admin_template(policy_name, True, adml_policy_resources) this_hierarchy.reverse() + log.debug('testing {0} == {1}'.format(hierarchy, this_hierarchy)) if hierarchy == this_hierarchy: found = True else: @@ -5107,6 +5168,7 @@ def set_(computer_policy=None, user_policy=None, if policies[p_class]: for policy_name in policies[p_class].keys(): _pol = None + policy_namespace = None policy_key_name = policy_name if policy_name in _policydata.policies[p_class]['policies']: _pol = _policydata.policies[p_class]['policies'][policy_name] @@ -5156,16 +5218,19 @@ def set_(computer_policy=None, user_policy=None, adml_policy_resources=admlPolicyResources) if success: policy_name = the_policy.attrib['name'] - _admTemplateData[policy_name] = _value + policy_namespace = the_policy.nsmap[the_policy.prefix] + if policy_namespace not in _admTemplateData: + _admTemplateData[policy_namespace] = {} + _admTemplateData[policy_namespace][policy_name] = _value else: raise SaltInvocationError(msg) - if policy_name in _admTemplateData and the_policy is not None: - log.debug('setting == {0}'.format(_admTemplateData[policy_name]).lower()) - log.debug('{0}'.format(str(_admTemplateData[policy_name]).lower())) - if str(_admTemplateData[policy_name]).lower() != 'disabled' \ - and str(_admTemplateData[policy_name]).lower() != 'not configured': + if policy_namespace and policy_name in 
_admTemplateData[policy_namespace] and the_policy is not None: + log.debug('setting == {0}'.format(_admTemplateData[policy_namespace][policy_name]).lower()) + log.debug('{0}'.format(str(_admTemplateData[policy_namespace][policy_name]).lower())) + if str(_admTemplateData[policy_namespace][policy_name]).lower() != 'disabled' \ + and str(_admTemplateData[policy_namespace][policy_name]).lower() != 'not configured': if ELEMENTS_XPATH(the_policy): - if isinstance(_admTemplateData[policy_name], dict): + if isinstance(_admTemplateData[policy_namespace][policy_name], dict): for elements_item in ELEMENTS_XPATH(the_policy): for child_item in elements_item.getchildren(): # check each element @@ -5176,9 +5241,9 @@ def set_(computer_policy=None, user_policy=None, True, admlPolicyResources) log.debug('id attribute == "{0}" this_element_name == "{1}"'.format(child_item.attrib['id'], this_element_name)) - if this_element_name in _admTemplateData[policy_name]: + if this_element_name in _admTemplateData[policy_namespace][policy_name]: temp_element_name = this_element_name - elif child_item.attrib['id'] in _admTemplateData[policy_name]: + elif child_item.attrib['id'] in _admTemplateData[policy_namespace][policy_name]: temp_element_name = child_item.attrib['id'] else: msg = ('Element "{0}" must be included' @@ -5186,12 +5251,12 @@ def set_(computer_policy=None, user_policy=None, raise SaltInvocationError(msg.format(this_element_name, policy_name)) if 'required' in child_item.attrib \ and child_item.attrib['required'].lower() == 'true': - if not _admTemplateData[policy_name][temp_element_name]: + if not _admTemplateData[policy_namespace][policy_name][temp_element_name]: msg = 'Element "{0}" requires a value to be specified' raise SaltInvocationError(msg.format(temp_element_name)) if etree.QName(child_item).localname == 'boolean': if not isinstance( - _admTemplateData[policy_name][temp_element_name], + _admTemplateData[policy_namespace][policy_name][temp_element_name], bool): msg = 
'Element {0} requires a boolean True or False' raise SaltInvocationError(msg.format(temp_element_name)) @@ -5203,9 +5268,9 @@ def set_(computer_policy=None, user_policy=None, min_val = int(child_item.attrib['minValue']) if 'maxValue' in child_item.attrib: max_val = int(child_item.attrib['maxValue']) - if int(_admTemplateData[policy_name][temp_element_name]) \ + if int(_admTemplateData[policy_namespace][policy_name][temp_element_name]) \ < min_val or \ - int(_admTemplateData[policy_name][temp_element_name]) \ + int(_admTemplateData[policy_namespace][policy_name][temp_element_name]) \ > max_val: msg = 'Element "{0}" value must be between {1} and {2}' raise SaltInvocationError(msg.format(temp_element_name, @@ -5215,7 +5280,7 @@ def set_(computer_policy=None, user_policy=None, # make sure the value is in the enumeration found = False for enum_item in child_item.getchildren(): - if _admTemplateData[policy_name][temp_element_name] == \ + if _admTemplateData[policy_namespace][policy_name][temp_element_name] == \ _getAdmlDisplayName( admlPolicyResources, enum_item.attrib['displayName']).strip(): @@ -5229,33 +5294,33 @@ def set_(computer_policy=None, user_policy=None, and child_item.attrib['explicitValue'].lower() == \ 'true': if not isinstance( - _admTemplateData[policy_name][temp_element_name], + _admTemplateData[policy_namespace][policy_name][temp_element_name], dict): msg = ('Each list item of element "{0}" ' 'requires a dict value') msg = msg.format(temp_element_name) raise SaltInvocationError(msg) elif not isinstance( - _admTemplateData[policy_name][temp_element_name], + _admTemplateData[policy_namespace][policy_name][temp_element_name], list): msg = 'Element "{0}" requires a list value' msg = msg.format(temp_element_name) raise SaltInvocationError(msg) elif etree.QName(child_item).localname == 'multiText': if not isinstance( - _admTemplateData[policy_name][temp_element_name], + _admTemplateData[policy_namespace][policy_name][temp_element_name], list): msg = 'Element 
"{0}" requires a list value' msg = msg.format(temp_element_name) raise SaltInvocationError(msg) - _admTemplateData[policy_name][child_item.attrib['id']] = \ - _admTemplateData[policy_name].pop(temp_element_name) + _admTemplateData[policy_namespace][policy_name][child_item.attrib['id']] = \ + _admTemplateData[policy_namespace][policy_name].pop(temp_element_name) else: msg = 'The policy "{0}" has elements which must be configured' msg = msg.format(policy_name) raise SaltInvocationError(msg) else: - if str(_admTemplateData[policy_name]).lower() != 'enabled': + if str(_admTemplateData[policy_namespace][policy_name]).lower() != 'enabled': msg = ('The policy {0} must either be "Enabled", ' '"Disabled", or "Not Configured"') msg = msg.format(policy_name) @@ -5343,4 +5408,4 @@ def set_(computer_policy=None, user_policy=None, return True else: msg = 'You have to specify something!' - raise SaltInvocationError(msg) + raise SaltInvocationError(msg) \ No newline at end of file From 2da1cdd1099bf12bb6309277d7bb970bb5076e6c Mon Sep 17 00:00:00 2001 From: lomeroe Date: Tue, 22 Aug 2017 09:53:07 -0500 Subject: [PATCH 156/639] lint fix --- salt/modules/win_lgpo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index ed556c4fc5..5cdd35c8fb 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -5408,4 +5408,4 @@ def set_(computer_policy=None, user_policy=None, return True else: msg = 'You have to specify something!' 
- raise SaltInvocationError(msg) \ No newline at end of file + raise SaltInvocationError(msg) From acc3d7ac82b4cb52e249172c1f7e34b44627271d Mon Sep 17 00:00:00 2001 From: lomeroe Date: Wed, 23 Aug 2017 11:09:12 -0500 Subject: [PATCH 157/639] correct fopen calls from salt.utils for 2016.11's utils function --- salt/modules/win_lgpo.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index 5cdd35c8fb..efd76192bd 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -4033,14 +4033,14 @@ def _write_regpol_data(data_to_write, reg_pol_header = u'\u5250\u6765\x01\x00' if not os.path.exists(policy_file_path): ret = __salt__['file.makedirs'](policy_file_path) - with salt.utils.files.fopen(policy_file_path, 'wb') as pol_file: + with salt.utils.fopen(policy_file_path, 'wb') as pol_file: if not data_to_write.startswith(reg_pol_header): pol_file.write(reg_pol_header.encode('utf-16-le')) pol_file.write(data_to_write.encode('utf-16-le')) try: gpt_ini_data = '' if os.path.exists(gpt_ini_path): - with salt.utils.files.fopen(gpt_ini_path, 'rb') as gpt_file: + with salt.utils.fopen(gpt_ini_path, 'rb') as gpt_file: gpt_ini_data = gpt_file.read() if not _regexSearchRegPolData(r'\[General\]\r\n', gpt_ini_data): gpt_ini_data = '[General]\r\n' + gpt_ini_data @@ -4095,7 +4095,7 @@ def _write_regpol_data(data_to_write, int("{0}{1}".format(str(version_nums[0]).zfill(4), str(version_nums[1]).zfill(4)), 16), gpt_ini_data[general_location.end():]) if gpt_ini_data: - with salt.utils.files.fopen(gpt_ini_path, 'wb') as gpt_file: + with salt.utils.fopen(gpt_ini_path, 'wb') as gpt_file: gpt_file.write(gpt_ini_data) except Exception as e: msg = 'An error occurred attempting to write to {0}, the exception was {1}'.format( From af181b3257e2ca553564df96ce764b699ecd6559 Mon Sep 17 00:00:00 2001 From: lomeroe Date: Wed, 23 Aug 2017 11:11:08 -0500 Subject: [PATCH 158/639] correct fopen calls from salt.utils for 
2017.7 --- salt/modules/win_lgpo.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index 766f9162be..5edb672c90 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -4026,14 +4026,14 @@ def _write_regpol_data(data_to_write, reg_pol_header = u'\u5250\u6765\x01\x00' if not os.path.exists(policy_file_path): ret = __salt__['file.makedirs'](policy_file_path) - with salt.utils.files.fopen(policy_file_path, 'wb') as pol_file: + with salt.utils.fopen(policy_file_path, 'wb') as pol_file: if not data_to_write.startswith(reg_pol_header): pol_file.write(reg_pol_header.encode('utf-16-le')) pol_file.write(data_to_write.encode('utf-16-le')) try: gpt_ini_data = '' if os.path.exists(gpt_ini_path): - with salt.utils.files.fopen(gpt_ini_path, 'rb') as gpt_file: + with salt.utils.fopen(gpt_ini_path, 'rb') as gpt_file: gpt_ini_data = gpt_file.read() if not _regexSearchRegPolData(r'\[General\]\r\n', gpt_ini_data): gpt_ini_data = '[General]\r\n' + gpt_ini_data @@ -4088,7 +4088,7 @@ def _write_regpol_data(data_to_write, int("{0}{1}".format(str(version_nums[0]).zfill(4), str(version_nums[1]).zfill(4)), 16), gpt_ini_data[general_location.end():]) if gpt_ini_data: - with salt.utils.files.fopen(gpt_ini_path, 'wb') as gpt_file: + with salt.utils.fopen(gpt_ini_path, 'wb') as gpt_file: gpt_file.write(gpt_ini_data) except Exception as e: msg = 'An error occurred attempting to write to {0}, the exception was {1}'.format( From d5b2a0be6821d39ea2945665bd1cfce4be2b10a8 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 23 Aug 2017 11:57:00 -0500 Subject: [PATCH 159/639] Resolve image ID during container comparison This fixes an issue where inspecting the container returns an image ID instead of an image name, resulting in a spurious report of a changed image. By resolving the image down to its ID for both the existing and new containers, we ensure we're comparing ID to ID. 
--- salt/modules/dockermod.py | 10 ++++++++++ tests/unit/modules/test_dockermod.py | 26 ++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py index a6cd82ce8e..242067a4b0 100644 --- a/salt/modules/dockermod.py +++ b/salt/modules/dockermod.py @@ -902,6 +902,11 @@ def compare_container(first, second, ignore=None): if item in ('OomKillDisable',): if bool(val1) != bool(val2): ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} + elif item == 'Image': + image1 = inspect_image(val1)['Id'] + image2 = inspect_image(val2)['Id'] + if image1 != image2: + ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2} else: if item == 'Links': val1 = _scrub_links(val1, first) @@ -920,6 +925,11 @@ def compare_container(first, second, ignore=None): if item in ('OomKillDisable',): if bool(val1) != bool(val2): ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} + elif item == 'Image': + image1 = inspect_image(val1)['Id'] + image2 = inspect_image(val2)['Id'] + if image1 != image2: + ret.setdefault(conf_dict, {})[item] = {'old': image1, 'new': image2} else: if item == 'Links': val1 = _scrub_links(val1, first) diff --git a/tests/unit/modules/test_dockermod.py b/tests/unit/modules/test_dockermod.py index 8f22a0605c..24a6d3a3df 100644 --- a/tests/unit/modules/test_dockermod.py +++ b/tests/unit/modules/test_dockermod.py @@ -697,3 +697,29 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): result = docker_mod.images() self.assertEqual(result, {'sha256:abcdefg': {'RepoTags': ['image:latest']}}) + + def test_compare_container_image_id_resolution(self): + ''' + Compare + ''' + def _inspect_container_effect(id_): + return { + 'container1': {'Config': {'Image': 'realimage:latest'}, + 'HostConfig':{}}, + 'container2': {'Config': {'Image': 'image_id'}, + 'HostConfig':{}}, + }[id_] + + def _inspect_image_effect(id_): + return { + 'realimage:latest': {'Id': 'image_id'}, + 'image_id': 
{'Id': 'image_id'}, + }[id_] + + inspect_container_mock = MagicMock(side_effect=_inspect_container_effect) + inspect_image_mock = MagicMock(side_effect=_inspect_image_effect) + + with patch.object(docker_mod, 'inspect_container', inspect_container_mock): + with patch.object(docker_mod, 'inspect_image', inspect_image_mock): + ret = docker_mod.compare_container('container1', 'container2') + self.assertEqual(ret, {}) From f8be0f1de5472e24baecc738ea43579cdb8edded Mon Sep 17 00:00:00 2001 From: Mike Place Date: Wed, 23 Aug 2017 12:08:48 -0600 Subject: [PATCH 160/639] Reorder kwargs --- salt/modules/boto_efs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/boto_efs.py b/salt/modules/boto_efs.py index a498fe5e8a..22a35bb7a0 100644 --- a/salt/modules/boto_efs.py +++ b/salt/modules/boto_efs.py @@ -131,11 +131,11 @@ def _get_conn(key=None, def create_file_system(name, performance_mode='generalPurpose', - creation_token=None, keyid=None, key=None, profile=None, region=None, + creation_token=None, **kwargs): ''' Creates a new, empty file system. 
@@ -361,11 +361,11 @@ def delete_tags(filesystemid, def get_file_systems(filesystemid=None, - creation_token=None, keyid=None, key=None, profile=None, region=None, + creation_token=None, **kwargs): ''' Get all EFS properties or a specific instance property From 40e1898705258ab5f5d03824bc697c6757cb64f0 Mon Sep 17 00:00:00 2001 From: Pablo Hernandez Date: Tue, 22 Aug 2017 20:28:27 -0400 Subject: [PATCH 161/639] Added feature to pass kwargs to the build process for docker_image.present #42606 --- salt/states/docker_image.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/salt/states/docker_image.py b/salt/states/docker_image.py index e3c1a37779..45665ab9f9 100644 --- a/salt/states/docker_image.py +++ b/salt/states/docker_image.py @@ -40,6 +40,7 @@ import logging # Import salt libs import salt.utils.docker +import salt.utils # Enable proper logging log = logging.getLogger(__name__) # pylint: disable=invalid-name @@ -202,10 +203,19 @@ def present(name, return ret if build: + # get the functions default value and args + argspec = salt.utils.args.get_function_argspec(__salt__['docker.build']) + # Map any if existing args from kwargs into the build_args dictionary + build_args = dict(zip(argspec.args, argspec.defaults)) + for k, v in build_args.items(): + if k in kwargs.get('kwargs', {}): + build_args[k] = kwargs.get('kwargs', {}).get(k) try: - image_update = __salt__['docker.build'](path=build, - image=image, - dockerfile=dockerfile) + # map values passed from the state to the build args + build_args['path'] = build + build_args['image'] = image + build_args['dockerfile'] = dockerfile + image_update = __salt__['docker.build'](**build_args) except Exception as exc: ret['comment'] = ( 'Encountered error building {0} as {1}: {2}' From 0db22cef4d2e46bd206666c1f2f8d6b5abed3af3 Mon Sep 17 00:00:00 2001 From: Pablo Hernandez Date: Wed, 23 Aug 2017 14:13:38 -0400 Subject: [PATCH 162/639] Fix salt import to be more explicit --- 
salt/states/docker_image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/docker_image.py b/salt/states/docker_image.py index 45665ab9f9..f396a605c5 100644 --- a/salt/states/docker_image.py +++ b/salt/states/docker_image.py @@ -40,7 +40,7 @@ import logging # Import salt libs import salt.utils.docker -import salt.utils +import salt.utils.args # Enable proper logging log = logging.getLogger(__name__) # pylint: disable=invalid-name From 1dcf167bb7befa0e1ebf8c7256437318961f0351 Mon Sep 17 00:00:00 2001 From: Justin Bradfield Date: Thu, 17 Aug 2017 12:02:07 -0400 Subject: [PATCH 163/639] Update state.py create `_clean_tag()` method to safely encode tag names for file creation use `_clean_tag()` method for both create and read of state cache file --- salt/state.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/salt/state.py b/salt/state.py index 43d1df17a7..950cea70ed 100644 --- a/salt/state.py +++ b/salt/state.py @@ -25,6 +25,7 @@ import traceback import re import time import random +from urllib import quote # Import salt libs import salt.utils @@ -145,6 +146,11 @@ def _gen_tag(low): ''' return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low) +def _clean_tag(tag): + ''' + urllib safe quote the tag value to avoid invalid chars in the filename + ''' + return quote(tag, safe='') def _l_tag(name, id_): low = {'name': 'listen_{0}'.format(name), @@ -1695,7 +1701,7 @@ class State(object): trb) } troot = os.path.join(self.opts['cachedir'], self.jid) - tfile = os.path.join(troot, tag) + tfile = os.path.join(troot, _clean_tag(tag)) if not os.path.isdir(troot): try: os.makedirs(troot) @@ -2047,7 +2053,7 @@ class State(object): proc = running[tag].get('proc') if proc: if not proc.is_alive(): - ret_cache = os.path.join(self.opts['cachedir'], self.jid, tag) + ret_cache = os.path.join(self.opts['cachedir'], self.jid, _clean_tag(tag)) if not os.path.isfile(ret_cache): ret = {'result': False, 'comment': 'Parallel 
process failed to return', From fb80e174000f306fb2fe26f6efe9726edf54bc2c Mon Sep 17 00:00:00 2001 From: Justin Bradfield Date: Fri, 18 Aug 2017 13:05:56 -0400 Subject: [PATCH 164/639] state.py: fix import and utf8 encode before quote --- salt/state.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/state.py b/salt/state.py index 950cea70ed..88a7ec1532 100644 --- a/salt/state.py +++ b/salt/state.py @@ -25,7 +25,7 @@ import traceback import re import time import random -from urllib import quote +from urllib # Import salt libs import salt.utils @@ -150,7 +150,7 @@ def _clean_tag(tag): ''' urllib safe quote the tag value to avoid invalid chars in the filename ''' - return quote(tag, safe='') + return urllib.quote(tag.encode('utf8'), safe='') def _l_tag(name, id_): low = {'name': 'listen_{0}'.format(name), From 446457d017c1791041df9c71781903b3cf2fd99b Mon Sep 17 00:00:00 2001 From: garethgreenaway Date: Mon, 21 Aug 2017 10:46:41 -0700 Subject: [PATCH 165/639] Swapping `from` for `import` --- salt/state.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/state.py b/salt/state.py index 88a7ec1532..391dc2b5a9 100644 --- a/salt/state.py +++ b/salt/state.py @@ -25,7 +25,7 @@ import traceback import re import time import random -from urllib +import urllib # Import salt libs import salt.utils From b8ead879edea078aae91768651bd12d419ca15e6 Mon Sep 17 00:00:00 2001 From: garethgreenaway Date: Mon, 21 Aug 2017 12:44:55 -0700 Subject: [PATCH 166/639] Fixing lint issues --- salt/state.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/state.py b/salt/state.py index 391dc2b5a9..a0e22e5e63 100644 --- a/salt/state.py +++ b/salt/state.py @@ -146,12 +146,14 @@ def _gen_tag(low): ''' return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low) + def _clean_tag(tag): ''' urllib safe quote the tag value to avoid invalid chars in the filename ''' return urllib.quote(tag.encode('utf8'), safe='') + def _l_tag(name, id_): low = 
{'name': 'listen_{0}'.format(name), '__id__': 'listen_{0}'.format(id_), From 4957268b371e5fab1d9c8a05a99ce812c549cc42 Mon Sep 17 00:00:00 2001 From: Justin Bradfield Date: Wed, 23 Aug 2017 14:22:46 -0400 Subject: [PATCH 167/639] update state.py to use safe_filename_leaf instead of urllib.quote generate state cache filename by using safe_filename_leaf salt util method --- salt/state.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/state.py b/salt/state.py index a0e22e5e63..912f06a9ed 100644 --- a/salt/state.py +++ b/salt/state.py @@ -149,9 +149,9 @@ def _gen_tag(low): def _clean_tag(tag): ''' - urllib safe quote the tag value to avoid invalid chars in the filename + Make tag name safe for filenames ''' - return urllib.quote(tag.encode('utf8'), safe='') + return salt.utils.safe_filename_leaf(tag) def _l_tag(name, id_): From 42064883ea8067161a6b4003fabab11a341d0de3 Mon Sep 17 00:00:00 2001 From: Justin Bradfield Date: Wed, 23 Aug 2017 14:28:16 -0400 Subject: [PATCH 168/639] state.py remove unused urllib import --- salt/state.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/state.py b/salt/state.py index 912f06a9ed..ae2ba0c6db 100644 --- a/salt/state.py +++ b/salt/state.py @@ -25,7 +25,6 @@ import traceback import re import time import random -import urllib # Import salt libs import salt.utils From 2722e9521daa5285815ec75cbfd6eb45bf53d2a7 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 16 Aug 2017 16:17:24 -0600 Subject: [PATCH 169/639] Use os.path.join to create paths --- tests/unit/test_test_module_names.py | 50 ++++++++++++++-------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/tests/unit/test_test_module_names.py b/tests/unit/test_test_module_names.py index a7b2bf94ad..485f737685 100644 --- a/tests/unit/test_test_module_names.py +++ b/tests/unit/test_test_module_names.py @@ -13,33 +13,33 @@ from tests.support.unit import TestCase from tests.support.paths import CODE_DIR EXCLUDED_DIRS = [ - 'tests/pkg', 
- 'tests/perf', - 'tests/support', - 'tests/unit/utils/cache_mods', - 'tests/unit/modules/inspectlib', - 'tests/unit/modules/zypp/', - 'tests/unit/templates/files', - 'tests/integration/files/', - 'tests/integration/cloud/helpers', + os.path.join('tests', 'pkg'), + os.path.join('tests', 'perf'), + os.path.join('tests', 'support'), + os.path.join('tests', 'unit', 'utils', 'cache_mods'), + os.path.join('tests', 'unit', 'modules', 'inspectlib'), + os.path.join('tests', 'unit', 'modules', 'zypp'), + os.path.join('tests', 'unit', 'templates', 'files'), + os.path.join('tests', 'integration', 'files'), + os.path.join('tests', 'integration', 'cloud', 'helpers'), ] EXCLUDED_FILES = [ - 'tests/eventlisten.py', - 'tests/buildpackage.py', - 'tests/saltsh.py', - 'tests/minionswarm.py', - 'tests/wheeltest.py', - 'tests/runtests.py', - 'tests/jenkins.py', - 'tests/salt-tcpdump.py', - 'tests/conftest.py', - 'tests/packdump.py', - 'tests/consist.py', - 'tests/modparser.py', - 'tests/committer_parser.py', - 'tests/zypp_plugin.py', - 'tests/unit/transport/mixins.py', - 'tests/integration/utils/testprogram.py', + os.path.join('tests', 'eventlisten.py'), + os.path.join('tests', 'buildpackage.py'), + os.path.join('tests', 'saltsh.py'), + os.path.join('tests', 'minionswarm.py'), + os.path.join('tests', 'wheeltest.py'), + os.path.join('tests', 'runtests.py'), + os.path.join('tests', 'jenkins.py'), + os.path.join('tests', 'salt-tcpdump.py'), + os.path.join('tests', 'conftest.py'), + os.path.join('tests', 'packdump.py'), + os.path.join('tests', 'consist.py'), + os.path.join('tests', 'modparser.py'), + os.path.join('tests', 'committer_parser.py'), + os.path.join('tests', 'zypp_plugin.py'), + os.path.join('tests', 'unit', 'transport', 'mixins.py'), + os.path.join('tests', 'integration', 'utils', 'testprogram.py'), ] From 5901b10c14df9fdf071fc3c411d791e98827fc74 Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 12 May 2017 13:29:48 -0600 Subject: [PATCH 170/639] Remove deprecated Legacy Git 
Pillar code --- doc/ref/configuration/master.rst | 2 +- doc/topics/releases/2015.8.0.rst | 2 +- doc/topics/releases/oxygen.rst | 7 + doc/topics/tutorials/gitfs.rst | 11 +- salt/client/ssh/__init__.py | 3 +- salt/daemons/masterapi.py | 54 +--- salt/master.py | 11 +- salt/pillar/__init__.py | 46 +-- salt/pillar/git_pillar.py | 538 +++++-------------------------- salt/runners/git_pillar.py | 85 ++--- salt/utils/gitfs.py | 12 - tests/support/gitfs.py | 1 - tests/unit/pillar/test_git.py | 27 +- 13 files changed, 159 insertions(+), 640 deletions(-) diff --git a/doc/ref/configuration/master.rst b/doc/ref/configuration/master.rst index 655eb74e56..e60f677da3 100644 --- a/doc/ref/configuration/master.rst +++ b/doc/ref/configuration/master.rst @@ -3787,7 +3787,7 @@ they were created by a different master. Default: ``True`` Normally, when processing :ref:`git_pillar remotes -`, if more than one repo under the same ``git`` +`, if more than one repo under the same ``git`` section in the ``ext_pillar`` configuration refers to the same pillar environment, then each repo in a given environment will have access to the other repos' files to be referenced in their top files. However, it may be diff --git a/doc/topics/releases/2015.8.0.rst b/doc/topics/releases/2015.8.0.rst index ac9b03ff11..5a34397a92 100644 --- a/doc/topics/releases/2015.8.0.rst +++ b/doc/topics/releases/2015.8.0.rst @@ -106,7 +106,7 @@ bringing with it the ability to access authenticated repositories. Using the new features will require updates to the git ext_pillar configuration, further details can be found in the :ref:`pillar.git_pillar -` docs. +` docs. .. _pygit2: https://github.com/libgit2/pygit2 diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index 6c4e5325b1..32124157b0 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -708,6 +708,13 @@ during blackout. 
This release adds support for using this feature in the grains as well, by using special grains keys ``minion_blackout`` and ``minion_blackout_whitelist``. +Pillar Deprecations +------------------- + +The legacy configuration for ``git_pillar`` has been removed. Please use the new +configuration for ``git_pillar``, which is documented in the external pillar module +for :mod:`git_pillar `. + Utils Deprecations ================== diff --git a/doc/topics/tutorials/gitfs.rst b/doc/topics/tutorials/gitfs.rst index 10b4c4339e..cc0b1df9f8 100644 --- a/doc/topics/tutorials/gitfs.rst +++ b/doc/topics/tutorials/gitfs.rst @@ -1110,15 +1110,8 @@ Using Git as an External Pillar Source The git external pillar (a.k.a. git_pillar) has been rewritten for the 2015.8.0 release. This rewrite brings with it pygit2_ support (allowing for access to authenticated repositories), as well as more granular support for per-remote -configuration. - -To make use of the new features, changes to the git ext_pillar configuration -must be made. The new configuration schema is detailed :ref:`here -`. - -For Salt releases before 2015.8.0, click :ref:`here ` -for documentation. - +configuration. This configuration schema is detailed :ref:`here +`. .. 
_faq-gitfs-bug: diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py index de5f6a8ded..cd3e53f0ea 100644 --- a/salt/client/ssh/__init__.py +++ b/salt/client/ssh/__init__.py @@ -1049,8 +1049,7 @@ class Single(object): opts_pkg[u'id'], opts_pkg.get(u'environment', u'base') ) - pillar_dirs = {} - pillar_data = pillar.compile_pillar(pillar_dirs=pillar_dirs) + pillar_data = pillar.compile_pillar() # TODO: cache minion opts in datap in master.py data = {u'opts': opts_pkg, diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index 14358157a0..3ee965daf3 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -32,6 +32,8 @@ import salt.utils.atomicfile import salt.utils.event import salt.utils.files import salt.utils.gitfs +import salt.utils.verify +import salt.utils.minions import salt.utils.gzip_util import salt.utils.jid import salt.utils.minions @@ -64,44 +66,19 @@ def init_git_pillar(opts): ret = [] for opts_dict in [x for x in opts.get('ext_pillar', [])]: if 'git' in opts_dict: - if isinstance(opts_dict['git'], six.string_types): - # Legacy git pillar code - try: - import git - except ImportError: - return ret - parts = opts_dict['git'].strip().split() - try: - br = parts[0] - loc = parts[1] - except IndexError: - log.critical( - 'Unable to extract external pillar data: {0}' - .format(opts_dict['git']) - ) + try: + pillar = salt.utils.gitfs.GitPillar(opts) + pillar.init_remotes( + opts_dict['git'], + git_pillar.PER_REMOTE_OVERRIDES, + git_pillar.PER_REMOTE_ONLY + ) + ret.append(pillar) + except FileserverConfigError: + if opts.get('git_pillar_verify_config', True): + raise else: - ret.append( - git_pillar._LegacyGitPillar( - br, - loc, - opts - ) - ) - else: - # New git_pillar code - try: - pillar = salt.utils.gitfs.GitPillar(opts) - pillar.init_remotes( - opts_dict['git'], - git_pillar.PER_REMOTE_OVERRIDES, - git_pillar.PER_REMOTE_ONLY - ) - ret.append(pillar) - except FileserverConfigError: - if 
opts.get('git_pillar_verify_config', True): - raise - else: - log.critical('Could not initialize git_pillar') + log.critical('Could not initialize git_pillar') return ret @@ -705,8 +682,7 @@ class RemoteFuncs(object): load.get('ext'), self.mminion.functions, pillar_override=load.get('pillar_override', {})) - pillar_dirs = {} - data = pillar.compile_pillar(pillar_dirs=pillar_dirs) + data = pillar.compile_pillar() if self.opts.get('minion_data_cache', False): self.cache.store('minions/{0}'.format(load['id']), 'data', diff --git a/salt/master.py b/salt/master.py index 6ea98f4923..f3f697bf83 100644 --- a/salt/master.py +++ b/salt/master.py @@ -315,7 +315,7 @@ class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess): ''' try: for pillar in self.git_pillar: - pillar.update() + pillar.fetch_remotes() except Exception as exc: log.error(u'Exception caught while updating git_pillar', exc_info=True) @@ -471,18 +471,18 @@ class Master(SMaster): pass if self.opts.get(u'git_pillar_verify_config', True): - non_legacy_git_pillars = [ + git_pillars = [ x for x in self.opts.get(u'ext_pillar', []) if u'git' in x and not isinstance(x[u'git'], six.string_types) ] - if non_legacy_git_pillars: + if git_pillars: try: new_opts = copy.deepcopy(self.opts) from salt.pillar.git_pillar \ import PER_REMOTE_OVERRIDES as per_remote_overrides, \ PER_REMOTE_ONLY as per_remote_only - for repo in non_legacy_git_pillars: + for repo in git_pillars: new_opts[u'ext_pillar'] = [repo] try: git_pillar = salt.utils.gitfs.GitPillar(new_opts) @@ -1304,7 +1304,6 @@ class AESFuncs(object): return False load[u'grains'][u'id'] = load[u'id'] - pillar_dirs = {} pillar = salt.pillar.get_pillar( self.opts, load[u'grains'], @@ -1313,7 +1312,7 @@ class AESFuncs(object): ext=load.get(u'ext'), pillar_override=load.get(u'pillar_override', {}), pillarenv=load.get(u'pillarenv')) - data = pillar.compile_pillar(pillar_dirs=pillar_dirs) + data = pillar.compile_pillar() self.fs_.update_opts() if 
self.opts.get(u'minion_data_cache', False): self.masterapi.cache.store(u'minions/{0}'.format(load[u'id']), diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index ce6c5621f0..f93f4eea98 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -233,7 +233,7 @@ class PillarCache(object): functions=self.functions, pillar_override=self.pillar_override, pillarenv=self.pillarenv) - return fresh_pillar.compile_pillar() # FIXME We are not yet passing pillar_dirs in here + return fresh_pillar.compile_pillar() def compile_pillar(self, *args, **kwargs): # Will likely just be pillar_dirs log.debug('Scanning pillar cache for information about minion {0} and saltenv {1}'.format(self.minion_id, self.saltenv)) @@ -763,7 +763,7 @@ class Pillar(object): return pillar, errors - def _external_pillar_data(self, pillar, val, pillar_dirs, key): + def _external_pillar_data(self, pillar, val, key): ''' Builds actual pillar data structure and updates the ``pillar`` variable ''' @@ -772,26 +772,16 @@ class Pillar(object): if isinstance(val, dict): ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): - if key == 'git': - ext = self.ext_pillars[key](self.minion_id, - val, - pillar_dirs) - else: - ext = self.ext_pillars[key](self.minion_id, - pillar, - *val) + ext = self.ext_pillars[key](self.minion_id, + pillar, + *val) else: - if key == 'git': - ext = self.ext_pillars[key](self.minion_id, - val, - pillar_dirs) - else: - ext = self.ext_pillars[key](self.minion_id, - pillar, - val) + ext = self.ext_pillars[key](self.minion_id, + pillar, + val) return ext - def ext_pillar(self, pillar, pillar_dirs, errors=None): + def ext_pillar(self, pillar, errors=None): ''' Render the external pillar data ''' @@ -843,9 +833,8 @@ class Pillar(object): continue try: ext = self._external_pillar_data(pillar, - val, - pillar_dirs, - key) + val, + key) except Exception as exc: errors.append( 'Failed to load ext_pillar {0}: {1}'.format( @@ -867,16 +856,14 
@@ class Pillar(object): ext = None return pillar, errors - def compile_pillar(self, ext=True, pillar_dirs=None): + def compile_pillar(self, ext=True): ''' Render the pillar data and return ''' top, top_errors = self.get_top() if ext: if self.opts.get('ext_pillar_first', False): - self.opts['pillar'], errors = self.ext_pillar( - self.pillar_override, - pillar_dirs) + self.opts['pillar'], errors = self.ext_pillar(self.pillar_override) self.rend = salt.loader.render(self.opts, self.functions) matches = self.top_matches(top) pillar, errors = self.render_pillar(matches, errors=errors) @@ -888,8 +875,7 @@ class Pillar(object): else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) - pillar, errors = self.ext_pillar( - pillar, pillar_dirs, errors=errors) + pillar, errors = self.ext_pillar(pillar, errors=errors) else: matches = self.top_matches(top) pillar, errors = self.render_pillar(matches) @@ -984,6 +970,6 @@ class Pillar(object): # ext_pillar etc. class AsyncPillar(Pillar): @tornado.gen.coroutine - def compile_pillar(self, ext=True, pillar_dirs=None): - ret = super(AsyncPillar, self).compile_pillar(ext=ext, pillar_dirs=pillar_dirs) + def compile_pillar(self, ext=True): + ret = super(AsyncPillar, self).compile_pillar(ext=ext) raise tornado.gen.Return(ret) diff --git a/salt/pillar/git_pillar.py b/salt/pillar/git_pillar.py index c7b418d567..363a6dadd1 100644 --- a/salt/pillar/git_pillar.py +++ b/salt/pillar/git_pillar.py @@ -3,12 +3,6 @@ Use a git repository as a Pillar source --------------------------------------- -.. note:: - This external pillar has been rewritten for the :ref:`2015.8.0 - ` release. The old method of configuring this - external pillar will be maintained for a couple releases, allowing time for - configurations to be updated to reflect the new usage. - This external pillar allows for a Pillar top file and Pillar SLS files to be sourced from a git repository. @@ -41,8 +35,7 @@ the repo's URL. 
Configuration details can be found below. - bar Additionally, while git_pillar allows for the branch/tag to be overridden - (see :ref:`here `, or :ref:`here - ` for Salt releases before 2015.8.0), keep in + (see :ref:`here `), keep in mind that the top file must reference the actual environment name. It is common practice to make the environment in a git_pillar top file match the branch/tag name, but when remapping, the environment of course no longer @@ -51,113 +44,10 @@ the repo's URL. Configuration details can be found below. common misconfiguration that may be to blame, and is a good first step in troubleshooting. -.. _git-pillar-pre-2015-8-0: +.. _git-pillar-configuration: -Configuring git_pillar for Salt releases before 2015.8.0 -======================================================== - -.. note:: - This legacy configuration for git_pillar will no longer be supported as of - the **Oxygen** release of Salt. - -For Salt releases earlier than :ref:`2015.8.0 `, -GitPython is the only supported provider for git_pillar. Individual -repositories can be configured under the :conf_master:`ext_pillar` -configuration parameter like so: - -.. code-block:: yaml - - ext_pillar: - - git: master https://gitserver/git-pillar.git root=subdirectory - -The repository is specified in the format `` ``, with an -optional ``root`` parameter (added in the :ref:`2014.7.0 -` release) which allows the pillar SLS files to be -served up from a subdirectory (similar to :conf_master:`gitfs_root` in gitfs). - -To use more than one branch from the same repo, multiple lines must be -specified under :conf_master:`ext_pillar`: - -.. code-block:: yaml - - ext_pillar: - - git: master https://gitserver/git-pillar.git - - git: dev https://gitserver/git-pillar.git - -.. _git-pillar-env-remap-legacy: - -To remap a specific branch to a specific Pillar environment, use the format -``:``: - -.. 
code-block:: yaml - - ext_pillar: - - git: develop:dev https://gitserver/git-pillar.git - - git: master:prod https://gitserver/git-pillar.git - -In this case, the ``develop`` branch would need its own ``top.sls`` with a -``dev`` section in it, like this: - -.. code-block:: yaml - - dev: - '*': - - bar - -The ``master`` branch would need its own ``top.sls`` with a ``prod`` section in -it: - -.. code-block:: yaml - - prod: - '*': - - bar - -If ``__env__`` is specified as the branch name, then git_pillar will first look -at the minion's :conf_minion:`environment` option. If unset, it will fall back -to using branch specified by the master's :conf_master:`gitfs_base`: - -.. code-block:: yaml - - ext_pillar: - - git: __env__ https://gitserver/git-pillar.git root=pillar - -The corresponding Pillar top file would look like this: - -.. code-block:: yaml - - {{saltenv}}: - '*': - - bar - -.. note:: - This feature was unintentionally omitted when git_pillar was rewritten for - the 2015.8.0 release. It was added again in the 2016.3.4 release, but it - has changed slightly in that release. On Salt masters running 2015.8.0 - through 2016.3.3, this feature can only be accessed using the legacy config - described above. For 2016.3.4 and later, refer to explanation of the - ``__env__`` parameter in the below section. - - Versions 2016.3.0 through 2016.3.4 incorrectly check the *master's* - ``environment`` config option (instead of the minion's) before falling back - to :conf_master:`gitfs_base`. This has been fixed in the 2016.3.5 and - 2016.11.1 releases (2016.11.0 contains the incorrect behavior). - - Additionally, in releases before 2016.11.0, both ``{{env}}`` and - ``{{saltenv}}`` could be used as a placeholder for the environment. - Starting in 2016.11.0, ``{{env}}`` is no longer supported. - -.. _git-pillar-2015-8-0-and-later: - -Configuring git_pillar for Salt releases 2015.8.0 and later -=========================================================== - -.. 
note:: - In version 2015.8.0, the method of configuring git external pillars has - changed, and now more closely resembles that of the :ref:`Git Fileserver - Backend `. If Salt detects the old configuration schema, it - will use the pre-2015.8.0 code to compile the external pillar. A warning - will also be logged. +Configuring git_pillar for Salt +=============================== Beginning with Salt version 2015.8.0, pygit2_ is now supported in addition to GitPython_. The requirements for GitPython_ and pygit2_ are the same as for @@ -258,32 +148,6 @@ The corresponding Pillar top file would look like this: '*': - bar -.. note:: - This feature was unintentionally omitted when git_pillar was rewritten for - the 2015.8.0 release. It was added again in the 2016.3.4 release, but it - has changed slightly in that release. The fallback value replaced by - ``{{env}}`` is :conf_master: is :conf_master:`git_pillar_base`, while the - legacy config's version of this feature replaces ``{{env}}`` with - :conf_master:`gitfs_base`. - - On Salt masters running 2015.8.0 through 2016.3.3, this feature can only be - accessed using the legacy config in the previous section of this page. - - The same issue which affected the behavior of the minion's - :conf_minion:`environment` config value using the legacy configuration - syntax (see the documentation in the pre-2015.8.0 section above for the - legacy support of this feature) also affects the new-style git_pillar - syntax in version 2016.3.4. This has been corrected in version 2016.3.5 and - 2016.11.1 (2016.11.0 contains the incorrect behavior). - - 2016.3.4 incorrectly checks the *master's* ``environment`` config option - (instead of the minion's) before falling back to the master's - :conf_master:`git_pillar_base`. - - Additionally, in releases before 2016.11.0, both ``{{env}}`` and - ``{{saltenv}}`` could be used as a placeholder for the environment. - Starting in 2016.11.0, ``{{env}}`` is no longer supported. 
- With the addition of pygit2_ support, git_pillar can now interact with authenticated remotes. Authentication works just like in gitfs (as outlined in the :ref:`Git Fileserver Backend Walkthrough `), only @@ -469,8 +333,6 @@ from __future__ import absolute_import # Import python libs import copy import logging -import hashlib -import os # Import salt libs import salt.utils.gitfs @@ -509,339 +371,89 @@ def __virtual__(): # No git external pillars were configured return False - for ext_pillar in git_ext_pillars: - if isinstance(ext_pillar['git'], six.string_types): - # Verification of legacy git pillar configuration - if not HAS_GITPYTHON: - log.error( - 'Git-based ext_pillar is enabled in configuration but ' - 'could not be loaded, is GitPython installed?' - ) - return False - if not git.__version__ > '0.3.0': - return False - return __virtualname__ - else: - # Verification of new git pillar configuration - try: - salt.utils.gitfs.GitPillar(__opts__) - # Initialization of the GitPillar object did not fail, so we - # know we have valid configuration syntax and that a valid - # provider was detected. - return __virtualname__ - except FileserverConfigError: - pass - return False + try: + salt.utils.gitfs.GitPillar(__opts__) + # Initialization of the GitPillar object did not fail, so we + # know we have valid configuration syntax and that a valid + # provider was detected. 
+ return __virtualname__ + except FileserverConfigError: + return False -def ext_pillar(minion_id, repo, pillar_dirs): +def ext_pillar(minion_id, repo): ''' Checkout the ext_pillar sources and compile the resulting pillar SLS ''' - if isinstance(repo, six.string_types): - return _legacy_git_pillar(minion_id, repo, pillar_dirs) - else: - opts = copy.deepcopy(__opts__) - opts['pillar_roots'] = {} - opts['__git_pillar'] = True - pillar = salt.utils.gitfs.GitPillar(opts) - pillar.init_remotes(repo, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) - if __opts__.get('__role') == 'minion': - # If masterless, fetch the remotes. We'll need to remove this once - # we make the minion daemon able to run standalone. - pillar.fetch_remotes() - pillar.checkout() - ret = {} - merge_strategy = __opts__.get( - 'pillar_source_merging_strategy', - 'smart' - ) - merge_lists = __opts__.get( - 'pillar_merge_lists', - False - ) - for pillar_dir, env in six.iteritems(pillar.pillar_dirs): - # If pillarenv is set, only grab pillars with that match pillarenv - if opts['pillarenv'] and env != opts['pillarenv'] and env != '__env__': - log.debug( - 'env \'%s\' for pillar dir \'%s\' does not match ' - 'pillarenv \'%s\', skipping', - env, pillar_dir, opts['pillarenv'] - ) - continue - if pillar_dir in pillar.pillar_linked_dirs: - log.debug( - 'git_pillar is skipping processing on %s as it is a ' - 'mounted repo', pillar_dir - ) - continue - else: - log.debug( - 'git_pillar is processing pillar SLS from %s for pillar ' - 'env \'%s\'', pillar_dir, env - ) - - if env == '__env__': - env = opts.get('pillarenv') \ - or opts.get('environment') \ - or opts.get('git_pillar_base') - log.debug('__env__ maps to %s', env) - - pillar_roots = [pillar_dir] - - if __opts__['git_pillar_includes']: - # Add the rest of the pillar_dirs in this environment to the - # list, excluding the current pillar_dir being processed. 
This - # is because it was already specified above as the first in the - # list, so that its top file is sourced from the correct - # location and not from another git_pillar remote. - pillar_roots.extend( - [d for (d, e) in six.iteritems(pillar.pillar_dirs) - if env == e and d != pillar_dir] - ) - - opts['pillar_roots'] = {env: pillar_roots} - - local_pillar = Pillar(opts, __grains__, minion_id, env) - ret = salt.utils.dictupdate.merge( - ret, - local_pillar.compile_pillar(ext=False), - strategy=merge_strategy, - merge_lists=merge_lists - ) - return ret - - -# Legacy git_pillar code -class _LegacyGitPillar(object): - ''' - Deal with the remote git repository for Pillar - ''' - - def __init__(self, branch, repo_location, opts): - ''' - Try to initialize the Git repo object - ''' - self.branch = self.map_branch(branch, opts) - self.rp_location = repo_location - self.opts = opts - self._envs = set() - self.working_dir = '' - self.repo = None - - hash_type = getattr(hashlib, opts['hash_type']) - hash_str = '{0} {1}'.format(self.branch, self.rp_location) - repo_hash = hash_type(salt.utils.stringutils.to_bytes(hash_str)).hexdigest() - rp_ = os.path.join(self.opts['cachedir'], 'pillar_gitfs', repo_hash) - - if not os.path.isdir(rp_): - os.makedirs(rp_) - try: - self.repo = git.Repo.init(rp_) - except (git.exc.NoSuchPathError, - git.exc.InvalidGitRepositoryError) as exc: - log.error( - 'GitPython exception caught while initializing the repo: %s. ' - 'Maybe the git CLI program is not available.', exc - ) - except Exception as exc: - log.exception('Undefined exception in git pillar. 
' - 'This may be a bug should be reported to the ' - 'SaltStack developers.') - - # Git directory we are working on - # Should be the same as self.repo.working_dir - self.working_dir = rp_ - - if isinstance(self.repo, git.Repo): - if not self.repo.remotes: - try: - self.repo.create_remote('origin', self.rp_location) - # ignore git ssl verification if requested - if self.opts.get('pillar_gitfs_ssl_verify', True): - self.repo.git.config('http.sslVerify', 'true') - else: - self.repo.git.config('http.sslVerify', 'false') - except os.error: - # This exception occurs when two processes are - # trying to write to the git config at once, go - # ahead and pass over it since this is the only - # write. - # This should place a lock down. - pass - else: - if self.repo.remotes.origin.url != self.rp_location: - self.repo.remotes.origin.config_writer.set( - 'url', self.rp_location) - - def map_branch(self, branch, opts=None): - opts = __opts__ if opts is None else opts - if branch == '__env__': - branch = opts.get('environment') or 'base' - if branch == 'base': - branch = opts.get('gitfs_base') or 'master' - elif ':' in branch: - branch = branch.split(':', 1)[0] - return branch - - def update(self): - ''' - Ensure you are following the latest changes on the remote - - Return boolean whether it worked - ''' - try: - log.debug('Legacy git_pillar: Updating \'%s\'', self.rp_location) - self.repo.git.fetch() - except git.exc.GitCommandError as exc: - log.error( - 'Unable to fetch the latest changes from remote %s: %s', - self.rp_location, exc - ) - return False - - try: - checkout_ref = 'origin/{0}'.format(self.branch) - log.debug('Legacy git_pillar: Checking out %s for \'%s\'', - checkout_ref, self.rp_location) - self.repo.git.checkout(checkout_ref) - except git.exc.GitCommandError as exc: - log.error( - 'Legacy git_pillar: Failed to checkout %s for \'%s\': %s', - checkout_ref, self.rp_location, exc - ) - return False - - return True - - def envs(self): - ''' - Return a list of refs 
that can be used as environments - ''' - if isinstance(self.repo, git.Repo): - remote = self.repo.remote() - for ref in self.repo.refs: - parted = ref.name.partition('/') - short = parted[2] if parted[2] else parted[0] - if isinstance(ref, git.Head): - if short == 'master': - short = 'base' - if ref not in remote.stale_refs: - self._envs.add(short) - elif isinstance(ref, git.Tag): - self._envs.add(short) - - return list(self._envs) - - -def _legacy_git_pillar(minion_id, repo_string, pillar_dirs): - ''' - Support pre-Beryllium config schema - ''' - salt.utils.versions.warn_until( - 'Oxygen', - 'The git ext_pillar configuration is deprecated. Please refer to the ' - 'documentation at ' - 'https://docs.saltstack.com/en/latest/ref/pillar/all/salt.pillar.git_pillar.html ' - 'for more information. This configuration will no longer be supported ' - 'as of the Oxygen release of Salt.' - ) - if pillar_dirs is None: - return - # split the branch, repo name and optional extra (key=val) parameters. - options = repo_string.strip().split() - branch_env = options[0] - repo_location = options[1] - root = '' - - for extraopt in options[2:]: - # Support multiple key=val attributes as custom parameters. - DELIM = '=' - if DELIM not in extraopt: - log.error( - 'Legacy git_pillar: Incorrectly formatted extra parameter ' - '\'%s\' within \'%s\' missing \'%s\')', - extraopt, repo_string, DELIM - ) - key, val = _extract_key_val(extraopt, DELIM) - if key == 'root': - root = val - else: - log.error( - 'Legacy git_pillar: Unrecognized extra parameter \'%s\' ' - 'in \'%s\'', - key, repo_string - ) - - # environment is "different" from the branch - cfg_branch, _, environment = branch_env.partition(':') - - gitpil = _LegacyGitPillar(cfg_branch, repo_location, __opts__) - branch = gitpil.branch - - if environment == '': - if branch == 'master': - environment = 'base' - else: - environment = branch - - # normpath is needed to remove appended '/' if root is empty string. 
- pillar_dir = os.path.normpath(os.path.join(gitpil.working_dir, root)) - log.debug( - 'Legacy git_pillar: pillar_dir for \'%s\' is \'%s\'', - repo_string, pillar_dir - ) - log.debug( - 'Legacy git_pillar: branch for \'%s\' is \'%s\'', - repo_string, branch - ) - - pillar_dirs.setdefault(pillar_dir, {}) - - if cfg_branch == '__env__' and branch not in ['master', 'base']: - gitpil.update() - elif pillar_dirs[pillar_dir].get(branch, False): - log.debug( - 'Already processed pillar_dir \'%s\' for \'%s\'', - pillar_dir, repo_string - ) - return {} # we've already seen this combo - - pillar_dirs[pillar_dir].setdefault(branch, True) - - # Don't recurse forever-- the Pillar object will re-call the ext_pillar - # function - if __opts__['pillar_roots'].get(branch, []) == [pillar_dir]: - return {} - opts = copy.deepcopy(__opts__) - - opts['pillar_roots'][environment] = [pillar_dir] + opts['pillar_roots'] = {} opts['__git_pillar'] = True + pillar = salt.utils.gitfs.GitPillar(opts) + pillar.init_remotes(repo, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY) + if __opts__.get('__role') == 'minion': + # If masterless, fetch the remotes. We'll need to remove this once + # we make the minion daemon able to run standalone. 
+ pillar.fetch_remotes() + pillar.checkout() + ret = {} + merge_strategy = __opts__.get( + 'pillar_source_merging_strategy', + 'smart' + ) + merge_lists = __opts__.get( + 'pillar_merge_lists', + False + ) + for pillar_dir, env in six.iteritems(pillar.pillar_dirs): + # If pillarenv is set, only grab pillars with that match pillarenv + if opts['pillarenv'] and env != opts['pillarenv']: + log.debug( + 'env \'%s\' for pillar dir \'%s\' does not match ' + 'pillarenv \'%s\', skipping', + env, pillar_dir, opts['pillarenv'] + ) + continue + if pillar_dir in pillar.pillar_linked_dirs: + log.debug( + 'git_pillar is skipping processing on %s as it is a ' + 'mounted repo', pillar_dir + ) + continue + else: + log.debug( + 'git_pillar is processing pillar SLS from %s for pillar ' + 'env \'%s\'', pillar_dir, env + ) - pil = Pillar(opts, __grains__, minion_id, branch) + if env == '__env__': + env = opts.get('pillarenv') \ + or opts.get('environment') \ + or opts.get('git_pillar_base') + log.debug('__env__ maps to %s', env) - return pil.compile_pillar(ext=False) + pillar_roots = [pillar_dir] + if __opts__['git_pillar_includes']: + # Add the rest of the pillar_dirs in this environment to the + # list, excluding the current pillar_dir being processed. This + # is because it was already specified above as the first in the + # list, so that its top file is sourced from the correct + # location and not from another git_pillar remote. 
+ pillar_roots.extend( + [d for (d, e) in six.iteritems(pillar.pillar_dirs) + if env == e and d != pillar_dir] + ) -def _update(branch, repo_location): - ''' - Ensure you are following the latest changes on the remote + opts['pillar_roots'] = {env: pillar_roots} - return boolean whether it worked - ''' - gitpil = _LegacyGitPillar(branch, repo_location, __opts__) - - return gitpil.update() - - -def _envs(branch, repo_location): - ''' - Return a list of refs that can be used as environments - ''' - gitpil = _LegacyGitPillar(branch, repo_location, __opts__) - - return gitpil.envs() + local_pillar = Pillar(opts, __grains__, minion_id, env) + ret = salt.utils.dictupdate.merge( + ret, + local_pillar.compile_pillar(ext=False), + strategy=merge_strategy, + merge_lists=merge_lists + ) + return ret def _extract_key_val(kv, delimiter='='): diff --git a/salt/runners/git_pillar.py b/salt/runners/git_pillar.py index 0e8e97beb3..ca302ae7f8 100644 --- a/salt/runners/git_pillar.py +++ b/salt/runners/git_pillar.py @@ -11,7 +11,6 @@ import logging import salt.pillar.git_pillar import salt.utils.gitfs from salt.exceptions import SaltRunnerError -from salt.ext import six log = logging.getLogger(__name__) @@ -21,18 +20,13 @@ def update(branch=None, repo=None): .. versionadded:: 2014.1.0 .. versionchanged:: 2015.8.4 - This runner function now supports the :ref:`new git_pillar - configuration schema ` introduced in + This runner function now supports the :ref:`git_pillar + configuration schema ` introduced in 2015.8.0. Additionally, the branch and repo can now be omitted to - update all git_pillar remotes. The return data has also changed. For - releases 2015.8.3 and earlier, there is no value returned. Starting - with 2015.8.4, the return data is a dictionary. If using the :ref:`old - git_pillar configuration schema `, then the - dictionary values will be ``True`` if the update completed without - error, and ``False`` if an error occurred. 
If using the :ref:`new - git_pillar configuration schema `, the - values will be ``True`` only if new commits were fetched, and ``False`` - if there were errors or no new commits were fetched. + update all git_pillar remotes. The return data has also changed to + a dictionary. The values will be ``True`` only if new commits were + fetched, and ``False`` if there were errors or no new commits were + fetched. Fetch one or all configured git_pillar remotes. @@ -56,7 +50,7 @@ def update(branch=None, repo=None): # Update specific branch and repo salt-run git_pillar.update branch='branch' repo='https://foo.com/bar.git' - # Update all repos (2015.8.4 and later) + # Update all repos salt-run git_pillar.update # Run with debug logging salt-run git_pillar.update -l debug @@ -67,47 +61,30 @@ def update(branch=None, repo=None): if pillar_type != 'git': continue pillar_conf = ext_pillar[pillar_type] - if isinstance(pillar_conf, six.string_types): - parts = pillar_conf.split() - if len(parts) >= 2: - desired_branch, desired_repo = parts[:2] - # Skip this remote if it doesn't match the search criteria - if branch is not None: - if branch != desired_branch: - continue - if repo is not None: - if repo != desired_repo: - continue - ret[pillar_conf] = salt.pillar.git_pillar._LegacyGitPillar( - parts[0], - parts[1], - __opts__).update() - - else: - pillar = salt.utils.gitfs.GitPillar(__opts__) - pillar.init_remotes(pillar_conf, - salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, - salt.pillar.git_pillar.PER_REMOTE_ONLY) - for remote in pillar.remotes: - # Skip this remote if it doesn't match the search criteria - if branch is not None: - if branch != remote.branch: - continue - if repo is not None: - if repo != remote.url: - continue - try: - result = remote.fetch() - except Exception as exc: - log.error( - 'Exception \'{0}\' caught while fetching git_pillar ' - 'remote \'{1}\''.format(exc, remote.id), - exc_info_on_loglevel=logging.DEBUG - ) - result = False - finally: - 
remote.clear_lock() - ret[remote.id] = result + pillar = salt.utils.gitfs.GitPillar(__opts__) + pillar.init_remotes(pillar_conf, + salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, + salt.pillar.git_pillar.PER_REMOTE_ONLY) + for remote in pillar.remotes: + # Skip this remote if it doesn't match the search criteria + if branch is not None: + if branch != remote.branch: + continue + if repo is not None: + if repo != remote.url: + continue + try: + result = remote.fetch() + except Exception as exc: + log.error( + 'Exception \'{0}\' caught while fetching git_pillar ' + 'remote \'{1}\''.format(exc, remote.id), + exc_info_on_loglevel=logging.DEBUG + ) + result = False + finally: + remote.clear_lock() + ret[remote.id] = result if not ret: if branch is not None or repo is not None: diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py index fa90b7c879..8c7750c596 100644 --- a/salt/utils/gitfs.py +++ b/salt/utils/gitfs.py @@ -2894,18 +2894,6 @@ class GitPillar(GitBase): return False return True - def update(self): - ''' - Execute a git fetch on all of the repos. In this case, simply execute - self.fetch_remotes() from the parent class. - - This function only exists to make the git_pillar update code in - master.py (salt.master.Maintenance.handle_git_pillar) less complicated, - once the legacy git_pillar code is purged we can remove this function - and just run pillar.fetch_remotes() there. 
- ''' - return self.fetch_remotes() - class WinRepo(GitBase): ''' diff --git a/tests/support/gitfs.py b/tests/support/gitfs.py index 0f794ec934..411bfd27ce 100644 --- a/tests/support/gitfs.py +++ b/tests/support/gitfs.py @@ -342,7 +342,6 @@ class GitPillarTestBase(GitTestBase, LoaderModuleMockMixin): return git_pillar.ext_pillar( 'minion', ext_pillar_opts['ext_pillar'][0]['git'], - {} ) def make_repo(self, root_dir, user='root'): diff --git a/tests/unit/pillar/test_git.py b/tests/unit/pillar/test_git.py index 30c641e60e..907c4c93d4 100644 --- a/tests/unit/pillar/test_git.py +++ b/tests/unit/pillar/test_git.py @@ -72,7 +72,6 @@ class GitPillarTestCase(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModul def setUp(self): super(GitPillarTestCase, self).setUp() - git_pillar._update('master', 'file://{0}'.format(self.repo_path)) def tearDown(self): shutil.rmtree(self.tmpdir, onerror=self._rmtree_error) @@ -108,8 +107,7 @@ class GitPillarTestCase(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModul 'check direct call ``ext_pillar()`` interface' with patch.dict(git_pillar.__opts__, {'environment': None}): mypillar = git_pillar.ext_pillar('myminion', - self.conf_line, - {}) + self.conf_line) self.assertEqual(PILLAR_CONTENT, mypillar) def test_from_upper(self): @@ -126,7 +124,7 @@ class GitPillarTestCase(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModul pil = Pillar(git_pillar.__opts__, git_pillar.__grains__, 'myminion', None) - self.assertEqual(PILLAR_CONTENT, pil.compile_pillar(pillar_dirs={})) + self.assertEqual(PILLAR_CONTENT, pil.compile_pillar()) def test_no_loop(self): '''Check that the reinstantiation of a pillar object does recurse. @@ -139,20 +137,6 @@ class GitPillarTestCase(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModul Otherwise, the fact that the :class:`MaximumRecursion` error is caught can go in the way on the testing. 
- On the current code base, this test fails if the two first lines of - :func:``git_pillar.ext_pillar`:: - - if pillar_dirs is None: - return - - are replaced by:: - - if pillar_dirs is None: - pillar_dirs = {} - - .. note:: the explicit anti-recursion protection does not prevent - looping between two different Git pillars. - This test will help subsequent refactors, and also as a base for other external pillars of the same kind. ''' @@ -161,7 +145,6 @@ class GitPillarTestCase(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModul subprocess.check_call(['git', 'clone', self.repo_path, repo2]) with patch.dict(git_pillar.__opts__, {'ext_pillar': [dict(git=self.conf_line), dict(git=conf_line2)]}): - git_pillar._update(*conf_line2.split(None, 1)) pil = Pillar(git_pillar.__opts__, git_pillar.__grains__, @@ -170,13 +153,13 @@ class GitPillarTestCase(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModul orig_ext_pillar = pil.ext_pillars['git'] orig_ext_pillar.count = 0 - def ext_pillar_count_calls(minion_id, repo_string, pillar_dirs): + def ext_pillar_count_calls(minion_id, repo_string): orig_ext_pillar.count += 1 if orig_ext_pillar.count > 6: # going all the way to an infinite loop is harsh on the # test machine raise RuntimeError('Infinite loop detected') - return orig_ext_pillar(minion_id, repo_string, pillar_dirs) + return orig_ext_pillar(minion_id, repo_string) from salt.loader import LazyLoader orig_getitem = LazyLoader.__getitem__ @@ -187,5 +170,5 @@ class GitPillarTestCase(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModul return orig_getitem(self, key) with patch.object(LazyLoader, '__getitem__', __getitem__): - self.assertEqual(PILLAR_CONTENT, pil.compile_pillar(pillar_dirs={})) + self.assertEqual(PILLAR_CONTENT, pil.compile_pillar()) self.assertTrue(orig_ext_pillar.count < 7) From 83f87077ba092e6a6eb0ba79992f098ce6aadd64 Mon Sep 17 00:00:00 2001 From: rallytime Date: Wed, 9 Aug 2017 15:52:20 -0400 Subject: [PATCH 171/639] Remove unused import --- 
salt/pillar/git_pillar.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/salt/pillar/git_pillar.py b/salt/pillar/git_pillar.py index 363a6dadd1..53e58be0ac 100644 --- a/salt/pillar/git_pillar.py +++ b/salt/pillar/git_pillar.py @@ -344,13 +344,6 @@ from salt.pillar import Pillar # Import third party libs from salt.ext import six -# pylint: disable=import-error -try: - import git - HAS_GITPYTHON = True -except ImportError: - HAS_GITPYTHON = False -# pylint: enable=import-error PER_REMOTE_OVERRIDES = ('env', 'root', 'ssl_verify', 'refspecs') PER_REMOTE_ONLY = ('name', 'mountpoint') From c48801095fbce13aad3906eae84d84ac6e2ede47 Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 18 Aug 2017 14:44:55 -0400 Subject: [PATCH 172/639] Remove tests for old git_pillar syntax. These tests were testing things present in the legacy git pillar syntax and are no longer needed. The new git pillar syntax has many more integration tests to cover various cases. --- tests/unit/pillar/test_git.py | 174 ---------------------------------- 1 file changed, 174 deletions(-) delete mode 100644 tests/unit/pillar/test_git.py diff --git a/tests/unit/pillar/test_git.py b/tests/unit/pillar/test_git.py deleted file mode 100644 index 907c4c93d4..0000000000 --- a/tests/unit/pillar/test_git.py +++ /dev/null @@ -1,174 +0,0 @@ -# -*- coding: utf-8 -*- -'''test for pillar git_pillar.py - - - :codeauthor: :email:`Georges Racinet (gracinet@anybox.fr)` - -Based on joint work with Paul Tonelli about hg_pillar integration. 
- -''' - -# Import python libs -from __future__ import absolute_import - -import os -import tempfile -import shutil -import subprocess -import yaml -import stat - -# Import Salt Testing libs -from tests.integration import AdaptedConfigurationTestCaseMixin -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.paths import TMP -from tests.support.unit import TestCase, skipIf -from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch - -COMMIT_USER_NAME = 'test_user' -COMMIT_USER_EMAIL = 'someone@git.test' -# file contents -PILLAR_CONTENT = {'gna': 'hello'} -FILE_DATA = { - 'top.sls': {'base': {'*': ['user']}}, - 'user.sls': PILLAR_CONTENT - } - -# Import Salt Libs -import salt.utils.files -from salt.pillar import Pillar -import salt.pillar.git_pillar as git_pillar - - -@skipIf(NO_MOCK, NO_MOCK_REASON) -@skipIf(not git_pillar.HAS_GITPYTHON, 'no GitPython') -class GitPillarTestCase(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMixin): - 'test git_pillar pillar' - maxDiff = None - - def setup_loader_modules(self): - self.tmpdir = tempfile.mkdtemp(dir=TMP) - cachedir = os.path.join(self.tmpdir, 'cachedir') - os.makedirs(os.path.join(cachedir, 'pillar_gitfs')) - self.repo_path = self._create_repo() - return { - git_pillar: { - '__opts__': { - 'cachedir': cachedir, - 'pillar_roots': {}, - 'hash_type': 'sha256', - 'file_ignore_regex': [], - 'file_ignore_glob': [], - 'file_roots': {}, - 'state_top': 'top.sls', - 'extension_modules': '', - 'renderer': 'yaml_jinja', - 'renderer_blacklist': [], - 'renderer_whitelist': [], - 'pillar_opts': False - } - } - } - - def setUp(self): - super(GitPillarTestCase, self).setUp() - - def tearDown(self): - shutil.rmtree(self.tmpdir, onerror=self._rmtree_error) - super(GitPillarTestCase, self).tearDown() - - def _rmtree_error(self, func, path, excinfo): - os.chmod(path, stat.S_IWRITE) - func(path) - - def _create_repo(self): - 'create source Git repo in temp directory' - repo = 
os.path.join(self.tmpdir, 'repo_pillar') - os.makedirs(repo) - subprocess.check_call(['git', 'init', repo]) - for filename in FILE_DATA: - with salt.utils.files.fopen(os.path.join(repo, filename), 'w') as data_file: - yaml.dump(FILE_DATA[filename], data_file) - - subprocess.check_call(['git', 'add', '.'], cwd=repo) - subprocess.call(['git', 'config', 'user.email', COMMIT_USER_EMAIL], - cwd=repo) - subprocess.call(['git', 'config', 'user.name', COMMIT_USER_NAME], - cwd=repo) - subprocess.check_call(['git', 'commit', '-m', 'first commit'], - cwd=repo) - return repo - - @property - def conf_line(self): - return 'master file://{0}'.format(self.repo_path) - - def test_base(self): - 'check direct call ``ext_pillar()`` interface' - with patch.dict(git_pillar.__opts__, {'environment': None}): - mypillar = git_pillar.ext_pillar('myminion', - self.conf_line) - self.assertEqual(PILLAR_CONTENT, mypillar) - - def test_from_upper(self): - '''Check whole calling stack from parent Pillar instance - - This test is closer to what happens in real life, and demonstrates - how ``compile_pillar()`` is called twice. - - This kind of test should/would become non-necessary, once git_pillar, - all these pillar are called exactly in the same way (git is an - exception for now), and don't recurse. - ''' - with patch.dict(git_pillar.__opts__, {'ext_pillar': [dict(git=self.conf_line)]}): - pil = Pillar(git_pillar.__opts__, - git_pillar.__grains__, - 'myminion', None) - self.assertEqual(PILLAR_CONTENT, pil.compile_pillar()) - - def test_no_loop(self): - '''Check that the reinstantiation of a pillar object does recurse. - - This test goes in great details of patching that the dedicated - utilities might do in a simpler way. - Namely, we replace the main ``ext_pillar`` entry function by one - that keeps count of its calls. - - Otherwise, the fact that the :class:`MaximumRecursion` error is caught - can go in the way on the testing. 
- - This test will help subsequent refactors, and also as a base for other - external pillars of the same kind. - ''' - repo2 = os.path.join(self.tmpdir, 'repo_pillar2') - conf_line2 = 'master file://{0}'.format(repo2) - subprocess.check_call(['git', 'clone', self.repo_path, repo2]) - with patch.dict(git_pillar.__opts__, {'ext_pillar': [dict(git=self.conf_line), - dict(git=conf_line2)]}): - - pil = Pillar(git_pillar.__opts__, - git_pillar.__grains__, - 'myminion', 'base') - - orig_ext_pillar = pil.ext_pillars['git'] - orig_ext_pillar.count = 0 - - def ext_pillar_count_calls(minion_id, repo_string): - orig_ext_pillar.count += 1 - if orig_ext_pillar.count > 6: - # going all the way to an infinite loop is harsh on the - # test machine - raise RuntimeError('Infinite loop detected') - return orig_ext_pillar(minion_id, repo_string) - - from salt.loader import LazyLoader - orig_getitem = LazyLoader.__getitem__ - - def __getitem__(self, key): - if key == 'git.ext_pillar': - return ext_pillar_count_calls - return orig_getitem(self, key) - - with patch.object(LazyLoader, '__getitem__', __getitem__): - self.assertEqual(PILLAR_CONTENT, pil.compile_pillar()) - self.assertTrue(orig_ext_pillar.count < 7) From 18b8fc8cad7dd3cb8027d23130d86c52c5ee412d Mon Sep 17 00:00:00 2001 From: Tom Williams Date: Wed, 23 Aug 2017 17:01:47 -0400 Subject: [PATCH 173/639] INFRA-5461 - fix a couple of smallish bugs in boto_elb modules --- salt/modules/boto_elb.py | 90 +++++++++++++++++++++------------------- salt/states/boto_elb.py | 2 +- 2 files changed, 49 insertions(+), 43 deletions(-) diff --git a/salt/modules/boto_elb.py b/salt/modules/boto_elb.py index ddbb543d74..b62531187e 100644 --- a/salt/modules/boto_elb.py +++ b/salt/modules/boto_elb.py @@ -49,6 +49,7 @@ from __future__ import absolute_import # Import Python libs import logging import json +import time log = logging.getLogger(__name__) @@ -161,48 +162,53 @@ def get_elb_config(name, region=None, key=None, keyid=None, profile=None): ''' 
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) - try: - lb = conn.get_all_load_balancers(load_balancer_names=[name]) - lb = lb[0] - ret = {} - ret['availability_zones'] = lb.availability_zones - listeners = [] - for _listener in lb.listeners: - listener_dict = {} - listener_dict['elb_port'] = _listener.load_balancer_port - listener_dict['elb_protocol'] = _listener.protocol - listener_dict['instance_port'] = _listener.instance_port - listener_dict['instance_protocol'] = _listener.instance_protocol - listener_dict['policies'] = _listener.policy_names - if _listener.ssl_certificate_id: - listener_dict['certificate'] = _listener.ssl_certificate_id - listeners.append(listener_dict) - ret['listeners'] = listeners - backends = [] - for _backend in lb.backends: - bs_dict = {} - bs_dict['instance_port'] = _backend.instance_port - bs_dict['policies'] = [p.policy_name for p in _backend.policies] - backends.append(bs_dict) - ret['backends'] = backends - ret['subnets'] = lb.subnets - ret['security_groups'] = lb.security_groups - ret['scheme'] = lb.scheme - ret['dns_name'] = lb.dns_name - ret['tags'] = _get_all_tags(conn, name) - lb_policy_lists = [ - lb.policies.app_cookie_stickiness_policies, - lb.policies.lb_cookie_stickiness_policies, - lb.policies.other_policies - ] - policies = [] - for policy_list in lb_policy_lists: - policies += [p.policy_name for p in policy_list] - ret['policies'] = policies - return ret - except boto.exception.BotoServerError as error: - log.debug(error) - return {} + while True: + try: + lb = conn.get_all_load_balancers(load_balancer_names=[name]) + lb = lb[0] + ret = {} + ret['availability_zones'] = lb.availability_zones + listeners = [] + for _listener in lb.listeners: + listener_dict = {} + listener_dict['elb_port'] = _listener.load_balancer_port + listener_dict['elb_protocol'] = _listener.protocol + listener_dict['instance_port'] = _listener.instance_port + listener_dict['instance_protocol'] = _listener.instance_protocol 
+ listener_dict['policies'] = _listener.policy_names + if _listener.ssl_certificate_id: + listener_dict['certificate'] = _listener.ssl_certificate_id + listeners.append(listener_dict) + ret['listeners'] = listeners + backends = [] + for _backend in lb.backends: + bs_dict = {} + bs_dict['instance_port'] = _backend.instance_port + bs_dict['policies'] = [p.policy_name for p in _backend.policies] + backends.append(bs_dict) + ret['backends'] = backends + ret['subnets'] = lb.subnets + ret['security_groups'] = lb.security_groups + ret['scheme'] = lb.scheme + ret['dns_name'] = lb.dns_name + ret['tags'] = _get_all_tags(conn, name) + lb_policy_lists = [ + lb.policies.app_cookie_stickiness_policies, + lb.policies.lb_cookie_stickiness_policies, + lb.policies.other_policies + ] + policies = [] + for policy_list in lb_policy_lists: + policies += [p.policy_name for p in policy_list] + ret['policies'] = policies + return ret + except boto.exception.BotoServerError as error: + if getattr(error, 'error_code', '') == 'Throttling': + log.info('Throttled by AWS API, will retry in 5 seconds.') + time.sleep(5) + continue + log.error(error) + return {} def listener_dict_to_tuple(listener): diff --git a/salt/states/boto_elb.py b/salt/states/boto_elb.py index 0b9357f79d..6163c09dfd 100644 --- a/salt/states/boto_elb.py +++ b/salt/states/boto_elb.py @@ -1312,7 +1312,7 @@ def _tags_present(name, tags, region, key, keyid, profile): tags_to_add = tags tags_to_update = {} tags_to_remove = [] - if lb['tags']: + if lb.get('tags'): for _tag in lb['tags']: if _tag not in tags.keys(): if _tag not in tags_to_remove: From e7ab96183a0fb774fa6e4164391cce21395731b9 Mon Sep 17 00:00:00 2001 From: Pablo Hernandez Date: Wed, 23 Aug 2017 19:07:30 -0400 Subject: [PATCH 174/639] Added handling for python3 compatibility --- salt/states/docker_image.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/states/docker_image.py b/salt/states/docker_image.py index f396a605c5..26751f0205 100644 
--- a/salt/states/docker_image.py +++ b/salt/states/docker_image.py @@ -41,6 +41,7 @@ import logging # Import salt libs import salt.utils.docker import salt.utils.args +from salt.ext.six.moves import zip # Enable proper logging log = logging.getLogger(__name__) # pylint: disable=invalid-name @@ -206,7 +207,7 @@ def present(name, # get the functions default value and args argspec = salt.utils.args.get_function_argspec(__salt__['docker.build']) # Map any if existing args from kwargs into the build_args dictionary - build_args = dict(zip(argspec.args, argspec.defaults)) + build_args = dict(list(zip(argspec.args, argspec.defaults))) for k, v in build_args.items(): if k in kwargs.get('kwargs', {}): build_args[k] = kwargs.get('kwargs', {}).get(k) From d8612ae0063af82f24dba94aa1e8d9139810694d Mon Sep 17 00:00:00 2001 From: Alessandro -oggei- Ogier Date: Tue, 22 Aug 2017 11:48:08 +0200 Subject: [PATCH 175/639] fix debootstrap and enhance packages selection/deletion via cmdline --- salt/modules/genesis.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/salt/modules/genesis.py b/salt/modules/genesis.py index 5680f00829..ba0a0a399a 100644 --- a/salt/modules/genesis.py +++ b/salt/modules/genesis.py @@ -24,6 +24,8 @@ import salt.utils.kickstart import salt.syspaths from salt.exceptions import SaltInvocationError +# Import 3rd-party libs +from salt.ext import six log = logging.getLogger(__name__) @@ -178,9 +180,13 @@ def bootstrap( if pkgs is None: pkgs = [] + elif isinstance(pkgs, six.string_types): + pkgs = pkgs.split(',') if exclude_pkgs is None: exclude_pkgs = [] + elif isinstance(exclude_pkgs, six.string_types): + exclude_pkgs = exclude_pkgs.split(',') if platform in ('rpm', 'yum'): _bootstrap_yum( @@ -393,15 +399,22 @@ def _bootstrap_deb( if repo_url is None: repo_url = 'http://ftp.debian.org/debian/' + if not salt.utils.which('debootstrap'): + log.error('Required tool debootstrap is not installed.') + return False + deb_args = [ 
'debootstrap', '--foreign', '--arch', - _cmd_quote(arch), - '--include', - ] + pkgs + [ - '--exclude', - ] + exclude_pkgs + [ + _cmd_quote(arch)] + + if pkgs: + deb_args += ['--include'] + pkgs + if exclude_pkgs: + deb_args += ['--exclude'] + exclude_pkgs + + deb_args += [ _cmd_quote(flavor), _cmd_quote(root), _cmd_quote(repo_url), From 216ced69e56bca31db2f24fe703387e5da9664f9 Mon Sep 17 00:00:00 2001 From: Alessandro -oggei- Ogier Date: Thu, 24 Aug 2017 12:00:36 +0200 Subject: [PATCH 176/639] allow comma-separated pkgs lists, quote args, test deb behaviour --- salt/modules/genesis.py | 21 +++++++---- tests/unit/modules/genesis_test.py | 57 ++++++++++++++++++++++++++++-- 2 files changed, 69 insertions(+), 9 deletions(-) diff --git a/salt/modules/genesis.py b/salt/modules/genesis.py index ba0a0a399a..eceaba5bb5 100644 --- a/salt/modules/genesis.py +++ b/salt/modules/genesis.py @@ -180,13 +180,9 @@ def bootstrap( if pkgs is None: pkgs = [] - elif isinstance(pkgs, six.string_types): - pkgs = pkgs.split(',') if exclude_pkgs is None: exclude_pkgs = [] - elif isinstance(exclude_pkgs, six.string_types): - exclude_pkgs = exclude_pkgs.split(',') if platform in ('rpm', 'yum'): _bootstrap_yum( @@ -331,6 +327,8 @@ def _bootstrap_yum( ''' if pkgs is None: pkgs = [] + elif isinstance(pkgs, six.string_types): + pkgs = pkgs.split(',') default_pkgs = ('yum', 'centos-release', 'iputils') for pkg in default_pkgs: @@ -339,6 +337,8 @@ def _bootstrap_yum( if exclude_pkgs is None: exclude_pkgs = [] + elif isinstance(exclude_pkgs, six.string_types): + exclude_pkgs = exclude_pkgs.split(',') for pkg in exclude_pkgs: pkgs.remove(pkg) @@ -403,6 +403,11 @@ def _bootstrap_deb( log.error('Required tool debootstrap is not installed.') return False + if isinstance(pkgs, (list, tuple)): + pkgs = ','.join(pkgs) + if isinstance(exclude_pkgs, (list, tuple)): + exclude_pkgs = ','.join(exclude_pkgs) + deb_args = [ 'debootstrap', '--foreign', @@ -410,9 +415,9 @@ def _bootstrap_deb( _cmd_quote(arch)] if 
pkgs: - deb_args += ['--include'] + pkgs + deb_args += ['--include', _cmd_quote(pkgs)] if exclude_pkgs: - deb_args += ['--exclude'] + exclude_pkgs + deb_args += ['--exclude', _cmd_quote(exclude_pkgs)] deb_args += [ _cmd_quote(flavor), @@ -482,6 +487,8 @@ def _bootstrap_pacman( if pkgs is None: pkgs = [] + elif isinstance(pkgs, six.string_types): + pkgs = pkgs.split(',') default_pkgs = ('pacman', 'linux', 'systemd-sysvcompat', 'grub') for pkg in default_pkgs: @@ -490,6 +497,8 @@ def _bootstrap_pacman( if exclude_pkgs is None: exclude_pkgs = [] + elif isinstance(exclude_pkgs, six.string_types): + exclude_pkgs = exclude_pkgs.split(',') for pkg in exclude_pkgs: pkgs.remove(pkg) diff --git a/tests/unit/modules/genesis_test.py b/tests/unit/modules/genesis_test.py index cbee47f09d..bab16d3f1e 100644 --- a/tests/unit/modules/genesis_test.py +++ b/tests/unit/modules/genesis_test.py @@ -49,12 +49,63 @@ class GenesisTestCase(TestCase): with patch.dict(genesis.__salt__, {'disk.blkid': MagicMock(return_value={})}): self.assertEqual(genesis.bootstrap('rpm', 'root', 'dir'), None) - with patch.object(genesis, '_bootstrap_deb', return_value='A'): + common_parms = {'platform': 'deb', + 'root': 'root', + 'img_format': 'dir', + 'arch': 'amd64', + 'flavor': 'stable', + 'static_qemu': 'qemu'} + + param_sets = [ + + {'params': {}, + 'commandlines': [ + ['debootstrap', '--foreign', '--arch', 'amd64', + 'stable', 'root', 'http://ftp.debian.org/debian/'], + ]}, + + {'params': {'pkgs': 'vim'}, + 'commandlines': [ + ['debootstrap', '--foreign', '--arch', 'amd64', + '--include', 'vim', + 'stable', 'root', 'http://ftp.debian.org/debian/'], + ]}, + + {'params': {'pkgs': 'vim,emacs'}, + 'commandlines': [ + ['debootstrap', '--foreign', '--arch', 'amd64', + '--include', 'vim,emacs', + 'stable', 'root', 'http://ftp.debian.org/debian/'], + ]}, + + {'params': {'pkgs': ['vim', 'emacs']}, + 'commandlines': [ + ['debootstrap', '--foreign', '--arch', 'amd64', + '--include', 'vim,emacs', + 'stable', 
'root', 'http://ftp.debian.org/debian/'], + ]}, + + {'params': {'pkgs': ['vim', 'emacs'], 'exclude_pkgs': ['vim', 'foo']}, + 'commandlines': [ + ['debootstrap', '--foreign', '--arch', 'amd64', + '--include', 'vim,emacs', '--exclude', 'vim,foo', + 'stable', 'root', 'http://ftp.debian.org/debian/'], + ]}, + + ] + + for param_set in param_sets: + with patch.dict(genesis.__salt__, {'mount.umount': MagicMock(), 'file.rmdir': MagicMock(), - 'file.directory_exists': MagicMock()}): + 'file.directory_exists': MagicMock(), + 'cmd.run': MagicMock()}): with patch.dict(genesis.__salt__, {'disk.blkid': MagicMock(return_value={})}): - self.assertEqual(genesis.bootstrap('deb', 'root', 'dir'), None) + param_set['params'].update(common_parms) + self.assertEqual(genesis.bootstrap(**param_set['params']), + None) + for commandline in param_set['commandlines']: + genesis.__salt__['cmd.run'].assert_any_call(commandline, python_shell=False) with patch.object(genesis, '_bootstrap_pacman', return_value='A') as pacman_patch: with patch.dict(genesis.__salt__, {'mount.umount': MagicMock(), From db11e1985b2e3f53ce258e2a266455584d4b5e12 Mon Sep 17 00:00:00 2001 From: SuperPommeDeTerre Date: Thu, 17 Aug 2017 11:56:23 +0200 Subject: [PATCH 177/639] Fix for #26995 --- salt/modules/artifactory.py | 77 +++++++++++++++++++------------------ 1 file changed, 40 insertions(+), 37 deletions(-) diff --git a/salt/modules/artifactory.py b/salt/modules/artifactory.py index d521e786f3..169ffaff8b 100644 --- a/salt/modules/artifactory.py +++ b/salt/modules/artifactory.py @@ -202,45 +202,48 @@ def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, versio has_classifier = classifier is not None and classifier != "" if snapshot_version is None: - snapshot_version_metadata = _get_snapshot_version_metadata(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, headers=headers) + try: + snapshot_version_metadata = 
_get_snapshot_version_metadata(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, headers=headers) + if packaging not in snapshot_version_metadata['snapshot_versions']: + error_message = '''Cannot find requested packaging '{packaging}' in the snapshot version metadata. + artifactory_url: {artifactory_url} + repository: {repository} + group_id: {group_id} + artifact_id: {artifact_id} + packaging: {packaging} + classifier: {classifier} + version: {version}'''.format( + artifactory_url=artifactory_url, + repository=repository, + group_id=group_id, + artifact_id=artifact_id, + packaging=packaging, + classifier=classifier, + version=version) + raise ArtifactoryError(error_message) - if packaging not in snapshot_version_metadata['snapshot_versions']: - error_message = '''Cannot find requested packaging '{packaging}' in the snapshot version metadata. - artifactory_url: {artifactory_url} - repository: {repository} - group_id: {group_id} - artifact_id: {artifact_id} - packaging: {packaging} - classifier: {classifier} - version: {version}'''.format( - artifactory_url=artifactory_url, - repository=repository, - group_id=group_id, - artifact_id=artifact_id, - packaging=packaging, - classifier=classifier, - version=version) - raise ArtifactoryError(error_message) + if has_classifier and classifier not in snapshot_version_metadata['snapshot_versions']: + error_message = '''Cannot find requested classifier '{classifier}' in the snapshot version metadata. 
+ artifactory_url: {artifactory_url} + repository: {repository} + group_id: {group_id} + artifact_id: {artifact_id} + packaging: {packaging} + classifier: {classifier} + version: {version}'''.format( + artifactory_url=artifactory_url, + repository=repository, + group_id=group_id, + artifact_id=artifact_id, + packaging=packaging, + classifier=classifier, + version=version) + raise ArtifactoryError(error_message) - if has_classifier and classifier not in snapshot_version_metadata['snapshot_versions']: - error_message = '''Cannot find requested classifier '{classifier}' in the snapshot version metadata. - artifactory_url: {artifactory_url} - repository: {repository} - group_id: {group_id} - artifact_id: {artifact_id} - packaging: {packaging} - classifier: {classifier} - version: {version}'''.format( - artifactory_url=artifactory_url, - repository=repository, - group_id=group_id, - artifact_id=artifact_id, - packaging=packaging, - classifier=classifier, - version=version) - raise ArtifactoryError(error_message) - - snapshot_version = snapshot_version_metadata['snapshot_versions'][packaging] + snapshot_version = snapshot_version_metadata['snapshot_versions'][packaging] + except CommandExecutionError as err: + log.error('Could not fetch maven-metadat.xml. Assuming snapshot_version=%s.', version) + snapshot_version = version group_url = __get_group_id_subpath(group_id) From e314102978a842efe9b99f629e7015e9a3e844d1 Mon Sep 17 00:00:00 2001 From: SuperPommeDeTerre Date: Thu, 17 Aug 2017 13:28:10 +0200 Subject: [PATCH 178/639] Fix typo. 
--- salt/modules/artifactory.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/artifactory.py b/salt/modules/artifactory.py index 169ffaff8b..26065a8d37 100644 --- a/salt/modules/artifactory.py +++ b/salt/modules/artifactory.py @@ -242,7 +242,7 @@ def _get_snapshot_url(artifactory_url, repository, group_id, artifact_id, versio snapshot_version = snapshot_version_metadata['snapshot_versions'][packaging] except CommandExecutionError as err: - log.error('Could not fetch maven-metadat.xml. Assuming snapshot_version=%s.', version) + log.error('Could not fetch maven-metadata.xml. Assuming snapshot_version=%s.', version) snapshot_version = version group_url = __get_group_id_subpath(group_id) From 13e5997457d3bcf592f1b32149d297c09437a521 Mon Sep 17 00:00:00 2001 From: Alessandro -oggei- Ogier Date: Thu, 24 Aug 2017 12:49:39 +0200 Subject: [PATCH 179/639] lint --- tests/unit/modules/genesis_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/modules/genesis_test.py b/tests/unit/modules/genesis_test.py index bab16d3f1e..44325ab711 100644 --- a/tests/unit/modules/genesis_test.py +++ b/tests/unit/modules/genesis_test.py @@ -66,14 +66,14 @@ class GenesisTestCase(TestCase): {'params': {'pkgs': 'vim'}, 'commandlines': [ - ['debootstrap', '--foreign', '--arch', 'amd64', + ['debootstrap', '--foreign', '--arch', 'amd64', '--include', 'vim', 'stable', 'root', 'http://ftp.debian.org/debian/'], ]}, {'params': {'pkgs': 'vim,emacs'}, 'commandlines': [ - ['debootstrap', '--foreign', '--arch', 'amd64', + ['debootstrap', '--foreign', '--arch', 'amd64', '--include', 'vim,emacs', 'stable', 'root', 'http://ftp.debian.org/debian/'], ]}, From 3634055e3462edf9b4c0fabc377c0288e9f3a15c Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Thu, 24 Aug 2017 11:05:32 +0000 Subject: [PATCH 180/639] Improve napalm state output in debug mode --- salt/utils/napalm.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) 
diff --git a/salt/utils/napalm.py b/salt/utils/napalm.py index 0ca387cc68..721aa43d95 100644 --- a/salt/utils/napalm.py +++ b/salt/utils/napalm.py @@ -435,10 +435,8 @@ def default_ret(name): def loaded_ret(ret, loaded, test, debug): ''' Return the final state output. - ret The initial state output structure. - loaded The loaded dictionary. ''' @@ -447,9 +445,6 @@ def loaded_ret(ret, loaded, test, debug): 'comment': loaded.get('comment', '') }) pchanges = {} - if not loaded.get('result', False): - # Failure of some sort - return ret if debug: # Always check for debug pchanges.update({ @@ -458,6 +453,15 @@ def loaded_ret(ret, loaded, test, debug): ret.update({ "pchanges": pchanges }) + if not loaded.get('result', False): + # Failure of some sort + if debug: + ret['comment'] = '{base_err}\n\nLoaded config:\n\n{loaded_cfg}'.format(base_err=ret['comment'], + loaded_cfg=loaded['loaded_config']) + if loaded.get('diff'): + ret['comment'] = '{comment_base}\n\nConfiguration diff:\n\n{diff}'.format(comment_base=ret['comment'], + diff=loaded['diff']) + return ret if not loaded.get('already_configured', True): # We're making changes pchanges.update({ @@ -484,6 +488,7 @@ def loaded_ret(ret, loaded, test, debug): return ret # No changes ret.update({ - 'result': True + 'result': True, + 'changes': {} }) return ret From 3a906109bd9398f403fe0acf07f78c216a7ece2a Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Thu, 24 Aug 2017 12:26:46 +0000 Subject: [PATCH 181/639] Include compliance reports For the netyang state, the compliance report will be included by default in the pchanges dictionary. It can be also returned in the comment field, by enabling using the `compliance_report` option. 
--- salt/states/netyang.py | 10 +++++++++- salt/utils/napalm.py | 19 ++++++++++++++----- 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/salt/states/netyang.py b/salt/states/netyang.py index e39b09b3b9..b116cb8f6a 100644 --- a/salt/states/netyang.py +++ b/salt/states/netyang.py @@ -38,6 +38,7 @@ except ImportError: HAS_NAPALM_YANG = False # Import salt modules +import salt.output from salt.utils import fopen import salt.utils.napalm @@ -140,6 +141,7 @@ def managed(name, debug = kwargs.get('debug', False) or __opts__.get('debug', False) commit = kwargs.get('commit', True) or __opts__.get('commit', True) replace = kwargs.get('replace', False) or __opts__.get('replace', False) + return_compliance_report = kwargs.get('compliance_report', False) or __opts__.get('compliance_report', False) profiles = kwargs.get('profiles', []) temp_file = __salt__['temp.file']() log.debug('Creating temp file: {0}'.format(temp_file)) @@ -180,7 +182,13 @@ def managed(name, log.debug('Loaded config result:') log.debug(loaded_changes) __salt__['file.remove'](temp_file) - return salt.utils.napalm.loaded_ret(ret, loaded_changes, test, debug) + loaded_changes['compliance_report'] = compliance_report + return salt.utils.napalm.loaded_ret(ret, + loaded_changes, + test, + debug, + opts=__opts__, + compliance_report=return_compliance_report) def configured(name, diff --git a/salt/utils/napalm.py b/salt/utils/napalm.py index 721aa43d95..abaf731ffa 100644 --- a/salt/utils/napalm.py +++ b/salt/utils/napalm.py @@ -432,7 +432,7 @@ def default_ret(name): return ret -def loaded_ret(ret, loaded, test, debug): +def loaded_ret(ret, loaded, test, debug, compliance_report=False, opts=None): ''' Return the final state output. 
ret @@ -445,6 +445,8 @@ def loaded_ret(ret, loaded, test, debug): 'comment': loaded.get('comment', '') }) pchanges = {} + if 'compliance_report' in loaded: + pchanges['compliance_report'] = loaded['compliance_report'] if debug: # Always check for debug pchanges.update({ @@ -471,10 +473,17 @@ def loaded_ret(ret, loaded, test, debug): 'pchanges': pchanges }) if test: - for k, v in pchanges.items(): - ret.update({ - "comment": "{}:\n{}\n\n{}".format(k, v, ret.get("comment", '')) - }) + if pchanges.get('diff'): + ret['comment'] = '{comment_base}\n\nConfiguration diff:\n\n{diff}'.format(comment_base=ret['comment'], + diff=pchanges['diff']) + if pchanges.get('loaded_config'): + ret['comment'] = '{comment_base}\n\nLoaded config:\n\n{loaded_cfg}'.format( + comment_base=ret['comment'], + loaded_cfg=pchanges['loaded_config']) + if compliance_report and pchanges.get('compliance_report'): + ret['comment'] = '{comment_base}\n\nCompliance report:\n\n{compliance}'.format( + comment_base=ret['comment'], + compliance=salt.output.string_format(pchanges['compliance_report'], 'nested', opts=opts)) ret.update({ 'result': None, }) From 0bbea6b04c58bad3dcbac707f64c997804aa02f4 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Thu, 24 Aug 2017 12:33:27 +0000 Subject: [PATCH 182/639] Document the new compliance_report arg --- salt/states/netyang.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/states/netyang.py b/salt/states/netyang.py index b116cb8f6a..e47f4a9830 100644 --- a/salt/states/netyang.py +++ b/salt/states/netyang.py @@ -93,6 +93,13 @@ def managed(name, Use certain profiles to generate the config. If not specified, will use the platform default profile(s). + compliance_report: ``False`` + Return the compliance report in the comment. + The compliance report structured object can be found however + in the ``pchanges`` field of the output (not displayed on the CLI). + + .. versionadded:: 2017.7.3 + test: ``False`` Dry run? 
If set as ``True``, will apply the config, discard and return the changes. Default: ``False`` and will commit From 1cd33cbaa97fdc9eb1931df73a051716d4431325 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Thu, 24 Aug 2017 13:00:15 +0000 Subject: [PATCH 183/639] Simplify the loaded_ret logic --- salt/utils/napalm.py | 64 +++++++++++++++++--------------------------- 1 file changed, 25 insertions(+), 39 deletions(-) diff --git a/salt/utils/napalm.py b/salt/utils/napalm.py index abaf731ffa..dc7443f088 100644 --- a/salt/utils/napalm.py +++ b/salt/utils/napalm.py @@ -441,58 +441,44 @@ def loaded_ret(ret, loaded, test, debug, compliance_report=False, opts=None): The loaded dictionary. ''' # Always get the comment - ret.update({ - 'comment': loaded.get('comment', '') - }) + changes = {} pchanges = {} + ret['comment'] = loaded['comment'] + if 'diff' in loaded: + changes['diff'] = loaded['diff'] + pchanges['diff'] = loaded['diff'] if 'compliance_report' in loaded: + if compliance_report: + changes['compliance_report'] = loaded['compliance_report'] pchanges['compliance_report'] = loaded['compliance_report'] - if debug: - # Always check for debug - pchanges.update({ - 'loaded_config': loaded.get('loaded_config', '') - }) - ret.update({ - "pchanges": pchanges - }) + if debug and 'loaded_config' in loaded: + changes['loaded_config'] = loaded['loaded_config'] + pchanges['loaded_config'] = loaded['loaded_config'] + ret['pchanges']= pchanges + if changes.get('diff'): + ret['comment'] = '{comment_base}\n\nConfiguration diff:\n\n{diff}'.format(comment_base=ret['comment'], + diff=pchanges['diff']) + if changes.get('loaded_config'): + ret['comment'] = '{comment_base}\n\nLoaded config:\n\n{loaded_cfg}'.format( + comment_base=ret['comment'], + loaded_cfg=changes['loaded_config']) + if changes.get('compliance_report'): + ret['comment'] = '{comment_base}\n\nCompliance report:\n\n{compliance}'.format( + comment_base=ret['comment'], + 
compliance=salt.output.string_format(changes['compliance_report'], 'nested', opts=opts)) if not loaded.get('result', False): # Failure of some sort - if debug: - ret['comment'] = '{base_err}\n\nLoaded config:\n\n{loaded_cfg}'.format(base_err=ret['comment'], - loaded_cfg=loaded['loaded_config']) - if loaded.get('diff'): - ret['comment'] = '{comment_base}\n\nConfiguration diff:\n\n{diff}'.format(comment_base=ret['comment'], - diff=loaded['diff']) return ret if not loaded.get('already_configured', True): # We're making changes - pchanges.update({ - "diff": loaded.get('diff', '') - }) - ret.update({ - 'pchanges': pchanges - }) if test: - if pchanges.get('diff'): - ret['comment'] = '{comment_base}\n\nConfiguration diff:\n\n{diff}'.format(comment_base=ret['comment'], - diff=pchanges['diff']) - if pchanges.get('loaded_config'): - ret['comment'] = '{comment_base}\n\nLoaded config:\n\n{loaded_cfg}'.format( - comment_base=ret['comment'], - loaded_cfg=pchanges['loaded_config']) - if compliance_report and pchanges.get('compliance_report'): - ret['comment'] = '{comment_base}\n\nCompliance report:\n\n{compliance}'.format( - comment_base=ret['comment'], - compliance=salt.output.string_format(pchanges['compliance_report'], 'nested', opts=opts)) - ret.update({ - 'result': None, - }) + ret['result'] = None return ret # Not test, changes were applied ret.update({ 'result': True, - 'changes': pchanges, - 'comment': "Configuration changed!\n{}".format(ret.get('comment', '')) + 'changes': changes, + 'comment': "Configuration changed!\n{}".format(loaded['comment']) }) return ret # No changes From e5cc667762616446383a0a454b5b6fde74ddb8dc Mon Sep 17 00:00:00 2001 From: Alessandro -oggei- Ogier Date: Thu, 24 Aug 2017 15:04:37 +0200 Subject: [PATCH 184/639] tests: fix a leftover and simplify some parts --- tests/unit/modules/genesis_test.py | 54 +++++++++++++----------------- 1 file changed, 24 insertions(+), 30 deletions(-) diff --git a/tests/unit/modules/genesis_test.py 
b/tests/unit/modules/genesis_test.py index 44325ab711..31dccabad1 100644 --- a/tests/unit/modules/genesis_test.py +++ b/tests/unit/modules/genesis_test.py @@ -59,38 +59,33 @@ class GenesisTestCase(TestCase): param_sets = [ {'params': {}, - 'commandlines': [ - ['debootstrap', '--foreign', '--arch', 'amd64', - 'stable', 'root', 'http://ftp.debian.org/debian/'], - ]}, + 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', + 'stable', 'root', 'http://ftp.debian.org/debian/'] + }, {'params': {'pkgs': 'vim'}, - 'commandlines': [ - ['debootstrap', '--foreign', '--arch', 'amd64', - '--include', 'vim', - 'stable', 'root', 'http://ftp.debian.org/debian/'], - ]}, + 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', + '--include', 'vim', + 'stable', 'root', 'http://ftp.debian.org/debian/'] + }, {'params': {'pkgs': 'vim,emacs'}, - 'commandlines': [ - ['debootstrap', '--foreign', '--arch', 'amd64', - '--include', 'vim,emacs', - 'stable', 'root', 'http://ftp.debian.org/debian/'], - ]}, + 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', + '--include', 'vim,emacs', + 'stable', 'root', 'http://ftp.debian.org/debian/'] + }, {'params': {'pkgs': ['vim', 'emacs']}, - 'commandlines': [ - ['debootstrap', '--foreign', '--arch', 'amd64', - '--include', 'vim,emacs', - 'stable', 'root', 'http://ftp.debian.org/debian/'], - ]}, + 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', + '--include', 'vim,emacs', + 'stable', 'root', 'http://ftp.debian.org/debian/'] + }, {'params': {'pkgs': ['vim', 'emacs'], 'exclude_pkgs': ['vim', 'foo']}, - 'commandlines': [ - ['debootstrap', '--foreign', '--arch', 'amd64', - '--include', 'vim,emacs', '--exclude', 'vim,foo', - 'stable', 'root', 'http://ftp.debian.org/debian/'], - ]}, + 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', + '--include', 'vim,emacs', '--exclude', 'vim,foo', + 'stable', 'root', 'http://ftp.debian.org/debian/'] + }, ] @@ -99,13 +94,12 @@ class GenesisTestCase(TestCase): with patch.dict(genesis.__salt__, 
{'mount.umount': MagicMock(), 'file.rmdir': MagicMock(), 'file.directory_exists': MagicMock(), - 'cmd.run': MagicMock()}): - with patch.dict(genesis.__salt__, {'disk.blkid': MagicMock(return_value={})}): + 'cmd.run': MagicMock(), + 'disk.blkid': MagicMock(return_value={})}): + with patch('salt.modules.genesis.salt.utils.which', return_value=True): param_set['params'].update(common_parms) - self.assertEqual(genesis.bootstrap(**param_set['params']), - None) - for commandline in param_set['commandlines']: - genesis.__salt__['cmd.run'].assert_any_call(commandline, python_shell=False) + self.assertEqual(genesis.bootstrap(**param_set['params']), None) + genesis.__salt__['cmd.run'].assert_any_call(param_set['cmd'], python_shell=False) with patch.object(genesis, '_bootstrap_pacman', return_value='A') as pacman_patch: with patch.dict(genesis.__salt__, {'mount.umount': MagicMock(), From db94f3bb1c4806f474f33ad7c4d067770e1642a0 Mon Sep 17 00:00:00 2001 From: Alessandro -oggei- Ogier Date: Thu, 24 Aug 2017 15:07:08 +0200 Subject: [PATCH 185/639] better formatting --- tests/unit/modules/genesis_test.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/unit/modules/genesis_test.py b/tests/unit/modules/genesis_test.py index 31dccabad1..784bb8ad84 100644 --- a/tests/unit/modules/genesis_test.py +++ b/tests/unit/modules/genesis_test.py @@ -60,31 +60,31 @@ class GenesisTestCase(TestCase): {'params': {}, 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', - 'stable', 'root', 'http://ftp.debian.org/debian/'] + 'stable', 'root', 'http://ftp.debian.org/debian/'] }, {'params': {'pkgs': 'vim'}, 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', - '--include', 'vim', - 'stable', 'root', 'http://ftp.debian.org/debian/'] + '--include', 'vim', + 'stable', 'root', 'http://ftp.debian.org/debian/'] }, {'params': {'pkgs': 'vim,emacs'}, 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', - '--include', 'vim,emacs', - 'stable', 'root', 
'http://ftp.debian.org/debian/'] + '--include', 'vim,emacs', + 'stable', 'root', 'http://ftp.debian.org/debian/'] }, {'params': {'pkgs': ['vim', 'emacs']}, 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', - '--include', 'vim,emacs', - 'stable', 'root', 'http://ftp.debian.org/debian/'] + '--include', 'vim,emacs', + 'stable', 'root', 'http://ftp.debian.org/debian/'] }, {'params': {'pkgs': ['vim', 'emacs'], 'exclude_pkgs': ['vim', 'foo']}, 'cmd': ['debootstrap', '--foreign', '--arch', 'amd64', - '--include', 'vim,emacs', '--exclude', 'vim,foo', - 'stable', 'root', 'http://ftp.debian.org/debian/'] + '--include', 'vim,emacs', '--exclude', 'vim,foo', + 'stable', 'root', 'http://ftp.debian.org/debian/'] }, ] From c10717dc898f26478a1ac219d9394f88371b0f9d Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Thu, 24 Aug 2017 13:20:45 +0000 Subject: [PATCH 186/639] Lint and fix --- salt/states/netyang.py | 1 - salt/utils/napalm.py | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/states/netyang.py b/salt/states/netyang.py index e47f4a9830..2db6208f57 100644 --- a/salt/states/netyang.py +++ b/salt/states/netyang.py @@ -38,7 +38,6 @@ except ImportError: HAS_NAPALM_YANG = False # Import salt modules -import salt.output from salt.utils import fopen import salt.utils.napalm diff --git a/salt/utils/napalm.py b/salt/utils/napalm.py index dc7443f088..0523e0a568 100644 --- a/salt/utils/napalm.py +++ b/salt/utils/napalm.py @@ -24,6 +24,7 @@ from functools import wraps log = logging.getLogger(__file__) import salt.utils +import salt.output # Import third party lib try: @@ -454,10 +455,10 @@ def loaded_ret(ret, loaded, test, debug, compliance_report=False, opts=None): if debug and 'loaded_config' in loaded: changes['loaded_config'] = loaded['loaded_config'] pchanges['loaded_config'] = loaded['loaded_config'] - ret['pchanges']= pchanges + ret['pchanges'] = pchanges if changes.get('diff'): ret['comment'] = '{comment_base}\n\nConfiguration 
diff:\n\n{diff}'.format(comment_base=ret['comment'], - diff=pchanges['diff']) + diff=changes['diff']) if changes.get('loaded_config'): ret['comment'] = '{comment_base}\n\nLoaded config:\n\n{loaded_cfg}'.format( comment_base=ret['comment'], From d010b74b8779b3b627761e1e6ee4c439d0922a11 Mon Sep 17 00:00:00 2001 From: Darren Demicoli Date: Fri, 18 Aug 2017 01:36:51 +0200 Subject: [PATCH 187/639] Do not try to match pillarenv with __env__ --- salt/pillar/git_pillar.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/pillar/git_pillar.py b/salt/pillar/git_pillar.py index 8b85d14ab3..ae1d337387 100644 --- a/salt/pillar/git_pillar.py +++ b/salt/pillar/git_pillar.py @@ -561,7 +561,7 @@ def ext_pillar(minion_id, repo, pillar_dirs): ) for pillar_dir, env in six.iteritems(pillar.pillar_dirs): # If pillarenv is set, only grab pillars with that match pillarenv - if opts['pillarenv'] and env != opts['pillarenv']: + if opts['pillarenv'] and env != opts['pillarenv'] and env != '__env__': log.debug( 'env \'%s\' for pillar dir \'%s\' does not match ' 'pillarenv \'%s\', skipping', From 999aa63d3d93556d12d5f38c3391724afcb0e6d4 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 24 Aug 2017 10:52:24 -0500 Subject: [PATCH 188/639] Improve pkgrepo docs Also deprecate the refresh_db argument in aptpkg.mod_repo (and the pkgrepo states), in favor of the widely used "refresh" argument. --- salt/modules/aptpkg.py | 71 ++++++++++++++++++++++++------------------ salt/states/pkgrepo.py | 49 +++++++++++++++++++---------- 2 files changed, 74 insertions(+), 46 deletions(-) diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py index c5f9d33650..8d5d674306 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py @@ -2147,44 +2147,44 @@ def mod_repo(repo, saltenv='base', **kwargs): The following options are available to modify a repo definition: - architectures - a comma separated list of supported architectures, e.g. 
``amd64`` - If this option is not set, all architectures (configured in the - system) will be used. + architectures + A comma-separated list of supported architectures, e.g. ``amd64`` If + this option is not set, all architectures (configured in the system) + will be used. - comps - a comma separated list of components for the repo, e.g. ``main`` + comps + A comma separated list of components for the repo, e.g. ``main`` - file - a file name to be used + file + A file name to be used - keyserver - keyserver to get gpg key from + keyserver + Keyserver to get gpg key from - keyid - key id to load with the keyserver argument + keyid + Key ID to load with the ``keyserver`` argument - key_url - URL to a GPG key to add to the APT GPG keyring + key_url + URL to a GPG key to add to the APT GPG keyring - key_text - GPG key in string form to add to the APT GPG keyring + key_text + GPG key in string form to add to the APT GPG keyring - consolidate - if ``True``, will attempt to de-dup and consolidate sources + consolidate : False + If ``True``, will attempt to de-duplicate and consolidate sources - comments - Sometimes you want to supply additional information, but not as - enabled configuration. All comments provided here will be joined - into a single string and appended to the repo configuration with a - comment marker (#) before it. + comments + Sometimes you want to supply additional information, but not as + enabled configuration. All comments provided here will be joined + into a single string and appended to the repo configuration with a + comment marker (#) before it. - .. versionadded:: 2015.8.9 + .. versionadded:: 2015.8.9 - .. note:: Due to the way keys are stored for APT, there is a known issue - where the key won't be updated unless another change is made - at the same time. Keys should be properly added on initial - configuration. + .. 
note:: + Due to the way keys are stored for APT, there is a known issue where + the key won't be updated unless another change is made at the same + time. Keys should be properly added on initial configuration. CLI Examples: @@ -2193,6 +2193,17 @@ def mod_repo(repo, saltenv='base', **kwargs): salt '*' pkg.mod_repo 'myrepo definition' uri=http://new/uri salt '*' pkg.mod_repo 'myrepo definition' comps=main,universe ''' + if 'refresh_db' in kwargs: + salt.utils.versions.warn_until( + 'Neon', + 'The \'refresh_db\' argument to \'pkg.mod_repo\' has been ' + 'renamed to \'refresh\'. Support for using \'refresh_db\' will be ' + 'removed in the Neon release of Salt.' + ) + refresh = kwargs['refresh_db'] + else: + refresh = kwargs.get('refresh', True) + _check_apt() # to ensure no one sets some key values that _shouldn't_ be changed on the # object itself, this is just a white-list of "ok" to set properties @@ -2225,7 +2236,7 @@ def mod_repo(repo, saltenv='base', **kwargs): ) ) # explicit refresh when a repo is modified. - if kwargs.get('refresh_db', True): + if refresh: refresh_db() return {repo: out} else: @@ -2429,7 +2440,7 @@ def mod_repo(repo, saltenv='base', **kwargs): setattr(mod_source, key, kwargs[key]) sources.save() # on changes, explicitly refresh - if kwargs.get('refresh_db', True): + if refresh: refresh_db() return { repo: { diff --git a/salt/states/pkgrepo.py b/salt/states/pkgrepo.py index 1a8e9efb30..0c47abae1d 100644 --- a/salt/states/pkgrepo.py +++ b/salt/states/pkgrepo.py @@ -97,6 +97,7 @@ import salt.utils import salt.utils.files import salt.utils.pkg.deb import salt.utils.pkg.rpm +import salt.utils.versions def __virtual__(): @@ -132,7 +133,7 @@ def managed(name, ppa=None, **kwargs): disabled : False Included to reduce confusion due to APT's use of the ``disabled`` - argument. If this is passed for a yum/dnf/zypper-based distro, then the + argument. If this is passed for a YUM/DNF/Zypper-based distro, then the reverse will be passed as ``enabled``. 
For example passing ``disabled=True`` will assume ``enabled=False``. @@ -151,7 +152,7 @@ def managed(name, ppa=None, **kwargs): enabled configuration. Anything supplied for this list will be saved in the repo configuration with a comment marker (#) in front. - Additional configuration values seen in yum repo files, such as ``gpgkey`` or + Additional configuration values seen in repo files, such as ``gpgkey`` or ``gpgcheck``, will be used directly as key-value pairs. For example: .. code-block:: yaml @@ -258,29 +259,45 @@ def managed(name, ppa=None, **kwargs): Use either ``keyid``/``keyserver`` or ``key_url``, but not both. - consolidate - If set to true, this will consolidate all sources definitions to - the sources.list file, cleanup the now unused files, consolidate - components (e.g. main) for the same URI, type, and architecture - to a single line, and finally remove comments from the sources.list - file. The consolidate will run every time the state is processed. The - option only needs to be set on one repo managed by salt to take effect. + consolidate : False + If set to ``True``, this will consolidate all sources definitions to the + sources.list file, cleanup the now unused files, consolidate components + (e.g. main) for the same URI, type, and architecture to a single line, + and finally remove comments from the sources.list file. The consolidate + will run every time the state is processed. The option only needs to be + set on one repo managed by salt to take effect. - clean_file - If set to true, empty file before config repo, dangerous if use - multiple sources in one file. + clean_file : False + If set to ``True``, empty the file before config repo + + .. note:: + Use with care. This can be dangerous if multiple sources are + configured in the same file. .. versionadded:: 2015.8.0 - refresh_db - If set to false this will skip refreshing the apt package database on - debian based systems. 
+ refresh : True + If set to ``False`` this will skip refreshing the apt package database + on debian based systems. + + refresh_db : True + .. deprecated:: Oxygen + Use ``refresh`` instead. require_in Set this to a list of pkg.installed or pkg.latest to trigger the running of apt-get update prior to attempting to install these - packages. Setting a require in the pkg will not work for this. + packages. Setting a require in the pkg state will not work for this. ''' + if 'refresh_db' in kwargs: + salt.utils.versions.warn_until( + 'Neon', + 'The \'refresh_db\' argument to \'pkg.mod_repo\' has been ' + 'renamed to \'refresh\'. Support for using \'refresh_db\' will be ' + 'removed in the Neon release of Salt.' + ) + kwargs['refresh'] = kwargs.pop('refresh_db') + ret = {'name': name, 'changes': {}, 'result': None, From 7b5943a31a62910097d06a72d046053f723fc6c1 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 24 Aug 2017 11:04:31 -0500 Subject: [PATCH 189/639] Add warning about adding new functions to salt/utils/__init__.py Since we are breaking this up, we don't want new functions added here. --- salt/utils/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 35e9172a5a..41f06fbb9a 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -1,6 +1,11 @@ # -*- coding: utf-8 -*- ''' Some of the utils used by salt + +NOTE: The dev team is working on splitting up this file for the Oxygen release. +Please do not add any new functions to this file. New functions should be +organized in other files under salt/utils/. Please consult the dev team if you +are unsure where a new function should go. 
''' # Import python libs From 5385c7901edc646ae90cd9f852e1ed82dd546a40 Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 24 Aug 2017 12:07:44 -0400 Subject: [PATCH 190/639] Move new utils/__init__.py funcs to utils.files.py We're in the process of breaking up the utils/__init__.py file, so let's steer clear of adding new funcs here. This way we can avoid a deprecation process for these functions in develop, since the funcs in this location were never released. Refs #43056 --- salt/utils/__init__.py | 34 ---------------------------------- salt/utils/files.py | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 34 deletions(-) diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index c03e0a6d39..ec018f1ad7 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -16,7 +16,6 @@ import json import logging import numbers import os -import os.path import posixpath import random import re @@ -33,7 +32,6 @@ import warnings import string import subprocess import getpass -import urllib # Import 3rd-party libs from salt.ext import six @@ -165,38 +163,6 @@ def is_empty(filename): return False -def safe_filename_leaf(file_basename): - ''' - input the basename of a file, without the directory tree, and returns a safe name to use - i.e. only the required characters are converted by urllib.quote - If the input is a PY2 String, output a PY2 String. If input is Unicode output Unicode. - For consistency all platforms are treated the same. Hard coded to utf8 as its ascii compatible - windows is \\ / : * ? 
" < > | posix is / - ''' - def _replace(re_obj): - return urllib.quote(re_obj.group(0), safe=u'') - if not isinstance(file_basename, six.text_type): - # the following string is not prefixed with u - return re.sub('[\\\\:/*?"<>|]', - _replace, - six.text_type(file_basename, 'utf8').encode('ascii', 'backslashreplace')) - # the following string is prefixed with u - return re.sub(u'[\\\\:/*?"<>|]', _replace, file_basename, flags=re.UNICODE) - - -def safe_filepath(file_path_name): - ''' - input the full path and filename, splits on directory separator and calls safe_filename_leaf for - each part of the path. - ''' - (drive, path) = os.path.splitdrive(file_path_name) - path = os.sep.join([safe_filename_leaf(file_section) for file_section in file_path_name.rsplit(os.sep)]) - if drive: - return os.sep.join([drive, path]) - else: - return path - - def is_hex(value): ''' Returns True if value is a hexidecimal string, otherwise returns False diff --git a/salt/utils/files.py b/salt/utils/files.py index d4893608a2..8d463756d9 100644 --- a/salt/utils/files.py +++ b/salt/utils/files.py @@ -7,9 +7,11 @@ import contextlib import errno import logging import os +import re import shutil import subprocess import time +import urllib # Import salt libs import salt.utils @@ -258,3 +260,39 @@ def set_umask(mask): yield finally: os.umask(orig_mask) + + +def safe_filename_leaf(file_basename): + ''' + Input the basename of a file, without the directory tree, and returns a safe name to use + i.e. only the required characters are converted by urllib.quote + If the input is a PY2 String, output a PY2 String. If input is Unicode output Unicode. + For consistency all platforms are treated the same. Hard coded to utf8 as its ascii compatible + windows is \\ / : * ? " < > | posix is / + + .. 
versionadded:: 2017.7.2 + ''' + def _replace(re_obj): + return urllib.quote(re_obj.group(0), safe=u'') + if not isinstance(file_basename, six.text_type): + # the following string is not prefixed with u + return re.sub('[\\\\:/*?"<>|]', + _replace, + six.text_type(file_basename, 'utf8').encode('ascii', 'backslashreplace')) + # the following string is prefixed with u + return re.sub(u'[\\\\:/*?"<>|]', _replace, file_basename, flags=re.UNICODE) + + +def safe_filepath(file_path_name): + ''' + Input the full path and filename, splits on directory separator and calls safe_filename_leaf for + each part of the path. + + .. versionadded:: 2017.7.2 + ''' + (drive, path) = os.path.splitdrive(file_path_name) + path = os.sep.join([safe_filename_leaf(file_section) for file_section in file_path_name.rsplit(os.sep)]) + if drive: + return os.sep.join([drive, path]) + else: + return path From 1b24244bd32149dec40644771b670538b546bed4 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Thu, 24 Aug 2017 12:51:00 -0400 Subject: [PATCH 191/639] Add New Release Branch Strategy to Contribution Docs --- doc/topics/development/contributing.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/doc/topics/development/contributing.rst b/doc/topics/development/contributing.rst index 57e3b3c677..fd21d86a23 100644 --- a/doc/topics/development/contributing.rst +++ b/doc/topics/development/contributing.rst @@ -260,6 +260,13 @@ The Salt development team will back-port bug fixes made to ``develop`` to the current release branch if the contributor cannot create the pull request against that branch. +Release Branches +---------------- + +For each release a branch will be created when we are ready to tag. The branch will be the same name as the tag minus the v. For example, the v2017.7.1 release was created from the 2017.7.1 branch. This branching strategy will allow for more stability when there is a need for a re-tag during the testing phase of our releases. 
+ +Once the branch is created, the fixes required for a given release, as determined by the SaltStack release team, will be added to this branch. All commits in this branch will be merged forward into the parent branch as well. + Keeping Salt Forks in Sync ========================== From 9d97ea30e7010088c61c57cef71451f5627997fc Mon Sep 17 00:00:00 2001 From: Tom Williams Date: Thu, 24 Aug 2017 13:17:27 -0400 Subject: [PATCH 192/639] INFRA-5461 - add time constraint to loop --- salt/modules/boto_elb.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/salt/modules/boto_elb.py b/salt/modules/boto_elb.py index b62531187e..9b300d368f 100644 --- a/salt/modules/boto_elb.py +++ b/salt/modules/boto_elb.py @@ -161,6 +161,8 @@ def get_elb_config(name, region=None, key=None, keyid=None, profile=None): salt myminion boto_elb.exists myelb region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + wait = 60 + orig_wait = wait while True: try: @@ -204,9 +206,13 @@ def get_elb_config(name, region=None, key=None, keyid=None, profile=None): return ret except boto.exception.BotoServerError as error: if getattr(error, 'error_code', '') == 'Throttling': - log.info('Throttled by AWS API, will retry in 5 seconds.') - time.sleep(5) - continue + if wait > 0: + sleep = wait if wait % 5 == wait else 5 + log.info('Throttled by AWS API, will retry in 5 seconds.') + time.sleep(sleep) + wait -= sleep + continue + log.error('API still throttling us after {0} seconds!'.format(orig_wait)) log.error(error) return {} From d5abd98406b05c5e64e970cb9ea3233f3f2e95a0 Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Fri, 18 Aug 2017 09:03:20 -0700 Subject: [PATCH 193/639] tweaking to be able to use beacon module & state module to manage custom beacons --- salt/beacons/__init__.py | 23 +++++++++++++++++++++++ salt/minion.py | 2 ++ salt/modules/beacons.py | 6 ++++++ 3 files changed, 31 insertions(+) diff --git a/salt/beacons/__init__.py b/salt/beacons/__init__.py index 0b27bfbe67..c761a28a79 100644 --- a/salt/beacons/__init__.py +++ b/salt/beacons/__init__.py @@ -214,6 +214,29 @@ class Beacon(object): return True + def validate_beacon(self, name, beacon_data): + ''' + Return available beacon functions + ''' + validate_str = '{}.validate' + # Run the validate function if it's available, + # otherwise there is a warning about it being missing + if validate_str in self.beacons: + valid, vcomment = self.beacons[validate_str](b_config[mod]) + + if not valid: + log.info('Beacon %s configuration invalid, ' + 'not running.\n%s', mod, vcomment) + continue + + # Fire the complete event back along with the list of beacons + evt = salt.utils.event.get_event('minion', opts=self.opts) + log.debug('=== self.beacons {} ==='.format(list(self.beacons))) + evt.fire_event({'complete': True, 'beacons': list(self.beacons)}, + tag='/salt/minion/minion_available_beacons') + + return True + def add_beacon(self, name, beacon_data): ''' Add a beacon item diff --git a/salt/minion.py b/salt/minion.py index 5713a0edb6..3faeee35e6 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -1930,6 +1930,8 @@ class Minion(MinionBase): self.beacons.disable_beacon(name) elif func == u'list': self.beacons.list_beacons() + elif func == u'validate_beacon': + self.beacons.validate_beacon(name, beacon_data) def environ_setenv(self, tag, data): ''' diff --git a/salt/modules/beacons.py b/salt/modules/beacons.py index 0b6cd342f4..3bb42b7684 100644 --- a/salt/modules/beacons.py +++ b/salt/modules/beacons.py @@ -97,6 +97,12 @@ def add(name, beacon_data, **kwargs): else: # Attempt to load the beacon module so we 
have access to the validate function try: + eventer = salt.utils.event.get_event('minion', opts=__opts__) + res = __salt__['event.fire']({'name': name, 'func': 'available_beacons'}, 'manage_beacons') + if res: + event_ret = eventer.get_event(tag='/salt/minion/minion_available_beacons', wait=30) + log.debug('=== event_ret {} ==='.format(event_ret)) + beacon_module = __import__('salt.beacons.' + name, fromlist=['validate']) log.debug('Successfully imported beacon.') except ImportError: From 803247f84228d5da9a318ffe9313094c4dc7cfec Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Wed, 23 Aug 2017 08:30:09 -0700 Subject: [PATCH 194/639] Adding list_available to show loaded & available beacons on the minion. Swapping out the way we validate beacon configuration to be able to support adding & modifying custom beacons. --- salt/beacons/__init__.py | 36 +++++++--- salt/minion.py | 2 + salt/modules/beacons.py | 138 +++++++++++++++++++++++++-------------- 3 files changed, 117 insertions(+), 59 deletions(-) diff --git a/salt/beacons/__init__.py b/salt/beacons/__init__.py index c761a28a79..f53488e1bf 100644 --- a/salt/beacons/__init__.py +++ b/salt/beacons/__init__.py @@ -214,26 +214,42 @@ class Beacon(object): return True + def list_available_beacons(self): + ''' + List the available beacons + ''' + _beacons = ['{0}'.format(_beacon.replace('.beacon', '')) + for _beacon in list(self.beacons) if '.beacon' in _beacon] + + # Fire the complete event back along with the list of beacons + evt = salt.utils.event.get_event('minion', opts=self.opts) + evt.fire_event({'complete': True, 'beacons': _beacons}, + tag='/salt/minion/minion_beacons_list_available_complete') + + return True + def validate_beacon(self, name, beacon_data): ''' Return available beacon functions ''' - validate_str = '{}.validate' + validate_str = '{}.validate'.format(name) # Run the validate function if it's available, # otherwise there is a warning about it being missing if validate_str in self.beacons: 
- valid, vcomment = self.beacons[validate_str](b_config[mod]) - - if not valid: - log.info('Beacon %s configuration invalid, ' - 'not running.\n%s', mod, vcomment) - continue + if 'enabled' in beacon_data: + del beacon_data['enabled'] + valid, vcomment = self.beacons[validate_str](beacon_data) + else: + log.info('Beacon {0} does not have a validate' + ' function, skipping validation.'.format(name)) + valid = True # Fire the complete event back along with the list of beacons evt = salt.utils.event.get_event('minion', opts=self.opts) - log.debug('=== self.beacons {} ==='.format(list(self.beacons))) - evt.fire_event({'complete': True, 'beacons': list(self.beacons)}, - tag='/salt/minion/minion_available_beacons') + evt.fire_event({'complete': True, + 'vcomment': vcomment, + 'valid': valid}, + tag='/salt/minion/minion_beacon_validation_complete') return True diff --git a/salt/minion.py b/salt/minion.py index 3faeee35e6..e1c5f38464 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -1930,6 +1930,8 @@ class Minion(MinionBase): self.beacons.disable_beacon(name) elif func == u'list': self.beacons.list_beacons() + elif func == u'list_available': + self.beacons.list_available_beacons() elif func == u'validate_beacon': self.beacons.validate_beacon(name, beacon_data) diff --git a/salt/modules/beacons.py b/salt/modules/beacons.py index 3bb42b7684..49e0e390f9 100644 --- a/salt/modules/beacons.py +++ b/salt/modules/beacons.py @@ -11,6 +11,7 @@ from __future__ import absolute_import import difflib import logging import os +import six import yaml # Import Salt libs @@ -69,6 +70,47 @@ def list_(return_yaml=True): return {'beacons': {}} +def list_available(return_yaml=True): + ''' + List the beacons currently available on the minion + + :param return_yaml: Whether to return YAML formatted output, default True + :return: List of currently configured Beacons. + + CLI Example: + + .. 
code-block:: bash + + salt '*' beacons.list_available + + ''' + beacons = None + + try: + eventer = salt.utils.event.get_event('minion', opts=__opts__) + res = __salt__['event.fire']({'func': 'list_available'}, 'manage_beacons') + if res: + event_ret = eventer.get_event(tag='/salt/minion/minion_beacons_list_available_complete', wait=30) + if event_ret and event_ret['complete']: + beacons = event_ret['beacons'] + except KeyError: + # Effectively a no-op, since we can't really return without an event system + ret = {} + ret['result'] = False + ret['comment'] = 'Event module not available. Beacon add failed.' + return ret + + if beacons: + if return_yaml: + tmp = {'beacons': beacons} + yaml_out = yaml.safe_dump(tmp, default_flow_style=False) + return yaml_out + else: + return beacons + else: + return {'beacons': {}} + + def add(name, beacon_data, **kwargs): ''' Add a beacon on the minion @@ -95,43 +137,34 @@ def add(name, beacon_data, **kwargs): ret['result'] = True ret['comment'] = 'Beacon: {0} would be added.'.format(name) else: - # Attempt to load the beacon module so we have access to the validate function try: + # Attempt to load the beacon module so we have access to the validate function eventer = salt.utils.event.get_event('minion', opts=__opts__) - res = __salt__['event.fire']({'name': name, 'func': 'available_beacons'}, 'manage_beacons') + res = __salt__['event.fire']({'name': name, + 'beacon_data': beacon_data, + 'func': 'validate_beacon'}, + 'manage_beacons') if res: - event_ret = eventer.get_event(tag='/salt/minion/minion_available_beacons', wait=30) - log.debug('=== event_ret {} ==='.format(event_ret)) + event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_validation_complete', wait=30) + valid = event_ret['valid'] + vcomment = event_ret['vcomment'] - beacon_module = __import__('salt.beacons.' 
+ name, fromlist=['validate']) - log.debug('Successfully imported beacon.') - except ImportError: - ret['comment'] = 'Beacon {0} does not exist'.format(name) - return ret + if not valid: + ret['result'] = False + ret['comment'] = ('Beacon {0} configuration invalid, ' + 'not adding.\n{1}'.format(name, vcomment)) + return ret - # Attempt to validate - if hasattr(beacon_module, 'validate'): - _beacon_data = beacon_data - if 'enabled' in _beacon_data: - del _beacon_data['enabled'] - valid, vcomment = beacon_module.validate(_beacon_data) - else: - log.info('Beacon {0} does not have a validate' - ' function, skipping validation.'.format(name)) - valid = True - - if not valid: - ret['result'] = False - ret['comment'] = ('Beacon {0} configuration invalid, ' - 'not adding.\n{1}'.format(name, vcomment)) - return ret + except KeyError: + # Effectively a no-op, since we can't really return without an event system + ret['comment'] = 'Event module not available. Beacon add failed.' try: - eventer = salt.utils.event.get_event('minion', opts=__opts__) - res = __salt__['event.fire']({'name': name, 'beacon_data': beacon_data, 'func': 'add'}, 'manage_beacons') + res = __salt__['event.fire']({'name': name, + 'beacon_data': beacon_data, + 'func': 'add'}, 'manage_beacons') if res: event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_add_complete', wait=30) - log.debug('=== event_ret {} ==='.format(event_ret)) if event_ret and event_ret['complete']: beacons = event_ret['beacons'] if name in beacons and beacons[name] == beacon_data: @@ -171,29 +204,32 @@ def modify(name, beacon_data, **kwargs): ret['result'] = True ret['comment'] = 'Beacon: {0} would be added.'.format(name) else: - # Attempt to load the beacon module so we have access to the validate function try: - beacon_module = __import__('salt.beacons.' 
+ name, fromlist=['validate']) - log.debug('Successfully imported beacon.') - except ImportError: - ret['comment'] = 'Beacon {0} does not exist'.format(name) - return ret + # Attempt to load the beacon module so we have access to the validate function + eventer = salt.utils.event.get_event('minion', opts=__opts__) + res = __salt__['event.fire']({'name': name, + 'beacon_data': beacon_data, + 'func': 'validate_beacon'}, + 'manage_beacons') + if res: + event_ret = eventer.get_event(tag='/salt/minion/minion_beacon_validation_complete', wait=30) + valid = event_ret['valid'] + vcomment = event_ret['vcomment'] - # Attempt to validate - if hasattr(beacon_module, 'validate'): - _beacon_data = beacon_data - if 'enabled' in _beacon_data: - del _beacon_data['enabled'] - valid, vcomment = beacon_module.validate(_beacon_data) - else: - log.info('Beacon {0} does not have a validate' - ' function, skipping validation.'.format(name)) - valid = True + if not valid: + ret['result'] = False + ret['comment'] = ('Beacon {0} configuration invalid, ' + 'not adding.\n{1}'.format(name, vcomment)) + return ret + + except KeyError: + # Effectively a no-op, since we can't really return without an event system + ret['comment'] = 'Event module not available. Beacon modify failed.' 
if not valid: ret['result'] = False ret['comment'] = ('Beacon {0} configuration invalid, ' - 'not adding.\n{1}'.format(name, vcomment)) + 'not modifying.\n{1}'.format(name, vcomment)) return ret _current = current_beacons[name] @@ -203,10 +239,14 @@ def modify(name, beacon_data, **kwargs): ret['comment'] = 'Job {0} in correct state'.format(name) return ret - _current_lines = ['{0}:{1}\n'.format(key, value) - for (key, value) in sorted(_current.items())] - _new_lines = ['{0}:{1}\n'.format(key, value) - for (key, value) in sorted(_new.items())] + _current_lines = [] + for _item in _current: + _current_lines.extend(['{0}:{1}\n'.format(key, value) + for (key, value) in six.iteritems(_current[0])]) + _new_lines = [] + for _item in _new: + _new_lines.extend(['{0}:{1}\n'.format(key, value) + for (key, value) in six.iteritems(_new[0])]) _diff = difflib.unified_diff(_current_lines, _new_lines) ret['changes'] = {} From ba7e92c113f27690abfbdae3ca84c800a45d2f99 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Wed, 23 Aug 2017 09:07:15 -0700 Subject: [PATCH 195/639] Swapping out the first item list for the iterated item. --- salt/modules/beacons.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/beacons.py b/salt/modules/beacons.py index 49e0e390f9..5749baa3d0 100644 --- a/salt/modules/beacons.py +++ b/salt/modules/beacons.py @@ -242,11 +242,11 @@ def modify(name, beacon_data, **kwargs): _current_lines = [] for _item in _current: _current_lines.extend(['{0}:{1}\n'.format(key, value) - for (key, value) in six.iteritems(_current[0])]) + for (key, value) in six.iteritems(_item)]) _new_lines = [] for _item in _new: _new_lines.extend(['{0}:{1}\n'.format(key, value) - for (key, value) in six.iteritems(_new[0])]) + for (key, value) in six.iteritems(_item)]) _diff = difflib.unified_diff(_current_lines, _new_lines) ret['changes'] = {} From d0307ff07073bcfa7561342ee190688c6302efcf Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Wed, 23 Aug 2017 12:14:14 -0700 Subject: [PATCH 196/639] Fixing lint issues --- salt/modules/beacons.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/beacons.py b/salt/modules/beacons.py index 5749baa3d0..7e095ed656 100644 --- a/salt/modules/beacons.py +++ b/salt/modules/beacons.py @@ -11,10 +11,10 @@ from __future__ import absolute_import import difflib import logging import os -import six import yaml # Import Salt libs +import salt.ext.six as six import salt.utils.event import salt.utils.files from salt.ext.six.moves import map From 41790d4f28bc9c0676bfea7021aacaeaa3838e17 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Wed, 23 Aug 2017 16:15:41 -0700 Subject: [PATCH 197/639] Missing the test_add Beacon test. --- tests/unit/modules/test_beacons.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/unit/modules/test_beacons.py b/tests/unit/modules/test_beacons.py index 708221d638..6706866bb9 100644 --- a/tests/unit/modules/test_beacons.py +++ b/tests/unit/modules/test_beacons.py @@ -59,6 +59,10 @@ class BeaconsTestCase(TestCase, LoaderModuleMockMixin): event_returns = [{'complete': True, 'tag': '/salt/minion/minion_beacons_list_complete', 'beacons': {}}, + {'complete': True, + 'valid': True, + 'vcomment': '', + 'tag': '/salt/minion/minion_beacons_list_complete'}, {'complete': True, 'tag': '/salt/minion/minion_beacon_add_complete', 'beacons': {'ps': [{'processes': {'salt-master': 'stopped', 'apache2': 'stopped'}}]}}] From ae4946c28fedf6222bd191306a39470de1cf7c7e Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Thu, 24 Aug 2017 10:23:50 -0700 Subject: [PATCH 198/639] Removing unnecessary call to list(), updating all logging statements for printf-style. 
--- salt/beacons/__init__.py | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/salt/beacons/__init__.py b/salt/beacons/__init__.py index f53488e1bf..4d4e2cde69 100644 --- a/salt/beacons/__init__.py +++ b/salt/beacons/__init__.py @@ -59,7 +59,7 @@ class Beacon(object): if 'enabled' in current_beacon_config: if not current_beacon_config['enabled']: - log.trace('Beacon {0} disabled'.format(mod)) + log.trace('Beacon %s disabled', mod) continue else: # remove 'enabled' item before processing the beacon @@ -68,7 +68,7 @@ class Beacon(object): else: self._remove_list_item(config[mod], 'enabled') - log.trace('Beacon processing: {0}'.format(mod)) + log.trace('Beacon processing: %s', mod) fun_str = '{0}.beacon'.format(mod) validate_str = '{0}.validate'.format(mod) if fun_str in self.beacons: @@ -77,10 +77,10 @@ class Beacon(object): if interval: b_config = self._trim_config(b_config, mod, 'interval') if not self._process_interval(mod, interval): - log.trace('Skipping beacon {0}. Interval not reached.'.format(mod)) + log.trace('Skipping beacon %s. Interval not reached.', mod) continue if self._determine_beacon_config(current_beacon_config, 'disable_during_state_run'): - log.trace('Evaluting if beacon {0} should be skipped due to a state run.'.format(mod)) + log.trace('Evaluting if beacon %s should be skipped due to a state run.', mod) b_config = self._trim_config(b_config, mod, 'disable_during_state_run') is_running = False running_jobs = salt.utils.minion.running(self.opts) @@ -90,10 +90,10 @@ class Beacon(object): if is_running: close_str = '{0}.close'.format(mod) if close_str in self.beacons: - log.info('Closing beacon {0}. State run in progress.'.format(mod)) + log.info('Closing beacon %s. State run in progress.', mod) self.beacons[close_str](b_config[mod]) else: - log.info('Skipping beacon {0}. State run in progress.'.format(mod)) + log.info('Skipping beacon %s. 
State run in progress.', mod) continue # Update __grains__ on the beacon self.beacons[fun_str].__globals__['__grains__'] = grains @@ -120,7 +120,7 @@ class Beacon(object): if runonce: self.disable_beacon(mod) else: - log.warning('Unable to process beacon {0}'.format(mod)) + log.warning('Unable to process beacon %s', mod) return ret def _trim_config(self, b_config, mod, key): @@ -149,19 +149,19 @@ class Beacon(object): Process beacons with intervals Return True if a beacon should be run on this loop ''' - log.trace('Processing interval {0} for beacon mod {1}'.format(interval, mod)) + log.trace('Processing interval %s for beacon mod %s', interval, mod) loop_interval = self.opts['loop_interval'] if mod in self.interval_map: log.trace('Processing interval in map') counter = self.interval_map[mod] - log.trace('Interval counter: {0}'.format(counter)) + log.trace('Interval counter: %s', counter) if counter * loop_interval >= interval: self.interval_map[mod] = 1 return True else: self.interval_map[mod] += 1 else: - log.trace('Interval process inserting mod: {0}'.format(mod)) + log.trace('Interval process inserting mod: %s', mod) self.interval_map[mod] = 1 return False @@ -219,7 +219,7 @@ class Beacon(object): List the available beacons ''' _beacons = ['{0}'.format(_beacon.replace('.beacon', '')) - for _beacon in list(self.beacons) if '.beacon' in _beacon] + for _beacon in self.beacons if '.beacon' in _beacon] # Fire the complete event back along with the list of beacons evt = salt.utils.event.get_event('minion', opts=self.opts) @@ -240,8 +240,8 @@ class Beacon(object): del beacon_data['enabled'] valid, vcomment = self.beacons[validate_str](beacon_data) else: - log.info('Beacon {0} does not have a validate' - ' function, skipping validation.'.format(name)) + log.info('Beacon %s does not have a validate' + ' function, skipping validation.', name) valid = True # Fire the complete event back along with the list of beacons @@ -263,9 +263,9 @@ class Beacon(object): if name in 
self.opts['beacons']: log.info('Updating settings for beacon ' - 'item: {0}'.format(name)) + 'item: %s', name) else: - log.info('Added new beacon item {0}'.format(name)) + log.info('Added new beacon item %s', name) self.opts['beacons'].update(data) # Fire the complete event back along with updated list of beacons @@ -284,7 +284,7 @@ class Beacon(object): data[name] = beacon_data log.info('Updating settings for beacon ' - 'item: {0}'.format(name)) + 'item: %s', name) self.opts['beacons'].update(data) # Fire the complete event back along with updated list of beacons @@ -300,7 +300,7 @@ class Beacon(object): ''' if name in self.opts['beacons']: - log.info('Deleting beacon item {0}'.format(name)) + log.info('Deleting beacon item %s', name) del self.opts['beacons'][name] # Fire the complete event back along with updated list of beacons From a0bb654e4695868ffd970717ad7a482aa0887ebb Mon Sep 17 00:00:00 2001 From: garethgreenaway Date: Thu, 24 Aug 2017 12:09:29 -0700 Subject: [PATCH 199/639] Fixing lint issues --- tests/unit/modules/test_dockermod.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/modules/test_dockermod.py b/tests/unit/modules/test_dockermod.py index 24a6d3a3df..6d078d23b7 100644 --- a/tests/unit/modules/test_dockermod.py +++ b/tests/unit/modules/test_dockermod.py @@ -705,9 +705,9 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): def _inspect_container_effect(id_): return { 'container1': {'Config': {'Image': 'realimage:latest'}, - 'HostConfig':{}}, + 'HostConfig': {}}, 'container2': {'Config': {'Image': 'image_id'}, - 'HostConfig':{}}, + 'HostConfig': {}}, }[id_] def _inspect_image_effect(id_): From 4f8e6c65e599d8bdb7cbfb1bf5990be6f9354ec3 Mon Sep 17 00:00:00 2001 From: Justin Bradfield Date: Thu, 24 Aug 2017 16:06:39 -0400 Subject: [PATCH 200/639] access safe_filename_leaf through utils.files, changed in #43172 --- salt/state.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/state.py 
b/salt/state.py index ae2ba0c6db..0d35b98276 100644 --- a/salt/state.py +++ b/salt/state.py @@ -37,6 +37,7 @@ import salt.utils.dictupdate import salt.utils.event import salt.utils.url import salt.utils.process +import salt.utils.files import salt.syspaths as syspaths from salt.utils import immutabletypes from salt.template import compile_template, compile_template_str @@ -150,7 +151,7 @@ def _clean_tag(tag): ''' Make tag name safe for filenames ''' - return salt.utils.safe_filename_leaf(tag) + return salt.utils.files.safe_filename_leaf(tag) def _l_tag(name, id_): From 3adf8ad04be2c3cddef1b7ad71c3d0327afaa63e Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 24 Aug 2017 15:26:55 -0500 Subject: [PATCH 201/639] Fix missed deprecation This removes an argument that was slated for deprecation in the Hydrogen release. --- salt/modules/aptpkg.py | 33 +++++++-------------------------- 1 file changed, 7 insertions(+), 26 deletions(-) diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py index c8a26a8e06..b044c4ead2 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py @@ -135,19 +135,6 @@ def _reconstruct_ppa_name(owner_name, ppa_name): return 'ppa:{0}/{1}'.format(owner_name, ppa_name) -def _get_repo(**kwargs): - ''' - Check the kwargs for either 'fromrepo' or 'repo' and return the value. - 'fromrepo' takes precedence over 'repo'. - ''' - for key in ('fromrepo', 'repo'): - try: - return kwargs[key] - except KeyError: - pass - return '' - - def _check_apt(): ''' Abort if python-apt is not installed @@ -242,18 +229,11 @@ def latest_version(*names, **kwargs): ''' refresh = salt.utils.is_true(kwargs.pop('refresh', True)) show_installed = salt.utils.is_true(kwargs.pop('show_installed', False)) - if 'repo' in kwargs: - # Remember to kill _get_repo() too when removing this warning. - salt.utils.warn_until( - 'Hydrogen', - 'The \'repo\' argument to apt.latest_version is deprecated, and ' - 'will be removed in Salt {version}. 
Please use \'fromrepo\' ' - 'instead.' + raise SaltInvocationError( + 'The \'repo\' argument is invalid, use \'fromrepo\' instead' ) - fromrepo = _get_repo(**kwargs) - kwargs.pop('fromrepo', None) - kwargs.pop('repo', None) + fromrepo = kwargs.pop('fromrepo', None) cache_valid_time = kwargs.pop('cache_valid_time', 0) if len(names) == 0: @@ -1380,9 +1360,10 @@ def _get_upgradable(dist_upgrade=True, **kwargs): cmd.append('dist-upgrade') else: cmd.append('upgrade') - fromrepo = _get_repo(**kwargs) - if fromrepo: - cmd.extend(['-o', 'APT::Default-Release={0}'.format(fromrepo)]) + try: + cmd.extend(['-o', 'APT::Default-Release={0}'.format(kwargs['fromrepo'])]) + except KeyError: + pass call = __salt__['cmd.run_all'](cmd, python_shell=False, From 62d10b1b3809e239145a5e7bbca48ca7b525afc6 Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 24 Aug 2017 17:00:55 -0400 Subject: [PATCH 202/639] Deprecate Authorize class and any_auth function The Authorize class in salt.auth is not used in Salt code any where and is dead code. This puts a deprecation path on the class. The salt.utils.minions.CkMinions.any_auth functions is also marked for deprecation. This function is only called by the "rights_check" function in the Authorize class. Since the Authorize class will be removed in Salt Neon, then the any_auth function should be removed in Neon as well. 
--- salt/auth/__init__.py | 8 ++++++++ salt/utils/minions.py | 7 +++++++ 2 files changed, 15 insertions(+) diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py index 0abd532388..96a8786daa 100644 --- a/salt/auth/__init__.py +++ b/salt/auth/__init__.py @@ -31,6 +31,7 @@ import salt.transport.client import salt.utils import salt.utils.files import salt.utils.minions +import salt.utils.versions import salt.payload log = logging.getLogger(__name__) @@ -410,6 +411,13 @@ class Authorize(object): The authorization engine used by EAUTH ''' def __init__(self, opts, load, loadauth=None): + salt.utils.versions.warn_until( + 'Neon', + 'The \'Authorize\' class has been deprecated. Please use the ' + '\'LoadAuth\', \'Reslover\', or \'AuthUser\' classes instead. ' + 'Support for the \'Authorze\' class will be removed in Salt ' + '{version}.' + ) self.opts = salt.config.master_config(opts['conf_file']) self.load = load self.ckminions = salt.utils.minions.CkMinions(opts) diff --git a/salt/utils/minions.py b/salt/utils/minions.py index b0889cb1e8..43f875dc24 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -723,6 +723,13 @@ class CkMinions(object): ''' Read in the form and determine which auth check routine to execute ''' + # This function is only called from salt.auth.Authorize(), which is also + # deprecated and will be removed in Neon. + salt.utils.versions.warn_until( + 'Neon', + 'The \'any_auth\' function has been deprecated. Support for this ' + 'function will be removed in Salt {version}.' 
+ ) if form == 'publish': return self.auth_check( auth_list, From 2d08d993e49060d3d04d579e7acb32be14745628 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 24 Aug 2017 15:30:27 -0600 Subject: [PATCH 203/639] Add grains to detect default gateway --- salt/grains/core.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/salt/grains/core.py b/salt/grains/core.py index f635143fb6..ac70fb7206 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -2423,4 +2423,33 @@ def get_master(): # master return {'master': __opts__.get('master', '')} + +def default_gateway(): + ''' + Populates grains which describe whether a server has a default gateway + configured or not. Uses `ip -4 route show` and `ip -6 route show` and greps + for a `default` at the beginning of any line. + + If the `ip` command is unavailable, no grains will be populated. + + List of grains: + + ip4_gw: True # True/False if default ipv4 gateway + ip6_gw: True # True/False if default ipv6 gateway + ip_gw: True # True if either of the above is True, False otherwise + ''' + grains = {} + if not salt.utils.which('ip'): + return {} + grains['ip_gw'] = False + grains['ip4_gw'] = False + grains['ip6_gw'] = False + if __salt__['cmd.run']('ip -4 route show | grep "^default"', python_shell=True): + grains['ip_gw'] = True + grains['ip4_gw'] = True + if __salt__['cmd.run']('ip -6 route show | grep "^default"', python_shell=True): + grains['ip_gw'] = True + grains['ip6_gw'] = True + return grains + # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 From 19e87799ef7adc9e52d7fdd1db48c3557fc1010d Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 24 Aug 2017 15:55:30 -0600 Subject: [PATCH 204/639] Add basepi to userBlacklist for mention bot --- .mention-bot | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.mention-bot b/.mention-bot index b6ed4f2b24..86890cfde0 100644 --- a/.mention-bot +++ b/.mention-bot @@ -7,6 +7,6 @@ } ], "skipTitle": "Merge forward", - 
"userBlacklist": ["cvrebert", "markusgattol", "olliewalsh"] + "userBlacklist": ["cvrebert", "markusgattol", "olliewalsh", "basepi"] } From 2640833400835afa6b9598d818c8bdef7d043856 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 24 Aug 2017 15:00:26 -0500 Subject: [PATCH 205/639] git.detached: Fix traceback when rev is a SHA and is not present locally This catches the (expected) CommandExecutionError when git.describe is run on a commit that doesn't exist. This also renames the remote_ref_type variable to remote_rev_type. This change was made for the 2017.7 branch, but this fix is being applied to 2016.11, so by making the rename here as well we will avoid a potential bug from creeping in on a merge-forward. --- salt/states/git.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/salt/states/git.py b/salt/states/git.py index 0654d46d19..428d256b41 100644 --- a/salt/states/git.py +++ b/salt/states/git.py @@ -2221,11 +2221,11 @@ def detached(name, return ret # Determine if supplied ref is a hash - remote_ref_type = 'ref' + remote_rev_type = 'ref' if len(ref) <= 40 \ and all(x in string.hexdigits for x in ref): ref = ref.lower() - remote_ref_type = 'hash' + remote_rev_type = 'hash' comments = [] hash_exists_locally = False @@ -2238,13 +2238,18 @@ def detached(name, local_commit_id = _get_local_rev_and_branch(target, user, password)[0] - if remote_ref_type is 'hash' \ - and __salt__['git.describe'](target, - ref, - user=user, - password=password): - # The ref is a hash and it exists locally so skip to checkout - hash_exists_locally = True + if remote_rev_type is 'hash': + try: + __salt__['git.describe'](target, + ref, + user=user, + password=password, + ignore_retcode=True) + except CommandExecutionError: + hash_exists_locally = False + else: + # The rev is a hash and it exists locally so skip to checkout + hash_exists_locally = True else: # Check that remote is present and set to correct url remotes = 
__salt__['git.remotes'](target, @@ -2409,7 +2414,7 @@ def detached(name, #get refs and checkout checkout_commit_id = '' - if remote_ref_type is 'hash': + if remote_rev_type is 'hash': if __salt__['git.describe'](target, ref, user=user, password=password): checkout_commit_id = ref else: From 0186835ebfa8223ab2b2eef2f85f5266a9f4e268 Mon Sep 17 00:00:00 2001 From: garethgreenaway Date: Thu, 24 Aug 2017 12:09:29 -0700 Subject: [PATCH 206/639] Fix docstring in test --- tests/unit/modules/test_dockermod.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/unit/modules/test_dockermod.py b/tests/unit/modules/test_dockermod.py index 6d078d23b7..a774e23d32 100644 --- a/tests/unit/modules/test_dockermod.py +++ b/tests/unit/modules/test_dockermod.py @@ -700,7 +700,8 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): def test_compare_container_image_id_resolution(self): ''' - Compare + Test comparing two containers when one's inspect output is an ID and + not formatted in image:tag notation. 
''' def _inspect_container_effect(id_): return { From 081f42ad7148c5dcb441fea3d9970de18dfe0eb4 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 24 Aug 2017 17:27:58 -0500 Subject: [PATCH 207/639] docker.compare_container: Perform boolean comparison when one side's value is null/None --- salt/modules/dockermod.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py index a6cd82ce8e..aa02b6452c 100644 --- a/salt/modules/dockermod.py +++ b/salt/modules/dockermod.py @@ -899,7 +899,7 @@ def compare_container(first, second, ignore=None): continue val1 = result1[conf_dict][item] val2 = result2[conf_dict].get(item) - if item in ('OomKillDisable',): + if item in ('OomKillDisable',) or (val1 is None or val2 is None): if bool(val1) != bool(val2): ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} else: @@ -917,7 +917,7 @@ def compare_container(first, second, ignore=None): continue val1 = result1[conf_dict].get(item) val2 = result2[conf_dict][item] - if item in ('OomKillDisable',): + if item in ('OomKillDisable',) or (val1 is None or val2 is None): if bool(val1) != bool(val2): ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2} else: From 7279f98e9276f77bef074062e8f718c8065093ba Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 24 Aug 2017 14:26:28 -0500 Subject: [PATCH 208/639] docker_image states: Handle Hub images prefixed with "docker.io/" On some platforms, for reason which I do not yet grok, images pulled from the Hub are prefixed with "docker.io/". This causes the docker_image states to fail unless the user manually adds "docker.io/" before the image name. This commit adds a new function called "docker.resolve_tag" which disambiguates this variance and allows images to be specified without the "docker.io/" prefix. Resolves #42935. 
--- salt/modules/dockermod.py | 38 +++++++++++++++ salt/states/docker_image.py | 67 ++++++++++++-------------- tests/unit/modules/test_dockermod.py | 25 +++++++++- tests/unit/states/test_docker_image.py | 39 ++++++--------- 4 files changed, 108 insertions(+), 61 deletions(-) diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py index 3a10b20068..fa29f623ba 100644 --- a/salt/modules/dockermod.py +++ b/salt/modules/dockermod.py @@ -232,6 +232,7 @@ except ImportError: # pylint: enable=import-error HAS_NSENTER = bool(salt.utils.which('nsenter')) +HUB_PREFIX = 'docker.io/' # Set up logging log = logging.getLogger(__name__) @@ -1486,6 +1487,43 @@ def list_tags(): return sorted(ret) +def resolve_tag(name, tags=None): + ''' + .. versionadded:: 2017.7.2,Oxygen + + Given an image tag, check the locally-pulled tags (using + :py:func:`docker.list_tags `) and return + the matching tag. This helps disambiguate differences on some platforms + where images from the Docker Hub are prefixed with ``docker.io/``. If an + image name with no tag is passed, a tag of ``latest`` is assumed. + + If the specified image is not pulled locally, this function will return + ``False``. + + tags + An optional Python list of tags to check against. If passed, then + :py:func:`docker.list_tags ` will not + be run to get a list of tags. This is useful when resolving a number of + tags at the same time. + + CLI Examples: + + .. code-block:: bash + + salt myminion docker.resolve_tag busybox + salt myminion docker.resolve_tag busybox:latest + ''' + tag_name = ':'.join(salt.utils.docker.get_repo_tag(name)) + if tags is None: + tags = list_tags() + if tag_name in tags: + return tag_name + full_name = HUB_PREFIX + tag_name + if not name.startswith(HUB_PREFIX) and full_name in tags: + return full_name + return False + + def logs(name): ''' Returns the logs for the container. 
Equivalent to running the ``docker diff --git a/salt/states/docker_image.py b/salt/states/docker_image.py index e3c1a37779..087ce8f693 100644 --- a/salt/states/docker_image.py +++ b/salt/states/docker_image.py @@ -135,13 +135,14 @@ def present(name, .. versionadded:: 2016.11.0 sls - Allow for building images with ``dockerng.sls_build`` by specify the - SLS files to build with. This can be a list or comma-seperated string. + Allow for building of image with :py:func:`docker.sls_build + ` by specifying the SLS files with + which to build. This can be a list or comma-seperated string. .. code-block:: yaml myuser/myimage:mytag: - dockerng.image_present: + docker_image.present: - sls: - webapp1 - webapp2 @@ -151,12 +152,14 @@ def present(name, .. versionadded: 2017.7.0 base - Base image with which to start ``dockerng.sls_build`` + Base image with which to start :py:func:`docker.sls_build + ` .. versionadded: 2017.7.0 saltenv - environment from which to pull sls files for ``dockerng.sls_build``. + Environment from which to pull SLS files for :py:func:`docker.sls_build + ` .. versionadded: 2017.7.0 ''' @@ -169,11 +172,14 @@ def present(name, ret['comment'] = 'Only one of \'build\' or \'load\' is permitted.' 
return ret - # Ensure that we have repo:tag notation image = ':'.join(salt.utils.docker.get_repo_tag(name)) - all_tags = __salt__['docker.list_tags']() + resolved_tag = __salt__['docker.resolve_tag'](image) - if image in all_tags: + if resolved_tag is False: + # Specified image is not present + image_info = None + else: + # Specified image is present if not force: ret['result'] = True ret['comment'] = 'Image \'{0}\' already present'.format(name) @@ -185,8 +191,6 @@ def present(name, ret['comment'] = \ 'Unable to get info for image \'{0}\': {1}'.format(name, exc) return ret - else: - image_info = None if build or sls: action = 'built' @@ -197,15 +201,15 @@ def present(name, if __opts__['test']: ret['result'] = None - if (image in all_tags and force) or image not in all_tags: + if (resolved_tag is not False and force) or resolved_tag is False: ret['comment'] = 'Image \'{0}\' will be {1}'.format(name, action) return ret if build: try: image_update = __salt__['docker.build'](path=build, - image=image, - dockerfile=dockerfile) + image=image, + dockerfile=dockerfile) except Exception as exc: ret['comment'] = ( 'Encountered error building {0} as {1}: {2}' @@ -219,10 +223,10 @@ def present(name, if isinstance(sls, list): sls = ','.join(sls) try: - image_update = __salt__['dockerng.sls_build'](name=image, - base=base, - mods=sls, - saltenv=saltenv) + image_update = __salt__['docker.sls_build'](name=image, + base=base, + mods=sls, + saltenv=saltenv) except Exception as exc: ret['comment'] = ( 'Encountered error using sls {0} for building {1}: {2}' @@ -252,10 +256,8 @@ def present(name, client_timeout=client_timeout ) except Exception as exc: - ret['comment'] = ( - 'Encountered error pulling {0}: {1}' - .format(image, exc) - ) + ret['comment'] = \ + 'Encountered error pulling {0}: {1}'.format(image, exc) return ret if (image_info is not None and image_info['Id'][:12] == image_update .get('Layers', {}) @@ -267,7 +269,7 @@ def present(name, # Only add to the changes dict if 
layers were pulled ret['changes'] = image_update - ret['result'] = image in __salt__['docker.list_tags']() + ret['result'] = bool(__salt__['docker.resolve_tag'](image)) if not ret['result']: # This shouldn't happen, failure to pull should be caught above @@ -345,23 +347,16 @@ def absent(name=None, images=None, force=False): ret['comment'] = 'One of \'name\' and \'images\' must be provided' return ret elif images is not None: - targets = [] - for target in images: - try: - targets.append(':'.join(salt.utils.docker.get_repo_tag(target))) - except TypeError: - # Don't stomp on images with unicode characters in Python 2, - # only force image to be a str if it wasn't already (which is - # very unlikely). - targets.append(':'.join(salt.utils.docker.get_repo_tag(str(target)))) + targets = images elif name: - try: - targets = [':'.join(salt.utils.docker.get_repo_tag(name))] - except TypeError: - targets = [':'.join(salt.utils.docker.get_repo_tag(str(name)))] + targets = [name] pre_tags = __salt__['docker.list_tags']() - to_delete = [x for x in targets if x in pre_tags] + to_delete = [] + for target in targets: + resolved_tag = __salt__['docker.resolve_tag'](target, tags=pre_tags) + if resolved_tag is not False: + to_delete.append(resolved_tag) log.debug('targets = {0}'.format(targets)) log.debug('to_delete = {0}'.format(to_delete)) diff --git a/tests/unit/modules/test_dockermod.py b/tests/unit/modules/test_dockermod.py index a774e23d32..3d0328c180 100644 --- a/tests/unit/modules/test_dockermod.py +++ b/tests/unit/modules/test_dockermod.py @@ -679,9 +679,9 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual({"retcode": 0, "comment": "container cmd"}, ret) def test_images_with_empty_tags(self): - """ + ''' docker 1.12 reports also images without tags with `null`. 
- """ + ''' client = Mock() client.api_version = '1.24' client.images = Mock( @@ -724,3 +724,24 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): with patch.object(docker_mod, 'inspect_image', inspect_image_mock): ret = docker_mod.compare_container('container1', 'container2') self.assertEqual(ret, {}) + + def test_resolve_tag(self): + ''' + Test the resolve_tag function + ''' + with_prefix = 'docker.io/foo:latest' + no_prefix = 'bar:latest' + with patch.object(docker_mod, + 'list_tags', + MagicMock(return_value=[with_prefix])): + self.assertEqual(docker_mod.resolve_tag('foo'), with_prefix) + self.assertEqual(docker_mod.resolve_tag('foo:latest'), with_prefix) + self.assertEqual(docker_mod.resolve_tag(with_prefix), with_prefix) + self.assertEqual(docker_mod.resolve_tag('foo:bar'), False) + + with patch.object(docker_mod, + 'list_tags', + MagicMock(return_value=[no_prefix])): + self.assertEqual(docker_mod.resolve_tag('bar'), no_prefix) + self.assertEqual(docker_mod.resolve_tag(no_prefix), no_prefix) + self.assertEqual(docker_mod.resolve_tag('bar:baz'), False) diff --git a/tests/unit/states/test_docker_image.py b/tests/unit/states/test_docker_image.py index 4d94c2e239..868925ba3d 100644 --- a/tests/unit/states/test_docker_image.py +++ b/tests/unit/states/test_docker_image.py @@ -10,7 +10,7 @@ from __future__ import absolute_import from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import skipIf, TestCase from tests.support.mock import ( - Mock, + MagicMock, NO_MOCK, NO_MOCK_REASON, patch @@ -50,21 +50,19 @@ class DockerImageTestCase(TestCase, LoaderModuleMockMixin): if ``image:latest`` is already downloaded locally the state should not report changes. 
''' - docker_inspect_image = Mock( - return_value={'Id': 'abcdefghijk'}) - docker_pull = Mock( + docker_inspect_image = MagicMock(return_value={'Id': 'abcdefghijkl'}) + docker_pull = MagicMock( return_value={'Layers': - {'Already_Pulled': ['abcdefghijk'], + {'Already_Pulled': ['abcdefghijkl'], 'Pulled': []}, 'Status': 'Image is up to date for image:latest', 'Time_Elapsed': 1.1}) - docker_list_tags = Mock( - return_value=['image:latest'] - ) + docker_list_tags = MagicMock(return_value=['image:latest']) + docker_resolve_tag = MagicMock(return_value='image:latest') __salt__ = {'docker.list_tags': docker_list_tags, 'docker.pull': docker_pull, 'docker.inspect_image': docker_inspect_image, - } + 'docker.resolve_tag': docker_resolve_tag} with patch.dict(docker_state.__dict__, {'__salt__': __salt__}): ret = docker_state.present('image:latest', force=True) @@ -89,29 +87,24 @@ class DockerImageTestCase(TestCase, LoaderModuleMockMixin): if ``image:latest`` is not downloaded and force is true should pull a new image successfuly. 
''' - docker_inspect_image = Mock( - side_effect=CommandExecutionError( - 'Error 404: No such image/container: image:latest')) - docker_pull = Mock( + docker_inspect_image = MagicMock(return_value={'Id': '1234567890ab'}) + docker_pull = MagicMock( return_value={'Layers': - {'Already_Pulled': ['abcdefghijk'], - 'Pulled': ['abcdefghijk']}, - 'Status': "Image 'image:latest' was pulled", - 'Time_Elapsed': 1.1}) - docker_list_tags = Mock( - side_effect=[[], ['image:latest']] - ) + {'Pulled': ['abcdefghijkl']}, + 'Status': "Image 'image:latest' was pulled", + 'Time_Elapsed': 1.1}) + docker_list_tags = MagicMock(side_effect=[[], ['image:latest']]) + docker_resolve_tag = MagicMock(return_value='image:latest') __salt__ = {'docker.list_tags': docker_list_tags, 'docker.pull': docker_pull, 'docker.inspect_image': docker_inspect_image, - } + 'docker.resolve_tag': docker_resolve_tag} with patch.dict(docker_state.__dict__, {'__salt__': __salt__}): ret = docker_state.present('image:latest', force=True) self.assertEqual(ret, {'changes': { - 'Layers': {'Already_Pulled': ['abcdefghijk'], - 'Pulled': ['abcdefghijk']}, + 'Layers': {'Pulled': ['abcdefghijkl']}, 'Status': "Image 'image:latest' was pulled", 'Time_Elapsed': 1.1}, 'result': True, From fbbb36fe047858c66251e2f08b597531c30c0544 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 28 Jul 2017 13:34:07 -0400 Subject: [PATCH 209/639] Added RemotePillarMixin containing remote pillar common functionality - retrieve the config data to be sent to external pillar functions --- salt/pillar/__init__.py | 50 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index f93f4eea98..168428af04 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -23,6 +23,7 @@ import salt.transport import salt.utils.url import salt.utils.cache import salt.utils.crypt +import salt.utils.dictupdate from salt.exceptions import 
SaltClientError from salt.template import compile_template from salt.utils.dictupdate import merge @@ -72,7 +73,54 @@ def get_async_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None 'local': AsyncPillar, }.get(file_client, AsyncPillar) return ptype(opts, grains, minion_id, saltenv, ext, functions=funcs, - pillar_override=pillar_override, pillarenv=pillarenv) + pillar_override=pillar_override, pillarenv=pillarenv, + extra_minion_data=extra_minion_data) + + +class RemotePillarMixin(object): + ''' + Common remote pillar functionality + ''' + def get_ext_pillar_extra_minion_data(self, opts): + ''' + Returns the extra data from the minion's opts dict (the config file). + + This data will be passed to external pillar functions. + ''' + def get_subconfig(opts_key): + ''' + Returns a dict containing the opts key subtree, while maintaining + the opts structure + ''' + ret_dict = aux_dict = {} + config_val = opts + subkeys = opts_key.split(':') + # Build an empty dict with the opts path + for subkey in subkeys[:-1]: + aux_dict[subkey] = {} + aux_dict = aux_dict[subkey] + if not config_val.get(subkey): + # The subkey is not in the config + return {} + config_val = config_val[subkey] + if subkeys[-1] not in config_val: + return {} + aux_dict[subkeys[-1]] = config_val[subkeys[-1]] + return ret_dict + + extra_data = {} + if 'pass_to_ext_pillars' in opts: + if not isinstance(opts['pass_to_ext_pillars'], list): + log.exception('\'pass_to_ext_pillars\' config is malformed.') + raise SaltClientError('\'pass_to_ext_pillars\' config is ' + 'malformed.') + for key in opts['pass_to_ext_pillars']: + salt.utils.dictupdate.update(extra_data, + get_subconfig(key), + recursive_update=True, + merge_lists=True) + log.trace('ext_pillar_extra_data = {0}'.format(extra_data)) + return pillar_override class AsyncRemotePillar(object): From 3dbd5b5eaa4d225bb28ed659950fe477a2d58159 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 21 Aug 2017 06:35:38 -0400 Subject: [PATCH 
210/639] Inherited RemotePillarMixin and added sending the extra minion data when requesting the pillar --- salt/pillar/__init__.py | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index 168428af04..13a4a5d97c 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -120,15 +120,15 @@ class RemotePillarMixin(object): recursive_update=True, merge_lists=True) log.trace('ext_pillar_extra_data = {0}'.format(extra_data)) - return pillar_override + return extra_data -class AsyncRemotePillar(object): +class AsyncRemotePillar(RemotePillarMixin): ''' Get the pillar from the master ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, - pillar_override=None, pillarenv=None): + pillar_override=None, pillarenv=None, extra_minion_data=None): self.opts = opts self.opts['environment'] = saltenv self.ext = ext @@ -141,6 +141,14 @@ class AsyncRemotePillar(object): if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') + self.extra_minion_data = extra_minion_data or {} + if not isinstance(self.extra_minion_data, dict): + self.extra_minion_data = {} + log.error('Extra minion data must be a dictionary') + salt.utils.dictupdate.update(self.extra_minion_data, + self.get_ext_pillar_extra_minion_data(opts), + recursive_update=True, + merge_lists=True) @tornado.gen.coroutine def compile_pillar(self): @@ -152,6 +160,7 @@ class AsyncRemotePillar(object): 'saltenv': self.opts['environment'], 'pillarenv': self.opts['pillarenv'], 'pillar_override': self.pillar_override, + 'extra_minion_data': self.extra_minion_data, 'ver': '2', 'cmd': '_pillar'} if self.ext: @@ -174,12 +183,12 @@ class AsyncRemotePillar(object): raise tornado.gen.Return(ret_pillar) -class RemotePillar(object): +class RemotePillar(RemotePillarMixin): ''' Get the pillar from the master ''' def __init__(self, opts, grains, 
minion_id, saltenv, ext=None, functions=None, - pillar_override=None, pillarenv=None): + pillar_override=None, pillarenv=None, extra_minion_data=None): self.opts = opts self.opts['environment'] = saltenv self.ext = ext @@ -192,6 +201,14 @@ class RemotePillar(object): if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') + self.extra_minion_data = extra_minion_data or {} + if not isinstance(self.extra_minion_data, dict): + self.extra_minion_data = {} + log.error('Extra minion data must be a dictionary') + salt.utils.dictupdate.update(self.extra_minion_data, + self.get_ext_pillar_extra_minion_data(opts), + recursive_update=True, + merge_lists=True) def compile_pillar(self): ''' @@ -202,6 +219,7 @@ class RemotePillar(object): 'saltenv': self.opts['environment'], 'pillarenv': self.opts['pillarenv'], 'pillar_override': self.pillar_override, + 'extra_minion_data': self.extra_minion_data, 'ver': '2', 'cmd': '_pillar'} if self.ext: From 2ea93f4a8192e3e99ad9d1a0e466bc7a5cfa7bd9 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 21 Aug 2017 06:38:58 -0400 Subject: [PATCH 211/639] Added the extra_minion_data param when instantiating the pillar object --- salt/master.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/master.py b/salt/master.py index f3f697bf83..08926c5147 100644 --- a/salt/master.py +++ b/salt/master.py @@ -1311,7 +1311,8 @@ class AESFuncs(object): load.get(u'saltenv', load.get(u'env')), ext=load.get(u'ext'), pillar_override=load.get(u'pillar_override', {}), - pillarenv=load.get(u'pillarenv')) + pillarenv=load.get(u'pillarenv'), + extra_minion_data=load.get(u'extra_minion_data')) data = pillar.compile_pillar() self.fs_.update_opts() if self.opts.get(u'minion_data_cache', False): From bcd1c78fbb8349233fbad6953601a1529191261b Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 21 Aug 2017 06:44:18 -0400 Subject: [PATCH 212/639] Passed extra minion data (if 
with content) to external pillar functions --- salt/pillar/__init__.py | 46 ++++++++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 12 deletions(-) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index 13a4a5d97c..e5e5dbb192 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -37,7 +37,7 @@ log = logging.getLogger(__name__) def get_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None, - pillar_override=None, pillarenv=None): + pillar_override=None, pillarenv=None, extra_minion_data=None): ''' Return the correct pillar driver based on the file_client option ''' @@ -56,12 +56,14 @@ def get_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None, return PillarCache(opts, grains, minion_id, saltenv, ext=ext, functions=funcs, pillar_override=pillar_override, pillarenv=pillarenv) return ptype(opts, grains, minion_id, saltenv, ext, functions=funcs, - pillar_override=pillar_override, pillarenv=pillarenv) + pillar_override=pillar_override, pillarenv=pillarenv, + extra_minion_data=extra_minion_data) # TODO: migrate everyone to this one! def get_async_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None, - pillar_override=None, pillarenv=None): + pillar_override=None, pillarenv=None, + extra_minion_data=None): ''' Return the correct pillar driver based on the file_client option ''' @@ -253,7 +255,7 @@ class PillarCache(object): ''' # TODO ABC? def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, - pillar_override=None, pillarenv=None): + pillar_override=None, pillarenv=None, extra_minion_data=None): # Yes, we need all of these because we need to route to the Pillar object # if we have no cache. This is another refactor target. 
@@ -331,7 +333,7 @@ class Pillar(object): Read over the pillar top files and render the pillar data ''' def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, - pillar_override=None, pillarenv=None): + pillar_override=None, pillarenv=None, extra_minion_data=None): self.minion_id = minion_id self.ext = ext if pillarenv is None: @@ -377,6 +379,10 @@ class Pillar(object): if not isinstance(self.pillar_override, dict): self.pillar_override = {} log.error('Pillar data must be a dictionary') + self.extra_minion_data = extra_minion_data or {} + if not isinstance(self.extra_minion_data, dict): + self.extra_minion_data = {} + log.error('Extra minion data must be a dictionary') def __valid_on_demand_ext_pillar(self, opts): ''' @@ -836,15 +842,31 @@ class Pillar(object): ext = None if isinstance(val, dict): - ext = self.ext_pillars[key](self.minion_id, pillar, **val) + if self.extra_minion_data: + ext = self.ext_pillars[key](self.minion_id, pillar, + self.extra_minion_data, **val) + else: + ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): - ext = self.ext_pillars[key](self.minion_id, - pillar, - *val) + if self.extra_minion_data: + ext = self.ext_pillars[key]( + self.minion_id, pillar, *val, + extra_minion_data=self.extra_minion_data) + else: + ext = self.ext_pillars[key](self.minion_id, + pillar, + *val) else: - ext = self.ext_pillars[key](self.minion_id, - pillar, - val) + if self.extra_minion_data: + ext = self.ext_pillars[key]( + self.minion_id, + pillar, + val, + extra_minion_data=self.extra_minion_data) + else: + ext = self.ext_pillars[key](self.minion_id, + pillar, + val) return ext def ext_pillar(self, pillar, errors=None): From f7c5b2f046b6444453ee91148a078ed4cced6aef Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 21 Aug 2017 08:24:06 -0400 Subject: [PATCH 213/639] Added argspec check to see if external pillar functions support the extra_minion_data parameter --- salt/pillar/__init__.py | 13 
++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index e5e5dbb192..76ae1c41de 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -13,6 +13,7 @@ import logging import tornado.gen import sys import traceback +import inspect # Import salt libs import salt.loader @@ -840,15 +841,17 @@ class Pillar(object): Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None + args = inspect.getargspec(self.ext_pillars[key]).args if isinstance(val, dict): - if self.extra_minion_data: - ext = self.ext_pillars[key](self.minion_id, pillar, - self.extra_minion_data, **val) + if ('extra_minion_data' in args) and self.extra_minion_data: + ext = self.ext_pillars[key]( + self.minion_id, pillar, + extra_minion_data=self.extra_minion_data, **val) else: ext = self.ext_pillars[key](self.minion_id, pillar, **val) elif isinstance(val, list): - if self.extra_minion_data: + if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, *val, extra_minion_data=self.extra_minion_data) @@ -857,7 +860,7 @@ class Pillar(object): pillar, *val) else: - if self.extra_minion_data: + if ('extra_minion_data' in args) and self.extra_minion_data: ext = self.ext_pillars[key]( self.minion_id, pillar, From bba999066253cd7ddd7cfca4ee88e83cfb6a04db Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 31 Jul 2017 06:00:55 -0400 Subject: [PATCH 214/639] Added pass_to_ext_pillars to valid config options --- salt/config/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 593828af58..514e3be153 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -1079,6 +1079,11 @@ VALID_OPTS = { # (in other words, require that minions have 'minion_sign_messages' # turned on) 'require_minion_sign_messages': bool, + + # The list of config entries to be passed to external 
pillar function as + # part of the extra_minion_data param + # Subconfig entries can be specified by using the ':' notation (e.g. key:subkey) + 'pass_to_ext_pillars': (six.string_types, list), } # default configurations From 31d74f1a671120b1610ad54166bc123773a515d2 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 31 Jul 2017 17:37:52 -0400 Subject: [PATCH 215/639] Add 'pass_to_ext_pillars' minion config option documentation --- doc/ref/configuration/minion.rst | 35 ++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst index 039764c6b2..dabf6872cc 100644 --- a/doc/ref/configuration/minion.rst +++ b/doc/ref/configuration/minion.rst @@ -2113,6 +2113,41 @@ It will be interpreted as megabytes. file_recv_max_size: 100 +.. conf_minion:: pass_to_ext_pillars + +``pass_to_ext_pillars`` +----------------------- + +Specify a list of configuration keys whose values are to be passed to +external pillar functions. + +Suboptions can be specified using the ':' notation (i.e. ``option:suboption``) + +The values are are merged and included in the ``extra_minion_data`` optional +parameter of the external pillar function. The ``extra_minion_data`` parameter +is passed only to the external pillars functions that have it explicitly +specified in their definition. + +If the config contains + +.. code-block:: yaml + + opt1: value1 + opt2: + subopt1: value2 + subopt2: value3 + + add_to_pillar: + - opt1 + - opt2: subopt1 + +the ``extra_minion_data`` parameter will be + +.. 
code-block:: python + + {'opt1': 'value1', + 'opt2': {'subopt1': 'value2'}} + Security Settings ================= From 5b383f00363edbebddb96a2e51365fb6d33df956 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 21 Aug 2017 13:07:51 -0400 Subject: [PATCH 216/639] Added tests for RemotePillar adding extra minion data from config --- tests/unit/test_pillar.py | 121 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) diff --git a/tests/unit/test_pillar.py b/tests/unit/test_pillar.py index 41a5f4efdc..cbc301e662 100644 --- a/tests/unit/test_pillar.py +++ b/tests/unit/test_pillar.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- ''' :codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)` + :codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)` tests.unit.pillar_test @@ -19,6 +20,7 @@ from tests.support.paths import TMP # Import salt libs import salt.pillar import salt.utils.stringutils +import salt.exceptions @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -318,3 +320,122 @@ p2: }[sls] client.get_state.side_effect = get_state + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@patch('salt.transport.Channel.factory', MagicMock()) +class RemotePillarTestCase(TestCase): + ''' + Tests for instantiating a RemotePillar in salt.pillar + ''' + def setUp(self): + self.grains = {} + + def tearDown(self): + for attr in ('grains',): + try: + delattr(self, attr) + except AttributeError: + continue + + def test_get_opts_in_pillar_override_call(self): + mock_get_extra_minion_data = MagicMock(return_value={}) + with patch( + 'salt.pillar.RemotePillarMixin.get_ext_pillar_extra_minion_data', + mock_get_extra_minion_data): + + salt.pillar.RemotePillar({}, self.grains, 'mocked-minion', 'dev') + mock_get_extra_minion_data.assert_called_once_with( + {'environment': 'dev'}) + + def test_multiple_keys_in_opts_added_to_pillar(self): + opts = { + 'renderer': 'json', + 'path_to_add': 'fake_data', + 'path_to_add2': {'fake_data2': ['fake_data3', 'fake_data4']}, + 
'pass_to_ext_pillars': ['path_to_add', 'path_to_add2'] + } + pillar = salt.pillar.RemotePillar(opts, self.grains, + 'mocked-minion', 'dev') + self.assertEqual(pillar.extra_minion_data, + {'path_to_add': 'fake_data', + 'path_to_add2': {'fake_data2': ['fake_data3', + 'fake_data4']}}) + + def test_subkey_in_opts_added_to_pillar(self): + opts = { + 'renderer': 'json', + 'path_to_add': 'fake_data', + 'path_to_add2': {'fake_data5': 'fake_data6', + 'fake_data2': ['fake_data3', 'fake_data4']}, + 'pass_to_ext_pillars': ['path_to_add2:fake_data5'] + } + pillar = salt.pillar.RemotePillar(opts, self.grains, + 'mocked-minion', 'dev') + self.assertEqual(pillar.extra_minion_data, + {'path_to_add2': {'fake_data5': 'fake_data6'}}) + + def test_non_existent_leaf_opt_in_add_to_pillar(self): + opts = { + 'renderer': 'json', + 'path_to_add': 'fake_data', + 'path_to_add2': {'fake_data5': 'fake_data6', + 'fake_data2': ['fake_data3', 'fake_data4']}, + 'pass_to_ext_pillars': ['path_to_add2:fake_data_non_exist'] + } + pillar = salt.pillar.RemotePillar(opts, self.grains, + 'mocked-minion', 'dev') + self.assertEqual(pillar.pillar_override, {}) + + + def test_non_existent_intermediate_opt_in_add_to_pillar(self): + opts = { + 'renderer': 'json', + 'path_to_add': 'fake_data', + 'path_to_add2': {'fake_data5': 'fake_data6', + 'fake_data2': ['fake_data3', 'fake_data4']}, + 'pass_to_ext_pillars': ['path_to_add_no_exist'] + } + pillar = salt.pillar.RemotePillar(opts, self.grains, + 'mocked-minion', 'dev') + self.assertEqual(pillar.pillar_override, {}) + + def test_malformed_add_to_pillar(self): + opts = { + 'renderer': 'json', + 'path_to_add': 'fake_data', + 'path_to_add2': {'fake_data5': 'fake_data6', + 'fake_data2': ['fake_data3', 'fake_data4']}, + 'pass_to_ext_pillars': MagicMock() + } + with self.assertRaises(salt.exceptions.SaltClientError) as excinfo: + salt.pillar.RemotePillar(opts, self.grains, 'mocked-minion', 'dev') + self.assertEqual(excinfo.exception.strerror, + '\'pass_to_ext_pillars\' 
config is malformed.') + + def test_pillar_send_extra_minion_data_from_config(self): + opts = { + 'renderer': 'json', + 'pillarenv': 'fake_pillar_env', + 'path_to_add': 'fake_data', + 'path_to_add2': {'fake_data5': 'fake_data6', + 'fake_data2': ['fake_data3', 'fake_data4']}, + 'pass_to_ext_pillars': ['path_to_add']} + mock_channel = MagicMock( + crypted_transfer_decode_dictentry=MagicMock(return_value={})) + with patch('salt.transport.Channel.factory', + MagicMock(return_value=mock_channel)): + pillar = salt.pillar.RemotePillar(opts, self.grains, + 'mocked_minion', 'fake_env') + + ret = pillar.compile_pillar() + self.assertEqual(pillar.channel, mock_channel) + mock_channel.crypted_transfer_decode_dictentry.assert_called_once_with( + {'cmd': '_pillar', 'ver': '2', + 'id': 'mocked_minion', + 'grains': {}, + 'saltenv': 'fake_env', + 'pillarenv': 'fake_pillar_env', + 'pillar_override': {}, + 'extra_minion_data': {'path_to_add': 'fake_data'}}, + dictkey='pillar') From 2090d9d32f69d5724838c448650b8f57a20a1be7 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 21 Aug 2017 13:08:15 -0400 Subject: [PATCH 217/639] Added tests for AsyncRemotePillar adding extra minion data from config --- tests/unit/test_pillar.py | 53 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/tests/unit/test_pillar.py b/tests/unit/test_pillar.py index cbc301e662..d905137892 100644 --- a/tests/unit/test_pillar.py +++ b/tests/unit/test_pillar.py @@ -439,3 +439,56 @@ class RemotePillarTestCase(TestCase): 'pillar_override': {}, 'extra_minion_data': {'path_to_add': 'fake_data'}}, dictkey='pillar') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@patch('salt.transport.client.AsyncReqChannel.factory', MagicMock()) +class AsyncRemotePillarTestCase(TestCase): + ''' + Tests for instantiating a AsyncRemotePillar in salt.pillar + ''' + def setUp(self): + self.grains = {} + + def tearDown(self): + for attr in ('grains',): + try: + delattr(self, attr) + except AttributeError: + 
continue + + def test_get_opts_in_pillar_override_call(self): + mock_get_extra_minion_data = MagicMock(return_value={}) + with patch( + 'salt.pillar.RemotePillarMixin.get_ext_pillar_extra_minion_data', + mock_get_extra_minion_data): + + salt.pillar.RemotePillar({}, self.grains, 'mocked-minion', 'dev') + mock_get_extra_minion_data.assert_called_once_with( + {'environment': 'dev'}) + + def test_pillar_send_extra_minion_data_from_config(self): + opts = { + 'renderer': 'json', + 'pillarenv': 'fake_pillar_env', + 'path_to_add': 'fake_data', + 'path_to_add2': {'fake_data5': 'fake_data6', + 'fake_data2': ['fake_data3', 'fake_data4']}, + 'pass_to_ext_pillars': ['path_to_add']} + mock_channel = MagicMock( + crypted_transfer_decode_dictentry=MagicMock(return_value={})) + with patch('salt.transport.client.AsyncReqChannel.factory', + MagicMock(return_value=mock_channel)): + pillar = salt.pillar.RemotePillar(opts, self.grains, + 'mocked_minion', 'fake_env') + + ret = pillar.compile_pillar() + mock_channel.crypted_transfer_decode_dictentry.assert_called_once_with( + {'cmd': '_pillar', 'ver': '2', + 'id': 'mocked_minion', + 'grains': {}, + 'saltenv': 'fake_env', + 'pillarenv': 'fake_pillar_env', + 'pillar_override': {}, + 'extra_minion_data': {'path_to_add': 'fake_data'}}, + dictkey='pillar') From a771dd94f3c41dc02878e2e6aa45f14c73497e14 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 21 Aug 2017 12:50:07 -0400 Subject: [PATCH 218/639] Added tests for external pillar function calls --- tests/unit/test_pillar.py | 238 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 238 insertions(+) diff --git a/tests/unit/test_pillar.py b/tests/unit/test_pillar.py index d905137892..4147f9bf76 100644 --- a/tests/unit/test_pillar.py +++ b/tests/unit/test_pillar.py @@ -58,6 +58,244 @@ class PillarTestCase(TestCase): self.assertEqual(pillar.opts['environment'], 'dev') self.assertEqual(pillar.opts['pillarenv'], 'dev') + def test_ext_pillar_no_extra_minion_data_val_dict(self): + opts 
= { + 'renderer': 'json', + 'renderer_blacklist': [], + 'renderer_whitelist': [], + 'state_top': '', + 'pillar_roots': { + 'dev': [], + 'base': [] + }, + 'file_roots': { + 'dev': [], + 'base': [] + }, + 'extension_modules': '', + 'pillarenv_from_saltenv': True + } + mock_ext_pillar_func = MagicMock() + with patch('salt.loader.pillars', + MagicMock(return_value={'fake_ext_pillar': + mock_ext_pillar_func})): + pillar = salt.pillar.Pillar(opts, {}, 'mocked-minion', 'dev') + # ext pillar function doesn't have the extra_minion_data arg + with patch('inspect.getargspec', + MagicMock(return_value=MagicMock(args=[]))): + pillar._external_pillar_data('fake_pillar', {'arg': 'foo'}, + 'fake_pillar_dirs', 'fake_ext_pillar') + mock_ext_pillar_func.assert_called_once_with('mocked-minion', + 'fake_pillar', + arg='foo') + # ext pillar function has the extra_minion_data arg + mock_ext_pillar_func.reset_mock() + with patch('inspect.getargspec', + MagicMock(return_value=MagicMock(args=['extra_minion_data']))): + pillar._external_pillar_data('fake_pillar', {'arg': 'foo'}, + 'fake_pillar_dirs', 'fake_ext_pillar') + mock_ext_pillar_func.assert_called_once_with('mocked-minion', + 'fake_pillar', + arg='foo') + + def test_ext_pillar_no_extra_minion_data_val_list(self): + opts = { + 'renderer': 'json', + 'renderer_blacklist': [], + 'renderer_whitelist': [], + 'state_top': '', + 'pillar_roots': { + 'dev': [], + 'base': [] + }, + 'file_roots': { + 'dev': [], + 'base': [] + }, + 'extension_modules': '', + 'pillarenv_from_saltenv': True + } + mock_ext_pillar_func = MagicMock() + with patch('salt.loader.pillars', + MagicMock(return_value={'fake_ext_pillar': + mock_ext_pillar_func})): + pillar = salt.pillar.Pillar(opts, {}, 'mocked-minion', 'dev') + # ext pillar function doesn't have the extra_minion_data arg + with patch('inspect.getargspec', + MagicMock(return_value=MagicMock(args=[]))): + pillar._external_pillar_data('fake_pillar', ['foo'], + 'fake_pillar_dirs', 'fake_ext_pillar') + 
mock_ext_pillar_func.assert_called_once_with('mocked-minion', + 'fake_pillar', + 'foo') + # ext pillar function has the extra_minion_data arg + mock_ext_pillar_func.reset_mock() + with patch('inspect.getargspec', + MagicMock(return_value=MagicMock(args=['extra_minion_data']))): + pillar._external_pillar_data('fake_pillar', ['foo'], + 'fake_pillar_dirs', 'fake_ext_pillar') + mock_ext_pillar_func.assert_called_once_with('mocked-minion', + 'fake_pillar', + 'foo') + + def test_ext_pillar_no_extra_minion_data_val_elem(self): + opts = { + 'renderer': 'json', + 'renderer_blacklist': [], + 'renderer_whitelist': [], + 'state_top': '', + 'pillar_roots': { + 'dev': [], + 'base': [] + }, + 'file_roots': { + 'dev': [], + 'base': [] + }, + 'extension_modules': '', + 'pillarenv_from_saltenv': True + } + mock_ext_pillar_func = MagicMock() + with patch('salt.loader.pillars', + MagicMock(return_value={'fake_ext_pillar': + mock_ext_pillar_func})): + pillar = salt.pillar.Pillar(opts, {}, 'mocked-minion', 'dev') + # ext pillar function doesn't have the extra_minion_data arg + with patch('inspect.getargspec', + MagicMock(return_value=MagicMock(args=[]))): + pillar._external_pillar_data('fake_pillar', 'fake_val', + 'fake_pillar_dirs', 'fake_ext_pillar') + mock_ext_pillar_func.assert_called_once_with('mocked-minion', + 'fake_pillar', 'fake_val') + # ext pillar function has the extra_minion_data arg + mock_ext_pillar_func.reset_mock() + with patch('inspect.getargspec', + MagicMock(return_value=MagicMock(args=['extra_minion_data']))): + pillar._external_pillar_data('fake_pillar', 'fake_val', + 'fake_pillar_dirs', 'fake_ext_pillar') + mock_ext_pillar_func.assert_called_once_with('mocked-minion', + 'fake_pillar', 'fake_val') + + def test_ext_pillar_with_extra_minion_data_val_dict(self): + opts = { + 'renderer': 'json', + 'renderer_blacklist': [], + 'renderer_whitelist': [], + 'state_top': '', + 'pillar_roots': { + 'dev': [], + 'base': [] + }, + 'file_roots': { + 'dev': [], + 'base': [] + }, + 
'extension_modules': '', + 'pillarenv_from_saltenv': True + } + mock_ext_pillar_func = MagicMock() + with patch('salt.loader.pillars', + MagicMock(return_value={'fake_ext_pillar': + mock_ext_pillar_func})): + pillar = salt.pillar.Pillar(opts, {}, 'mocked-minion', 'dev', + extra_minion_data={'fake_key': 'foo'}) + # ext pillar function doesn't have the extra_minion_data arg + with patch('inspect.getargspec', + MagicMock(return_value=MagicMock(args=[]))): + pillar._external_pillar_data('fake_pillar', {'arg': 'foo'}, + 'fake_pillar_dirs', 'fake_ext_pillar') + mock_ext_pillar_func.assert_called_once_with( + 'mocked-minion', 'fake_pillar', arg='foo') + # ext pillar function has the extra_minion_data arg + mock_ext_pillar_func.reset_mock() + with patch('inspect.getargspec', + MagicMock(return_value=MagicMock(args=['extra_minion_data']))): + pillar._external_pillar_data('fake_pillar', {'arg': 'foo'}, + 'fake_pillar_dirs', 'fake_ext_pillar') + mock_ext_pillar_func.assert_called_once_with( + 'mocked-minion', 'fake_pillar', arg='foo', + extra_minion_data={'fake_key': 'foo'}) + + def test_ext_pillar_with_extra_minion_data_val_list(self): + opts = { + 'renderer': 'json', + 'renderer_blacklist': [], + 'renderer_whitelist': [], + 'state_top': '', + 'pillar_roots': { + 'dev': [], + 'base': [] + }, + 'file_roots': { + 'dev': [], + 'base': [] + }, + 'extension_modules': '', + 'pillarenv_from_saltenv': True + } + mock_ext_pillar_func = MagicMock() + with patch('salt.loader.pillars', + MagicMock(return_value={'fake_ext_pillar': + mock_ext_pillar_func})): + pillar = salt.pillar.Pillar(opts, {}, 'mocked-minion', 'dev', + extra_minion_data={'fake_key': 'foo'}) + # ext pillar function doesn't have the extra_minion_data arg + with patch('inspect.getargspec', + MagicMock(return_value=MagicMock(args=[]))): + pillar._external_pillar_data('fake_pillar', ['bar'], + 'fake_pillar_dirs', 'fake_ext_pillar') + mock_ext_pillar_func.assert_called_once_with( + 'mocked-minion', 'fake_pillar', 'bar') + # 
ext pillar function has the extra_minion_data arg + mock_ext_pillar_func.reset_mock() + with patch('inspect.getargspec', + MagicMock(return_value=MagicMock(args=['extra_minion_data']))): + pillar._external_pillar_data('fake_pillar', ['bar'], + 'fake_pillar_dirs', 'fake_ext_pillar') + mock_ext_pillar_func.assert_called_once_with( + 'mocked-minion', 'fake_pillar', 'bar', + extra_minion_data={'fake_key': 'foo'}) + + def test_ext_pillar_with_extra_minion_data_val_elem(self): + opts = { + 'renderer': 'json', + 'renderer_blacklist': [], + 'renderer_whitelist': [], + 'state_top': '', + 'pillar_roots': { + 'dev': [], + 'base': [] + }, + 'file_roots': { + 'dev': [], + 'base': [] + }, + 'extension_modules': '', + 'pillarenv_from_saltenv': True + } + mock_ext_pillar_func = MagicMock() + with patch('salt.loader.pillars', + MagicMock(return_value={'fake_ext_pillar': + mock_ext_pillar_func})): + pillar = salt.pillar.Pillar(opts, {}, 'mocked-minion', 'dev', + extra_minion_data={'fake_key': 'foo'}) + # ext pillar function doesn't have the extra_minion_data arg + with patch('inspect.getargspec', + MagicMock(return_value=MagicMock(args=[]))): + pillar._external_pillar_data('fake_pillar', 'bar', + 'fake_pillar_dirs', 'fake_ext_pillar') + mock_ext_pillar_func.assert_called_once_with( + 'mocked-minion', 'fake_pillar', 'bar') + # ext pillar function has the extra_minion_data arg + mock_ext_pillar_func.reset_mock() + with patch('inspect.getargspec', + MagicMock(return_value=MagicMock(args=['extra_minion_data']))): + pillar._external_pillar_data('fake_pillar', 'bar', + 'fake_pillar_dirs', 'fake_ext_pillar') + mock_ext_pillar_func.assert_called_once_with( + 'mocked-minion', 'fake_pillar', 'bar', + extra_minion_data={'fake_key': 'foo'}) + def test_malformed_pillar_sls(self): with patch('salt.pillar.compile_template') as compile_template: opts = { From 27e5059f04c975cd74880f2c37f685ba76eb0524 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 25 Aug 2017 05:16:38 -0400 Subject: 
[PATCH 219/639] Small fixes to documentation --- doc/ref/configuration/minion.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst index dabf6872cc..797aa214f7 100644 --- a/doc/ref/configuration/minion.rst +++ b/doc/ref/configuration/minion.rst @@ -2123,9 +2123,9 @@ external pillar functions. Suboptions can be specified using the ':' notation (i.e. ``option:suboption``) -The values are are merged and included in the ``extra_minion_data`` optional +The values are merged and included in the ``extra_minion_data`` optional parameter of the external pillar function. The ``extra_minion_data`` parameter -is passed only to the external pillars functions that have it explicitly +is passed only to the external pillar functions that have it explicitly specified in their definition. If the config contains @@ -2137,7 +2137,7 @@ If the config contains subopt1: value2 subopt2: value3 - add_to_pillar: + pass_to_ext_pillars: - opt1 - opt2: subopt1 From dc082a2087b6183ec91115c8b6d9f0a10edb9c59 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 25 Aug 2017 06:36:25 -0400 Subject: [PATCH 220/639] pylint fix and fix when calling _external_pillar_data function --- tests/unit/test_pillar.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/tests/unit/test_pillar.py b/tests/unit/test_pillar.py index 4147f9bf76..e48dce0923 100644 --- a/tests/unit/test_pillar.py +++ b/tests/unit/test_pillar.py @@ -84,7 +84,7 @@ class PillarTestCase(TestCase): with patch('inspect.getargspec', MagicMock(return_value=MagicMock(args=[]))): pillar._external_pillar_data('fake_pillar', {'arg': 'foo'}, - 'fake_pillar_dirs', 'fake_ext_pillar') + 'fake_ext_pillar') mock_ext_pillar_func.assert_called_once_with('mocked-minion', 'fake_pillar', arg='foo') @@ -93,7 +93,7 @@ class PillarTestCase(TestCase): with patch('inspect.getargspec', 
MagicMock(return_value=MagicMock(args=['extra_minion_data']))): pillar._external_pillar_data('fake_pillar', {'arg': 'foo'}, - 'fake_pillar_dirs', 'fake_ext_pillar') + 'fake_ext_pillar') mock_ext_pillar_func.assert_called_once_with('mocked-minion', 'fake_pillar', arg='foo') @@ -124,7 +124,7 @@ class PillarTestCase(TestCase): with patch('inspect.getargspec', MagicMock(return_value=MagicMock(args=[]))): pillar._external_pillar_data('fake_pillar', ['foo'], - 'fake_pillar_dirs', 'fake_ext_pillar') + 'fake_ext_pillar') mock_ext_pillar_func.assert_called_once_with('mocked-minion', 'fake_pillar', 'foo') @@ -133,7 +133,7 @@ class PillarTestCase(TestCase): with patch('inspect.getargspec', MagicMock(return_value=MagicMock(args=['extra_minion_data']))): pillar._external_pillar_data('fake_pillar', ['foo'], - 'fake_pillar_dirs', 'fake_ext_pillar') + 'fake_ext_pillar') mock_ext_pillar_func.assert_called_once_with('mocked-minion', 'fake_pillar', 'foo') @@ -164,7 +164,7 @@ class PillarTestCase(TestCase): with patch('inspect.getargspec', MagicMock(return_value=MagicMock(args=[]))): pillar._external_pillar_data('fake_pillar', 'fake_val', - 'fake_pillar_dirs', 'fake_ext_pillar') + 'fake_ext_pillar') mock_ext_pillar_func.assert_called_once_with('mocked-minion', 'fake_pillar', 'fake_val') # ext pillar function has the extra_minion_data arg @@ -172,7 +172,7 @@ class PillarTestCase(TestCase): with patch('inspect.getargspec', MagicMock(return_value=MagicMock(args=['extra_minion_data']))): pillar._external_pillar_data('fake_pillar', 'fake_val', - 'fake_pillar_dirs', 'fake_ext_pillar') + 'fake_ext_pillar') mock_ext_pillar_func.assert_called_once_with('mocked-minion', 'fake_pillar', 'fake_val') @@ -203,7 +203,7 @@ class PillarTestCase(TestCase): with patch('inspect.getargspec', MagicMock(return_value=MagicMock(args=[]))): pillar._external_pillar_data('fake_pillar', {'arg': 'foo'}, - 'fake_pillar_dirs', 'fake_ext_pillar') + 'fake_ext_pillar') mock_ext_pillar_func.assert_called_once_with( 
'mocked-minion', 'fake_pillar', arg='foo') # ext pillar function has the extra_minion_data arg @@ -211,7 +211,7 @@ class PillarTestCase(TestCase): with patch('inspect.getargspec', MagicMock(return_value=MagicMock(args=['extra_minion_data']))): pillar._external_pillar_data('fake_pillar', {'arg': 'foo'}, - 'fake_pillar_dirs', 'fake_ext_pillar') + 'fake_ext_pillar') mock_ext_pillar_func.assert_called_once_with( 'mocked-minion', 'fake_pillar', arg='foo', extra_minion_data={'fake_key': 'foo'}) @@ -243,7 +243,7 @@ class PillarTestCase(TestCase): with patch('inspect.getargspec', MagicMock(return_value=MagicMock(args=[]))): pillar._external_pillar_data('fake_pillar', ['bar'], - 'fake_pillar_dirs', 'fake_ext_pillar') + 'fake_ext_pillar') mock_ext_pillar_func.assert_called_once_with( 'mocked-minion', 'fake_pillar', 'bar') # ext pillar function has the extra_minion_data arg @@ -251,7 +251,7 @@ class PillarTestCase(TestCase): with patch('inspect.getargspec', MagicMock(return_value=MagicMock(args=['extra_minion_data']))): pillar._external_pillar_data('fake_pillar', ['bar'], - 'fake_pillar_dirs', 'fake_ext_pillar') + 'fake_ext_pillar') mock_ext_pillar_func.assert_called_once_with( 'mocked-minion', 'fake_pillar', 'bar', extra_minion_data={'fake_key': 'foo'}) @@ -283,7 +283,7 @@ class PillarTestCase(TestCase): with patch('inspect.getargspec', MagicMock(return_value=MagicMock(args=[]))): pillar._external_pillar_data('fake_pillar', 'bar', - 'fake_pillar_dirs', 'fake_ext_pillar') + 'fake_ext_pillar') mock_ext_pillar_func.assert_called_once_with( 'mocked-minion', 'fake_pillar', 'bar') # ext pillar function has the extra_minion_data arg @@ -291,7 +291,7 @@ class PillarTestCase(TestCase): with patch('inspect.getargspec', MagicMock(return_value=MagicMock(args=['extra_minion_data']))): pillar._external_pillar_data('fake_pillar', 'bar', - 'fake_pillar_dirs', 'fake_ext_pillar') + 'fake_ext_pillar') mock_ext_pillar_func.assert_called_once_with( 'mocked-minion', 'fake_pillar', 'bar', 
extra_minion_data={'fake_key': 'foo'}) @@ -625,7 +625,6 @@ class RemotePillarTestCase(TestCase): 'mocked-minion', 'dev') self.assertEqual(pillar.pillar_override, {}) - def test_non_existent_intermediate_opt_in_add_to_pillar(self): opts = { 'renderer': 'json', From 41640479511006fd51fea46ebff35bc595a4a137 Mon Sep 17 00:00:00 2001 From: Viktor Krivak Date: Fri, 25 Aug 2017 15:08:57 +0200 Subject: [PATCH 221/639] Fix apache.config with multiple statement At this moment when you post more than one statement in config only last is used. Also file is rewrited multiple times until last statement is written. Example: salt '*' apache.config /etc/httpd/conf.d/ports.conf config="[{'Listen': '8080'}, {'Proxy': "Something"}]" Ends only with Proxy Something and ignore Listen 8080, This patch fix this issue. --- salt/modules/apache.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/salt/modules/apache.py b/salt/modules/apache.py index ad502df530..18bdab2726 100644 --- a/salt/modules/apache.py +++ b/salt/modules/apache.py @@ -446,11 +446,15 @@ def config(name, config, edit=True): salt '*' apache.config /etc/httpd/conf.d/ports.conf config="[{'Listen': '22'}]" ''' + configs = [] for entry in config: key = next(six.iterkeys(entry)) - configs = _parse_config(entry[key], key) - if edit: - with salt.utils.fopen(name, 'w') as configfile: - configfile.write('# This file is managed by Salt.\n') - configfile.write(configs) - return configs + configs.append(_parse_config(entry[key], key)) + + # Python auto-correct line endings + configstext = "\n".join(configs) + if edit: + with salt.utils.fopen(name, 'w') as configfile: + configfile.write('# This file is managed by Salt.\n') + configfile.write(configstext) + return configstext From f7c945f6e44c15c0a62572a73fb8eb101420527f Mon Sep 17 00:00:00 2001 From: Paul Miller Date: Sun, 20 Aug 2017 09:06:39 -0400 Subject: [PATCH 222/639] Prevent spurious "Template does not exist" error This was merged previously 
(though slightly differently) in #39516 Took me a second to track it down and then realized that I fixed this in 2016.x --- salt/pillar/__init__.py | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index a62e11dc77..8d5eb7e998 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -405,20 +405,19 @@ class Pillar(object): self.opts['pillarenv'], ', '.join(self.opts['file_roots']) ) else: - tops[self.opts['pillarenv']] = [ - compile_template( - self.client.cache_file( - self.opts['state_top'], - self.opts['pillarenv'] - ), - self.rend, - self.opts['renderer'], - self.opts['renderer_blacklist'], - self.opts['renderer_whitelist'], - self.opts['pillarenv'], - _pillar_rend=True, - ) - ] + top = self.client.cache_file(self.opts['state_top'], self.opts['pillarenv']) + if top: + tops[self.opts['pillarenv']] = [ + compile_template( + top, + self.rend, + self.opts['renderer'], + self.opts['renderer_blacklist'], + self.opts['renderer_whitelist'], + self.opts['pillarenv'], + _pillar_rend=True, + ) + ] else: for saltenv in self._get_envs(): if self.opts.get('pillar_source_merging_strategy', None) == "none": From ccd224177793dec66cb89a62e608f9c622a0ebf6 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Fri, 25 Aug 2017 10:23:35 -0600 Subject: [PATCH 223/639] Pin request install to version This fails on centos 6 because its node is too old to support the version of hawk bumped here https://github.com/request/request/pull/2751, we can still test the functionality. This will pull from github, and install a specific tag version, and we still do the uninstall using the github path. This should be more stable. 
--- tests/integration/states/npm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/states/npm.py b/tests/integration/states/npm.py index 140eaa92ca..adcd603cf8 100644 --- a/tests/integration/states/npm.py +++ b/tests/integration/states/npm.py @@ -40,7 +40,7 @@ class NpmStateTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn): ''' Determine if URL-referenced NPM module can be successfully installed. ''' - ret = self.run_state('npm.installed', name='git://github.com/request/request') + ret = self.run_state('npm.installed', name='request/request#v2.81.1') self.assertSaltTrueReturn(ret) ret = self.run_state('npm.removed', name='git://github.com/request/request') self.assertSaltTrueReturn(ret) From f00d3a9ddc8433e9e92bda7215cc2036e2a9afab Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 25 Aug 2017 13:29:40 -0400 Subject: [PATCH 224/639] Add `disk.format` alias for `disk.format_` --- salt/modules/disk.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/modules/disk.py b/salt/modules/disk.py index 4b7106a4a7..2ff06278c1 100644 --- a/salt/modules/disk.py +++ b/salt/modules/disk.py @@ -22,6 +22,10 @@ import salt.utils.decorators as decorators from salt.utils.decorators import depends from salt.exceptions import CommandExecutionError +__func_alias__ = { + 'format_': 'format' +} + log = logging.getLogger(__name__) HAS_HDPARM = salt.utils.which('hdparm') is not None From 33a30bac06d967b91a1976f3c54ec17ab4b58008 Mon Sep 17 00:00:00 2001 From: lomeroe Date: Fri, 25 Aug 2017 12:31:12 -0500 Subject: [PATCH 225/639] correcting bad format statement in search for policy to be disabled --- salt/modules/win_lgpo.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index efd76192bd..69986e77c0 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -4181,8 +4181,6 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, existing_data = 
'' base_policy_settings = {} policy_data = _policy_info() - #//{0}:policy[@displayName = "{1}" and (@class = "Both" or @class = "{2}") ] - #policySearchXpath = etree.XPath('//*[@ns1:id = $id or @ns1:name = $id]') policySearchXpath = '//ns1:*[@id = "{0}" or @name = "{0}"]' try: if admx_policy_definitions is None or adml_policy_resources is None: @@ -4213,8 +4211,7 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, this_valuename = None if str(base_policy_settings[adm_namespace][admPolicy]).lower() == 'disabled': log.debug('time to disable {0}'.format(admPolicy)) - #this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy, namespaces={'ns1': adm_namespace}) - this_policy = admx_policy_definitions.xpath(policySearchXpath.format('ns1', admPolicy), namespaces={'ns1': adm_namespace}) + this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace}) if this_policy: this_policy = this_policy[0] if 'class' in this_policy.attrib: @@ -4325,7 +4322,6 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, log.error(msg.format(this_policy.attrib)) else: log.debug('time to enable and set the policy "{0}"'.format(admPolicy)) - #this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy, namespaces={'ns1': adm_namespace}) this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace}) log.debug('found this_policy == {0}'.format(this_policy)) if this_policy: From ef7e93eb3f4a7f4ae4c7ae182fd1a2a721640360 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 25 Aug 2017 12:52:26 -0700 Subject: [PATCH 226/639] Reverting this change due to it breaking other uses. 
--- salt/modules/augeas_cfg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/augeas_cfg.py b/salt/modules/augeas_cfg.py index 401a11eb1b..6b1f1e7b1b 100644 --- a/salt/modules/augeas_cfg.py +++ b/salt/modules/augeas_cfg.py @@ -199,7 +199,7 @@ def execute(context=None, lens=None, commands=(), load_path=None): method = METHOD_MAP[cmd] nargs = arg_map[method] - parts = salt.utils.shlex_split(arg, posix=False) + parts = salt.utils.shlex_split(arg) if len(parts) not in nargs: err = '{0} takes {1} args: {2}'.format(method, nargs, parts) From 453c3d7f20de31f1027f0ace6b6ff8a006e6a8c5 Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 25 Aug 2017 19:34:29 -0400 Subject: [PATCH 227/639] Always notify tkwilliams when changes occur on boto files --- .mention-bot | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.mention-bot b/.mention-bot index 86890cfde0..c07f85b9fc 100644 --- a/.mention-bot +++ b/.mention-bot @@ -4,6 +4,11 @@ "name": "ryan-lane", "files": ["salt/**/*boto*.py"], "skipTeamPrs": false + }, + { + "name": "tkwilliams", + "files": ["salt/**/*boto*.py"], + "skipTeamPrs": false } ], "skipTitle": "Merge forward", From 1aa658745df7d3c3ae806b850ef605a032d583bf Mon Sep 17 00:00:00 2001 From: Raymond Lam Date: Sun, 27 Aug 2017 10:08:01 +0800 Subject: [PATCH 228/639] Fix the if ping() if condition and add more exception check --- salt/proxy/junos.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/salt/proxy/junos.py b/salt/proxy/junos.py index 2f70750b0b..ba2842b06e 100644 --- a/salt/proxy/junos.py +++ b/salt/proxy/junos.py @@ -50,6 +50,7 @@ try: from jnpr.junos.exception import ConnectClosedError from jnpr.junos.exception import RpcError from jnpr.junos.exception import ConnectError + from ncclient.operations.errors import TimeoutExpiredError except ImportError: HAS_JUNOS = False @@ -160,14 +161,14 @@ def ping(): dev = conn() # call rpc only if ncclient queue is empty. 
If not empty that means other # rpc call is going on. - if hasattr(dev, '_session') and not dev._session._q.empty(): + if hasattr(dev._conn, '_session') and dev._conn._session._q.empty(): try: dev.rpc.file_list(path='/dev/null', dev_timeout=2) - except RpcTimeoutError: + except (RpcTimeoutError, ConnectClosedError): try: dev.close() - except (RpcError, ConnectError): - pass + except (RpcError, ConnectError, TimeoutExpiredError): + dev.connected = False return dev.connected From f20dda890ac8a460ca237b388b5763f4f20e8357 Mon Sep 17 00:00:00 2001 From: Eliezer Paiewonsky Date: Sun, 27 Aug 2017 17:33:40 -0400 Subject: [PATCH 229/639] Remove references to SaltConf16 and generalize related README --- README.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.rst b/README.rst index d3917d6abe..64b6fe1c5b 100644 --- a/README.rst +++ b/README.rst @@ -67,10 +67,11 @@ Engage SaltStack `SaltConf`_, **User Groups and Meetups** - SaltStack has a vibrant and `global community`_ of customers, users, developers and enthusiasts. Connect with other -Salted folks in your area of the world, or join `SaltConf16`_, the SaltStack -annual user conference, April 19-21 in Salt Lake City. Please let us know if -you would like to start a user group or if we should add your existing -SaltStack user group to this list by emailing: info@saltstack.com +Salted folks in your area of the world, or join `SaltConf`_, the SaltStack +annual user conference held in Salt Lake City. Please visit the `SaltConf`_ site +for details of our next conference. Also, please let us know if you would like +to start a user group or if we should add your existing SaltStack user group to +this list by emailing: info@saltstack.com **SaltStack Training** - Get access to proprietary `SaltStack education offerings`_ through instructor-led training offered on-site, virtually or at @@ -89,9 +90,8 @@ services`_ offerings. * LinkedIn Group - ``_ * Google+ - ``_ -.. 
_SaltConf: http://www.youtube.com/user/saltstack .. _global community: http://www.meetup.com/pro/saltstack/ -.. _SaltConf16: http://saltconf.com/ +.. _SaltConf: http://saltconf.com/ .. _SaltStack education offerings: http://saltstack.com/training/ .. _SaltStack Certified Engineer (SSCE): http://saltstack.com/certification/ .. _SaltStack professional services: http://saltstack.com/services/ From 1cb4c3f821a74df945bff291de30fb073a789769 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Sun, 27 Aug 2017 20:50:48 -0700 Subject: [PATCH 230/639] Updating mount to allow opts in the state file to be removed and have the volume remounted --- salt/modules/mount.py | 56 +++++++++++++++++++++++++++++++++ salt/states/mount.py | 55 ++++++++++++++++++++++++++++++++ tests/unit/states/test_mount.py | 4 +++ 3 files changed, 115 insertions(+) diff --git a/salt/modules/mount.py b/salt/modules/mount.py index ee263c4d77..7272f57917 100644 --- a/salt/modules/mount.py +++ b/salt/modules/mount.py @@ -14,6 +14,7 @@ import salt.utils # Can be removed once test_mode is moved import salt.utils.files import salt.utils.path import salt.utils.platform +import salt.utils.mount from salt.exceptions import CommandNotFoundError, CommandExecutionError # Import 3rd-party libs @@ -1262,3 +1263,58 @@ def is_mounted(name): return True else: return False + + +def read_mount_cache(name): + ''' + .. versionadded:: Oxygen + + Provide information if the path is mounted + + CLI Example: + + .. code-block:: bash + + salt '*' mount.read_mount_cache /mnt/share + ''' + cache = salt.utils.mount.read_cache(__opts__) + if cache: + if 'mounts' in cache and cache['mounts']: + if name in cache['mounts']: + return cache['mounts'][name] + return {} + + +def write_mount_cache(real_name, + device, + mkmnt, + fstype, + opts): + ''' + .. versionadded:: Oxygen + + Provide information if the path is mounted + + CLI Example: + + .. 
code-block:: bash + + salt '*' mount.write_mount_cache /mnt/share + ''' + cache = salt.utils.mount.read_cache(__opts__) + + if 'mounts' in cache: + cache['mounts'][real_name] = {'device': device, + 'fstype': fstype, + 'mkmnt': mkmnt, + 'opts': opts} + else: + cache['mounts'] = {} + cache['mounts'][real_name] = {'device': device, + 'fstype': fstype, + 'mkmnt': mkmnt, + 'opts': opts} + + log.debug('=== cache {} ==='.format(cache)) + cache = salt.utils.mount.write_cache(cache, __opts__) + return True diff --git a/salt/states/mount.py b/salt/states/mount.py index b44225b92a..0da0e53935 100644 --- a/salt/states/mount.py +++ b/salt/states/mount.py @@ -196,6 +196,8 @@ def mounted(name, 'result': True, 'comment': ''} + update_mount_cache = False + if device_name_regex is None: device_name_regex = [] @@ -438,6 +440,50 @@ def mounted(name, # don't write remount into fstab if 'remount' in opts: opts.remove('remount') + + # Update the cache + update_mount_cache = True + + mount_cache = __salt__['mount.read_mount_cache'](real_name) + if 'opts' in mount_cache: + _missing = [opt for opt in mount_cache['opts'] + if opt not in opts] + + if _missing: + if __opts__['test']: + ret['result'] = None + ret['comment'] = ('Remount would be forced because' + ' options ({0})' + 'changed'.format(','.join(_missing))) + return ret + else: + # Some file systems require umounting and mounting if options change + # add others to list that require similiar functionality + if fstype in ['nfs', 'cvfs'] or fstype.startswith('fuse'): + ret['changes']['umount'] = "Forced unmount and mount because " \ + + "options ({0}) changed".format(opt) + unmount_result = __salt__['mount.umount'](real_name) + if unmount_result is True: + mount_result = __salt__['mount.mount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts) + ret['result'] = mount_result + else: + ret['result'] = False + ret['comment'] = 'Unable to unmount {0}: {1}.'.format(real_name, unmount_result) + return ret + else: + 
ret['changes']['umount'] = "Forced remount because " \ + + "options ({0}) changed".format(opt) + remount_result = __salt__['mount.remount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts) + ret['result'] = remount_result + # Cleanup after the remount, so we + # don't write remount into fstab + if 'remount' in opts: + opts.remove('remount') + + update_mount_cache = True + else: + update_mount_cache = True + if real_device not in device_list: # name matches but device doesn't - need to umount _device_mismatch_is_ignored = None @@ -468,6 +514,7 @@ def mounted(name, ret['comment'] = "Unable to unmount" ret['result'] = None return ret + update_mount_cache = True else: ret['comment'] = 'Target was already mounted' # using a duplicate check so I can catch the results of a umount @@ -491,6 +538,7 @@ def mounted(name, out = __salt__['mount.mount'](name, device, mkmnt, fstype, opts, user=user) active = __salt__['mount.active'](extended=True) + update_mount_cache = True if isinstance(out, string_types): # Failed to (re)mount, the state has failed! ret['comment'] = out @@ -590,6 +638,13 @@ def mounted(name, config, match_on=match_on) + if update_mount_cache: + cache_result = __salt__['mount.write_mount_cache'](real_name, + device, + mkmnt=mkmnt, + fstype=fstype, + opts=opts) + if out == 'present': ret['comment'] += '. Entry already exists in the fstab.' return ret diff --git a/tests/unit/states/test_mount.py b/tests/unit/states/test_mount.py index 65173ea698..1e1886001f 100644 --- a/tests/unit/states/test_mount.py +++ b/tests/unit/states/test_mount.py @@ -62,6 +62,8 @@ class MountTestCase(TestCase, LoaderModuleMockMixin): mock_str = MagicMock(return_value='salt') mock_user = MagicMock(return_value={'uid': 510}) mock_group = MagicMock(return_value={'gid': 100}) + mock_read_cache = MagicMock(return_value={}) + mock_write_cache = MagicMock(return_value=True) umount1 = ("Forced unmount because devices don't match. 
" "Wanted: /dev/sdb6, current: /dev/sdb5, /dev/sdb5") with patch.dict(mount.__grains__, {'os': 'Darwin'}): @@ -163,6 +165,8 @@ class MountTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(mount.__salt__, {'mount.active': mock_mnt, 'mount.mount': mock_str, 'mount.umount': mock_f, + 'mount.read_mount_cache': mock_read_cache, + 'mount.write_mount_cache': mock_write_cache, 'mount.set_fstab': mock, 'user.info': mock_user, 'group.info': mock_group}): From 1eba8c4b8e74e6abd5cce084b74aa8344efa3ce4 Mon Sep 17 00:00:00 2001 From: Mapel88 Date: Mon, 28 Aug 2017 10:11:36 +0300 Subject: [PATCH 231/639] Fix pylint errors Fix pylint errors --- salt/modules/win_iis.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/salt/modules/win_iis.py b/salt/modules/win_iis.py index 38217b07a5..2e2e5113d2 100644 --- a/salt/modules/win_iis.py +++ b/salt/modules/win_iis.py @@ -836,12 +836,10 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443, if _iisVersion() < 8: # IIS 7.5 and earlier have different syntax for associating a certificate with a site # Modify IP spec to IIS 7.5 format - iis7path = binding_path.replace(r"\*!", "\\0.0.0.0!") - + iis7path = binding_path.replace(r"\*!", "\\0.0.0.0!") # win 2008 uses the following format: ip!port and not ip!port! 
if iis7path.endswith("!"): - iis7path = iis7path[:-1] - + iis7path = iis7path[:-1] ps_cmd = ['New-Item', '-Path', "'{0}'".format(iis7path), From 25c8190e484cac4658c81d0b730509a5fb7ab815 Mon Sep 17 00:00:00 2001 From: Mapel88 Date: Mon, 28 Aug 2017 10:12:56 +0300 Subject: [PATCH 232/639] Fix pylint errors Fix pylint errors --- salt/states/win_iis.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/salt/states/win_iis.py b/salt/states/win_iis.py index 38d2ed9ae4..6407315c21 100644 --- a/salt/states/win_iis.py +++ b/salt/states/win_iis.py @@ -494,8 +494,8 @@ def container_setting(name, container, settings=None): processModel.maxProcesses: 1 processModel.userName: TestUser processModel.password: TestPassword - processModel.identityType: SpecificUser - + processModel.identityType: SpecificUser + Example of usage for the ``Sites`` container: .. code-block:: yaml @@ -509,9 +509,8 @@ def container_setting(name, container, settings=None): logFile.period: Daily limits.maxUrlSegments: 32 ''' - + identityType_map2string = {0: 'LocalSystem', 1: 'LocalService', 2: 'NetworkService', 3: 'SpecificUser', 4: 'ApplicationPoolIdentity'} - ret = {'name': name, 'changes': {}, 'comment': str(), From 3c6f4d4a1a2fe671cb7b0699f3e7ae7527f024d6 Mon Sep 17 00:00:00 2001 From: m03 Date: Mon, 28 Aug 2017 00:13:13 -0700 Subject: [PATCH 233/639] Add aptly execution module --- salt/modules/aptly.py | 515 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 515 insertions(+) create mode 100644 salt/modules/aptly.py diff --git a/salt/modules/aptly.py b/salt/modules/aptly.py new file mode 100644 index 0000000000..a8d9e921ef --- /dev/null +++ b/salt/modules/aptly.py @@ -0,0 +1,515 @@ +# -*- coding: utf-8 -*- +''' +Aptly Debian repository manager. + +.. 
versionadded:: Oxygen +''' +# Import python libs +from __future__ import absolute_import +import json +import logging +import os +import re + +# Import salt libs +from salt.exceptions import SaltInvocationError +import salt.utils.path + + +_DEFAULT_CONFIG_PATH = '/etc/aptly.conf' +_LOG = logging.getLogger(__name__) + +# Define the module's virtual name +__virtualname__ = 'aptly' + + +def __virtual__(): + ''' + Only works on systems with the aptly binary in the system path. + ''' + if salt.utils.path.which('aptly'): + return __virtualname__ + return (False, 'The aptly binaries required cannot be found or are not installed.') + + +def _cast_if_numeric(value): + ''' + Determine if the provided value is numeric. + + :return: The converted or passed value. + :rtype: float|int|str + ''' + try: + float_value = float(str(value)) + except ValueError: + if value: + return str(value) + return None + + if float_value.is_integer(): + return int(float_value) + return float_value + + +def _cmd_run(cmd): + ''' + Run the aptly command. + + :return: The string output of the command. + :rtype: str + ''' + cmd.insert(0, 'aptly') + cmd_ret = __salt__['cmd.run_all'](cmd, ignore_retcode=True) + + if cmd_ret['retcode'] != 0: + _LOG.debug('Unable to execute command: %s\nError: %s', cmd, + cmd_ret['stderr']) + + return cmd_ret['stdout'] + + +def _format_repo_args(comment=None, component=None, distribution=None, + uploaders_file=None, saltenv='base'): + ''' + Format the common arguments for creating or editing a repository. + + :param str comment: The description of the repository. + :param str component: The default component to use when publishing. + :param str distribution: The default distribution to use when publishing. + :param str uploaders_file: The repository upload restrictions config. + :param str saltenv: The environment the file resides in. + + :return: A list of the arguments formatted as aptly arguments. 
+ :rtype: list + ''' + ret = list() + cached_uploaders_path = None + settings = {'comment': comment, 'component': component, + 'distribution': distribution} + + if uploaders_file: + cached_uploaders_path = __salt__['cp.cache_file'](uploaders_file, saltenv) + + if not cached_uploaders_path: + _LOG.error('Unable to get cached copy of file: %s', uploaders_file) + return False + + for setting in settings: + if settings[setting] is not None: + ret.append('-{}={}'.format(setting, settings[setting])) + + if cached_uploaders_path: + ret.append('-uploaders-file={}'.format(cached_uploaders_path)) + + return ret + + +def _validate_config(config_path): + ''' + Validate that the configuration file exists and is readable. + + :param str config_path: The path to the configuration file for the aptly instance. + + :return: None + :rtype: None + ''' + _LOG.debug('Checking configuration file: %s', config_path) + + if not os.path.isfile(config_path): + message = 'Unable to get configuration file: {}'.format(config_path) + _LOG.error(message) + raise SaltInvocationError(message) + + +def get_config(config_path=_DEFAULT_CONFIG_PATH): + ''' + Get the configuration data. + + :param str config_path: The path to the configuration file for the aptly instance. + + :return: A dictionary containing the configuration data. + :rtype: dict + + CLI Example: + + .. code-block:: bash + + salt '*' aptly.get_config + ''' + _validate_config(config_path) + + cmd = ['config', 'show', '-config={}'.format(config_path)] + + cmd_ret = _cmd_run(cmd) + + return json.loads(cmd_ret) + + +def list_repos(config_path=_DEFAULT_CONFIG_PATH): + ''' + Get a list of all the repos. + + :param str config_path: The path to the configuration file for the aptly instance. + + :return: A list of the repository names. + :rtype: list + + CLI Example: + + .. 
code-block:: bash + + salt '*' aptly.list_repos + ''' + _validate_config(config_path) + + cmd = ['repo', 'list', '-config={}'.format(config_path), '-raw=true'] + + cmd_ret = _cmd_run(cmd) + + ret = [line.strip() for line in cmd_ret.splitlines()] + + _LOG.debug('Found repositories: %s', len(ret)) + return ret + + +def get_repo(name, config_path=_DEFAULT_CONFIG_PATH, with_packages=False): + ''' + Get the details of the repository. + + :param str name: The name of the repository. + :param str config_path: The path to the configuration file for the aptly instance. + :param bool with_packages: Return a list of packages in the repo. + + :return: A dictionary containing information about the repository. + :rtype: dict + ''' + _validate_config(config_path) + + ret = dict() + cmd = ['repo', 'show', '-config={}'.format(config_path), + '-with-packages={}'.format(str(with_packages).lower()), + name] + + cmd_ret = _cmd_run(cmd) + + for line in cmd_ret.splitlines(): + try: + # Extract the settings and their values, and attempt to format + # them to match their equivalent setting names. + items = line.split(':') + key = items[0].lower().replace('default', '').strip() + key = ' '.join(key.split()).replace(' ', '_') + ret[key] = _cast_if_numeric(items[1].strip()) + except (AttributeError, IndexError): + # If the line doesn't have the separator or is otherwise invalid, skip it. + _LOG.debug('Skipping line: %s', line) + + if ret: + _LOG.debug('Found repository: %s', name) + else: + _LOG.debug('Unable to find repository: %s', name) + return ret + + +def new_repo(name, config_path=_DEFAULT_CONFIG_PATH, comment=None, component=None, + distribution=None, uploaders_file=None, from_snapshot=None, + saltenv='base'): + ''' + Create the new repository. + + :param str name: The name of the repository. + :param str config_path: The path to the configuration file for the aptly instance. + :param str comment: The description of the repository. 
+ :param str component: The default component to use when publishing. + :param str distribution: The default distribution to use when publishing. + :param str uploaders_file: The repository upload restrictions config. + :param str from_snapshot: The snapshot to initialize the repository contents from. + :param str saltenv: The environment the file resides in. + + :return: A boolean representing whether all changes succeeded. + :rtype: bool + + CLI Example: + + .. code-block:: bash + + salt '*' aptly.new_repo name="test-repo" comment="Test main repo" component="main" distribution="trusty" + ''' + _validate_config(config_path) + + current_repo = __salt__['aptly.get_repo'](name=name) + + if current_repo: + _LOG.debug('Repository already exists: %s', name) + return True + + cmd = ['repo', 'create', '-config={}'.format(config_path)] + repo_params = _format_repo_args(comment=comment, component=component, + distribution=distribution, + uploaders_file=uploaders_file, saltenv=saltenv) + cmd.extend(repo_params) + cmd.append(name) + + if from_snapshot: + cmd.extend(['from', 'snapshot', from_snapshot]) + + _cmd_run(cmd) + repo = __salt__['aptly.get_repo'](name=name) + + if repo: + _LOG.debug('Created repo: %s', name) + return True + _LOG.error('Unable to create repo: %s', name) + return False + + +def set_repo(name, config_path=_DEFAULT_CONFIG_PATH, comment=None, component=None, + distribution=None, uploaders_file=None, saltenv='base'): + ''' + Configure the repository settings. + + :param str name: The name of the repository. + :param str config_path: The path to the configuration file for the aptly instance. + :param str comment: The description of the repository. + :param str component: The default component to use when publishing. + :param str distribution: The default distribution to use when publishing. + :param str uploaders_file: The repository upload restrictions config. + :param str from_snapshot: The snapshot to initialize the repository contents from. 
+ :param str saltenv: The environment the file resides in. + + :return: A boolean representing whether all changes succeeded. + :rtype: bool + + CLI Example: + + .. code-block:: bash + + salt '*' aptly.set_repo name="test-repo" comment="Test universe repo" component="universe" distribution="xenial" + ''' + _validate_config(config_path) + + failed_settings = dict() + + # Only check for settings that were passed in and skip the rest. + settings = {'comment': comment, 'component': component, + 'distribution': distribution} + + for setting in list(settings): + if settings[setting] is None: + settings.pop(setting, None) + + current_settings = __salt__['aptly.get_repo'](name=name) + + if not current_settings: + _LOG.error('Unable to get repo: %s', name) + return False + + # Discard any additional settings that get_repo gives + # us that are not present in the provided arguments. + for current_setting in list(current_settings): + if current_setting not in settings: + current_settings.pop(current_setting, None) + + # Check the existing repo settings to see if they already have the desired values. + if settings == current_settings: + _LOG.debug('Settings already have the desired values for repository: %s', name) + return True + + cmd = ['repo', 'edit', '-config={}'.format(config_path)] + + repo_params = _format_repo_args(comment=comment, component=component, + distribution=distribution, + uploaders_file=uploaders_file, saltenv=saltenv) + cmd.extend(repo_params) + cmd.append(name) + + _cmd_run(cmd) + new_settings = __salt__['aptly.get_repo'](name=name) + + # Check the new repo settings to see if they have the desired values. 
+ for setting in settings: + if settings[setting] != new_settings[setting]: + failed_settings.update({setting: settings[setting]}) + + if failed_settings: + _LOG.error('Unable to change settings for the repository: %s', name) + return False + _LOG.debug('Settings successfully changed to the desired values for repository: %s', name) + return True + + +def delete_repo(name, config_path=_DEFAULT_CONFIG_PATH, force=False): + ''' + Remove the repository. + + :param str name: The name of the repository. + :param str config_path: The path to the configuration file for the aptly instance. + :param bool force: Whether to remove the repository even if it is used as the source + of an existing snapshot. + + :return: A boolean representing whether all changes succeeded. + :rtype: bool + + CLI Example: + + .. code-block:: bash + + salt '*' aptly.delete_repo name="test-repo" + ''' + _validate_config(config_path) + + current_repo = __salt__['aptly.get_repo'](name=name) + + if not current_repo: + _LOG.debug('Repository already absent: %s', name) + return True + + cmd = ['repo', 'drop', '-config={}'.format(config_path), + '-force={}'.format(str(force).lower()), name] + + _cmd_run(cmd) + repo = __salt__['aptly.get_repo'](name=name) + + if repo: + _LOG.error('Unable to remove repo: %s', name) + return False + _LOG.debug('Removed repo: %s', name) + return True + + +def list_mirrors(config_path=_DEFAULT_CONFIG_PATH): + ''' + Get a list of all the mirrors. + + :param str config_path: The path to the configuration file for the aptly instance. + + :return: A list of the mirror names. + :rtype: list + + CLI Example: + + .. 
code-block:: bash + + salt '*' aptly.list_mirrors + ''' + _validate_config(config_path) + + cmd = ['mirror', 'list', '-config={}'.format(config_path), '-raw=true'] + + cmd_ret = _cmd_run(cmd) + + ret = [line.strip() for line in cmd_ret.splitlines()] + + _LOG.debug('Found mirrors: %s', len(ret)) + return ret + + +def list_published(config_path=_DEFAULT_CONFIG_PATH): + ''' + Get a list of all the published repositories. + + :param str config_path: The path to the configuration file for the aptly instance. + + :return: A list of the published repository names. + :rtype: list + + CLI Example: + + .. code-block:: bash + + salt '*' aptly.list_published + ''' + _validate_config(config_path) + + cmd = ['publish', 'list', '-config={}'.format(config_path), '-raw=true'] + + cmd_ret = _cmd_run(cmd) + + ret = [line.strip() for line in cmd_ret.splitlines()] + + _LOG.debug('Found published repositories: %s', len(ret)) + return ret + + +def list_snapshots(config_path=_DEFAULT_CONFIG_PATH, sort_by_time=False): + ''' + Get a list of all the snapshots. + + :param str config_path: The path to the configuration file for the aptly instance. + :param bool sort_by_time: Whether to sort by creation time instead of by name. + + :return: A list of the snapshot names. + :rtype: list + + CLI Example: + + .. code-block:: bash + + salt '*' aptly.list_snapshots + ''' + _validate_config(config_path) + + cmd = ['snapshot', 'list', '-config={}'.format(config_path), '-raw=true'] + + if sort_by_time: + cmd.append('-sort=time') + else: + cmd.append('-sort=name') + + cmd_ret = _cmd_run(cmd) + + ret = [line.strip() for line in cmd_ret.splitlines()] + + _LOG.debug('Found snapshots: %s', len(ret)) + return ret + + +def cleanup_db(config_path=_DEFAULT_CONFIG_PATH, dry_run=False): + ''' + Remove data regarding unreferenced packages and delete files in the package pool that + are no longer being used by packages. + + :param bool dry_run: Report potential changes without making any changes. 
+ + :return: A dictionary of the package keys and files that were removed. + :rtype: dict + + CLI Example: + + .. code-block:: bash + + salt '*' aptly.cleanup_db + ''' + _validate_config(config_path) + + ret = {'deleted_keys': list(), + 'deleted_files': list()} + + cmd = ['db', 'cleanup', '-config={}'.format(config_path), + '-dry-run={}'.format(str(dry_run).lower()), + '-verbose=true'] + + cmd_ret = _cmd_run(cmd) + + type_pattern = r'^List\s+[\w\s]+(?P(file|key)s)[\w\s]+:$' + list_pattern = r'^\s+-\s+(?P.*)$' + current_block = None + + for line in cmd_ret.splitlines(): + if current_block: + match = re.search(list_pattern, line) + if match: + package_type = 'deleted_{}'.format(current_block) + ret[package_type].append(match.group('package')) + else: + current_block = None + # Intentionally not using an else here, in case of a situation where + # the next list header might be bordered by the previous list. + if not current_block: + match = re.search(type_pattern, line) + if match: + current_block = match.group('package_type') + + _LOG.debug('Package keys identified for deletion: %s', len(ret['deleted_keys'])) + _LOG.debug('Package files identified for deletion: %s', len(ret['deleted_files'])) + return ret From 2ae911fa5eb627dc909495c7a056d327a2dcd70a Mon Sep 17 00:00:00 2001 From: m03 Date: Mon, 28 Aug 2017 01:04:39 -0700 Subject: [PATCH 234/639] Improved list_repos functionality --- salt/modules/aptly.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/salt/modules/aptly.py b/salt/modules/aptly.py index a8d9e921ef..f2d7c3f9f0 100644 --- a/salt/modules/aptly.py +++ b/salt/modules/aptly.py @@ -145,14 +145,15 @@ def get_config(config_path=_DEFAULT_CONFIG_PATH): return json.loads(cmd_ret) -def list_repos(config_path=_DEFAULT_CONFIG_PATH): +def list_repos(config_path=_DEFAULT_CONFIG_PATH, with_packages=False): ''' - Get a list of all the repos. + List all of the repos. 
:param str config_path: The path to the configuration file for the aptly instance. + :param bool with_packages: Return a list of packages in the repo. - :return: A list of the repository names. - :rtype: list + :return: A dictionary of the repositories. + :rtype: dict CLI Example: @@ -162,13 +163,17 @@ def list_repos(config_path=_DEFAULT_CONFIG_PATH): ''' _validate_config(config_path) + ret = dict() cmd = ['repo', 'list', '-config={}'.format(config_path), '-raw=true'] cmd_ret = _cmd_run(cmd) + repos = [line.strip() for line in cmd_ret.splitlines()] - ret = [line.strip() for line in cmd_ret.splitlines()] + _LOG.debug('Found repositories: %s', len(repos)) - _LOG.debug('Found repositories: %s', len(ret)) + for name in repos: + ret[name] = get_repo(name=name, config_path=config_path, + with_packages=with_packages) return ret @@ -397,7 +402,6 @@ def list_mirrors(config_path=_DEFAULT_CONFIG_PATH): cmd = ['mirror', 'list', '-config={}'.format(config_path), '-raw=true'] cmd_ret = _cmd_run(cmd) - ret = [line.strip() for line in cmd_ret.splitlines()] _LOG.debug('Found mirrors: %s', len(ret)) @@ -424,7 +428,6 @@ def list_published(config_path=_DEFAULT_CONFIG_PATH): cmd = ['publish', 'list', '-config={}'.format(config_path), '-raw=true'] cmd_ret = _cmd_run(cmd) - ret = [line.strip() for line in cmd_ret.splitlines()] _LOG.debug('Found published repositories: %s', len(ret)) @@ -457,7 +460,6 @@ def list_snapshots(config_path=_DEFAULT_CONFIG_PATH, sort_by_time=False): cmd.append('-sort=name') cmd_ret = _cmd_run(cmd) - ret = [line.strip() for line in cmd_ret.splitlines()] _LOG.debug('Found snapshots: %s', len(ret)) From 7071e3425cca31048a7a45b945d8ddcb8e76a562 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Mon, 28 Aug 2017 13:55:22 +0200 Subject: [PATCH 235/639] Checking install_time for None value RedHat systems might in some cases return None as install_time, which would cause a ValueError. We are checking for None now. 
install_date and install_date_time are being set to None in that case. --- salt/utils/pkg/rpm.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/salt/utils/pkg/rpm.py b/salt/utils/pkg/rpm.py index 599b37ac84..41fa2ec92b 100644 --- a/salt/utils/pkg/rpm.py +++ b/salt/utils/pkg/rpm.py @@ -104,8 +104,12 @@ def parse_pkginfo(line, osarch=None): if epoch not in ('(none)', '0'): version = ':'.join((epoch, version)) - install_date = datetime.datetime.utcfromtimestamp(int(install_time)).isoformat() + "Z" - install_date_time_t = int(install_time) + if install_time: + install_date = datetime.datetime.utcfromtimestamp(int(install_time)).isoformat() + "Z" + install_date_time_t = int(install_time) + else: + install_date = None + install_date_time_t = None return pkginfo(name, version, arch, repoid, install_date, install_date_time_t) From bd76a870ce2e3844cac5dd115de5fbb13adadba8 Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Mon, 28 Aug 2017 09:34:51 +0200 Subject: [PATCH 236/639] Dunder vars are now defined via setup_loader_modules --- tests/unit/modules/test_kubernetes.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/tests/unit/modules/test_kubernetes.py b/tests/unit/modules/test_kubernetes.py index 6efc4d790d..eda8a0a91b 100644 --- a/tests/unit/modules/test_kubernetes.py +++ b/tests/unit/modules/test_kubernetes.py @@ -7,6 +7,7 @@ from __future__ import absolute_import # Import Salt Testing Libs +from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf from tests.support.mock import ( Mock, @@ -17,16 +18,20 @@ from tests.support.mock import ( from salt.modules import kubernetes -kubernetes.__salt__ = {} -kubernetes.__grains__ = {} -kubernetes.__context__ = {} - @skipIf(NO_MOCK, NO_MOCK_REASON) -class KubernetesTestCase(TestCase): +class KubernetesTestCase(TestCase, LoaderModuleMockMixin): ''' Test cases for salt.modules.kubernetes ''' + + def 
setup_loader_modules(self): + return { + kubernetes: { + '__salt__': {}, + } + } + def test_nodes(self): ''' Test node listing. @@ -82,7 +87,8 @@ class KubernetesTestCase(TestCase): {'items': [{'metadata': {'name': 'mock_pod_name'}}]}} ) self.assertEqual(kubernetes.pods(), ['mock_pod_name']) - self.assertTrue(kubernetes.kubernetes.client.CoreV1Api().list_namespaced_pod().to_dict.called) + self.assertTrue(kubernetes.kubernetes.client.CoreV1Api(). + list_namespaced_pod().to_dict.called) def test_delete_deployments(self): ''' @@ -97,7 +103,8 @@ class KubernetesTestCase(TestCase): ) self.assertEqual(kubernetes.delete_deployment("test"), {}) self.assertTrue( - kubernetes.kubernetes.client.ExtensionsV1beta1Api().delete_namespaced_deployment().to_dict.called) + kubernetes.kubernetes.client.ExtensionsV1beta1Api(). + delete_namespaced_deployment().to_dict.called) def test_create_deployments(self): ''' @@ -109,6 +116,8 @@ class KubernetesTestCase(TestCase): mock_kubernetes_lib.client.ExtensionsV1beta1Api.return_value = Mock( **{"create_namespaced_deployment.return_value.to_dict.return_value": {}} ) - self.assertEqual(kubernetes.create_deployment("test", "default", {}, {}, None, None, None), {}) + self.assertEqual(kubernetes.create_deployment("test", "default", {}, {}, + None, None, None), {}) self.assertTrue( - kubernetes.kubernetes.client.ExtensionsV1beta1Api().create_namespaced_deployment().to_dict.called) + kubernetes.kubernetes.client.ExtensionsV1beta1Api(). 
+ create_namespaced_deployment().to_dict.called) From df18a898368d887469acd9032f8888dedd865780 Mon Sep 17 00:00:00 2001 From: Nicole Thomas Date: Mon, 28 Aug 2017 09:21:52 -0400 Subject: [PATCH 237/639] Lint: Remove unused import --- tests/unit/states/test_docker_image.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/unit/states/test_docker_image.py b/tests/unit/states/test_docker_image.py index 868925ba3d..e96dbc9f9d 100644 --- a/tests/unit/states/test_docker_image.py +++ b/tests/unit/states/test_docker_image.py @@ -17,7 +17,6 @@ from tests.support.mock import ( ) # Import Salt Libs -from salt.exceptions import CommandExecutionError import salt.modules.dockermod as docker_mod import salt.states.docker_image as docker_state From a8b786d0f203f25a2d9023b470d3d300a5313561 Mon Sep 17 00:00:00 2001 From: rallytime Date: Mon, 28 Aug 2017 09:37:06 -0400 Subject: [PATCH 238/639] Reduce the number of days an issue is stale by 25 --- .github/stale.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/stale.yml b/.github/stale.yml index 673d73dd82..0a5be0ea46 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -1,8 +1,8 @@ # Probot Stale configuration file # Number of days of inactivity before an issue becomes stale -# 1100 is approximately 3 years -daysUntilStale: 1100 +# 1075 is approximately 2 years and 11 months +daysUntilStale: 1075 # Number of days of inactivity before a stale issue is closed daysUntilClose: 7 From 8f593b0b025d91e84d951701a7e3541ed0539234 Mon Sep 17 00:00:00 2001 From: lomeroe Date: Mon, 28 Aug 2017 09:24:25 -0500 Subject: [PATCH 239/639] verify that files exist before trying to remove them, win_file.remove raises an exception if the file does not exist --- salt/modules/win_lgpo.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index 69986e77c0..b0f9419fc8 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py 
@@ -2832,7 +2832,8 @@ def _findOptionValueInSeceditFile(option): _reader = codecs.open(_tfile, 'r', encoding='utf-16') _secdata = _reader.readlines() _reader.close() - _ret = __salt__['file.remove'](_tfile) + if __salt__['file.file_exists'](_tfile): + _ret = __salt__['file.remove'](_tfile) for _line in _secdata: if _line.startswith(option): return True, _line.split('=')[1].strip() @@ -2853,16 +2854,20 @@ def _importSeceditConfig(infdata): _tInfFile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'), 'salt-secedit-config-{0}.inf'.format(_d)) # make sure our temp files don't already exist - _ret = __salt__['file.remove'](_tSdbfile) - _ret = __salt__['file.remove'](_tInfFile) + if __salt__['file.file_exists'](_tSdbfile): + _ret = __salt__['file.remove'](_tSdbfile) + if __salt__['file.file_exists'](_tInfFile): + _ret = __salt__['file.remove'](_tInfFile) # add the inf data to the file, win_file sure could use the write() function _ret = __salt__['file.touch'](_tInfFile) _ret = __salt__['file.append'](_tInfFile, infdata) # run secedit to make the change _ret = __salt__['cmd.run']('secedit /configure /db {0} /cfg {1}'.format(_tSdbfile, _tInfFile)) # cleanup our temp files - _ret = __salt__['file.remove'](_tSdbfile) - _ret = __salt__['file.remove'](_tInfFile) + if __salt__['file.file_exists'](_tSdbfile): + _ret = __salt__['file.remove'](_tSdbfile) + if __salt__['file.file_exists'](_tInfFile): + _ret = __salt__['file.remove'](_tInfFile) return True except Exception as e: log.debug('error occurred while trying to import secedit data') From 375b1faa92aa8bd88f6af930fd27bc61e89b43fd Mon Sep 17 00:00:00 2001 From: rallytime Date: Mon, 28 Aug 2017 10:37:05 -0400 Subject: [PATCH 240/639] Remove deprecation warnings for "env" Removal of these deprecation warnings has no behavioral effect, as support for "env" instead of "saltenv" in these cases has already been removed. Only the warning text will no longer display to users. 
--- doc/topics/releases/oxygen.rst | 6 +++- salt/fileclient.py | 7 +---- salt/fileserver/__init__.py | 57 +++++----------------------------- salt/fileserver/hgfs.py | 35 +++------------------ salt/fileserver/minionfs.py | 21 ++----------- salt/fileserver/roots.py | 35 +++------------------ salt/fileserver/s3fs.py | 35 +++------------------ salt/fileserver/svnfs.py | 21 ++----------- salt/modules/cmdmod.py | 14 ++------- salt/modules/debconfmod.py | 7 +---- salt/modules/pkg_resource.py | 7 +---- salt/modules/state.py | 42 ++++--------------------- salt/states/file.py | 28 +++-------------- salt/template.py | 7 +---- salt/utils/gitfs.py | 28 +++-------------- salt/utils/pydsl.py | 7 +---- salt/utils/url.py | 7 +---- tests/unit/test_pydsl.py | 7 +---- 18 files changed, 57 insertions(+), 314 deletions(-) diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index 32124157b0..17110a8e2d 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -112,7 +112,7 @@ populated as a list of domain roles. Beacon configuration changes ----------------------------------------- +---------------------------- In order to remain consistent and to align with other Salt components such as states, support for configuring beacons using dictionary based configuration has been deprecated @@ -729,3 +729,7 @@ Other Miscellaneous Deprecations The ``version.py`` file had the following changes: - The ``rc_info`` function was removed. Please use ``pre_info`` instead. + +Warnings for moving away from the ``env`` option were removed. ``saltenv`` should be +used instead. The removal of these warnings does not have a behavior change. Only +the warning text was removed. 
diff --git a/salt/fileclient.py b/salt/fileclient.py index 0c0050ebc0..eaaf7ddb91 100644 --- a/salt/fileclient.py +++ b/salt/fileclient.py @@ -744,12 +744,7 @@ class Client(object): Cache a file then process it as a template ''' if u'env' in kwargs: - salt.utils.versions.warn_until( - u'Oxygen', - u'Parameter \'env\' has been detected in the argument list. This ' - u'parameter is no longer used and has been replaced by \'saltenv\' ' - u'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop(u'env') kwargs[u'saltenv'] = saltenv diff --git a/salt/fileserver/__init__.py b/salt/fileserver/__init__.py index c3f046fc98..9ba52e6e15 100644 --- a/salt/fileserver/__init__.py +++ b/salt/fileserver/__init__.py @@ -553,12 +553,7 @@ class Fileserver(object): kwargs[args[0]] = args[1] if 'env' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop('env') if 'saltenv' in kwargs: saltenv = kwargs.pop('saltenv') @@ -583,12 +578,7 @@ class Fileserver(object): 'dest': ''} if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') if 'path' not in load or 'loc' not in load or 'saltenv' not in load: @@ -609,13 +599,7 @@ class Fileserver(object): Common code for hashing and stating files ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. 
' - 'This parameter is no longer used and has been replaced by ' - '\'saltenv\' as of Salt 2016.11.0. This warning will be removed ' - 'in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') if 'path' not in load or 'saltenv' not in load: @@ -656,12 +640,7 @@ class Fileserver(object): Deletes the file_lists cache files ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') saltenv = load.get('saltenv', []) @@ -738,12 +717,7 @@ class Fileserver(object): Return a list of files from the dominant environment ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') ret = set() @@ -769,12 +743,7 @@ class Fileserver(object): List all emptydirs in the given environment ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') ret = set() @@ -800,12 +769,7 @@ class Fileserver(object): List all directories in the given environment ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' 
- ) + # "env" is not supported; Use "saltenv". load.pop('env') ret = set() @@ -831,12 +795,7 @@ class Fileserver(object): Return a list of symlinked files and dirs ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') ret = {} diff --git a/salt/fileserver/hgfs.py b/salt/fileserver/hgfs.py index bf7c82dcd8..e386a9e6e8 100644 --- a/salt/fileserver/hgfs.py +++ b/salt/fileserver/hgfs.py @@ -736,12 +736,7 @@ def serve_file(load, fnd): Return a chunk from a file based on the data received ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', @@ -770,12 +765,7 @@ def file_hash(load, fnd): Return a file hash, the hash type is set in the master config file ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): @@ -804,12 +794,7 @@ def _file_lists(load, form): Return a dict containing the file lists for files and dirs ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. 
This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') @@ -852,12 +837,7 @@ def _get_file_list(load): Get a list of all files on the file server in a specified environment ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): @@ -897,12 +877,7 @@ def _get_dir_list(load): Get a list of all directories on the master ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): diff --git a/salt/fileserver/minionfs.py b/salt/fileserver/minionfs.py index 52a8fd0616..292bf0f85e 100644 --- a/salt/fileserver/minionfs.py +++ b/salt/fileserver/minionfs.py @@ -165,12 +165,7 @@ def file_hash(load, fnd): ret = {} if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". 
load.pop('env') if load['saltenv'] not in envs(): @@ -235,12 +230,7 @@ def file_list(load): Return a list of all files on the file server in a specified environment ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') if load['saltenv'] not in envs(): @@ -319,12 +309,7 @@ def dir_list(load): - source-minion/absolute/path ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') if load['saltenv'] not in envs(): diff --git a/salt/fileserver/roots.py b/salt/fileserver/roots.py index 9aefb46e25..c795163b9d 100644 --- a/salt/fileserver/roots.py +++ b/salt/fileserver/roots.py @@ -40,12 +40,7 @@ def find_file(path, saltenv='base', **kwargs): Search the environment for the relative path. ''' if 'env' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop('env') path = os.path.normpath(path) @@ -117,12 +112,7 @@ def serve_file(load, fnd): Return a chunk from a file based on the data received ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. 
This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', @@ -218,12 +208,7 @@ def file_hash(load, fnd): Return a file hash, the hash type is set in the master config file ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') if 'path' not in load or 'saltenv' not in load: @@ -298,12 +283,7 @@ def _file_lists(load, form): Return a dict containing the file lists for files, dirs, emtydirs and symlinks ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') if load['saltenv'] not in __opts__['file_roots']: @@ -444,12 +424,7 @@ def symlink_list(load): Return a dict of all symlinks based on a given path on the Master ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') ret = {} diff --git a/salt/fileserver/s3fs.py b/salt/fileserver/s3fs.py index 7f262aa3bb..04f0b5e51c 100644 --- a/salt/fileserver/s3fs.py +++ b/salt/fileserver/s3fs.py @@ -126,12 +126,7 @@ def find_file(path, saltenv='base', **kwargs): is missing, or if the MD5 does not match. 
''' if 'env' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop('env') fnd = {'bucket': None, @@ -168,12 +163,7 @@ def file_hash(load, fnd): Return an MD5 file hash ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') ret = {} @@ -201,12 +191,7 @@ def serve_file(load, fnd): Return a chunk from a file based on the data received ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', @@ -245,12 +230,7 @@ def file_list(load): Return a list of all files on the file server in a specified environment ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') ret = [] @@ -286,12 +266,7 @@ def dir_list(load): Return a list of all directories on the master ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. 
This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') ret = [] diff --git a/salt/fileserver/svnfs.py b/salt/fileserver/svnfs.py index ac7681ec23..2ba3cc227e 100644 --- a/salt/fileserver/svnfs.py +++ b/salt/fileserver/svnfs.py @@ -631,12 +631,7 @@ def serve_file(load, fnd): Return a chunk from a file based on the data received ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', @@ -665,12 +660,7 @@ def file_hash(load, fnd): Return a file hash, the hash type is set in the master config file ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): @@ -723,12 +713,7 @@ def _file_lists(load, form): Return a dict containing the file lists for files, dirs, emptydirs and symlinks ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". 
load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index d961b8490c..653e9a170b 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -2120,12 +2120,7 @@ def script(source, ) if '__env__' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'__env__\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop('__env__') if salt.utils.platform.is_windows() and runas and cwd is None: @@ -2336,12 +2331,7 @@ def script_retcode(source, salt '*' cmd.script_retcode salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' ''' if '__env__' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'__env__\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop('__env__') return script(source=source, diff --git a/salt/modules/debconfmod.py b/salt/modules/debconfmod.py index 2185b2af21..18e19d1cce 100644 --- a/salt/modules/debconfmod.py +++ b/salt/modules/debconfmod.py @@ -186,12 +186,7 @@ def set_file(path, saltenv='base', **kwargs): salt '*' debconf.set_file salt://pathto/pkg.selections ''' if '__env__' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'__env__\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". 
kwargs.pop('__env__') path = __salt__['cp.cache_file'](path, saltenv) diff --git a/salt/modules/pkg_resource.py b/salt/modules/pkg_resource.py index 1e156f7e42..c6a07495fb 100644 --- a/salt/modules/pkg_resource.py +++ b/salt/modules/pkg_resource.py @@ -106,12 +106,7 @@ def parse_targets(name=None, salt '*' pkg_resource.parse_targets ''' if '__env__' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'__env__\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop('__env__') if __grains__['os'] == 'MacOS' and sources: diff --git a/salt/modules/state.py b/salt/modules/state.py index c585489419..5f2677b39f 100644 --- a/salt/modules/state.py +++ b/salt/modules/state.py @@ -397,12 +397,7 @@ def template(tem, queue=False, **kwargs): salt '*' state.template '' ''' if 'env' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop('env') conflict = _check_queue(queue, kwargs) @@ -839,12 +834,7 @@ def highstate(test=None, queue=False, **kwargs): opts['test'] = _get_test_value(test, **kwargs) if 'env' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". 
kwargs.pop('env') if 'saltenv' in kwargs: @@ -1006,12 +996,7 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs): ''' concurrent = kwargs.get('concurrent', False) if 'env' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop('env') # Modification to __opts__ lost after this if-else @@ -1489,12 +1474,7 @@ def show_low_sls(mods, test=None, queue=False, **kwargs): salt '*' state.show_low_sls foo saltenv=dev ''' if 'env' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop('env') conflict = _check_queue(queue, kwargs) @@ -1580,12 +1560,7 @@ def show_sls(mods, test=None, queue=False, **kwargs): salt '*' state.show_sls core,edit.vim dev ''' if 'env' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop('env') conflict = _check_queue(queue, kwargs) @@ -1656,12 +1631,7 @@ def show_top(queue=False, **kwargs): salt '*' state.show_top ''' if 'env' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". 
kwargs.pop('env') conflict = _check_queue(queue, kwargs) diff --git a/salt/states/file.py b/salt/states/file.py index 8aea2cfefa..7eb23deb87 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -2075,12 +2075,7 @@ def managed(name, - win_inheritance: False ''' if 'env' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop('env') name = os.path.expanduser(name) @@ -3208,12 +3203,7 @@ def recurse(name, option is usually not needed except in special circumstances. ''' if 'env' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop('env') name = os.path.expanduser(sdecode(name)) @@ -5029,12 +5019,7 @@ def patch(name, hash_ = kwargs.pop('hash', None) if 'env' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop('env') name = os.path.expanduser(name) @@ -5687,12 +5672,7 @@ def serialize(name, } ''' if 'env' in kwargs: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". 
kwargs.pop('env') name = os.path.expanduser(name) diff --git a/salt/template.py b/salt/template.py index 7a3d00ebfc..c2a3de7582 100644 --- a/salt/template.py +++ b/salt/template.py @@ -51,12 +51,7 @@ def compile_template(template, log.debug(u'compile template: %s', template) if u'env' in kwargs: - salt.utils.versions.warn_until( - u'Oxygen', - u'Parameter \'env\' has been detected in the argument list. This ' - u'parameter is no longer used and has been replaced by \'saltenv\' ' - u'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kwargs.pop(u'env') if template != u':string:': diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py index 8c7750c596..e7c4caf94e 100644 --- a/salt/utils/gitfs.py +++ b/salt/utils/gitfs.py @@ -2636,12 +2636,7 @@ class GitFS(GitBase): Return a chunk from a file based on the data received ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', @@ -2676,12 +2671,7 @@ class GitFS(GitBase): Return a file hash, the hash type is set in the master config file ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): @@ -2710,12 +2700,7 @@ class GitFS(GitBase): Return a dict containing the file lists for files and dirs ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. 
This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') if not os.path.isdir(self.file_list_cachedir): @@ -2784,12 +2769,7 @@ class GitFS(GitBase): Return a dict of all symlinks based on a given path in the repo ''' if 'env' in load: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". load.pop('env') if not salt.utils.stringutils.is_hex(load['saltenv']) \ diff --git a/salt/utils/pydsl.py b/salt/utils/pydsl.py index 1587e61d93..50efce534a 100644 --- a/salt/utils/pydsl.py +++ b/salt/utils/pydsl.py @@ -140,12 +140,7 @@ class Sls(object): def include(self, *sls_names, **kws): if 'env' in kws: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kws.pop('env') saltenv = kws.get('saltenv', self.saltenv) diff --git a/salt/utils/url.py b/salt/utils/url.py index ccf112f5d6..ff02517f9d 100644 --- a/salt/utils/url.py +++ b/salt/utils/url.py @@ -27,12 +27,7 @@ def parse(url): resource = url.split('salt://', 1)[-1] if '?env=' in resource: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the salt:// URL. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". 
path, saltenv = resource.split('?env=', 1)[0], None elif '?saltenv=' in resource: path, saltenv = resource.split('?saltenv=', 1) diff --git a/tests/unit/test_pydsl.py b/tests/unit/test_pydsl.py index 20087ad17a..3500beb16b 100644 --- a/tests/unit/test_pydsl.py +++ b/tests/unit/test_pydsl.py @@ -91,12 +91,7 @@ class PyDSLRendererTestCase(CommonTestCaseBoilerplate): def render_sls(self, content, sls='', saltenv='base', **kws): if 'env' in kws: - salt.utils.versions.warn_until( - 'Oxygen', - 'Parameter \'env\' has been detected in the argument list. This ' - 'parameter is no longer used and has been replaced by \'saltenv\' ' - 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' - ) + # "env" is not supported; Use "saltenv". kws.pop('env') return self.HIGHSTATE.state.rend['pydsl']( From 324cfd8d1e03c23d5ee99b575f49d65b934857fd Mon Sep 17 00:00:00 2001 From: lomeroe Date: Mon, 28 Aug 2017 09:39:51 -0500 Subject: [PATCH 241/639] correcting bad format statement in search for policy to be disabled (fix for #43166) verify that file exists before attempting to remove (fix for commits from #39773) --- salt/modules/win_lgpo.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index 5edb672c90..2b834227e9 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -2835,7 +2835,8 @@ def _findOptionValueInSeceditFile(option): _reader = codecs.open(_tfile, 'r', encoding='utf-16') _secdata = _reader.readlines() _reader.close() - _ret = __salt__['file.remove'](_tfile) + if __salt__['file.file_exists'](_tfile): + _ret = __salt__['file.remove'](_tfile) for _line in _secdata: if _line.startswith(option): return True, _line.split('=')[1].strip() @@ -2856,16 +2857,20 @@ def _importSeceditConfig(infdata): _tInfFile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'), 'salt-secedit-config-{0}.inf'.format(_d)) # make sure our temp files don't already exist - _ret = 
__salt__['file.remove'](_tSdbfile) - _ret = __salt__['file.remove'](_tInfFile) + if __salt__['file.file_exists'](_tSdbfile): + _ret = __salt__['file.remove'](_tSdbfile) + if __salt__['file.file_exists'](_tInfFile): + _ret = __salt__['file.remove'](_tInfFile) # add the inf data to the file, win_file sure could use the write() function _ret = __salt__['file.touch'](_tInfFile) _ret = __salt__['file.append'](_tInfFile, infdata) # run secedit to make the change _ret = __salt__['cmd.run']('secedit /configure /db {0} /cfg {1}'.format(_tSdbfile, _tInfFile)) # cleanup our temp files - _ret = __salt__['file.remove'](_tSdbfile) - _ret = __salt__['file.remove'](_tInfFile) + if __salt__['file.file_exists'](_tSdbfile): + _ret = __salt__['file.remove'](_tSdbfile) + if __salt__['file.file_exists'](_tInfFile): + _ret = __salt__['file.remove'](_tInfFile) return True except Exception as e: log.debug('error occurred while trying to import secedit data') @@ -4174,8 +4179,6 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, existing_data = '' base_policy_settings = {} policy_data = _policy_info() - #//{0}:policy[@displayName = "{1}" and (@class = "Both" or @class = "{2}") ] - #policySearchXpath = etree.XPath('//*[@ns1:id = $id or @ns1:name = $id]') policySearchXpath = '//ns1:*[@id = "{0}" or @name = "{0}"]' try: if admx_policy_definitions is None or adml_policy_resources is None: @@ -4206,8 +4209,7 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, this_valuename = None if str(base_policy_settings[adm_namespace][admPolicy]).lower() == 'disabled': log.debug('time to disable {0}'.format(admPolicy)) - #this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy, namespaces={'ns1': adm_namespace}) - this_policy = admx_policy_definitions.xpath(policySearchXpath.format('ns1', admPolicy), namespaces={'ns1': adm_namespace}) + this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace}) if this_policy: this_policy = 
this_policy[0] if 'class' in this_policy.attrib: @@ -4318,7 +4320,6 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, log.error(msg.format(this_policy.attrib)) else: log.debug('time to enable and set the policy "{0}"'.format(admPolicy)) - #this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy, namespaces={'ns1': adm_namespace}) this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace}) log.debug('found this_policy == {0}'.format(this_policy)) if this_policy: From b6c25978ff6d794c93e6c9f91c3a6aaaa962d628 Mon Sep 17 00:00:00 2001 From: lomeroe Date: Mon, 28 Aug 2017 09:39:51 -0500 Subject: [PATCH 242/639] correcting bad format statement in search for policy to be disabled (fix for #43166) verify that file exists before attempting to remove (fix for commits from #39773) --- salt/modules/win_lgpo.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index 3a715a19a6..cf2f5178ab 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -2834,7 +2834,8 @@ def _findOptionValueInSeceditFile(option): _reader = codecs.open(_tfile, 'r', encoding='utf-16') _secdata = _reader.readlines() _reader.close() - _ret = __salt__['file.remove'](_tfile) + if __salt__['file.file_exists'](_tfile): + _ret = __salt__['file.remove'](_tfile) for _line in _secdata: if _line.startswith(option): return True, _line.split('=')[1].strip() @@ -2855,16 +2856,20 @@ def _importSeceditConfig(infdata): _tInfFile = '{0}\\{1}'.format(__salt__['config.get']('cachedir'), 'salt-secedit-config-{0}.inf'.format(_d)) # make sure our temp files don't already exist - _ret = __salt__['file.remove'](_tSdbfile) - _ret = __salt__['file.remove'](_tInfFile) + if __salt__['file.file_exists'](_tSdbfile): + _ret = __salt__['file.remove'](_tSdbfile) + if __salt__['file.file_exists'](_tInfFile): + _ret = __salt__['file.remove'](_tInfFile) # add 
the inf data to the file, win_file sure could use the write() function _ret = __salt__['file.touch'](_tInfFile) _ret = __salt__['file.append'](_tInfFile, infdata) # run secedit to make the change _ret = __salt__['cmd.run']('secedit /configure /db {0} /cfg {1}'.format(_tSdbfile, _tInfFile)) # cleanup our temp files - _ret = __salt__['file.remove'](_tSdbfile) - _ret = __salt__['file.remove'](_tInfFile) + if __salt__['file.file_exists'](_tSdbfile): + _ret = __salt__['file.remove'](_tSdbfile) + if __salt__['file.file_exists'](_tInfFile): + _ret = __salt__['file.remove'](_tInfFile) return True except Exception as e: log.debug('error occurred while trying to import secedit data') @@ -4173,8 +4178,6 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, existing_data = '' base_policy_settings = {} policy_data = _policy_info() - #//{0}:policy[@displayName = "{1}" and (@class = "Both" or @class = "{2}") ] - #policySearchXpath = etree.XPath('//*[@ns1:id = $id or @ns1:name = $id]') policySearchXpath = '//ns1:*[@id = "{0}" or @name = "{0}"]' try: if admx_policy_definitions is None or adml_policy_resources is None: @@ -4205,8 +4208,7 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, this_valuename = None if str(base_policy_settings[adm_namespace][admPolicy]).lower() == 'disabled': log.debug('time to disable {0}'.format(admPolicy)) - #this_policy = policySearchXpath(admx_policy_definitions, id=admPolicy, namespaces={'ns1': adm_namespace}) - this_policy = admx_policy_definitions.xpath(policySearchXpath.format('ns1', admPolicy), namespaces={'ns1': adm_namespace}) + this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace}) if this_policy: this_policy = this_policy[0] if 'class' in this_policy.attrib: @@ -4317,7 +4319,6 @@ def _writeAdminTemplateRegPolFile(admtemplate_data, log.error(msg.format(this_policy.attrib)) else: log.debug('time to enable and set the policy "{0}"'.format(admPolicy)) - #this_policy = 
policySearchXpath(admx_policy_definitions, id=admPolicy, namespaces={'ns1': adm_namespace}) this_policy = admx_policy_definitions.xpath(policySearchXpath.format(admPolicy), namespaces={'ns1': adm_namespace}) log.debug('found this_policy == {0}'.format(this_policy)) if this_policy: From e44e15540d66706d246c225c03e0ca2d99c96e79 Mon Sep 17 00:00:00 2001 From: rallytime Date: Mon, 28 Aug 2017 11:36:33 -0400 Subject: [PATCH 243/639] lint: remove unused import --- salt/utils/pydsl.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/utils/pydsl.py b/salt/utils/pydsl.py index 50efce534a..fb62598ba1 100644 --- a/salt/utils/pydsl.py +++ b/salt/utils/pydsl.py @@ -88,7 +88,6 @@ from __future__ import absolute_import from uuid import uuid4 as _uuid # Import salt libs -import salt.utils.versions from salt.utils.odict import OrderedDict from salt.state import HighState From 4516e13112fa5fb62b698d5d5ab341770ff51442 Mon Sep 17 00:00:00 2001 From: m03 Date: Mon, 28 Aug 2017 08:55:12 -0700 Subject: [PATCH 244/639] Leverage stringutils --- salt/modules/aptly.py | 23 ++--------------------- salt/utils/stringutils.py | 9 +++++++++ 2 files changed, 11 insertions(+), 21 deletions(-) diff --git a/salt/modules/aptly.py b/salt/modules/aptly.py index f2d7c3f9f0..ab6acdd029 100644 --- a/salt/modules/aptly.py +++ b/salt/modules/aptly.py @@ -14,7 +14,7 @@ import re # Import salt libs from salt.exceptions import SaltInvocationError import salt.utils.path - +import salt.utils.stringutils as stringutils _DEFAULT_CONFIG_PATH = '/etc/aptly.conf' _LOG = logging.getLogger(__name__) @@ -32,25 +32,6 @@ def __virtual__(): return (False, 'The aptly binaries required cannot be found or are not installed.') -def _cast_if_numeric(value): - ''' - Determine if the provided value is numeric. - - :return: The converted or passed value. 
- :rtype: float|int|str - ''' - try: - float_value = float(str(value)) - except ValueError: - if value: - return str(value) - return None - - if float_value.is_integer(): - return int(float_value) - return float_value - - def _cmd_run(cmd): ''' Run the aptly command. @@ -204,7 +185,7 @@ def get_repo(name, config_path=_DEFAULT_CONFIG_PATH, with_packages=False): items = line.split(':') key = items[0].lower().replace('default', '').strip() key = ' '.join(key.split()).replace(' ', '_') - ret[key] = _cast_if_numeric(items[1].strip()) + ret[key] = stringutils.to_none(stringutils.to_num(items[1].strip())) except (AttributeError, IndexError): # If the line doesn't have the separator or is otherwise invalid, skip it. _LOG.debug('Skipping line: %s', line) diff --git a/salt/utils/stringutils.py b/salt/utils/stringutils.py index d3a5fb79c3..fc194930b9 100644 --- a/salt/utils/stringutils.py +++ b/salt/utils/stringutils.py @@ -89,6 +89,15 @@ def to_num(text): return text +def to_none(text): + ''' + Convert a string to None if the string is empty or contains only spaces. + ''' + if str(text).strip(): + return text + return None + + def is_quoted(value): ''' Return a single or double quote, if a string is wrapped in extra quotes. From 133824e56980a58ab829894363b97c43c9cba42d Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Mon, 28 Aug 2017 08:59:08 -0700 Subject: [PATCH 245/639] adding utils/mount.py --- salt/utils/mount.py | 73 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 salt/utils/mount.py diff --git a/salt/utils/mount.py b/salt/utils/mount.py new file mode 100644 index 0000000000..86e695bd3c --- /dev/null +++ b/salt/utils/mount.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +''' +Common functions for managing mounts +''' + +# Import python libs +from __future__ import absolute_import +import logging +import os +import yaml + +# Import Salt libs +import salt.utils # Can be removed once is_true is moved +import salt.utils.files +import salt.utils.versions + +from salt.utils.yamldumper import SafeOrderedDumper + +log = logging.getLogger(__name__) + + +def __virtual__(): + ''' + Confine this module to Debian based distros + ''' + return True + + +def _read_file(path): + ''' + Reads and returns the contents of a text file + ''' + try: + with salt.utils.files.fopen(path, 'rb') as contents: + return yaml.safe_load(contents.read()) + except (OSError, IOError): + return {} + + +def get_cache(opts): + ''' + Return the mount cache file location. + ''' + return os.path.join(opts['cachedir'], 'mounts') + + +def read_cache(opts): + ''' + Write the mount cache file. + ''' + cache_file = get_cache(opts) + return _read_file(cache_file) + + +def write_cache(cache, opts): + ''' + Write the mount cache file. 
+ ''' + cache_file = get_cache(opts) + + try: + _cache = salt.utils.stringutils.to_bytes( + yaml.dump( + cache, + Dumper=SafeOrderedDumper + ) + ) + log.debug('=== cache {} ==='.format(_cache)) + with salt.utils.files.fopen(cache_file, 'wb+') as fp_: + fp_.write(_cache) + except (IOError, OSError): + log.error('Failed to cache mounts', + exc_info_on_loglevel=logging.DEBUG) From 56343ac1ca1822f70d711db9119959c26268da22 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Mon, 28 Aug 2017 10:45:53 -0600 Subject: [PATCH 246/639] Add first pass at ip parsing for default gateway This is pretty naive parsing -- route configuration can be extensive, can redirect to other tables, and is very hard to parse. We look for `default via ` and if we can't find it there we give up and just set the grains to True. --- salt/grains/core.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/salt/grains/core.py b/salt/grains/core.py index ac70fb7206..a9f2f4e19d 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -2428,15 +2428,17 @@ def default_gateway(): ''' Populates grains which describe whether a server has a default gateway configured or not. Uses `ip -4 route show` and `ip -6 route show` and greps - for a `default` at the beginning of any line. + for a `default` at the beginning of any line. Assuming the standard + `default via ` format for default gateways, it will also parse out the + ip address of the default gateway, and put it in ip4_gw or ip6_gw. If the `ip` command is unavailable, no grains will be populated. 
List of grains: - ip4_gw: True # True/False if default ipv4 gateway - ip6_gw: True # True/False if default ipv6 gateway - ip_gw: True # True if either of the above is True, False otherwise + ip4_gw: True # ip/True/False if default ipv4 gateway + ip6_gw: True # ip/True/False if default ipv6 gateway + ip_gw: True # True if either of the above is True, False otherwise ''' grains = {} if not salt.utils.which('ip'): @@ -2447,9 +2449,19 @@ def default_gateway(): if __salt__['cmd.run']('ip -4 route show | grep "^default"', python_shell=True): grains['ip_gw'] = True grains['ip4_gw'] = True + try: + gateway_ip = __salt__['cmd.run']('ip -4 route show | grep "^default via"', python_shell=True).split(' ')[2].strip() + grains['ip4_gw'] = gateway_ip if gateway_ip else True + except Exception as exc: + pass if __salt__['cmd.run']('ip -6 route show | grep "^default"', python_shell=True): grains['ip_gw'] = True grains['ip6_gw'] = True + try: + gateway_ip = __salt__['cmd.run']('ip -6 route show | grep "^default via"', python_shell=True).split(' ')[2].strip() + grains['ip6_gw'] = gateway_ip if gateway_ip else True + except Exception as exc: + pass return grains # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 From 7bb9851cd9280b40fe3c994be1d3d8d1ea1cca68 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Mon, 12 Jun 2017 13:51:12 -0500 Subject: [PATCH 247/639] nfs_export: first draft of arguments --- salt/states/nfs_export.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 salt/states/nfs_export.py diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py new file mode 100644 index 0000000000..4ed3b5fd2c --- /dev/null +++ b/salt/states/nfs_export.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +''' +Management of NFS exports +=============================================== + +.. 
code-block:: yaml + +To ensure an NFS export exists: + + add_export: + nfs_export.present: + - name: '/srv/nfs' + - hosts: + - '10.0.2.0/24' + - options: + - 'rw' + +To have different options for different hosts on the same export, define a separate state. + +To ensure an NFS export is absent: + + delete_export: + nfs_export.absent: + - name: '/srv/nfs' + +''' + +#from __future__ import absolute_import From da76b9723a786b69e6e144ff0923b2034a826a3c Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Mon, 12 Jun 2017 16:20:16 -0500 Subject: [PATCH 248/639] nfs_export: second draft of argument layout --- salt/states/nfs_export.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 4ed3b5fd2c..1d18767b80 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -3,22 +3,30 @@ Management of NFS exports =============================================== -.. code-block:: yaml - To ensure an NFS export exists: +.. code-block:: yaml + add_export: nfs_export.present: - name: '/srv/nfs' - - hosts: - - '10.0.2.0/24' - - options: - - 'rw' + - exports: + - hosts: + - '10.0.2.0/24' + - options: + - 'rw' -To have different options for different hosts on the same export, define a separate state. +This creates the following in /etc/exports: + +.. code-block:: bash + /srv/nfs 10.0.2.0/24(rw) + +Any export of the given path will be modified to match the one specified. To ensure an NFS export is absent: +.. 
code-block:: yaml + delete_export: nfs_export.absent: - name: '/srv/nfs' From cc3ada64741f930ef7b1d1eb23d1c60f7004c7bc Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Mon, 12 Jun 2017 16:32:18 -0500 Subject: [PATCH 249/639] nfs_export: third draft, with simple & complex modes --- salt/states/nfs_export.py | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 1d18767b80..d5c49355e0 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -7,14 +7,32 @@ To ensure an NFS export exists: .. code-block:: yaml - add_export: + add_simple_export: + nfs_export.present: + - name: '/srv/nfs' + - hosts: '10.0.2.0/24' + - options: 'rw' + +For more complex exports with multiple groups of hosts: + +.. code-block:: yaml + + add_complex_export: nfs_export.present: - name: '/srv/nfs' - exports: + # First export, same as simple one above - hosts: - - '10.0.2.0/24' - - options: - - 'rw' + - '10.0.2.0/24' + options: + - 'rw' + # Second export + - hosts: + - '192.168.0.0/24' + - '172.19.0.0/16' + options: + - 'ro' + - 'subtree_check' This creates the following in /etc/exports: From 2c7bd81f66928c2d1a29481ca09053b25550c3f4 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Tue, 13 Jun 2017 15:43:05 -0500 Subject: [PATCH 250/639] nfs_export: first draft of absent() --- salt/states/nfs_export.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index d5c49355e0..4656be1447 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -52,3 +52,22 @@ To ensure an NFS export is absent: ''' #from __future__ import absolute_import + +def absent(name, exports='/etc/exports'): + path = name + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': ''} + + old = __salt__['nfs3.list_exports'](exports) + if path in old: + __salt__['nfs3.del_export'](exports, path) + ret['comment'] = 
'Export {0} removed'.format(path) + ret['changes'] = {'path': path} + ret['result'] = True + else: + ret['comment'] = 'Export {0} already absent'.format(path) + ret['result'] = True + + return ret From 4bfa492b67e5ad77ecf21809217e38e284540422 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Tue, 13 Jun 2017 16:03:11 -0500 Subject: [PATCH 251/639] nfs_export.absent: return better Changes and support test=True --- salt/states/nfs_export.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 4656be1447..6bee5ff2ba 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -62,9 +62,14 @@ def absent(name, exports='/etc/exports'): old = __salt__['nfs3.list_exports'](exports) if path in old: + if __opts__['test']: + ret['comment'] = 'Export {0} would be removed'.format(path) + ret['result'] = None + return ret + __salt__['nfs3.del_export'](exports, path) ret['comment'] = 'Export {0} removed'.format(path) - ret['changes'] = {'path': path} + ret['changes'][path] = old[path] ret['result'] = True else: ret['comment'] = 'Export {0} already absent'.format(path) From 2ccb49b541caee6abfdc36dd4dae9ce9dab43c16 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Tue, 13 Jun 2017 16:06:31 -0500 Subject: [PATCH 252/639] nfs_export.absent: add docstring --- salt/states/nfs_export.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 6bee5ff2ba..2f00a4b527 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- ''' -Management of NFS exports +Management of NFS exports =============================================== To ensure an NFS export exists: @@ -54,6 +54,13 @@ To ensure an NFS export is absent: #from __future__ import absolute_import def absent(name, exports='/etc/exports'): + ''' + Ensure that the named path is not exported + + name + The export path to 
remove + ''' + path = name ret = {'name': name, 'changes': {}, From b8f6d66f3c3356f5820c7693218f2e546564a0d9 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Tue, 13 Jun 2017 16:45:57 -0500 Subject: [PATCH 253/639] nfs_export: first draft of docs for present() --- salt/states/nfs_export.py | 71 +++++++++++++++++++++++++++++++++------ 1 file changed, 61 insertions(+), 10 deletions(-) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 2f00a4b527..9ee3e234eb 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -11,25 +11,28 @@ To ensure an NFS export exists: nfs_export.present: - name: '/srv/nfs' - hosts: '10.0.2.0/24' - - options: 'rw' + - options: + - 'rw' -For more complex exports with multiple groups of hosts: +This creates the following in /etc/exports: + +.. code-block:: bash + /srv/nfs 10.0.2.0/24(rw) + +For more complex exports with multiple groups of hosts, use 'clients': .. code-block:: yaml add_complex_export: nfs_export.present: - name: '/srv/nfs' - - exports: + - clients: # First export, same as simple one above - - hosts: - - '10.0.2.0/24' + - hosts: '10.0.2.0/24' options: - 'rw' # Second export - - hosts: - - '192.168.0.0/24' - - '172.19.0.0/16' + - hosts: '*.example.com' options: - 'ro' - 'subtree_check' @@ -37,7 +40,7 @@ For more complex exports with multiple groups of hosts: This creates the following in /etc/exports: .. code-block:: bash - /srv/nfs 10.0.2.0/24(rw) + /srv/nfs 10.0.2.0/24(rw) 192.168.0.0/24,172.19.0.0/16(ro,subtree_check) Any export of the given path will be modified to match the one specified. @@ -51,7 +54,55 @@ To ensure an NFS export is absent: ''' -#from __future__ import absolute_import +def present(name, clients=None, hosts=None, options=None, exports='/etc/exports'): + ''' + Ensure that the named export is present with the given options + + name + The export path to configure + + clients + A list of hosts and the options applied to them. 
+ This option may not be used in combination with + the 'hosts' or 'options' shortcuts. + + ... code-block:: yaml + + - clients: + # First export + - hosts: '10.0.2.0/24' + options: + - 'rw' + # Second export + - hosts: '*.example.com' + options: + - 'ro' + - 'subtree_check' + + hosts + A string matching a number of hosts, for example: + + ... code-block:: yaml + + hosts: '10.0.2.123' + + hosts: '10.0.2.0/24' + + hosts: 'minion1.example.com' + + hosts: '*.example.com' + + options + A list of NFS options, for example: + + ... code-block:: yaml + + options: + - 'rw' + - 'subtree_check' + + ''' + def absent(name, exports='/etc/exports'): ''' From 0844f8b47bb1853c38544e145d5f1568ffcb0d63 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 14 Jun 2017 14:52:09 -0500 Subject: [PATCH 254/639] nfs_export: fix docstring syntax and add to index --- doc/ref/states/all/index.rst | 1 + doc/ref/states/all/salt.states.nfs_export.rst | 6 ++++++ salt/states/nfs_export.py | 10 +++++++--- 3 files changed, 14 insertions(+), 3 deletions(-) create mode 100644 doc/ref/states/all/salt.states.nfs_export.rst diff --git a/doc/ref/states/all/index.rst b/doc/ref/states/all/index.rst index 4803648006..c793d475bf 100644 --- a/doc/ref/states/all/index.rst +++ b/doc/ref/states/all/index.rst @@ -179,6 +179,7 @@ state modules netusers network netyang + nfs_export nftables npm ntp diff --git a/doc/ref/states/all/salt.states.nfs_export.rst b/doc/ref/states/all/salt.states.nfs_export.rst new file mode 100644 index 0000000000..231992626b --- /dev/null +++ b/doc/ref/states/all/salt.states.nfs_export.rst @@ -0,0 +1,6 @@ +====================== +salt.states.nfs_export +====================== + +.. 
automodule:: salt.states.nfs_export + :members: \ No newline at end of file diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 9ee3e234eb..a1fc5116b2 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -17,6 +17,7 @@ To ensure an NFS export exists: This creates the following in /etc/exports: .. code-block:: bash + /srv/nfs 10.0.2.0/24(rw) For more complex exports with multiple groups of hosts, use 'clients': @@ -40,6 +41,7 @@ For more complex exports with multiple groups of hosts, use 'clients': This creates the following in /etc/exports: .. code-block:: bash + /srv/nfs 10.0.2.0/24(rw) 192.168.0.0/24,172.19.0.0/16(ro,subtree_check) Any export of the given path will be modified to match the one specified. @@ -66,7 +68,7 @@ def present(name, clients=None, hosts=None, options=None, exports='/etc/exports' This option may not be used in combination with the 'hosts' or 'options' shortcuts. - ... code-block:: yaml + .. code-block:: yaml - clients: # First export @@ -82,7 +84,7 @@ def present(name, clients=None, hosts=None, options=None, exports='/etc/exports' hosts A string matching a number of hosts, for example: - ... code-block:: yaml + .. code-block:: yaml hosts: '10.0.2.123' @@ -92,10 +94,12 @@ def present(name, clients=None, hosts=None, options=None, exports='/etc/exports' hosts: '*.example.com' + hosts: '*' + options A list of NFS options, for example: - ... code-block:: yaml + .. 
code-block:: yaml options: - 'rw' From 0b25b73c60d0c70f61764f555c3b75011885e065 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 14 Jun 2017 16:12:03 -0500 Subject: [PATCH 255/639] nfs_export: add present() --- salt/states/nfs_export.py | 43 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index a1fc5116b2..5252a04e6e 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -106,7 +106,50 @@ def present(name, clients=None, hosts=None, options=None, exports='/etc/exports' - 'subtree_check' ''' + path = name + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': ''} + if not clients: + if not hosts: + ret['result'] = False + ret['comment'] = 'Either \'clients\' or \'hosts\' must be defined' + return ret + # options being None is handled by add_export() + clients = [{'hosts': hosts, 'options': options}] + + old = __salt__['nfs3.list_exports'](exports) + if path in old: + if old[path] == clients: + ret['result'] = True + ret['comment'] = 'Export {0} already configured'.format(path) + return ret + + ret['changes']['new'] = clients + ret['changes']['old'] = old[path] + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'Export {0} would be changed'.format(path) + return ret + + __salt__['nfs3.del_export'](exports, path) + + else: + ret['changes']['old'] = None + ret['changes']['new'] = clients + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'Export {0} would be added'.format(path) + return ret + + for exp in clients: + __salt__['nfs3.add_export'](exports, path, exp['hosts'], exp['options']) + + ret['result'] = True + ret['changes']['new'] = clients + return ret def absent(name, exports='/etc/exports'): ''' From b1788b1e5f9ba7b9c0e188ef3fa369f757003a3b Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 28 Aug 2017 12:35:46 -0600 Subject: [PATCH 256/639] Bring changes from #43228 to 2017.7 --- salt/modules/win_pkg.py 
| 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 43bd829831..03faa6ecfd 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -973,7 +973,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # Version is ignored salt '*' pkg.install pkgs="['foo', 'bar']" version=1.2.3 - If passed with a comma seperated list in the ``name`` parameter, the + If passed with a comma separated list in the ``name`` parameter, the version will apply to all packages in the list. CLI Example: @@ -1282,7 +1282,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): use_msiexec, msiexec = _get_msiexec(pkginfo[version_num].get('msiexec', False)) # Build cmd and arguments - # cmd and arguments must be seperated for use with the task scheduler + # cmd and arguments must be separated for use with the task scheduler if use_msiexec: cmd = msiexec arguments = ['/i', cached_pkg] @@ -1313,7 +1313,9 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # Run Scheduled Task # Special handling for installing salt - if pkg_name in ['salt-minion', 'salt-minion-py3']: + if re.search(r'salt[\s-]*minion', + pkg_name, + flags=re.IGNORECASE + re.UNICODE) is not None: ret[pkg_name] = {'install status': 'task started'} if not __salt__['task.run'](name='update-salt-software'): log.error('Failed to install {0}'.format(pkg_name)) @@ -1345,7 +1347,8 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): else: # Combine cmd and arguments - cmd = [cmd].extend(arguments) + cmd = [cmd] + cmd.extend(arguments) # Launch the command result = __salt__['cmd.run_all'](cmd, From b1a3d15b28e7682f4839f971a4eba177d28686b1 Mon Sep 17 00:00:00 2001 From: Mike Place Date: Mon, 28 Aug 2017 13:52:23 -0600 Subject: [PATCH 257/639] Remove trailing whitespace for linter --- salt/modules/win_iis.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/win_iis.py 
b/salt/modules/win_iis.py index 2e2e5113d2..bf52b4f0d5 100644 --- a/salt/modules/win_iis.py +++ b/salt/modules/win_iis.py @@ -836,10 +836,10 @@ def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443, if _iisVersion() < 8: # IIS 7.5 and earlier have different syntax for associating a certificate with a site # Modify IP spec to IIS 7.5 format - iis7path = binding_path.replace(r"\*!", "\\0.0.0.0!") + iis7path = binding_path.replace(r"\*!", "\\0.0.0.0!") # win 2008 uses the following format: ip!port and not ip!port! if iis7path.endswith("!"): - iis7path = iis7path[:-1] + iis7path = iis7path[:-1] ps_cmd = ['New-Item', '-Path', "'{0}'".format(iis7path), From 589a1e90f23839ba715ca6f8300df6d4fc442f01 Mon Sep 17 00:00:00 2001 From: mephi42 Date: Tue, 29 Aug 2017 00:15:18 +0200 Subject: [PATCH 258/639] salt.utils.openstack.nova: make identity service type configurable At my $DAYJOB, OpenStack catalog contains identity endpoints v2 and v3. In order to differentiate between them, admins came up with identityv3 service type. This patch allows me to write: provider-id: driver: nova identity_service_type: identityv3 to make use of identityv3 endpoint in this setup. 
--- salt/utils/openstack/nova.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/utils/openstack/nova.py b/salt/utils/openstack/nova.py index e1ea7b405d..eea3305cf7 100644 --- a/salt/utils/openstack/nova.py +++ b/salt/utils/openstack/nova.py @@ -290,8 +290,9 @@ class SaltNova(object): self.session = keystoneauth1.session.Session(auth=options, verify=verify) conn = client.Client(version=self.version, session=self.session, **self.client_kwargs) self.kwargs['auth_token'] = conn.client.session.get_token() - self.catalog = conn.client.session.get('/auth/catalog', endpoint_filter={'service_type': 'identity'}).json().get('catalog', []) - if conn.client.get_endpoint(service_type='identity').endswith('v3'): + identity_service_type = kwargs.get('identity_service_type', 'identity') + self.catalog = conn.client.session.get('/auth/catalog', endpoint_filter={'service_type': identity_service_type}).json().get('catalog', []) + if conn.client.get_endpoint(service_type=identity_service_type).endswith('v3'): self._v3_setup(region_name) else: self._v2_setup(region_name) From 53bd3a3e236d93850e4f53cebd674767b001825f Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Mon, 28 Aug 2017 17:30:25 -0500 Subject: [PATCH 259/639] Improve inheritance in salt.utils.gitfs This makes the following changes: 1. Renames the valid_providers param in GitBase to git_providers, allowing for a dictionary mapping provider names to their associated classes. This allows for alternate providers to be used with a GitBase subclass. 2. Renames the get_provider function to verify_provider to reduce confusion with git_providers. 3. Uses super() to run a parent class' dunder init instead of invoking the parent class directly. 
--- salt/utils/gitfs.py | 54 +++++++++++++++++++--------------- tests/unit/utils/test_gitfs.py | 2 +- 2 files changed, 31 insertions(+), 25 deletions(-) diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py index bad3cc883e..ef8c2adf74 100644 --- a/salt/utils/gitfs.py +++ b/salt/utils/gitfs.py @@ -38,7 +38,6 @@ from salt.utils.versions import LooseVersion as _LooseVersion # Import third party libs import salt.ext.six as six -VALID_PROVIDERS = ('pygit2', 'gitpython') # Optional per-remote params that can only be used on a per-remote basis, and # thus do not have defaults in salt/config.py. PER_REMOTE_ONLY = ('name',) @@ -164,7 +163,7 @@ class GitProvider(object): directly. self.provider should be set in the sub-class' __init__ function before - invoking GitProvider.__init__(). + invoking the parent class' __init__. ''' def __init__(self, opts, remote, per_remote_defaults, per_remote_only, override_params, cache_root, role='gitfs'): @@ -857,8 +856,10 @@ class GitPython(GitProvider): def __init__(self, opts, remote, per_remote_defaults, per_remote_only, override_params, cache_root, role='gitfs'): self.provider = 'gitpython' - GitProvider.__init__(self, opts, remote, per_remote_defaults, - per_remote_only, override_params, cache_root, role) + super(GitPython, self).__init__( + opts, remote, per_remote_defaults, per_remote_only, + override_params, cache_root, role + ) def add_refspecs(self, *refspecs): ''' @@ -1192,8 +1193,10 @@ class Pygit2(GitProvider): def __init__(self, opts, remote, per_remote_defaults, per_remote_only, override_params, cache_root, role='gitfs'): self.provider = 'pygit2' - GitProvider.__init__(self, opts, remote, per_remote_defaults, - per_remote_only, override_params, cache_root, role) + super(Pygit2, self).__init__( + opts, remote, per_remote_defaults, per_remote_only, + override_params, cache_root, role + ) def add_refspecs(self, *refspecs): ''' @@ -1877,11 +1880,17 @@ class Pygit2(GitProvider): fp_.write(blob.data) +GIT_PROVIDERS = { + 
'pygit2': Pygit2, + 'gitpython': GitPython, +} + + class GitBase(object): ''' Base class for gitfs/git_pillar ''' - def __init__(self, opts, valid_providers=VALID_PROVIDERS, cache_root=None): + def __init__(self, opts, git_providers=None, cache_root=None): ''' IMPORTANT: If specifying a cache_root, understand that this is also where the remotes will be cloned. A non-default cache_root is only @@ -1889,8 +1898,9 @@ class GitBase(object): out into the winrepo locations and not within the cachedir. ''' self.opts = opts - self.valid_providers = valid_providers - self.get_provider() + self.git_providers = git_providers if git_providers is not None \ + else GIT_PROVIDERS + self.verify_provider() if cache_root is not None: self.cache_root = self.remote_root = cache_root else: @@ -1948,7 +1958,7 @@ class GitBase(object): self.remotes = [] for remote in remotes: - repo_obj = self.provider_class( + repo_obj = self.git_providers[self.provider]( self.opts, remote, per_remote_defaults, @@ -2202,7 +2212,7 @@ class GitBase(object): # Hash file won't exist if no files have yet been served up pass - def get_provider(self): + def verify_provider(self): ''' Determine which provider to use ''' @@ -2223,12 +2233,12 @@ class GitBase(object): # Should only happen if someone does something silly like # set the provider to a numeric value. desired_provider = str(desired_provider).lower() - if desired_provider not in self.valid_providers: + if desired_provider not in self.git_providers: log.critical( 'Invalid {0}_provider \'{1}\'. 
Valid choices are: {2}' .format(self.role, desired_provider, - ', '.join(self.valid_providers)) + ', '.join(self.git_providers)) ) failhard(self.role) elif desired_provider == 'pygit2' and self.verify_pygit2(): @@ -2241,17 +2251,13 @@ class GitBase(object): .format(self.role) ) failhard(self.role) - if self.provider == 'pygit2': - self.provider_class = Pygit2 - elif self.provider == 'gitpython': - self.provider_class = GitPython def verify_gitpython(self, quiet=False): ''' Check if GitPython is available and at a compatible version (>= 0.3.0) ''' def _recommend(): - if HAS_PYGIT2 and 'pygit2' in self.valid_providers: + if HAS_PYGIT2 and 'pygit2' in self.git_providers: log.error(_RECOMMEND_PYGIT2.format(self.role)) if not HAS_GITPYTHON: @@ -2262,7 +2268,7 @@ class GitBase(object): ) _recommend() return False - elif 'gitpython' not in self.valid_providers: + elif 'gitpython' not in self.git_providers: return False # pylint: disable=no-member @@ -2302,7 +2308,7 @@ class GitBase(object): Pygit2 must be at least 0.20.3 and libgit2 must be at least 0.20.0. 
''' def _recommend(): - if HAS_GITPYTHON and 'gitpython' in self.valid_providers: + if HAS_GITPYTHON and 'gitpython' in self.git_providers: log.error(_RECOMMEND_GITPYTHON.format(self.role)) if not HAS_PYGIT2: @@ -2313,7 +2319,7 @@ class GitBase(object): ) _recommend() return False - elif 'pygit2' not in self.valid_providers: + elif 'pygit2' not in self.git_providers: return False # pylint: disable=no-member @@ -2432,7 +2438,7 @@ class GitFS(GitBase): ''' def __init__(self, opts): self.role = 'gitfs' - GitBase.__init__(self, opts) + super(GitFS, self).__init__(opts) def dir_list(self, load): ''' @@ -2735,7 +2741,7 @@ class GitPillar(GitBase): ''' def __init__(self, opts): self.role = 'git_pillar' - GitBase.__init__(self, opts) + super(GitPillar, self).__init__(opts) def checkout(self): ''' @@ -2837,7 +2843,7 @@ class WinRepo(GitBase): ''' def __init__(self, opts, winrepo_dir): self.role = 'winrepo' - GitBase.__init__(self, opts, cache_root=winrepo_dir) + super(WinRepo, self).__init__(opts, cache_root=winrepo_dir) def checkout(self): ''' diff --git a/tests/unit/utils/test_gitfs.py b/tests/unit/utils/test_gitfs.py index c8942e4695..070a46fe75 100644 --- a/tests/unit/utils/test_gitfs.py +++ b/tests/unit/utils/test_gitfs.py @@ -66,7 +66,7 @@ class TestGitFSProvider(TestCase): ('git_pillar', salt.utils.gitfs.GitPillar), ('winrepo', salt.utils.gitfs.WinRepo)): key = '{0}_provider'.format(role_name) - for provider in salt.utils.gitfs.VALID_PROVIDERS: + for provider in salt.utils.gitfs.GIT_PROVIDERS: verify = 'verify_gitpython' mock1 = _get_mock(verify, provider) with patch.object(role_class, verify, mock1): From 46a91a2f5ead32c2f6f38d38b49ceff857d88e49 Mon Sep 17 00:00:00 2001 From: Kurach Alexander Date: Tue, 29 Aug 2017 11:07:02 +0300 Subject: [PATCH 260/639] Update __init__.py add support for unicode symbols in vm names. 
fix for '''UnicodeEncodeError: 'ascii' codec can't encode character u'\u2013' in position 5: ordinal not in range(128)''' --- salt/cloud/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py index 5eaf1a4f7a..f5f275816a 100644 --- a/salt/cloud/__init__.py +++ b/salt/cloud/__init__.py @@ -5,7 +5,7 @@ correct cloud modules ''' # Import python libs -from __future__ import absolute_import, print_function, generators +from __future__ import absolute_import, print_function, generators,unicode_literals import os import copy import glob From 8f43da10bbf7356db530c153d1fe7a4f6b7bbc7b Mon Sep 17 00:00:00 2001 From: Timur Date: Tue, 29 Aug 2017 14:05:38 +0300 Subject: [PATCH 261/639] .utils.aws.get_location() expects a dict MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit … as it's `provider` argument. --- salt/utils/aws.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/aws.py b/salt/utils/aws.py index 9e1212e92e..011dd346d7 100644 --- a/salt/utils/aws.py +++ b/salt/utils/aws.py @@ -392,7 +392,7 @@ def query(params=None, setname=None, requesturl=None, location=None, service_url = prov_dict.get('service_url', 'amazonaws.com') if not location: - location = get_location(opts, provider) + location = get_location(opts, prov_dict) if endpoint is None: if not requesturl: From 630a1db3ab392fa76190beea620a3dcf2d851c6f Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Tue, 29 Aug 2017 12:07:19 +0100 Subject: [PATCH 262/639] Include the line number by default on the log file format --- salt/config/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 0f06f9ccca..bd632b4116 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -55,7 +55,7 @@ _DFLT_LOG_DATEFMT = '%H:%M:%S' _DFLT_LOG_DATEFMT_LOGFILE = '%Y-%m-%d %H:%M:%S' _DFLT_LOG_FMT_CONSOLE = 
'[%(levelname)-8s] %(message)s' _DFLT_LOG_FMT_LOGFILE = ( - '%(asctime)s,%(msecs)03d [%(name)-17s][%(levelname)-8s][%(process)d] %(message)s' + '%(asctime)s,%(msecs)03d [%(name)-17s:%(lineno)-4d][%(levelname)-8s][%(process)d] %(message)s' ) _DFLT_REFSPECS = ['+refs/heads/*:refs/remotes/origin/*', '+refs/tags/*:refs/tags/*'] From 3c1ddc9bde546fdad3564ef6dddf41dc2ffee34e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Knecht?= Date: Wed, 23 Aug 2017 16:40:11 +0200 Subject: [PATCH 263/639] modules: iptables: correctly parse `--nfmask`/`--ctmask` `iptables-save` can return rules like this one: ``` -A PREROUTING -m connmark ! --mark 0x0/0xffff0000 -j CONNMARK \ --restore-mark --nfmask 0xffff0000 --ctmask 0xffff0000 ``` which leads to the following behavior: ``` $ salt '*' iptables.get_rules minion: Minion did not return. [No response] ``` This commit fixes the behavior of `iptables.get_rules` in this case, which also fixes the `iptables.append` state when such a rule already exists on the minion. --- salt/modules/iptables.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/modules/iptables.py b/salt/modules/iptables.py index a96c975da7..563713ac92 100644 --- a/salt/modules/iptables.py +++ b/salt/modules/iptables.py @@ -1455,6 +1455,8 @@ def _parser(): add_arg('--or-mark', dest='or-mark', action='append') add_arg('--xor-mark', dest='xor-mark', action='append') add_arg('--set-mark', dest='set-mark', action='append') + add_arg('--nfmask', dest='nfmask', action='append') + add_arg('--ctmask', dest='ctmask', action='append') ## CONNSECMARK add_arg('--save', dest='save', action='append') add_arg('--restore', dest='restore', action='append') From 7ea8524621816727d95aa91c5fa68c4449041e1d Mon Sep 17 00:00:00 2001 From: Robert Mader Date: Tue, 29 Aug 2017 15:14:07 +0200 Subject: [PATCH 264/639] Add **kwargs to mount.mounted state This silences an error message when using prereq with the mount.mounted state. 
See #29463 --- salt/states/mount.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/states/mount.py b/salt/states/mount.py index b44225b92a..a03868ddef 100644 --- a/salt/states/mount.py +++ b/salt/states/mount.py @@ -75,7 +75,8 @@ def mounted(name, extra_mount_invisible_keys=None, extra_mount_ignore_fs_keys=None, extra_mount_translate_options=None, - hidden_opts=None): + hidden_opts=None, + **kwargs): ''' Verify that a device is mounted From 0d5a46dbaa8815636bd1e78b582ccc9c4b91ad19 Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 29 Aug 2017 09:38:11 -0400 Subject: [PATCH 265/639] Update release branch section with a few more details --- doc/topics/development/contributing.rst | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/doc/topics/development/contributing.rst b/doc/topics/development/contributing.rst index fd21d86a23..7941397682 100644 --- a/doc/topics/development/contributing.rst +++ b/doc/topics/development/contributing.rst @@ -263,9 +263,17 @@ against that branch. Release Branches ---------------- -For each release a branch will be created when we are ready to tag. The branch will be the same name as the tag minus the v. For example, the v2017.7.1 release was created from the 2017.7.1 branch. This branching strategy will allow for more stability when there is a need for a re-tag during the testing phase of our releases. +For each release, a branch will be created when the SaltStack release team is +ready to tag. The release branch is created from the parent branch and will be +the same name as the tag minus the ``v``. For example, the ``2017.7.1`` release +branch was created from the ``2017.7`` parent branch and the ``v2017.7.1`` +release was tagged at the ``HEAD`` of the ``2017.7.1`` branch. This branching +strategy will allow for more stability when there is a need for a re-tag during +the testing phase of the release process. 
-Once the branch is created, the fixes required for a given release, as determined by the SaltStack release team, will be added to this branch. All commits in this branch will be merged forward into the parent branch as well. +Once the release branch is created, the fixes required for a given release, as +determined by the SaltStack release team, will be added to this branch. All +commits in this branch will be merged forward into the parent branch as well. Keeping Salt Forks in Sync ========================== From d3a23e164fa62e6bdc4bb921145e0062507c9aa3 Mon Sep 17 00:00:00 2001 From: Kurach Alexander Date: Tue, 29 Aug 2017 17:00:41 +0300 Subject: [PATCH 266/639] Update __init__.py fix lint err --- salt/cloud/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py index f5f275816a..0372f91f63 100644 --- a/salt/cloud/__init__.py +++ b/salt/cloud/__init__.py @@ -5,7 +5,7 @@ correct cloud modules ''' # Import python libs -from __future__ import absolute_import, print_function, generators,unicode_literals +from __future__ import absolute_import, print_function, generators, unicode_literals import os import copy import glob From 3444423c25736138db8ea220b6a8baf431294c08 Mon Sep 17 00:00:00 2001 From: Robert Mader Date: Tue, 29 Aug 2017 16:34:02 +0200 Subject: [PATCH 267/639] Add **kwargs to salt.state state This silences an error message when using prereq with the salt.state state. 
See #29463 and #37090 --- salt/states/saltmod.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index fd7a9139d8..ae48a7e334 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -83,7 +83,8 @@ def state(name, batch=None, queue=False, subset=None, - orchestration_jid=None): + orchestration_jid=None, + **kwargs): ''' Invoke a state run on a given target From c227cb25ad7f7025e661c71b9bb368a9c5b57c4c Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Tue, 29 Aug 2017 17:12:25 +0200 Subject: [PATCH 268/639] Skipping test on ImportError This most probably means that the Kubernetes client lib is not installed. --- tests/unit/modules/test_kubernetes.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/unit/modules/test_kubernetes.py b/tests/unit/modules/test_kubernetes.py index eda8a0a91b..1de939f6b0 100644 --- a/tests/unit/modules/test_kubernetes.py +++ b/tests/unit/modules/test_kubernetes.py @@ -16,10 +16,15 @@ from tests.support.mock import ( NO_MOCK_REASON ) -from salt.modules import kubernetes +try: + from salt.modules import kubernetes +except ImportError: + kubernetes = False @skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(kubernetes is False, "Probably Kubernetes client lib is not installed. 
\ + Skipping test_kubernetes.py") class KubernetesTestCase(TestCase, LoaderModuleMockMixin): ''' Test cases for salt.modules.kubernetes From 23ec47c74cfa794ab61b485eadd3ce4c8b0bfeca Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 29 Aug 2017 09:16:21 -0600 Subject: [PATCH 269/639] Add _ to regex search --- salt/modules/win_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 03faa6ecfd..10854ef1f6 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -1313,7 +1313,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # Run Scheduled Task # Special handling for installing salt - if re.search(r'salt[\s-]*minion', + if re.search(r'salt[\s-_]*minion', pkg_name, flags=re.IGNORECASE + re.UNICODE) is not None: ret[pkg_name] = {'install status': 'task started'} From bad8f56969401040d5f249b8c1cf7e36f5e933bc Mon Sep 17 00:00:00 2001 From: rallytime Date: Mon, 14 Aug 2017 16:39:20 -0400 Subject: [PATCH 270/639] Always notify ryan-lane when changes occur on boto files --- .mention-bot | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.mention-bot b/.mention-bot index 56be9ab9e6..b6ed4f2b24 100644 --- a/.mention-bot +++ b/.mention-bot @@ -1,4 +1,11 @@ { + "alwaysNotifyForPaths": [ + { + "name": "ryan-lane", + "files": ["salt/**/*boto*.py"], + "skipTeamPrs": false + } + ], "skipTitle": "Merge forward", "userBlacklist": ["cvrebert", "markusgattol", "olliewalsh"] } From 40b5a29f90378e15e75c152d5c42e04f0654ad0d Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Thu, 24 Aug 2017 15:55:30 -0600 Subject: [PATCH 271/639] Add basepi to userBlacklist for mention bot --- .mention-bot | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.mention-bot b/.mention-bot index b6ed4f2b24..86890cfde0 100644 --- a/.mention-bot +++ b/.mention-bot @@ -7,6 +7,6 @@ } ], "skipTitle": "Merge forward", - "userBlacklist": ["cvrebert", "markusgattol", "olliewalsh"] + "userBlacklist": 
["cvrebert", "markusgattol", "olliewalsh", "basepi"] } From 2b85757d733d5a82307391f77c15dda2de5a0dc8 Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 25 Aug 2017 19:34:29 -0400 Subject: [PATCH 272/639] Always notify tkwilliams when changes occur on boto files --- .mention-bot | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.mention-bot b/.mention-bot index 86890cfde0..c07f85b9fc 100644 --- a/.mention-bot +++ b/.mention-bot @@ -4,6 +4,11 @@ "name": "ryan-lane", "files": ["salt/**/*boto*.py"], "skipTeamPrs": false + }, + { + "name": "tkwilliams", + "files": ["salt/**/*boto*.py"], + "skipTeamPrs": false } ], "skipTitle": "Merge forward", From ae7609b8b270b7e3c7303a71298c968ea73ee4ab Mon Sep 17 00:00:00 2001 From: Robert Mader Date: Tue, 29 Aug 2017 18:05:40 +0200 Subject: [PATCH 273/639] Also add **kwargs to unmounted, same story --- salt/states/mount.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/states/mount.py b/salt/states/mount.py index a03868ddef..fef674ddbc 100644 --- a/salt/states/mount.py +++ b/salt/states/mount.py @@ -699,7 +699,8 @@ def unmounted(name, device=None, config='/etc/fstab', persist=False, - user=None): + user=None, + **kwargs): ''' .. versionadded:: 0.17.0 From 20a9ae10ed97bbaff1f140dcefe3cfed186a0350 Mon Sep 17 00:00:00 2001 From: Robert Mader Date: Tue, 29 Aug 2017 18:13:27 +0200 Subject: [PATCH 274/639] Add **kwargs to some file states This silences an error message when using prereq with some file state. See #29463 and #37090 --- salt/states/file.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/salt/states/file.py b/salt/states/file.py index 82d85a4e67..c8a8a185f1 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -1411,7 +1411,8 @@ def symlink( return ret -def absent(name): +def absent(name, + **kwargs): ''' Make sure that the named file or directory is absent. If it exists, it will be deleted. 
This will work to reverse any of the functions in the file @@ -1473,7 +1474,8 @@ def absent(name): return ret -def exists(name): +def exists(name, + **kwargs): ''' Verify that the named file or directory is present or exists. Ensures pre-requisites outside of Salt's purview @@ -1499,7 +1501,8 @@ def exists(name): return ret -def missing(name): +def missing(name, + **kwargs): ''' Verify that the named file or directory is missing, this returns True only if the named file is missing but does not remove the file if it is present. From 869e8cc603d1d4580ef6a5f024d455a3dcd590d0 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 28 Aug 2017 11:01:40 -0600 Subject: [PATCH 275/639] Fix `unit.fileserver.test_gitfs` for Windows Put `import pwd` in a try/except block Set `os.environ['USERNAME']` in windows using win_functions Add error function for `shutil.rmtree` --- tests/unit/fileserver/test_gitfs.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/tests/unit/fileserver/test_gitfs.py b/tests/unit/fileserver/test_gitfs.py index 1920448e66..b3154638b7 100644 --- a/tests/unit/fileserver/test_gitfs.py +++ b/tests/unit/fileserver/test_gitfs.py @@ -9,8 +9,12 @@ import os import shutil import tempfile import textwrap -import pwd import logging +import stat +try: + import pwd +except ImportError: + pass # Import 3rd-party libs import yaml @@ -189,7 +193,6 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): self.integration_base_files = os.path.join(FILES, 'file', 'base') # Create the dir if it doesn't already exist - try: shutil.copytree(self.integration_base_files, self.tmp_repo_dir + '/') except OSError: @@ -203,7 +206,11 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): if 'USERNAME' not in os.environ: try: - os.environ['USERNAME'] = pwd.getpwuid(os.geteuid()).pw_name + if salt.utils.is_windows(): + import salt.utils.win_functions + os.environ['USERNAME'] = salt.utils.win_functions.get_current_user() + else: + os.environ['USERNAME'] 
= pwd.getpwuid(os.geteuid()).pw_name except AttributeError: log.error('Unable to get effective username, falling back to ' '\'root\'.') @@ -219,14 +226,18 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): Remove the temporary git repository and gitfs cache directory to ensure a clean environment for each test. ''' - shutil.rmtree(self.tmp_repo_dir) - shutil.rmtree(self.tmp_cachedir) - shutil.rmtree(self.tmp_sock_dir) + shutil.rmtree(self.tmp_repo_dir, onerror=self._rmtree_error) + shutil.rmtree(self.tmp_cachedir, onerror=self._rmtree_error) + shutil.rmtree(self.tmp_sock_dir, onerror=self._rmtree_error) del self.tmp_repo_dir del self.tmp_cachedir del self.tmp_sock_dir del self.integration_base_files + def _rmtree_error(self, func, path, excinfo): + os.chmod(path, stat.S_IWRITE) + func(path) + def test_file_list(self): ret = gitfs.file_list(LOAD) self.assertIn('testfile', ret) From c956d242830b2918fc2609454fe00600dba16093 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 29 Aug 2017 10:15:09 -0600 Subject: [PATCH 276/639] Fix is_windows detection when USERNAME missing --- tests/unit/fileserver/test_gitfs.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unit/fileserver/test_gitfs.py b/tests/unit/fileserver/test_gitfs.py index b3154638b7..48cce5cf5a 100644 --- a/tests/unit/fileserver/test_gitfs.py +++ b/tests/unit/fileserver/test_gitfs.py @@ -206,6 +206,7 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): if 'USERNAME' not in os.environ: try: + import salt.utils if salt.utils.is_windows(): import salt.utils.win_functions os.environ['USERNAME'] = salt.utils.win_functions.get_current_user() From e5daff495acca2862b1deeeecffad277a6ca50d2 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 28 Aug 2017 12:10:55 -0600 Subject: [PATCH 277/639] Fix pkg.install I jacked this up previously convert cmd to a list use cmd.extend --- salt/modules/win_pkg.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/modules/win_pkg.py 
b/salt/modules/win_pkg.py index f1b51af308..39c2e59192 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -1212,7 +1212,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): use_msiexec, msiexec = _get_msiexec(pkginfo[version_num].get('msiexec', False)) # Build cmd and arguments - # cmd and arguments must be seperated for use with the task scheduler + # cmd and arguments must be separated for use with the task scheduler if use_msiexec: cmd = msiexec arguments = ['/i', cached_pkg] @@ -1275,7 +1275,8 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): else: # Combine cmd and arguments - cmd = [cmd].extend(arguments) + cmd = [cmd] + cmd.extend(arguments) # Launch the command result = __salt__['cmd.run_all'](cmd, From ed030a35a50663f9da122c42f91ef0f210388642 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 28 Aug 2017 12:22:11 -0600 Subject: [PATCH 278/639] Use regex to detect salt-minion install --- salt/modules/win_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 39c2e59192..89ba749aba 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -1243,7 +1243,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # Run Scheduled Task # Special handling for installing salt - if pkg_name in ['salt-minion', 'salt-minion-py3']: + if re.search(r'salt[\s-]*minion', pkg_name, flags=re.IGNORECASE+re.UNICODE) is not None: ret[pkg_name] = {'install status': 'task started'} if not __salt__['task.run'](name='update-salt-software'): log.error('Failed to install {0}'.format(pkg_name)) From 3cf2b6575c1e2a709186067a9a84f7583ed1ab1d Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 28 Aug 2017 12:39:47 -0600 Subject: [PATCH 279/639] Fix spelling --- salt/modules/win_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 89ba749aba..b5efec590c 100644 --- 
a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -903,7 +903,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # Version is ignored salt '*' pkg.install pkgs="['foo', 'bar']" version=1.2.3 - If passed with a comma seperated list in the ``name`` parameter, the + If passed with a comma separated list in the ``name`` parameter, the version will apply to all packages in the list. CLI Example: From 31ff69f0addf85f5a949a6b462932d7803d50949 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 29 Aug 2017 09:13:44 -0600 Subject: [PATCH 280/639] Add underscore to regex search --- salt/modules/win_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index b5efec590c..50a67dddde 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -1243,7 +1243,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # Run Scheduled Task # Special handling for installing salt - if re.search(r'salt[\s-]*minion', pkg_name, flags=re.IGNORECASE+re.UNICODE) is not None: + if re.search(r'salt[\s-_]*minion', pkg_name, flags=re.IGNORECASE+re.UNICODE) is not None: ret[pkg_name] = {'install status': 'task started'} if not __salt__['task.run'](name='update-salt-software'): log.error('Failed to install {0}'.format(pkg_name)) From 13dfabb1ce0ab506f1f564df442db2c1eb14f7ca Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 29 Aug 2017 13:27:18 -0600 Subject: [PATCH 281/639] Fix regex statement, add `.` --- salt/modules/win_pkg.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 50a67dddde..8e18ecbae3 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -1243,7 +1243,9 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # Run Scheduled Task # Special handling for installing salt - if re.search(r'salt[\s-_]*minion', pkg_name, flags=re.IGNORECASE+re.UNICODE) is not None: + if 
re.search(r'salt[\s_.-]*minion', + pkg_name, + flags=re.IGNORECASE+re.UNICODE) is not None: ret[pkg_name] = {'install status': 'task started'} if not __salt__['task.run'](name='update-salt-software'): log.error('Failed to install {0}'.format(pkg_name)) From eccd7a6d763b6ecc2e7e4ab687d5bdab889bf999 Mon Sep 17 00:00:00 2001 From: wcannon Date: Tue, 29 Aug 2017 14:31:05 -0500 Subject: [PATCH 282/639] adding an assertion in check list --- salt/modules/saltcheck.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index 43c45cf79c..3b5635b962 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -258,7 +258,8 @@ class SaltCheck(object): self.results_dict_summary = {} self.assertions_list = '''assertEqual assertNotEqual assertTrue assertFalse - assertIn assertGreater + assertIn assertNotIn + assertGreater assertGreaterEqual assertLess assertLessEqual'''.split() self.auto_update_master_cache = _get_auto_update_cache_value From e007a1c26ed2db642c55f539ec7c2ba127144a1c Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 29 Aug 2017 13:57:28 -0600 Subject: [PATCH 283/639] Fix regex, add `.` --- salt/modules/win_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 10854ef1f6..d3434cc2b7 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -1313,7 +1313,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # Run Scheduled Task # Special handling for installing salt - if re.search(r'salt[\s-_]*minion', + if re.search(r'salt[\s_.-]*minion', pkg_name, flags=re.IGNORECASE + re.UNICODE) is not None: ret[pkg_name] = {'install status': 'task started'} From 2aebbe304a094c203b3837b45fd7f4057d97a9f8 Mon Sep 17 00:00:00 2001 From: Dylan Whichard Date: Tue, 29 Aug 2017 16:17:03 -0400 Subject: [PATCH 284/639] Fix scope of targets in .modules.pacman.install() --- salt/modules/pacman.py | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/pacman.py b/salt/modules/pacman.py index ac50e7ebfe..ab35b9d85d 100644 --- a/salt/modules/pacman.py +++ b/salt/modules/pacman.py @@ -539,6 +539,7 @@ def install(name=None, cmd.append('pacman') errors = [] + targets = [] if pkg_type == 'file': cmd.extend(['-U', '--noprogressbar', '--noconfirm']) cmd.extend(pkg_params) @@ -549,7 +550,6 @@ def install(name=None, if sysupgrade: cmd.append('-u') cmd.extend(['--noprogressbar', '--noconfirm', '--needed']) - targets = [] wildcards = [] for param, version_num in six.iteritems(pkg_params): if version_num is None: From 26410c662ff88656ff2dd12cad4bd253bfbeb8cb Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 29 Aug 2017 16:24:47 -0400 Subject: [PATCH 285/639] Update some salt.utils.x paths for moved util functions Through merge forwards, some of the old-style paths have made their way into develop. This PR corrects some of these instances on the more popular functions. --- doc/ref/modules/index.rst | 2 +- salt/cli/cp.py | 3 ++- salt/cloud/clouds/oneandone.py | 4 ++-- salt/grains/core.py | 2 +- salt/grains/disks.py | 2 +- salt/modules/cp.py | 2 +- salt/modules/runit.py | 3 ++- salt/modules/slsutil.py | 9 ++++++--- salt/modules/test.py | 3 ++- salt/modules/win_lgpo.py | 6 +++--- salt/modules/yumpkg.py | 2 +- salt/states/group.py | 4 ++-- tests/unit/modules/test_cmdmod.py | 3 +-- tests/unit/modules/test_pip.py | 4 ++-- tests/unit/modules/test_timezone.py | 3 +-- tests/unit/states/test_file.py | 12 ++++++------ tests/unit/test_doc.py | 6 +++--- tests/unit/utils/test_url.py | 13 +++++++------ 18 files changed, 44 insertions(+), 39 deletions(-) diff --git a/doc/ref/modules/index.rst b/doc/ref/modules/index.rst index e68eb94507..d6c7fe9610 100644 --- a/doc/ref/modules/index.rst +++ b/doc/ref/modules/index.rst @@ -451,7 +451,7 @@ For example: ''' Only load if git exists on the system ''' - if salt.utils.which('git') is None: + if salt.utils.path.which('git') is None: 
return (False, 'The git execution module cannot be loaded: git unavailable.') else: diff --git a/salt/cli/cp.py b/salt/cli/cp.py index bdca7539ca..19efc47ee3 100644 --- a/salt/cli/cp.py +++ b/salt/cli/cp.py @@ -20,6 +20,7 @@ import sys import salt.client import salt.output import salt.utils +import salt.utils.files import salt.utils.gzip_util import salt.utils.itertools import salt.utils.minions @@ -112,7 +113,7 @@ class SaltCP(object): err = 'The referenced file, {0} is not available.'.format(fn_) sys.stderr.write(err + '\n') sys.exit(42) - with salt.utils.fopen(fn_, 'r') as fp_: + with salt.utils.files.fopen(fn_, 'r') as fp_: data = fp_.read() return {fn_: data} diff --git a/salt/cloud/clouds/oneandone.py b/salt/cloud/clouds/oneandone.py index 73526b8b4b..73ed0c853a 100644 --- a/salt/cloud/clouds/oneandone.py +++ b/salt/cloud/clouds/oneandone.py @@ -87,7 +87,6 @@ import pprint import time # Import salt libs -import salt.utils import salt.config as config from salt.exceptions import ( SaltCloudConfigError, @@ -96,6 +95,7 @@ from salt.exceptions import ( SaltCloudExecutionTimeout, SaltCloudSystemExit ) +import salt.utils.files # Import salt.cloud libs import salt.utils.cloud @@ -805,7 +805,7 @@ def load_public_key(vm_): ) ) - with salt.utils.fopen(public_key_filename, 'r') as public_key: + with salt.utils.files.fopen(public_key_filename, 'r') as public_key: key = public_key.read().replace('\n', '') return key diff --git a/salt/grains/core.py b/salt/grains/core.py index 634473fce2..56ec468ed2 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -543,7 +543,7 @@ def _virtual(osdata): command = 'system_profiler' args = ['SPDisplaysDataType'] elif osdata['kernel'] == 'SunOS': - virtinfo = salt.utils.which('virtinfo') + virtinfo = salt.utils.path.which('virtinfo') if virtinfo: try: ret = __salt__['cmd.run_all']('{0} -a'.format(virtinfo)) diff --git a/salt/grains/disks.py b/salt/grains/disks.py index bd2069226b..0d76a579c8 100644 --- a/salt/grains/disks.py +++ 
b/salt/grains/disks.py @@ -148,7 +148,7 @@ def _linux_disks(): def _windows_disks(): - wmic = salt.utils.which('wmic') + wmic = salt.utils.path.which('wmic') namespace = r'\\root\microsoft\windows\storage' path = 'MSFT_PhysicalDisk' diff --git a/salt/modules/cp.py b/salt/modules/cp.py index 1b5a3ddbfb..86634d559c 100644 --- a/salt/modules/cp.py +++ b/salt/modules/cp.py @@ -80,7 +80,7 @@ def recv(files, dest): return 'Destination unavailable' try: - with salt.utils.fopen(final, 'w+') as fp_: + with salt.utils.files.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: diff --git a/salt/modules/runit.py b/salt/modules/runit.py index 8fcf19d48a..f994aae6cc 100644 --- a/salt/modules/runit.py +++ b/salt/modules/runit.py @@ -58,6 +58,7 @@ log = logging.getLogger(__name__) # Import salt libs from salt.exceptions import CommandExecutionError import salt.utils.files +import salt.utils.path # Function alias to not shadow built-ins. __func_alias__ = { @@ -95,7 +96,7 @@ def __virtual__(): global __virtualname__ __virtualname__ = 'service' return __virtualname__ - if salt.utils.which('sv'): + if salt.utils.path.which('sv'): return __virtualname__ return (False, 'Runit not available. 
Please install sv') diff --git a/salt/modules/slsutil.py b/salt/modules/slsutil.py index f107841cd4..af8008bb1c 100644 --- a/salt/modules/slsutil.py +++ b/salt/modules/slsutil.py @@ -2,12 +2,15 @@ ''' Utility functions for use with or in SLS files ''' + +# Import Python libs from __future__ import absolute_import +# Import Salt libs import salt.exceptions import salt.loader import salt.template -import salt.utils +import salt.utils.args import salt.utils.dictupdate @@ -172,7 +175,7 @@ def serialize(serializer, obj, **mod_kwargs): {% set json_string = salt.slsutil.serialize('json', {'foo': 'Foo!'}) %} ''' - kwargs = salt.utils.clean_kwargs(**mod_kwargs) + kwargs = salt.utils.args.clean_kwargs(**mod_kwargs) return _get_serialize_fn(serializer, 'serialize')(obj, **kwargs) @@ -196,6 +199,6 @@ def deserialize(serializer, stream_or_string, **mod_kwargs): {% set python_object = salt.slsutil.deserialize('json', '{"foo": "Foo!"}') %} ''' - kwargs = salt.utils.clean_kwargs(**mod_kwargs) + kwargs = salt.utils.args.clean_kwargs(**mod_kwargs) return _get_serialize_fn(serializer, 'deserialize')(stream_or_string, **kwargs) diff --git a/salt/modules/test.py b/salt/modules/test.py index 30008285f8..86d434298d 100644 --- a/salt/modules/test.py +++ b/salt/modules/test.py @@ -15,6 +15,7 @@ import random # Import Salt libs import salt import salt.utils +import salt.utils.args import salt.utils.hashutils import salt.utils.platform import salt.version @@ -323,7 +324,7 @@ def arg_clean(*args, **kwargs): salt '*' test.arg_clean 1 "two" 3.1 txt="hello" wow='{a: 1, b: "hello"}' ''' - return dict(args=args, kwargs=salt.utils.clean_kwargs(**kwargs)) + return dict(args=args, kwargs=salt.utils.args.clean_kwargs(**kwargs)) def fib(num): diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index 3a715a19a6..aa93fb24d7 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -4025,14 +4025,14 @@ def _write_regpol_data(data_to_write, reg_pol_header = 
u'\u5250\u6765\x01\x00' if not os.path.exists(policy_file_path): ret = __salt__['file.makedirs'](policy_file_path) - with salt.utils.fopen(policy_file_path, 'wb') as pol_file: + with salt.utils.files.fopen(policy_file_path, 'wb') as pol_file: if not data_to_write.startswith(reg_pol_header): pol_file.write(reg_pol_header.encode('utf-16-le')) pol_file.write(data_to_write.encode('utf-16-le')) try: gpt_ini_data = '' if os.path.exists(gpt_ini_path): - with salt.utils.fopen(gpt_ini_path, 'rb') as gpt_file: + with salt.utils.files.fopen(gpt_ini_path, 'rb') as gpt_file: gpt_ini_data = gpt_file.read() if not _regexSearchRegPolData(r'\[General\]\r\n', gpt_ini_data): gpt_ini_data = '[General]\r\n' + gpt_ini_data @@ -4087,7 +4087,7 @@ def _write_regpol_data(data_to_write, int("{0}{1}".format(str(version_nums[0]).zfill(4), str(version_nums[1]).zfill(4)), 16), gpt_ini_data[general_location.end():]) if gpt_ini_data: - with salt.utils.fopen(gpt_ini_path, 'wb') as gpt_file: + with salt.utils.files.fopen(gpt_ini_path, 'wb') as gpt_file: gpt_file.write(gpt_ini_data) except Exception as e: msg = 'An error occurred attempting to write to {0}, the exception was {1}'.format( diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index 1449813ab6..ad0f4b6b53 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -2834,7 +2834,7 @@ def _parse_repo_file(filename): # Try to extract leading comments headers = '' - with salt.utils.fopen(filename, 'r') as rawfile: + with salt.utils.files.fopen(filename, 'r') as rawfile: for line in rawfile: if line.strip().startswith('#'): headers += '{0}\n'.format(line.strip()) diff --git a/salt/states/group.py b/salt/states/group.py index b640ecd56b..8153e2da7f 100644 --- a/salt/states/group.py +++ b/salt/states/group.py @@ -42,7 +42,7 @@ import sys from salt.ext import six # Import Salt libs -import salt.utils +import salt.utils.platform import salt.utils.win_functions @@ -61,7 +61,7 @@ def _changes(name, # User and Domain names are not 
case sensitive in Windows. Let's make them # all lower case so we can compare properly - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): if lgrp['members']: lgrp['members'] = [user.lower() for user in lgrp['members']] if members: diff --git a/tests/unit/modules/test_cmdmod.py b/tests/unit/modules/test_cmdmod.py index 8fca228dcc..ca2f879d5a 100644 --- a/tests/unit/modules/test_cmdmod.py +++ b/tests/unit/modules/test_cmdmod.py @@ -10,7 +10,6 @@ import sys import tempfile # Import Salt Libs -import salt.utils import salt.utils.platform import salt.modules.cmdmod as cmdmod from salt.exceptions import CommandExecutionError @@ -264,7 +263,7 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): with patch('salt.utils.files.fopen', mock_open(read_data=MOCK_SHELL_FILE)): self.assertFalse(cmdmod._is_valid_shell('foo')) - @skipIf(salt.utils.is_windows(), 'Do not run on Windows') + @skipIf(salt.utils.platform.is_windows(), 'Do not run on Windows') def test_os_environment_remains_intact(self): ''' Make sure the OS environment is not tainted after running a command diff --git a/tests/unit/modules/test_pip.py b/tests/unit/modules/test_pip.py index 9829f3b279..7d7735ff70 100644 --- a/tests/unit/modules/test_pip.py +++ b/tests/unit/modules/test_pip.py @@ -10,7 +10,7 @@ from tests.support.unit import skipIf, TestCase from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch # Import salt libs -import salt.utils +import salt.utils.platform import salt.modules.pip as pip from salt.exceptions import CommandExecutionError @@ -297,7 +297,7 @@ class PipTestCase(TestCase, LoaderModuleMockMixin): mock_path.join = join mock = MagicMock(return_value={'retcode': 0, 'stdout': ''}) with patch.dict(pip.__salt__, {'cmd.run_all': mock}): - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): venv_path = 'c:\\test_env' bin_path = os.path.join(venv_path, 'Scripts', 'pip.exe').encode('string-escape') else: diff --git 
a/tests/unit/modules/test_timezone.py b/tests/unit/modules/test_timezone.py index e33476f3d8..c2fb6a4864 100644 --- a/tests/unit/modules/test_timezone.py +++ b/tests/unit/modules/test_timezone.py @@ -20,7 +20,6 @@ from tests.support.mock import ( from salt.exceptions import CommandExecutionError, SaltInvocationError import salt.modules.timezone as timezone from salt.ext import six -import salt.utils import salt.utils.platform import salt.utils.stringutils @@ -193,7 +192,7 @@ class TimezoneModuleTestCase(TestCase, LoaderModuleMockMixin): name, args, kwargs = timezone.__salt__['file.sed'].mock_calls[0] assert args == ('/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="UTC"') - @skipIf(salt.utils.is_windows(), 'os.symlink not available in Windows') + @skipIf(salt.utils.platform.is_windows(), 'os.symlink not available in Windows') @patch('salt.utils.path.which', MagicMock(return_value=False)) @patch('os.path.exists', MagicMock(return_value=True)) @patch('os.unlink', MagicMock()) diff --git a/tests/unit/states/test_file.py b/tests/unit/states/test_file.py index 4febdd57be..bac6594652 100644 --- a/tests/unit/states/test_file.py +++ b/tests/unit/states/test_file.py @@ -550,7 +550,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin): 'G12', 'G12', 'G12', 'G12', 'G12']) mock_if = MagicMock(side_effect=[True, False, False, False, False, False, False, False]) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): mock_ret = MagicMock(return_value=ret) else: mock_ret = MagicMock(return_value=(ret, None)) @@ -590,7 +590,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin): # Group argument is ignored on Windows systems. 
Group is set to # user - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): comt = ('User salt is not available Group salt' ' is not available') else: @@ -732,7 +732,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin): mock_t = MagicMock(return_value=True) mock_f = MagicMock(return_value=False) - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): mock_perms = MagicMock(return_value=ret) else: mock_perms = MagicMock(return_value=(ret, '')) @@ -803,7 +803,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin): with patch.object(os.path, 'isfile', mock_f): with patch.dict(filestate.__opts__, {'test': True}): - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): comt = 'The directory "{0}" will be changed' \ ''.format(name) p_chg = {'directory': 'new'} @@ -889,7 +889,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin): 'cp.list_master': mock_l}): # Group argument is ignored on Windows systems. Group is set to user - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): comt = ('User salt is not available Group salt' ' is not available') else: @@ -1364,7 +1364,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin): # Group argument is ignored on Windows systems. 
Group is set # to user - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): comt = ('User salt is not available Group salt' ' is not available') else: diff --git a/tests/unit/test_doc.py b/tests/unit/test_doc.py index 52311d2f9e..c3e7e7db17 100644 --- a/tests/unit/test_doc.py +++ b/tests/unit/test_doc.py @@ -14,7 +14,7 @@ from tests.support.unit import TestCase # Import Salt libs import tests.integration as integration import salt.modules.cmdmod -import salt.utils +import salt.utils.platform class DocTestCase(TestCase): @@ -35,7 +35,7 @@ class DocTestCase(TestCase): ''' salt_dir = integration.CODE_DIR - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # No grep in Windows, use findstr # findstr in windows doesn't prepend 'Binary` to binary files, so # use the '/P' switch to skip files with unprintable characters @@ -52,7 +52,7 @@ class DocTestCase(TestCase): if line.startswith('Binary'): continue - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): # Need the space after the colon so it doesn't split the drive # letter key, val = line.split(': ', 1) diff --git a/tests/unit/utils/test_url.py b/tests/unit/utils/test_url.py index 43a74402cf..800d8a090d 100644 --- a/tests/unit/utils/test_url.py +++ b/tests/unit/utils/test_url.py @@ -4,6 +4,7 @@ from __future__ import absolute_import # Import Salt Libs +import salt.utils.platform import salt.utils.url # Import Salt Testing Libs @@ -38,7 +39,7 @@ class UrlTestCase(TestCase): ''' path = '?funny/path with {interesting|chars}' url = 'salt://' + path - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): path = '_funny/path with {interesting_chars}' self.assertEqual(salt.utils.url.parse(url), (path, None)) @@ -50,7 +51,7 @@ class UrlTestCase(TestCase): saltenv = 'ambience' path = '?funny/path&with {interesting|chars}' url = 'salt://' + path + '?saltenv=' + saltenv - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): path = '_funny/path&with 
{interesting_chars}' self.assertEqual(salt.utils.url.parse(url), (path, saltenv)) @@ -63,7 +64,7 @@ class UrlTestCase(TestCase): ''' path = '? interesting/&path.filetype' url = 'salt://' + path - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): url = 'salt://_ interesting/&path.filetype' self.assertEqual(salt.utils.url.create(path), url) @@ -74,7 +75,7 @@ class UrlTestCase(TestCase): ''' saltenv = 'raumklang' path = '? interesting/&path.filetype' - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): path = '_ interesting/&path.filetype' url = 'salt://' + path + '?saltenv=' + saltenv @@ -157,7 +158,7 @@ class UrlTestCase(TestCase): ''' path = 'dir/file.conf' escaped_path = '|' + path - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): escaped_path = path self.assertEqual(salt.utils.url.escape(path), escaped_path) @@ -177,7 +178,7 @@ class UrlTestCase(TestCase): path = 'dir/file.conf' url = 'salt://' + path escaped_url = 'salt://|' + path - if salt.utils.is_windows(): + if salt.utils.platform.is_windows(): escaped_url = url self.assertEqual(salt.utils.url.escape(url), escaped_url) From 5185071d5ad07a3e93f84064f66f09e49cb1b4d1 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 29 Aug 2017 14:29:50 -0600 Subject: [PATCH 286/639] Skips `unit.modules.test_groupadd` on Windows There is a test_win_groupadd modules for testing the win_groupadd module on Windows. 
--- tests/unit/modules/test_groupadd.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/unit/modules/test_groupadd.py b/tests/unit/modules/test_groupadd.py index b836bd8805..29dfd15ed7 100644 --- a/tests/unit/modules/test_groupadd.py +++ b/tests/unit/modules/test_groupadd.py @@ -5,7 +5,10 @@ # Import Python libs from __future__ import absolute_import -import grp +try: + import grp +except ImportError: + pass # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin @@ -13,10 +16,12 @@ from tests.support.unit import TestCase, skipIf from tests.support.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON # Import Salt Libs +import salt.utils import salt.modules.groupadd as groupadd @skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(salt.utils.is_windows(), "Module not available on Windows") class GroupAddTestCase(TestCase, LoaderModuleMockMixin): ''' TestCase for salt.modules.groupadd From 26f52bd2b8568c165db762295a393d7ec6eaaa36 Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 29 Aug 2017 17:33:01 -0400 Subject: [PATCH 287/639] Move get_colors and get_color_theme to salt.utils.color.py Moves the `get_colors` and `get_color_theme` out of `salt.utils.py` and into a new file named `salt.utils.color.py`. This PR moves the original functions, adds a deprecation warning to the old function paths, and updates any references to the functions in Salt code to the new location. This includes moving a test as well. 
--- salt/output/highstate.py | 4 +- salt/output/key.py | 4 +- salt/output/nested.py | 4 +- salt/output/no_return.py | 4 +- salt/output/overstatestage.py | 4 +- salt/output/table_out.py | 10 ++- salt/utils/__init__.py | 124 ++++++++++++--------------------- salt/utils/color.py | 92 ++++++++++++++++++++++++ tests/consist.py | 4 +- tests/integration/__init__.py | 5 +- tests/unit/utils/test_color.py | 27 +++++++ tests/unit/utils/test_utils.py | 11 --- 12 files changed, 184 insertions(+), 109 deletions(-) create mode 100644 salt/utils/color.py create mode 100644 tests/unit/utils/test_color.py diff --git a/salt/output/highstate.py b/salt/output/highstate.py index 2f30d6c8f9..60adeb961e 100644 --- a/salt/output/highstate.py +++ b/salt/output/highstate.py @@ -108,7 +108,7 @@ import pprint import textwrap # Import salt libs -import salt.utils +import salt.utils.color import salt.utils.stringutils import salt.output from salt.utils.locales import sdecode @@ -158,7 +158,7 @@ def output(data, **kwargs): # pylint: disable=unused-argument def _format_host(host, data): host = sdecode(host) - colors = salt.utils.get_colors( + colors = salt.utils.color.get_colors( __opts__.get('color'), __opts__.get('color_theme')) tabular = __opts__.get('state_tabular', False) diff --git a/salt/output/key.py b/salt/output/key.py index dfa42b2e06..afb504918d 100644 --- a/salt/output/key.py +++ b/salt/output/key.py @@ -8,9 +8,9 @@ The ``salt-key`` command makes use of this outputter to format its output. from __future__ import absolute_import # Import salt libs -import salt.utils import salt.output from salt.utils.locales import sdecode +import salt.utils.color def output(data, **kwargs): # pylint: disable=unused-argument @@ -18,7 +18,7 @@ def output(data, **kwargs): # pylint: disable=unused-argument Read in the dict structure generated by the salt key API methods and print the structure. 
''' - color = salt.utils.get_colors( + color = salt.utils.color.get_colors( __opts__.get('color'), __opts__.get('color_theme')) strip_colors = __opts__.get('strip_colors', True) diff --git a/salt/output/nested.py b/salt/output/nested.py index 8ef0e4c046..563bf5768f 100644 --- a/salt/output/nested.py +++ b/salt/output/nested.py @@ -29,9 +29,9 @@ from numbers import Number # Import salt libs import salt.output +import salt.utils.color import salt.utils.locales import salt.utils.odict -from salt.utils import get_colors from salt.ext.six import string_types @@ -41,7 +41,7 @@ class NestDisplay(object): ''' def __init__(self): self.__dict__.update( - get_colors( + salt.utils.color.get_colors( __opts__.get('color'), __opts__.get('color_theme') ) diff --git a/salt/output/no_return.py b/salt/output/no_return.py index 781db538de..00f601bade 100644 --- a/salt/output/no_return.py +++ b/salt/output/no_return.py @@ -15,7 +15,7 @@ Example output:: from __future__ import absolute_import # Import salt libs -import salt.utils +import salt.utils.color # Import 3rd-party libs from salt.ext import six @@ -26,7 +26,7 @@ class NestDisplay(object): Create generator for nested output ''' def __init__(self): - self.colors = salt.utils.get_colors( + self.colors = salt.utils.color.get_colors( __opts__.get(u'color'), __opts__.get(u'color_theme')) diff --git a/salt/output/overstatestage.py b/salt/output/overstatestage.py index d35dd5a311..ee101e7620 100644 --- a/salt/output/overstatestage.py +++ b/salt/output/overstatestage.py @@ -11,7 +11,7 @@ and should not be called directly. 
from __future__ import absolute_import # Import Salt libs -import salt.utils +import salt.utils.color # Import 3rd-party libs from salt.ext import six @@ -27,7 +27,7 @@ def output(data, **kwargs): # pylint: disable=unused-argument ''' Format the data for printing stage information from the overstate system ''' - colors = salt.utils.get_colors( + colors = salt.utils.color.get_colors( __opts__.get('color'), __opts__.get('color_theme')) ostr = '' diff --git a/salt/output/table_out.py b/salt/output/table_out.py index 59c4cd9486..531eb96c0f 100644 --- a/salt/output/table_out.py +++ b/salt/output/table_out.py @@ -42,12 +42,10 @@ from functools import reduce # pylint: disable=redefined-builtin # Import salt libs import salt.output -import salt.utils.locales from salt.ext.six import string_types -from salt.utils import get_colors -from salt.ext.six.moves import map # pylint: disable=redefined-builtin -from salt.ext.six.moves import zip # pylint: disable=redefined-builtin - +from salt.ext.six.moves import map, zip # pylint: disable=redefined-builtin +import salt.utils.color +import salt.utils.locales __virtualname__ = 'table' @@ -78,7 +76,7 @@ class TableDisplay(object): width=50, # column max width wrapfunc=None): # function wrapper self.__dict__.update( - get_colors( + salt.utils.color.get_colors( __opts__.get('color'), __opts__.get('color_theme') ) diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 3526b6ff31..902bd6d015 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -126,7 +126,6 @@ import salt.utils.dictupdate import salt.utils.versions import salt.version from salt.utils.decorators.jinja import jinja_filter -from salt.textformat import TextFormat from salt.exceptions import ( CommandExecutionError, SaltClientError, CommandNotFoundError, SaltSystemExit, @@ -138,83 +137,6 @@ log = logging.getLogger(__name__) _empty = object() -def get_color_theme(theme): - ''' - Return the color theme to use - ''' - # Keep the heavy lifting out of 
the module space - import yaml - if not os.path.isfile(theme): - log.warning('The named theme {0} if not available'.format(theme)) - - # Late import to avoid circular import. - import salt.utils.files - try: - with salt.utils.files.fopen(theme, 'rb') as fp_: - colors = yaml.safe_load(fp_.read()) - ret = {} - for color in colors: - ret[color] = '\033[{0}m'.format(colors[color]) - if not isinstance(colors, dict): - log.warning('The theme file {0} is not a dict'.format(theme)) - return {} - return ret - except Exception: - log.warning('Failed to read the color theme {0}'.format(theme)) - return {} - - -def get_colors(use=True, theme=None): - ''' - Return the colors as an easy to use dict. Pass `False` to deactivate all - colors by setting them to empty strings. Pass a string containing only the - name of a single color to be used in place of all colors. Examples: - - .. code-block:: python - - colors = get_colors() # enable all colors - no_colors = get_colors(False) # disable all colors - red_colors = get_colors('RED') # set all colors to red - ''' - - colors = { - 'BLACK': TextFormat('black'), - 'DARK_GRAY': TextFormat('bold', 'black'), - 'RED': TextFormat('red'), - 'LIGHT_RED': TextFormat('bold', 'red'), - 'GREEN': TextFormat('green'), - 'LIGHT_GREEN': TextFormat('bold', 'green'), - 'YELLOW': TextFormat('yellow'), - 'LIGHT_YELLOW': TextFormat('bold', 'yellow'), - 'BLUE': TextFormat('blue'), - 'LIGHT_BLUE': TextFormat('bold', 'blue'), - 'MAGENTA': TextFormat('magenta'), - 'LIGHT_MAGENTA': TextFormat('bold', 'magenta'), - 'CYAN': TextFormat('cyan'), - 'LIGHT_CYAN': TextFormat('bold', 'cyan'), - 'LIGHT_GRAY': TextFormat('white'), - 'WHITE': TextFormat('bold', 'white'), - 'DEFAULT_COLOR': TextFormat('default'), - 'ENDC': TextFormat('reset'), - } - if theme: - colors.update(get_color_theme(theme)) - - if not use: - for color in colors: - colors[color] = '' - if isinstance(use, six.string_types): - # Try to set all of the colors to the passed color - if use in colors: - 
for color in colors: - # except for color reset - if color == 'ENDC': - continue - colors[color] = colors[use] - - return colors - - def get_context(template, line, num_lines=5, marker=None): ''' Returns debugging context around a line in a given string @@ -3435,3 +3357,49 @@ def kwargs_warn_until(kwargs, stacklevel=stacklevel, _version_info_=_version_info_, _dont_call_warnings=_dont_call_warnings) + + +def get_color_theme(theme): + ''' + Return the color theme to use + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.color + import salt.utils.versions + + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.get_color_theme\' detected. This function has ' + 'been moved to \'salt.utils.color.get_color_theme\' as of Salt ' + 'Oxygen. This warning will be removed in Salt Neon.' + ) + return salt.utils.color.get_color_theme(theme) + + +def get_colors(use=True, theme=None): + ''' + Return the colors as an easy to use dict. Pass `False` to deactivate all + colors by setting them to empty strings. Pass a string containing only the + name of a single color to be used in place of all colors. Examples: + + .. code-block:: python + + colors = get_colors() # enable all colors + no_colors = get_colors(False) # disable all colors + red_colors = get_colors('RED') # set all colors to red + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.color + import salt.utils.versions + + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.get_colors\' detected. This function has ' + 'been moved to \'salt.utils.color.get_colors\' as of Salt ' + 'Oxygen. This warning will be removed in Salt Neon.' 
+ ) + return salt.utils.color.get_colors(use=use, theme=theme) diff --git a/salt/utils/color.py b/salt/utils/color.py new file mode 100644 index 0000000000..21efb315dc --- /dev/null +++ b/salt/utils/color.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- +''' +Functions used for CLI color themes. +''' + +# Import Python libs +from __future__ import absolute_import +import logging +import os + +# Import Salt libs +from salt.ext import six +from salt.textformat import TextFormat +import salt.utils.files + +log = logging.getLogger(__name__) + + +def get_color_theme(theme): + ''' + Return the color theme to use + ''' + # Keep the heavy lifting out of the module space + import yaml + if not os.path.isfile(theme): + log.warning('The named theme {0} if not available'.format(theme)) + + try: + with salt.utils.files.fopen(theme, 'rb') as fp_: + colors = yaml.safe_load(fp_.read()) + ret = {} + for color in colors: + ret[color] = '\033[{0}m'.format(colors[color]) + if not isinstance(colors, dict): + log.warning('The theme file {0} is not a dict'.format(theme)) + return {} + return ret + except Exception: + log.warning('Failed to read the color theme {0}'.format(theme)) + return {} + + +def get_colors(use=True, theme=None): + ''' + Return the colors as an easy to use dict. Pass `False` to deactivate all + colors by setting them to empty strings. Pass a string containing only the + name of a single color to be used in place of all colors. Examples: + + .. 
code-block:: python + + colors = get_colors() # enable all colors + no_colors = get_colors(False) # disable all colors + red_colors = get_colors('RED') # set all colors to red + + ''' + + colors = { + 'BLACK': TextFormat('black'), + 'DARK_GRAY': TextFormat('bold', 'black'), + 'RED': TextFormat('red'), + 'LIGHT_RED': TextFormat('bold', 'red'), + 'GREEN': TextFormat('green'), + 'LIGHT_GREEN': TextFormat('bold', 'green'), + 'YELLOW': TextFormat('yellow'), + 'LIGHT_YELLOW': TextFormat('bold', 'yellow'), + 'BLUE': TextFormat('blue'), + 'LIGHT_BLUE': TextFormat('bold', 'blue'), + 'MAGENTA': TextFormat('magenta'), + 'LIGHT_MAGENTA': TextFormat('bold', 'magenta'), + 'CYAN': TextFormat('cyan'), + 'LIGHT_CYAN': TextFormat('bold', 'cyan'), + 'LIGHT_GRAY': TextFormat('white'), + 'WHITE': TextFormat('bold', 'white'), + 'DEFAULT_COLOR': TextFormat('default'), + 'ENDC': TextFormat('reset'), + } + if theme: + colors.update(get_color_theme(theme)) + + if not use: + for color in colors: + colors[color] = '' + if isinstance(use, six.string_types): + # Try to set all of the colors to the passed color + if use in colors: + for color in colors: + # except for color reset + if color == 'ENDC': + continue + colors[color] = colors[use] + + return colors diff --git a/tests/consist.py b/tests/consist.py index fea84b59f5..ad4a0b6403 100644 --- a/tests/consist.py +++ b/tests/consist.py @@ -9,13 +9,13 @@ import pprint import optparse # Import Salt libs -import salt.utils +import salt.utils.color # Import 3rd-party libs import yaml from salt.ext import six -colors = salt.utils.get_colors() +colors = salt.utils.color.get_colors() def parse(): diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index f2651f9470..e964ef12ff 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -49,7 +49,8 @@ import salt.minion import salt.runner import salt.output import salt.version -import salt.utils # Can be removed once get_colors and appendproctitle are moved 
+import salt.utils # Can be removed once appendproctitle is moved +import salt.utils.color import salt.utils.files import salt.utils.path import salt.utils.platform @@ -188,7 +189,7 @@ class TestDaemon(object): def __init__(self, parser): self.parser = parser - self.colors = salt.utils.get_colors(self.parser.options.no_colors is False) + self.colors = salt.utils.color.get_colors (self.parser.options.no_colors is False) if salt.utils.platform.is_windows(): # There's no shell color support on windows... for key in self.colors: diff --git a/tests/unit/utils/test_color.py b/tests/unit/utils/test_color.py new file mode 100644 index 0000000000..cc4c835b65 --- /dev/null +++ b/tests/unit/utils/test_color.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +''' +Unit tests for salt.utils.color.py +''' + +# Import python libs +from __future__ import absolute_import + +# Import Salt Testing libs +from tests.support.unit import TestCase + +# Import Salt libs +import salt.utils.color + + +class ColorUtilsTestCase(TestCase): + + def test_get_colors(self): + ret = salt.utils.color.get_colors() + self.assertEqual('\x1b[0;37m', str(ret['LIGHT_GRAY'])) + + ret = salt.utils.color.get_colors(use=False) + self.assertDictContainsSubset({'LIGHT_GRAY': ''}, ret) + + ret = salt.utils.color.get_colors(use='LIGHT_GRAY') + # LIGHT_YELLOW now == LIGHT_GRAY + self.assertEqual(str(ret['LIGHT_YELLOW']), str(ret['LIGHT_GRAY'])) diff --git a/tests/unit/utils/test_utils.py b/tests/unit/utils/test_utils.py index 87fe9b0018..79efe49c9c 100644 --- a/tests/unit/utils/test_utils.py +++ b/tests/unit/utils/test_utils.py @@ -895,17 +895,6 @@ class UtilsTestCase(TestCase): ret = salt.utils.repack_dictlist(LOREM_IPSUM) self.assertDictEqual(ret, {}) - def test_get_colors(self): - ret = salt.utils.get_colors() - self.assertEqual('\x1b[0;37m', str(ret['LIGHT_GRAY'])) - - ret = salt.utils.get_colors(use=False) - self.assertDictContainsSubset({'LIGHT_GRAY': ''}, ret) - - ret = salt.utils.get_colors(use='LIGHT_GRAY') - # 
LIGHT_YELLOW now == LIGHT_GRAY - self.assertEqual(str(ret['LIGHT_YELLOW']), str(ret['LIGHT_GRAY'])) - @skipIf(NO_MOCK, NO_MOCK_REASON) def test_daemonize_if(self): # pylint: disable=assignment-from-none From b401340e6c2b359da97b00cb086fde2cbb7a2da7 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 29 Aug 2017 15:59:08 -0600 Subject: [PATCH 288/639] Fix `unit.modules.test_inspect_collector` on Windows Uses os.sep instead of unix-style paths in the test Uses salt.utils.path.islink() to detect symlinks instead of os.path.islink(). os.path.islink() does not correctly detect symlinks in Windows Put grp and pwd imports inside a try/except block --- salt/modules/inspectlib/collector.py | 3 +- salt/modules/inspectlib/kiwiproc.py | 7 +++- tests/unit/modules/test_inspect_collector.py | 39 ++++++++++++++++---- 3 files changed, 38 insertions(+), 11 deletions(-) diff --git a/salt/modules/inspectlib/collector.py b/salt/modules/inspectlib/collector.py index b87a46b82f..b8ebe7e804 100644 --- a/salt/modules/inspectlib/collector.py +++ b/salt/modules/inspectlib/collector.py @@ -29,6 +29,7 @@ from salt.modules.inspectlib.entities import (AllowedDir, IgnoredDir, Package, PayloadFile, PackageCfgFile) import salt.utils +import salt.utils.path from salt.utils import fsutils from salt.utils import reinit_crypto from salt.exceptions import CommandExecutionError @@ -311,7 +312,7 @@ class Inspector(EnvLoader): continue if not valid or not os.path.exists(obj) or not os.access(obj, os.R_OK): continue - if os.path.islink(obj): + if salt.utils.path.islink(obj): links.append(obj) elif os.path.isdir(obj): dirs.append(obj) diff --git a/salt/modules/inspectlib/kiwiproc.py b/salt/modules/inspectlib/kiwiproc.py index 136cacf00c..40b4f9c0bf 100644 --- a/salt/modules/inspectlib/kiwiproc.py +++ b/salt/modules/inspectlib/kiwiproc.py @@ -17,11 +17,14 @@ # Import python libs from __future__ import absolute_import import os -import grp -import pwd from xml.dom import minidom import platform import socket 
+try: + import grp + import pwd +except ImportError: + pass # Import salt libs import salt.utils diff --git a/tests/unit/modules/test_inspect_collector.py b/tests/unit/modules/test_inspect_collector.py index cdcb689eb7..0d37519a9e 100644 --- a/tests/unit/modules/test_inspect_collector.py +++ b/tests/unit/modules/test_inspect_collector.py @@ -49,9 +49,15 @@ class InspectorCollectorTestCase(TestCase): :return: ''' - inspector = Inspector(cachedir='/foo/cache', piddir='/foo/pid', pidfilename='bar.pid') - self.assertEqual(inspector.dbfile, '/foo/cache/_minion_collector.db') - self.assertEqual(inspector.pidfile, '/foo/pid/bar.pid') + cachedir = os.sep + os.sep.join(['foo', 'cache']) + piddir = os.sep + os.sep.join(['foo', 'pid']) + inspector = Inspector(cachedir=cachedir, piddir=piddir, pidfilename='bar.pid') + self.assertEqual( + inspector.dbfile, + os.sep + os.sep.join(['foo', 'cache', '_minion_collector.db'])) + self.assertEqual( + inspector.pidfile, + os.sep + os.sep.join(['foo', 'pid', 'bar.pid'])) def test_file_tree(self): ''' @@ -60,12 +66,29 @@ class InspectorCollectorTestCase(TestCase): :return: ''' - inspector = Inspector(cachedir='/test', piddir='/test', pidfilename='bar.pid') + inspector = Inspector(cachedir=os.sep + 'test', + piddir=os.sep + 'test', + pidfilename='bar.pid') tree_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'inspectlib', 'tree_test') - expected_tree = (['/a/a/dummy.a', '/a/b/dummy.b', '/b/b.1', '/b/b.2', '/b/b.3'], - ['/a', '/a/a', '/a/b', '/a/c', '/b', '/c'], - ['/a/a/dummy.ln.a', '/a/b/dummy.ln.b', '/a/c/b.1', '/b/b.4', - '/b/b.5', '/c/b.1', '/c/b.2', '/c/b.3']) + expected_tree = ([os.sep + os.sep.join(['a', 'a', 'dummy.a']), + os.sep + os.sep.join(['a', 'b', 'dummy.b']), + os.sep + os.sep.join(['b', 'b.1']), + os.sep + os.sep.join(['b', 'b.2']), + os.sep + os.sep.join(['b', 'b.3'])], + [os.sep + 'a', + os.sep + os.sep.join(['a', 'a']), + os.sep + os.sep.join(['a', 'b']), + os.sep + os.sep.join(['a', 'c']), + os.sep + 
'b', + os.sep + 'c'], + [os.sep + os.sep.join(['a', 'a', 'dummy.ln.a']), + os.sep + os.sep.join(['a', 'b', 'dummy.ln.b']), + os.sep + os.sep.join(['a', 'c', 'b.1']), + os.sep + os.sep.join(['b', 'b.4']), + os.sep + os.sep.join(['b', 'b.5']), + os.sep + os.sep.join(['c', 'b.1']), + os.sep + os.sep.join(['c', 'b.2']), + os.sep + os.sep.join(['c', 'b.3'])]) tree_result = [] for chunk in inspector._get_all_files(tree_root): buff = [] From cec627a60bdbcd8de8662b3ab70ef46c9544df86 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 29 Aug 2017 16:28:12 -0600 Subject: [PATCH 289/639] Skip mac tests for user and group They use grp and pwd --- tests/unit/modules/test_mac_group.py | 9 +++++++-- tests/unit/modules/test_mac_user.py | 25 +++++++++++++++---------- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/tests/unit/modules/test_mac_group.py b/tests/unit/modules/test_mac_group.py index 2c03deb357..d69288ccb9 100644 --- a/tests/unit/modules/test_mac_group.py +++ b/tests/unit/modules/test_mac_group.py @@ -5,11 +5,15 @@ # Import python libs from __future__ import absolute_import -import grp +HAS_GRP = True +try: + import grp +except ImportError: + HAS_GRP = False # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin -from tests.support.unit import TestCase +from tests.support.unit import TestCase, skipIf from tests.support.mock import MagicMock, patch # Import Salt Libs @@ -17,6 +21,7 @@ import salt.modules.mac_group as mac_group from salt.exceptions import SaltInvocationError, CommandExecutionError +@skipIf(not HAS_GRP, "Missing required library 'grp'") class MacGroupTestCase(TestCase, LoaderModuleMockMixin): ''' TestCase for the salt.modules.mac_group module diff --git a/tests/unit/modules/test_mac_user.py b/tests/unit/modules/test_mac_user.py index 51402e6cd0..c639f022da 100644 --- a/tests/unit/modules/test_mac_user.py +++ b/tests/unit/modules/test_mac_user.py @@ -2,10 +2,13 @@ ''' :codeauthor: :email:`Nicole Thomas ` ''' - # 
Import python libs from __future__ import absolute_import -import pwd +HAS_PWD = True +try: + import pwd +except ImportError: + HAS_PWD = False # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin @@ -17,6 +20,7 @@ import salt.modules.mac_user as mac_user from salt.exceptions import SaltInvocationError, CommandExecutionError +@skipIf(not HAS_PWD, "Missing required library 'pwd'") @skipIf(NO_MOCK, NO_MOCK_REASON) class MacUserTestCase(TestCase, LoaderModuleMockMixin): ''' @@ -26,14 +30,15 @@ class MacUserTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): return {mac_user: {}} - mock_pwall = [pwd.struct_passwd(('_amavisd', '*', 83, 83, 'AMaViS Daemon', - '/var/virusmails', '/usr/bin/false')), - pwd.struct_passwd(('_appleevents', '*', 55, 55, - 'AppleEvents Daemon', - '/var/empty', '/usr/bin/false')), - pwd.struct_passwd(('_appowner', '*', 87, 87, - 'Application Owner', - '/var/empty', '/usr/bin/false'))] + if HAS_PWD: + mock_pwall = [pwd.struct_passwd(('_amavisd', '*', 83, 83, 'AMaViS Daemon', + '/var/virusmails', '/usr/bin/false')), + pwd.struct_passwd(('_appleevents', '*', 55, 55, + 'AppleEvents Daemon', + '/var/empty', '/usr/bin/false')), + pwd.struct_passwd(('_appowner', '*', 87, 87, + 'Application Owner', + '/var/empty', '/usr/bin/false'))] mock_info_ret = {'shell': '/bin/bash', 'name': 'test', 'gid': 4376, 'groups': ['TEST_GROUP'], 'home': '/Users/foo', 'fullname': 'TEST USER', 'uid': 4376} From 83b0bab34ba2b6e1fc37797511240caf2b552f17 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Tue, 29 Aug 2017 16:11:40 -0600 Subject: [PATCH 290/639] opt_args needs to be a dict For napalm grains, otherwise there is an exception during bootup --- salt/grains/napalm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/grains/napalm.py b/salt/grains/napalm.py index 6ac52464c8..fcfbdcfe9f 100644 --- a/salt/grains/napalm.py +++ b/salt/grains/napalm.py @@ -447,8 +447,8 @@ def 
optional_args(proxy=None): device2: True ''' - opt_args = _get_device_grain('optional_args', proxy=proxy) - if _FORBIDDEN_OPT_ARGS: + opt_args = _get_device_grain('optional_args', proxy=proxy) or {} + if opt_args and _FORBIDDEN_OPT_ARGS: for arg in _FORBIDDEN_OPT_ARGS: opt_args.pop(arg, None) return {'optional_args': opt_args} From 29a763ffcba06b16c300707dc1f1082b52c95df3 Mon Sep 17 00:00:00 2001 From: rallytime Date: Wed, 30 Aug 2017 09:38:39 -0400 Subject: [PATCH 291/639] Lint: Remove extra space --- tests/integration/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index e964ef12ff..92dcd8fedf 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -189,7 +189,7 @@ class TestDaemon(object): def __init__(self, parser): self.parser = parser - self.colors = salt.utils.color.get_colors (self.parser.options.no_colors is False) + self.colors = salt.utils.color.get_colors(self.parser.options.no_colors is False) if salt.utils.platform.is_windows(): # There's no shell color support on windows... 
for key in self.colors: From fbfbe316874a3c45bc22db9bf6b22be79ad36755 Mon Sep 17 00:00:00 2001 From: Nicole Thomas Date: Wed, 30 Aug 2017 10:02:39 -0400 Subject: [PATCH 292/639] Lint: use salt.utils.versions instead of distutils.version for LooseVersion --- salt/cloud/clouds/azurearm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cloud/clouds/azurearm.py b/salt/cloud/clouds/azurearm.py index c3f5777a64..0397aaed44 100644 --- a/salt/cloud/clouds/azurearm.py +++ b/salt/cloud/clouds/azurearm.py @@ -65,6 +65,7 @@ import salt.config as config import salt.utils import salt.utils.cloud import salt.utils.files +from salt.utils.versions import LooseVersion from salt.ext import six import salt.version from salt.exceptions import ( @@ -73,7 +74,6 @@ from salt.exceptions import ( SaltCloudExecutionTimeout, ) from salt.ext.six.moves import filter -from distutils.version import LooseVersion # Import 3rd-party libs HAS_LIBS = False From 784a5ca03674e70e5830012d77f186a7b865e384 Mon Sep 17 00:00:00 2001 From: Marc Koderer Date: Wed, 30 Aug 2017 16:26:26 +0200 Subject: [PATCH 293/639] Fix bigip documentation for delete_pool_member Signed-off-by: Marc Koderer --- salt/modules/bigip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/bigip.py b/salt/modules/bigip.py index 6b1d964e26..bea9d55f31 100644 --- a/salt/modules/bigip.py +++ b/salt/modules/bigip.py @@ -1164,7 +1164,7 @@ def delete_pool_member(hostname, username, password, name, member): CLI Example:: - salt '*' bigip.delete_node bigip admin admin my-pool 10.2.2.2:80 + salt '*' bigip.delete_pool_member bigip admin admin my-pool 10.2.2.2:80 ''' #build session From 04dd8ebedb99a2215ecb920b5657c2a40cbd3c85 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Wed, 30 Aug 2017 09:00:42 -0600 Subject: [PATCH 294/639] make sure meta-data grains work on ec2 We need to add on '/' for the end of the directories under latest. 
Then we should load anything that appears that we can that is json. The try except block is slightly faster than checking if the line starts with '{' --- salt/grains/metadata.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/salt/grains/metadata.py b/salt/grains/metadata.py index 2372aac6c7..dabffa2052 100644 --- a/salt/grains/metadata.py +++ b/salt/grains/metadata.py @@ -17,6 +17,7 @@ metadata server set `metadata_server_grains: True`. from __future__ import absolute_import # Import python libs +import json import os import socket @@ -47,14 +48,28 @@ def _search(prefix="latest/"): Recursively look up all grains in the metadata server ''' ret = {} - for line in http.query(os.path.join(HOST, prefix))['body'].split('\n'): + linedata = http.query(os.path.join(HOST, prefix)) + if 'body' not in linedata: + return ret + for line in linedata['body'].split('\n'): if line.endswith('/'): ret[line[:-1]] = _search(prefix=os.path.join(prefix, line)) + elif prefix == 'latest/': + # (gtmanfred) The first level should have a forward slash since + # they have stuff underneath. This will not be doubled up though, + # because lines ending with a slash are checked first. 
+ ret[line] = _search(prefix=os.path.join(prefix, line + '/')) elif '=' in line: key, value = line.split('=') ret[value] = _search(prefix=os.path.join(prefix, key)) else: - ret[line] = http.query(os.path.join(HOST, prefix, line))['body'] + retdata = http.query(os.path.join(HOST, prefix, line)).get('body', None) + # (gtmanfred) This try except block is slightly faster than + # checking if the string starts with a curly brace + try: + ret[line] = json.loads(retdata) + except ValueError: + ret[line] = retdata return ret From e9f32df0d96c5bf277ab3563e252a65c26d58a40 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 30 Aug 2017 11:10:17 -0500 Subject: [PATCH 295/639] nfs_export: make changes output match, and cleanup --- salt/states/nfs_export.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 5252a04e6e..fd4d1a4f89 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -144,8 +144,9 @@ def present(name, clients=None, hosts=None, options=None, exports='/etc/exports' ret['comment'] = 'Export {0} would be added'.format(path) return ret - for exp in clients: - __salt__['nfs3.add_export'](exports, path, exp['hosts'], exp['options']) + for export in clients: + __salt__['nfs3.add_export'](exports, path, export['hosts'], export['options']) + ret['result'] = True ret['changes']['new'] = clients @@ -169,6 +170,7 @@ def absent(name, exports='/etc/exports'): if path in old: if __opts__['test']: ret['comment'] = 'Export {0} would be removed'.format(path) + ret['changes'][path] = old[path] ret['result'] = None return ret From 047ad07da4f1eb62324a138fab8e1a0e43545ded Mon Sep 17 00:00:00 2001 From: Timur Date: Tue, 29 Aug 2017 14:05:38 +0300 Subject: [PATCH 296/639] .utils.aws.get_location() expects a dict MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit … as it's `provider` argument. 
--- salt/utils/aws.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/aws.py b/salt/utils/aws.py index 9e1212e92e..011dd346d7 100644 --- a/salt/utils/aws.py +++ b/salt/utils/aws.py @@ -392,7 +392,7 @@ def query(params=None, setname=None, requesturl=None, location=None, service_url = prov_dict.get('service_url', 'amazonaws.com') if not location: - location = get_location(opts, provider) + location = get_location(opts, prov_dict) if endpoint is None: if not requesturl: From 382bf92de730210af1d5944fd7cb53df9f8f4de4 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Wed, 30 Aug 2017 09:36:36 -0600 Subject: [PATCH 297/639] switch virtualbox cloud driver to use __utils__ --- salt/cloud/clouds/virtualbox.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/salt/cloud/clouds/virtualbox.py b/salt/cloud/clouds/virtualbox.py index 903722fd39..4266ce5f0e 100644 --- a/salt/cloud/clouds/virtualbox.py +++ b/salt/cloud/clouds/virtualbox.py @@ -24,7 +24,6 @@ import logging # Import salt libs from salt.exceptions import SaltCloudSystemExit import salt.config as config -import salt.utils.cloud as cloud # Import Third Party Libs try: @@ -136,7 +135,7 @@ def create(vm_info): ) log.debug("Going to fire event: starting create") - cloud.fire_event( + __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_info['name']), @@ -151,7 +150,7 @@ def create(vm_info): 'clone_from': vm_info['clonefrom'] } - cloud.fire_event( + __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_info['name']), @@ -174,10 +173,10 @@ def create(vm_info): vm_info['key_filename'] = key_filename vm_info['ssh_host'] = ip - res = cloud.bootstrap(vm_info, __opts__) + res = __utils__['cloud.bootstrap'](vm_info) vm_result.update(res) - cloud.fire_event( + __utils__['cloud.fire_event']( 'event', 'created machine', 'salt/cloud/{0}/created'.format(vm_info['name']), @@ -269,7 
+268,7 @@ def list_nodes(kwargs=None, call=None): "private_ips", "public_ips", ] - return cloud.list_nodes_select( + return __utils__['cloud.list_nodes_select']( list_nodes_full('function'), attributes, call, ) @@ -278,7 +277,7 @@ def list_nodes_select(call=None): """ Return a list of the VMs that are on the provider, with select fields """ - return cloud.list_nodes_select( + return __utils__['cloud.list_nodes_select']( list_nodes_full('function'), __opts__['query.selection'], call, ) @@ -306,7 +305,7 @@ def destroy(name, call=None): if not vb_machine_exists(name): return "{0} doesn't exist and can't be deleted".format(name) - cloud.fire_event( + __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), @@ -317,7 +316,7 @@ def destroy(name, call=None): vb_destroy_machine(name) - cloud.fire_event( + __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), From 91b062f5646289a5ae168250b0539e14ced04b34 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 30 Aug 2017 10:41:21 -0600 Subject: [PATCH 298/639] Fix formatting issue, spaces surrounding + --- salt/modules/win_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 8e18ecbae3..f66bd762ee 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -1245,7 +1245,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # Special handling for installing salt if re.search(r'salt[\s_.-]*minion', pkg_name, - flags=re.IGNORECASE+re.UNICODE) is not None: + flags=re.IGNORECASE + re.UNICODE) is not None: ret[pkg_name] = {'install status': 'task started'} if not __salt__['task.run'](name='update-salt-software'): log.error('Failed to install {0}'.format(pkg_name)) From ec8edd85bafd1c65fad12cb78db6423e1f56ce6d Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 30 Aug 2017 11:44:45 -0500 Subject: [PATCH 299/639] nfs_exports,nfs3: improve error 
handling of exportfs --- salt/modules/nfs3.py | 4 +++- salt/states/nfs_export.py | 6 ++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/salt/modules/nfs3.py b/salt/modules/nfs3.py index 423d9e9a4c..996dfec88a 100644 --- a/salt/modules/nfs3.py +++ b/salt/modules/nfs3.py @@ -145,6 +145,8 @@ def reload_exports(): output = __salt__['cmd.run_all'](command) ret['stdout'] = output['stdout'] ret['stderr'] = output['stderr'] - ret['result'] = not output['retcode'] + # exportfs always returns 0, so retcode is useless + # We will consider it an error if stderr is nonempty + ret['result'] = output['stderr'] == "" return ret diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index fd4d1a4f89..428b236e30 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -147,9 +147,11 @@ def present(name, clients=None, hosts=None, options=None, exports='/etc/exports' for export in clients: __salt__['nfs3.add_export'](exports, path, export['hosts'], export['options']) - - ret['result'] = True ret['changes']['new'] = clients + + export_attempt = __salt__['nfs3.reload_exports']() + ret['comment'] = export_attempt['stdout'] + "\n" + export_attempt['stderr'] + ret['result'] = export_attempt['result'] return ret def absent(name, exports='/etc/exports'): From f7c6029c949ab05d920566725abb4ad89f208bbc Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 30 Aug 2017 11:51:46 -0500 Subject: [PATCH 300/639] nfs_exports: add reload to absent() with error handling --- salt/states/nfs_export.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 428b236e30..1b28b71b61 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -177,9 +177,14 @@ def absent(name, exports='/etc/exports'): return ret __salt__['nfs3.del_export'](exports, path) - ret['comment'] = 'Export {0} removed'.format(path) + export_attempt = __salt__['nfs3.reload_exports']() + if not 
export_attempt['result']: + ret['comment'] = export_attempt['stdout'] + "\n" + export_attempt['stderr'] + else: + ret['comment'] = 'Export {0} removed'.format(path) + + ret['result'] = export_attempt['result'] ret['changes'][path] = old[path] - ret['result'] = True else: ret['comment'] = 'Export {0} already absent'.format(path) ret['result'] = True From 53ba05120bddee7fd62d77c75ee0b426238509d5 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 30 Aug 2017 13:18:50 -0500 Subject: [PATCH 301/639] nfs_export: linting --- salt/states/nfs_export.py | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 1b28b71b61..b75a5801f7 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -56,7 +56,11 @@ To ensure an NFS export is absent: ''' -def present(name, clients=None, hosts=None, options=None, exports='/etc/exports'): +def present(name, + clients=None, + hosts=None, + options=None, + exports='/etc/exports'): ''' Ensure that the named export is present with the given options @@ -114,8 +118,8 @@ def present(name, clients=None, hosts=None, options=None, exports='/etc/exports' if not clients: if not hosts: - ret['result'] = False - ret['comment'] = 'Either \'clients\' or \'hosts\' must be defined' + ret['result'] = False + ret['comment'] = 'Either \'clients\' or \'hosts\' must be defined' return ret # options being None is handled by add_export() clients = [{'hosts': hosts, 'options': options}] @@ -123,15 +127,15 @@ def present(name, clients=None, hosts=None, options=None, exports='/etc/exports' old = __salt__['nfs3.list_exports'](exports) if path in old: if old[path] == clients: - ret['result'] = True - ret['comment'] = 'Export {0} already configured'.format(path) + ret['result'] = True + ret['comment'] = 'Export {0} already configured'.format(path) return ret ret['changes']['new'] = clients ret['changes']['old'] = old[path] if __opts__['test']: 
ret['result'] = None - ret['comment'] = 'Export {0} would be changed'.format(path) + ret['comment'] = 'Export {0} would be changed'.format(path) return ret __salt__['nfs3.del_export'](exports, path) @@ -144,14 +148,15 @@ def present(name, clients=None, hosts=None, options=None, exports='/etc/exports' ret['comment'] = 'Export {0} would be added'.format(path) return ret - for export in clients: - __salt__['nfs3.add_export'](exports, path, export['hosts'], export['options']) + add_export = __salt__['nfs3.add_export'] + for exp in clients: + add_export(exports, path, exp['hosts'], exp['options']) ret['changes']['new'] = clients - export_attempt = __salt__['nfs3.reload_exports']() - ret['comment'] = export_attempt['stdout'] + "\n" + export_attempt['stderr'] - ret['result'] = export_attempt['result'] + try_reload = __salt__['nfs3.reload_exports']() + ret['comment'] = try_reload['stderr'] + ret['result'] = try_reload['result'] return ret def absent(name, exports='/etc/exports'): @@ -177,13 +182,13 @@ def absent(name, exports='/etc/exports'): return ret __salt__['nfs3.del_export'](exports, path) - export_attempt = __salt__['nfs3.reload_exports']() - if not export_attempt['result']: - ret['comment'] = export_attempt['stdout'] + "\n" + export_attempt['stderr'] + try_reload = __salt__['nfs3.reload_exports']() + if not try_reload['result']: + ret['comment'] = try_reload['stderr'] else: ret['comment'] = 'Export {0} removed'.format(path) - ret['result'] = export_attempt['result'] + ret['result'] = try_reload['result'] ret['changes'][path] = old[path] else: ret['comment'] = 'Export {0} already absent'.format(path) From b18b0b67f35ce5a0de25c2a3554b2de902241231 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 30 Aug 2017 13:27:51 -0500 Subject: [PATCH 302/639] nfs_export: fix last linting errors --- salt/states/nfs_export.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 
b75a5801f7..33bf129cf2 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -56,11 +56,12 @@ To ensure an NFS export is absent: ''' + def present(name, - clients=None, - hosts=None, - options=None, - exports='/etc/exports'): + clients=None, + hosts=None, + options=None, + exports='/etc/exports'): ''' Ensure that the named export is present with the given options @@ -118,7 +119,7 @@ def present(name, if not clients: if not hosts: - ret['result'] = False + ret['result'] = False ret['comment'] = 'Either \'clients\' or \'hosts\' must be defined' return ret # options being None is handled by add_export() @@ -127,7 +128,7 @@ def present(name, old = __salt__['nfs3.list_exports'](exports) if path in old: if old[path] == clients: - ret['result'] = True + ret['result'] = True ret['comment'] = 'Export {0} already configured'.format(path) return ret @@ -145,7 +146,7 @@ def present(name, ret['changes']['new'] = clients if __opts__['test']: ret['result'] = None - ret['comment'] = 'Export {0} would be added'.format(path) + ret['comment'] = 'Export {0} would be added'.format(path) return ret add_export = __salt__['nfs3.add_export'] @@ -159,6 +160,7 @@ def present(name, ret['result'] = try_reload['result'] return ret + def absent(name, exports='/etc/exports'): ''' Ensure that the named path is not exported @@ -176,9 +178,9 @@ def absent(name, exports='/etc/exports'): old = __salt__['nfs3.list_exports'](exports) if path in old: if __opts__['test']: - ret['comment'] = 'Export {0} would be removed'.format(path) + ret['comment'] = 'Export {0} would be removed'.format(path) ret['changes'][path] = old[path] - ret['result'] = None + ret['result'] = None return ret __salt__['nfs3.del_export'](exports, path) @@ -186,12 +188,12 @@ def absent(name, exports='/etc/exports'): if not try_reload['result']: ret['comment'] = try_reload['stderr'] else: - ret['comment'] = 'Export {0} removed'.format(path) + ret['comment'] = 'Export {0} removed'.format(path) ret['result'] = 
try_reload['result'] ret['changes'][path] = old[path] else: ret['comment'] = 'Export {0} already absent'.format(path) - ret['result'] = True + ret['result'] = True return ret From d533877743bb9a7411a035c1cb8cbcd866e27790 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 30 Aug 2017 14:35:04 -0500 Subject: [PATCH 303/639] Use six.integer_types instead of int This catches longs on PY2. --- salt/utils/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 41f06fbb9a..26320b7258 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -1963,7 +1963,7 @@ def is_true(value=None): pass # Now check for truthiness - if isinstance(value, (int, float)): + if isinstance(value, (six.integer_types, float)): return value > 0 elif isinstance(value, six.string_types): return str(value).lower() == 'true' @@ -2735,7 +2735,7 @@ def repack_dictlist(data, if val_cb is None: val_cb = lambda x, y: y - valid_non_dict = (six.string_types, int, float) + valid_non_dict = (six.string_types, six.integer_types, float) if isinstance(data, list): for element in data: if isinstance(element, valid_non_dict): From a3bbe160fc748dc5095b3a9cdc75e98659153f82 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Wed, 30 Aug 2017 15:45:46 -0500 Subject: [PATCH 304/639] uncommented salt.utils import --- salt/modules/saltcheck.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/saltcheck.py b/salt/modules/saltcheck.py index 3b5635b962..f59d2d7e91 100644 --- a/salt/modules/saltcheck.py +++ b/salt/modules/saltcheck.py @@ -52,7 +52,7 @@ import os import time import yaml try: - # import salt.utils + import salt.utils import salt.client import salt.exceptions except ImportError: From 58eef3dc2ca2b3464af8fb68cae03ab06de71926 Mon Sep 17 00:00:00 2001 From: William Cannon Date: Wed, 30 Aug 2017 15:44:25 -0500 Subject: [PATCH 305/639] fixed dunder usage problem in unit test --- 
tests/unit/modules/test_saltcheck.py | 42 ++++++++++++++++++---------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py index 7bc45f4318..d2ca89c685 100644 --- a/tests/unit/modules/test_saltcheck.py +++ b/tests/unit/modules/test_saltcheck.py @@ -1,28 +1,36 @@ # -*- coding: utf-8 -*- '''Unit test for saltcheck execution module''' -# Import python libs -from __future__ import absolute_import, print_function +# Import Python libs +from __future__ import absolute_import -# Import Salt Testing libs -# from tests.support.mixins import LoaderModuleMockMixin -# from tests.support.unit import skipIf, TestCase try: - from tests.support.unit import TestCase - from tests.support.mock import MagicMock, patch import salt.modules.saltcheck as saltcheck -except ImportError as error: - raise ImportError('Unable to import modules: {}'.format(error)) +except: + raise -saltcheck.__salt__ = {} +# Import Salt Testing Libs +try: + from tests.support.mixins import LoaderModuleMockMixin + from tests.support.unit import skipIf, TestCase + from tests.support.mock import ( + MagicMock, + patch, + NO_MOCK, + NO_MOCK_REASON + ) +except: + raise -class SaltCheckTestCase(TestCase): - ''' SaltCheckTestCase''' +@skipIf(NO_MOCK, NO_MOCK_REASON) +class LinuxSysctlTestCase(TestCase, LoaderModuleMockMixin): + ''' + TestCase for salt.modules.saltcheck module + ''' - def test_update_master_cache(self): - '''test master cache''' - self.assertTrue(saltcheck.update_master_cache) + def setup_loader_modules(self): + return {saltcheck: {}} def test_call_salt_command(self): '''test simple test.echo module''' @@ -34,6 +42,10 @@ class SaltCheckTestCase(TestCase): returned = sc_instance.call_salt_command(fun="test.echo", args=['hello'], kwargs=None) self.assertEqual(returned, 'hello') + def test_update_master_cache(self): + '''test master cache''' + self.assertTrue(saltcheck.update_master_cache) + def 
test_call_salt_command2(self): '''test simple test.echo module again''' with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True), From c4ae2de30ffda3a7e81802f508c2ac8cc8514ff7 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Wed, 30 Aug 2017 20:38:50 -0600 Subject: [PATCH 306/639] bootstrap can come from dunders --- salt/utils/cloud.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py index 4677fde631..4b84bd59c4 100644 --- a/salt/utils/cloud.py +++ b/salt/utils/cloud.py @@ -293,12 +293,14 @@ def salt_config_to_yaml(configuration, line_break='\n'): Dumper=SafeOrderedDumper) -def bootstrap(vm_, opts): +def bootstrap(vm_, opts=None): ''' This is the primary entry point for logging into any system (POSIX or Windows) to install Salt. It will make the decision on its own as to which deploy function to call. ''' + if opts is None: + opts = __opts__ deploy_config = salt.config.get_cloud_config_value( 'deploy', vm_, opts, default=False) From 7aeed3357501cefc71f5ae89f308e6f941c25d9a Mon Sep 17 00:00:00 2001 From: Jochen Breuer Date: Wed, 30 Aug 2017 10:14:38 +0200 Subject: [PATCH 307/639] Checking install_time for '(none)' value RedHat systems might in some cases return '(none)' as install_time, which would cause a ValueError. We are checking for '(none)' now. install_date and install_date_time are being set to None in that case. 
--- salt/utils/pkg/rpm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/pkg/rpm.py b/salt/utils/pkg/rpm.py index 41fa2ec92b..c5ee94aee2 100644 --- a/salt/utils/pkg/rpm.py +++ b/salt/utils/pkg/rpm.py @@ -104,7 +104,7 @@ def parse_pkginfo(line, osarch=None): if epoch not in ('(none)', '0'): version = ':'.join((epoch, version)) - if install_time: + if install_time not in ('(none)', '0'): install_date = datetime.datetime.utcfromtimestamp(int(install_time)).isoformat() + "Z" install_date_time_t = int(install_time) else: From d572d74e923700e70d8bf7df5b206166ba5c35c3 Mon Sep 17 00:00:00 2001 From: vnitinv Date: Sun, 27 Aug 2017 22:31:25 +0530 Subject: [PATCH 308/639] more check with keep alive functionality --- salt/proxy/junos.py | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/salt/proxy/junos.py b/salt/proxy/junos.py index ba2842b06e..54ac4c4ec7 100644 --- a/salt/proxy/junos.py +++ b/salt/proxy/junos.py @@ -127,9 +127,21 @@ def alive(opts): .. versionadded:: Oxygen ''' - thisproxy['conn'].connected = ping() - - return thisproxy['conn'].connected + dev = conn() + # call rpc only if ncclient queue is empty. If not empty that means other + # rpc call is going on. + if hasattr(dev._conn, '_session'): + if dev._conn._session._transport.is_active(): + # there is no on going rpc call. + if dev._conn._session._q.empty(): + thisproxy['conn'].connected = ping() + else: + # ssh connection is lost + dev.connected = False + else: + # other connection modes, like telnet + thisproxy['conn'].connected = ping() + return dev.connected def proxytype(): @@ -159,17 +171,14 @@ def ping(): ''' dev = conn() - # call rpc only if ncclient queue is empty. If not empty that means other - # rpc call is going on. 
- if hasattr(dev._conn, '_session') and dev._conn._session._q.empty(): + try: + dev.rpc.file_list(path='/dev/null', dev_timeout=2) + return True + except (RpcTimeoutError, ConnectClosedError): try: - dev.rpc.file_list(path='/dev/null', dev_timeout=2) - except (RpcTimeoutError, ConnectClosedError): - try: - dev.close() - except (RpcError, ConnectError, TimeoutExpiredError): - dev.connected = False - return dev.connected + dev.close() + except (RpcError, ConnectError, TimeoutExpiredError): + return False def shutdown(opts): From 2b4da0f0e72aec43dc0dcf4fc5237297acf8fd46 Mon Sep 17 00:00:00 2001 From: rallytime Date: Wed, 30 Aug 2017 18:08:40 -0400 Subject: [PATCH 309/639] Add CODEOWNERS file --- .github/CODEOWNERS | 60 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..29288c6efe --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,60 @@ +# SALTSTACK CODE OWNERS + +# See https://help.github.com/articles/about-codeowners/ +# for more info about CODEOWNERS file + +# Lines starting with '#' are comments. +# Each line is a file pattern followed by one or more owners. 
+ +# See https://help.github.com/articles/about-codeowners/ +# for more info about the CODEOWNERS file + +# Team Boto +salt/**/*boto* @saltstack/team-boto + +# Team Core +salt/auth/ @saltstack/team-core +salt/cache/ @saltstack/team-core +salt/cli/ @saltstack/team-core +salt/client/* @saltstack/team-core +salt/config/* @saltstack/team-core +salt/daemons/ @saltstack/team-core +salt/pillar/ @saltstack/team-core +salt/loader.py @saltstack/team-core +salt/payload.py @saltstack/team-core +salt/**/master* @saltstack/team-core +salt/**/minion* @saltstack/team-core + +# Team Cloud +salt/cloud/ @saltstack/team-cloud +salt/utils/openstack/ @saltstack/team-cloud +salt/utils/aws.py @saltstack/team-cloud +salt/**/*cloud* @saltstack/team-cloud + +# Team NetAPI +salt/cli/api.py @saltstack/team-netapi +salt/client/netapi.py @saltstack/team-netapi +salt/netapi/ @saltstack/team-netapi + +# Team Network +salt/proxy/ @saltstack/team-proxy + +# Team SPM +salt/cli/spm.py @saltstack/team-spm +salt/spm/ @saltstack/team-spm + +# Team SSH +salt/cli/ssh.py @saltstack/team-ssh +salt/client/ssh/ @saltstack/team-ssh +salt/runners/ssh.py @saltstack/team-ssh +salt/**/thin.py @saltstack/team-ssh + +# Team State +salt/state.py @saltstack/team-state + +# Team Transport +salt/transport/ @saltstack/team-transport +salt/utils/zeromq.py @saltstack/team-transport + +# Team Windows +salt/**/*win* @saltstack/team-windows From 1f104cf85b33d414408b131c79f3f85f30143497 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Thu, 31 Aug 2017 16:50:22 +0300 Subject: [PATCH 310/639] Fix ldap token groups auth. 
--- salt/auth/__init__.py | 34 +++++++++++++++++++++------------- salt/auth/ldap.py | 4 ++-- salt/daemons/masterapi.py | 21 +++------------------ salt/master.py | 21 +++------------------ 4 files changed, 29 insertions(+), 51 deletions(-) diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py index f90488e153..e39ecf8373 100644 --- a/salt/auth/__init__.py +++ b/salt/auth/__init__.py @@ -200,7 +200,7 @@ class LoadAuth(object): ''' if not self.authenticate_eauth(load): return {} - fstr = '{0}.auth'.format(load['eauth']) + hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5')) tok = str(hash_type(os.urandom(512)).hexdigest()) t_path = os.path.join(self.opts['token_dir'], tok) @@ -224,8 +224,9 @@ class LoadAuth(object): acl_ret = self.__get_acl(load) tdata['auth_list'] = acl_ret - if 'groups' in load: - tdata['groups'] = load['groups'] + groups = self.get_groups(load) + if groups: + tdata['groups'] = groups try: with salt.utils.files.set_umask(0o177): @@ -345,7 +346,7 @@ class LoadAuth(object): return False return True - def get_auth_list(self, load): + def get_auth_list(self, load, token=None): ''' Retrieve access list for the user specified in load. The list is built by eauth module or from master eauth configuration. @@ -353,30 +354,37 @@ class LoadAuth(object): list if the user has no rights to execute anything on this master and returns non-empty list if user is allowed to execute particular functions. ''' + # Get auth list from token + if token and self.opts['keep_acl_in_token'] and 'auth_list' in token: + return token['auth_list'] # Get acl from eauth module. 
auth_list = self.__get_acl(load) if auth_list is not None: return auth_list - if load['eauth'] not in self.opts['external_auth']: + eauth = token['eauth'] if token else load['eauth'] + if eauth not in self.opts['external_auth']: # No matching module is allowed in config log.warning('Authorization failure occurred.') return None - name = self.load_name(load) # The username we are attempting to auth with - groups = self.get_groups(load) # The groups this user belongs to - eauth_config = self.opts['external_auth'][load['eauth']] - if groups is None or groups is False: + if token: + name = token['name'] + groups = token['groups'] + else: + name = self.load_name(load) # The username we are attempting to auth with + groups = self.get_groups(load) # The groups this user belongs to + eauth_config = self.opts['external_auth'][eauth] + if not groups: groups = [] group_perm_keys = [item for item in eauth_config if item.endswith('%')] # The configured auth groups # First we need to know if the user is allowed to proceed via any of their group memberships. group_auth_match = False for group_config in group_perm_keys: - group_config = group_config.rstrip('%') - for group in groups: - if group == group_config: - group_auth_match = True + if group_config.rstrip('%') in groups: + group_auth_match = True + break # If a group_auth_match is set it means only that we have a # user which matches at least one or more of the groups defined # in the configuration file. 
diff --git a/salt/auth/ldap.py b/salt/auth/ldap.py index 396c1d00a2..3065429815 100644 --- a/salt/auth/ldap.py +++ b/salt/auth/ldap.py @@ -306,7 +306,7 @@ def groups(username, **kwargs): ''' group_list = [] - bind = _bind(username, kwargs['password'], + bind = _bind(username, kwargs.get('password'), anonymous=_config('anonymous', mandatory=False)) if bind: log.debug('ldap bind to determine group membership succeeded!') @@ -371,7 +371,7 @@ def groups(username, **kwargs): search_results = bind.search_s(search_base, ldap.SCOPE_SUBTREE, search_string, - [_config('accountattributename'), 'cn']) + [_config('accountattributename'), 'cn', _config('groupattribute')]) for _, entry in search_results: if username in entry[_config('accountattributename')]: group_list.append(entry['cn'][0]) diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index 9ca6c582fb..d47a5c3aa6 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -1055,12 +1055,7 @@ class LocalFuncs(object): return dict(error=dict(name=err_name, message='Authentication failure of type "token" occurred.')) username = token['name'] - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - load['eauth'] = token['eauth'] - load['username'] = username - auth_list = self.loadauth.get_auth_list(load) + auth_list = self.loadauth.get_auth_list(load, token) else: auth_type = 'eauth' err_name = 'EauthAuthenticationError' @@ -1102,12 +1097,7 @@ class LocalFuncs(object): return dict(error=dict(name=err_name, message='Authentication failure of type "token" occurred.')) username = token['name'] - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - load['eauth'] = token['eauth'] - load['username'] = username - auth_list = self.loadauth.get_auth_list(load) + auth_list = self.loadauth.get_auth_list(load, token) elif 'eauth' in load: auth_type = 'eauth' err_name = 'EauthAuthenticationError' @@ -1217,12 
+1207,7 @@ class LocalFuncs(object): return '' # Get acl from eauth module. - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - extra['eauth'] = token['eauth'] - extra['username'] = token['name'] - auth_list = self.loadauth.get_auth_list(extra) + auth_list = self.loadauth.get_auth_list(extra, token) # Authorize the request if not self.ckminions.auth_check( diff --git a/salt/master.py b/salt/master.py index 649a89a072..b913aeb1e5 100644 --- a/salt/master.py +++ b/salt/master.py @@ -1705,12 +1705,7 @@ class ClearFuncs(object): message='Authentication failure of type "token" occurred.')) # Authorize - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - clear_load['eauth'] = token['eauth'] - clear_load['username'] = token['name'] - auth_list = self.loadauth.get_auth_list(clear_load) + auth_list = self.loadauth.get_auth_list(clear_load, token) if not self.ckminions.runner_check(auth_list, clear_load['fun']): return dict(error=dict(name='TokenAuthenticationError', @@ -1774,12 +1769,7 @@ class ClearFuncs(object): message='Authentication failure of type "token" occurred.')) # Authorize - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - clear_load['eauth'] = token['eauth'] - clear_load['username'] = token['name'] - auth_list = self.loadauth.get_auth_list(clear_load) + auth_list = self.loadauth.get_auth_list(clear_load, token) if not self.ckminions.wheel_check(auth_list, clear_load['fun']): return dict(error=dict(name='TokenAuthenticationError', message=('Authentication failure of type "token" occurred for ' @@ -1900,12 +1890,7 @@ class ClearFuncs(object): return '' # Get acl - if self.opts['keep_acl_in_token'] and 'auth_list' in token: - auth_list = token['auth_list'] - else: - extra['eauth'] = token['eauth'] - extra['username'] = token['name'] - auth_list = self.loadauth.get_auth_list(extra) + auth_list = 
self.loadauth.get_auth_list(extra, token) # Authorize the request if not self.ckminions.auth_check( From 19c683b6f9f841d4ca61efe0dd382b8ea7592261 Mon Sep 17 00:00:00 2001 From: Silvio Moioli Date: Fri, 25 Aug 2017 10:04:54 +0200 Subject: [PATCH 311/639] zypper: support epoch and release as separate attrs in list_pkgs --- salt/modules/pkg_resource.py | 19 ++++++++++++++++--- salt/modules/zypper.py | 14 ++++++-------- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/salt/modules/pkg_resource.py b/salt/modules/pkg_resource.py index 1e156f7e42..a88f83ef9b 100644 --- a/salt/modules/pkg_resource.py +++ b/salt/modules/pkg_resource.py @@ -316,7 +316,8 @@ def format_pkg_list(packages, versions_as_list, attr): ''' ret = copy.deepcopy(packages) if attr: - requested_attr = set(['version', 'arch', 'install_date', 'install_date_time_t']) + requested_attr = set(['epoch', 'version', 'release', 'arch', + 'install_date', 'install_date_time_t']) if attr != 'all': requested_attr &= set(attr + ['version']) @@ -326,13 +327,25 @@ def format_pkg_list(packages, versions_as_list, attr): for all_attr in ret[name]: filtered_attr = {} for key in requested_attr: - filtered_attr[key] = all_attr[key] + if all_attr[key]: + filtered_attr[key] = all_attr[key] versions.append(filtered_attr) ret[name] = versions return ret for name in ret: - ret[name] = [d['version'] for d in ret[name]] + ret[name] = [format_version(d['epoch'], d['version'], d['release']) + for d in ret[name]] if not versions_as_list: stringify(ret) return ret + + +def format_version(epoch, version, release): + ''' + Formats a version string for list_pkgs. 
+ ''' + full_version = '{0}:{1}'.format(epoch, version) if epoch else version + if release: + full_version += '-{0}'.format(release) + return full_version diff --git a/salt/modules/zypper.py b/salt/modules/zypper.py index 60b920d929..66658ba1f6 100644 --- a/salt/modules/zypper.py +++ b/salt/modules/zypper.py @@ -666,7 +666,8 @@ def list_pkgs(versions_as_list=False, **kwargs): {'': [{'version' : 'version', 'arch' : 'arch'}]} - Valid attributes are: ``version``, ``arch``, ``install_date``, ``install_date_time_t``. + Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``, + ``install_date``, ``install_date_time_t``. If ``all`` is specified, all valid attributes will be returned. @@ -702,15 +703,11 @@ def list_pkgs(versions_as_list=False, **kwargs): ret = {} for line in __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False).splitlines(): name, pkgver, rel, arch, epoch, install_time = line.split('_|-') - if epoch: - pkgver = '{0}:{1}'.format(epoch, pkgver) - if rel: - pkgver += '-{0}'.format(rel) install_date = datetime.datetime.utcfromtimestamp(int(install_time)).isoformat() + "Z" install_date_time_t = int(install_time) - all_attr = {'version': pkgver, 'arch': arch, 'install_date': install_date, - 'install_date_time_t': install_date_time_t} + all_attr = {'epoch': epoch, 'version': pkgver, 'release': rel, 'arch': arch, + 'install_date': install_date, 'install_date_time_t': install_date_time_t} __salt__['pkg_resource.add_pkg'](ret, name, all_attr) for pkgname in ret: @@ -1097,7 +1094,8 @@ def install(name=None, 'version': '', 'arch': ''}}} - Valid attributes are: ``version``, ``arch``, ``install_date``, ``install_date_time_t``. + Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``, + ``install_date``, ``install_date_time_t``. If ``all`` is specified, all valid attributes will be returned. 
From 66ecab63c6275860b338ea06ca85743a70e437f2 Mon Sep 17 00:00:00 2001 From: Silvio Moioli Date: Fri, 25 Aug 2017 10:08:11 +0200 Subject: [PATCH 312/639] yumpkg: support epoch and release as separate attrs in list_pkgs --- salt/modules/yumpkg.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index ad0f4b6b53..ebd36b741f 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -616,7 +616,8 @@ def list_pkgs(versions_as_list=False, **kwargs): {'': [{'version' : 'version', 'arch' : 'arch'}]} - Valid attributes are: ``version``, ``arch``, ``install_date``, ``install_date_time_t``. + Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``, + ``install_date``, ``install_date_time_t``. If ``all`` is specified, all valid attributes will be returned. @@ -652,7 +653,16 @@ def list_pkgs(versions_as_list=False, **kwargs): osarch=__grains__['osarch'] ) if pkginfo is not None: - all_attr = {'version': pkginfo.version, 'arch': pkginfo.arch, 'install_date': pkginfo.install_date, + # see rpm version string rules available at https://goo.gl/UGKPNd + pkgver = pkginfo.version + epoch = '' + release = '' + if ':' in pkgver: + epoch, pkgver = pkgver.split(":", 1) + if '-' in pkgver: + pkgver, release = pkgver.split("-", 1) + all_attr = {'epoch': epoch, 'version': pkgver, 'release': release, + 'arch': pkginfo.arch, 'install_date': pkginfo.install_date, 'install_date_time_t': pkginfo.install_date_time_t} __salt__['pkg_resource.add_pkg'](ret, pkginfo.name, all_attr) @@ -1284,7 +1294,8 @@ def install(name=None, 'version': '', 'arch': ''}}} - Valid attributes are: ``version``, ``arch``, ``install_date``, ``install_date_time_t``. + Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``, + ``install_date``, ``install_date_time_t``. If ``all`` is specified, all valid attributes will be returned. 
From 7b0a7432f4d70e3bf7baab9fa33848dfa096af8c Mon Sep 17 00:00:00 2001 From: Silvio Moioli Date: Fri, 25 Aug 2017 10:08:51 +0200 Subject: [PATCH 313/639] test_zypper: adapt to new supported attrs --- tests/unit/modules/test_zypper.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/tests/unit/modules/test_zypper.py b/tests/unit/modules/test_zypper.py index ae343ee7d6..a6a0c88794 100644 --- a/tests/unit/modules/test_zypper.py +++ b/tests/unit/modules/test_zypper.py @@ -533,36 +533,42 @@ Repository 'DUMMY' not found by its alias, number, or URI. patch.dict(zypper.__salt__, {'pkg_resource.add_pkg': _add_data}), \ patch.dict(zypper.__salt__, {'pkg_resource.format_pkg_list': pkg_resource.format_pkg_list}), \ patch.dict(zypper.__salt__, {'pkg_resource.stringify': MagicMock()}): - pkgs = zypper.list_pkgs(attr=['arch', 'install_date_time_t']) + pkgs = zypper.list_pkgs(attr=['epoch', 'release', 'arch', 'install_date_time_t']) for pkg_name, pkg_attr in { 'jakarta-commons-discovery': { - 'version': '0.4-129.686', + 'version': '0.4', + 'release': '129.686', 'arch': 'noarch', 'install_date_time_t': 1498636511, }, 'yast2-ftp-server': { - 'version': '3.1.8-8.1', + 'version': '3.1.8', + 'release': '8.1', 'arch': 'x86_64', 'install_date_time_t': 1499257798, }, 'protobuf-java': { - 'version': '2.6.1-3.1.develHead', - 'arch': 'noarch', + 'version': '2.6.1', + 'release': '3.1.develHead', 'install_date_time_t': 1499257756, + 'arch': 'noarch', }, 'susemanager-build-keys-web': { - 'version': '12.0-5.1.develHead', + 'version': '12.0', + 'release': '5.1.develHead', 'arch': 'noarch', 'install_date_time_t': 1498636510, }, 'apache-commons-cli': { - 'version': '1.2-1.233', + 'version': '1.2', + 'release': '1.233', 'arch': 'noarch', 'install_date_time_t': 1498636510, }, 'jose4j': { - 'version': '0.4.4-2.1.develHead', 'arch': 'noarch', + 'version': '0.4.4', + 'release': '2.1.develHead', 'install_date_time_t': 1499257756, }}.items(): 
self.assertTrue(pkgs.get(pkg_name)) From 3dc46212cc572e9fea719efdb574f5aa960fb18d Mon Sep 17 00:00:00 2001 From: Silvio Moioli Date: Fri, 25 Aug 2017 10:06:05 +0200 Subject: [PATCH 314/639] test_yumpkg: adapt to new supported attrs --- tests/unit/modules/test_yumpkg.py | 44 ++++++++++++++++++++----------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/tests/unit/modules/test_yumpkg.py b/tests/unit/modules/test_yumpkg.py index cf754d6289..84e0a0ac52 100644 --- a/tests/unit/modules/test_yumpkg.py +++ b/tests/unit/modules/test_yumpkg.py @@ -103,72 +103,86 @@ class YumTestCase(TestCase, LoaderModuleMockMixin): patch.dict(yumpkg.__salt__, {'pkg_resource.add_pkg': _add_data}), \ patch.dict(yumpkg.__salt__, {'pkg_resource.format_pkg_list': pkg_resource.format_pkg_list}), \ patch.dict(yumpkg.__salt__, {'pkg_resource.stringify': MagicMock()}): - pkgs = yumpkg.list_pkgs(attr=['arch', 'install_date_time_t']) + pkgs = yumpkg.list_pkgs(attr=['epoch', 'release', 'arch', 'install_date_time_t']) for pkg_name, pkg_attr in { 'python-urlgrabber': { - 'version': '3.10-8.el7', + 'version': '3.10', + 'release': '8.el7', 'arch': 'noarch', 'install_date_time_t': 1487838471, }, 'alsa-lib': { - 'version': '1.1.1-1.el7', + 'version': '1.1.1', + 'release': '1.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838475, }, 'gnupg2': { - 'version': '2.0.22-4.el7', + 'version': '2.0.22', + 'release': '4.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838477, }, 'rpm-python': { - 'version': '4.11.3-21.el7', + 'version': '4.11.3', + 'release': '21.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838477, }, 'pygpgme': { - 'version': '0.3-9.el7', + 'version': '0.3', + 'release': '9.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838478, }, 'yum': { - 'version': '3.4.3-150.el7.centos', + 'version': '3.4.3', + 'release': '150.el7.centos', 'arch': 'noarch', 'install_date_time_t': 1487838479, }, 'lzo': { - 'version': '2.06-8.el7', + 'version': '2.06', + 'release': '8.el7', 'arch': 
'x86_64', 'install_date_time_t': 1487838479, }, 'qrencode-libs': { - 'version': '3.4.1-3.el7', + 'version': '3.4.1', + 'release': '3.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838480, }, 'ustr': { - 'version': '1.0.4-16.el7', + 'version': '1.0.4', + 'release': '16.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838480, }, 'shadow-utils': { - 'version': '2:4.1.5.1-24.el7', + 'epoch': '2', + 'version': '4.1.5.1', + 'release': '24.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838481, }, 'util-linux': { - 'version': '2.23.2-33.el7', + 'version': '2.23.2', + 'release': '33.el7', 'arch': 'x86_64', 'install_date_time_t': 1487838484, }, 'openssh': { - 'version': '6.6.1p1-33.el7_3', + 'version': '6.6.1p1', + 'release': '33.el7_3', 'arch': 'x86_64', 'install_date_time_t': 1487838485, }, 'virt-what': { - 'version': '1.13-8.el7', - 'arch': 'x86_64', + 'version': '1.13', + 'release': '8.el7', 'install_date_time_t': 1487838486, + 'arch': 'x86_64', }}.items(): self.assertTrue(pkgs.get(pkg_name)) self.assertEqual(pkgs[pkg_name], [pkg_attr]) From 3ad6911210d02b758c253e60a02312e167627396 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Thu, 31 Aug 2017 20:39:35 +0300 Subject: [PATCH 315/639] Fix for tests: don't require 'groups' in the eauth token. 
--- salt/auth/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py index e39ecf8373..73e4c98f8a 100644 --- a/salt/auth/__init__.py +++ b/salt/auth/__init__.py @@ -370,7 +370,7 @@ class LoadAuth(object): if token: name = token['name'] - groups = token['groups'] + groups = token.get('groups') else: name = self.load_name(load) # The username we are attempting to auth with groups = self.get_groups(load) # The groups this user belongs to From a5d9f85db6b4665ab74d2fa1b24aff37fd26a816 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 30 Aug 2017 18:05:17 -0600 Subject: [PATCH 316/639] Modifications to build scripts Py3 installs to c:\python35 Quote the path to the python install directory Fix spellings in comments Move pywin32 dlls to site-packages/win32 instead of python root Remove pywin32 postinstall and testing scripts Add passive switch to py3 uninstall --- pkg/windows/build.bat | 2 +- pkg/windows/build_env_2.ps1 | 24 +++++++++++++++++++++--- pkg/windows/build_env_3.ps1 | 8 ++++++-- pkg/windows/build_pkg.bat | 2 +- pkg/windows/clean_env.bat | 6 +++--- pkg/windows/modules/get-settings.psm1 | 6 +++--- 6 files changed, 35 insertions(+), 13 deletions(-) diff --git a/pkg/windows/build.bat b/pkg/windows/build.bat index 0117718539..59fafde137 100644 --- a/pkg/windows/build.bat +++ b/pkg/windows/build.bat @@ -89,7 +89,7 @@ if Defined x ( if %Python%==2 ( Set "PyDir=C:\Python27" ) else ( - Set "PyDir=C:\Program Files\Python35" + Set "PyDir=C:\Python35" ) Set "PATH=%PATH%;%PyDir%;%PyDir%\Scripts" diff --git a/pkg/windows/build_env_2.ps1 b/pkg/windows/build_env_2.ps1 index 98a922ca3d..b186517812 100644 --- a/pkg/windows/build_env_2.ps1 +++ b/pkg/windows/build_env_2.ps1 @@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python2Dir'])\python.exe") { DownloadFileWithProgress $url $file Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python2']) . . ." 
- $p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=$($ini['Settings']['Python2Dir'])" -Wait -NoNewWindow -PassThru + $p = Start-Process msiexec -ArgumentList "/i $file /qb ADDLOCAL=DefaultFeature,SharedCRT,Extensions,pip_feature,PrependPath TARGETDIR=`"$($ini['Settings']['Python2Dir'])`"" -Wait -NoNewWindow -PassThru } #------------------------------------------------------------------------------ @@ -191,7 +191,7 @@ If (!($Path.ToLower().Contains("$($ini['Settings']['Scripts2Dir'])".ToLower()))) #============================================================================== # Update PIP and SetupTools -# caching depends on environmant variable SALT_PIP_LOCAL_CACHE +# caching depends on environment variable SALT_PIP_LOCAL_CACHE #============================================================================== Write-Output " ----------------------------------------------------------------" Write-Output " - $script_name :: Updating PIP and SetupTools . . ." @@ -212,7 +212,7 @@ if ( ! [bool]$Env:SALT_PIP_LOCAL_CACHE) { #============================================================================== # Install pypi resources using pip -# caching depends on environmant variable SALT_REQ_LOCAL_CACHE +# caching depends on environment variable SALT_REQ_LOCAL_CACHE #============================================================================== Write-Output " ----------------------------------------------------------------" Write-Output " - $script_name :: Installing pypi resources using pip . . ." @@ -230,6 +230,24 @@ if ( ! 
[bool]$Env:SALT_REQ_LOCAL_CACHE) { Start_Process_and_test_exitcode "$($ini['Settings']['Python2Dir'])\python.exe" "-m pip install --no-index --find-links=$Env:SALT_REQ_LOCAL_CACHE -r $($script_path)\req_2.txt" "pip install" } +#============================================================================== +# Move PyWin32 DLL's to site-packages\win32 +#============================================================================== +Write-Output " - $script_name :: Moving PyWin32 DLLs . . ." +Move-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs2Dir'])\win32" -Force + +# Remove pywin32_system32 directory +Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ." +Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pywin32_system32" + +# Remove pythonwin directory +Write-Output " - $script_name :: Removing pythonwin Directory . . ." +Remove-Item "$($ini['Settings']['SitePkgs2Dir'])\pythonwin" -Force -Recurse + +# Remove PyWin32 PostInstall and testall Scripts +Write-Output " - $script_name :: Removing PyWin32 scripts . . ." +Remove-Item "$($ini['Settings']['Scripts2Dir'])\pywin32_*" -Force -Recurse + #============================================================================== # Install PyYAML with CLoader # This has to be a compiled binary to get the CLoader diff --git a/pkg/windows/build_env_3.ps1 b/pkg/windows/build_env_3.ps1 index 33f95871ae..0dcbafd996 100644 --- a/pkg/windows/build_env_3.ps1 +++ b/pkg/windows/build_env_3.ps1 @@ -175,7 +175,7 @@ If (Test-Path "$($ini['Settings']['Python3Dir'])\python.exe") { DownloadFileWithProgress $url $file Write-Output " - $script_name :: Installing $($ini[$bitPrograms]['Python3']) . . ." 
- $p = Start-Process $file -ArgumentList '/passive InstallAllUsers=1 TargetDir="C:\Program Files\Python35" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0' -Wait -NoNewWindow -PassThru + $p = Start-Process $file -ArgumentList "/passive InstallAllUsers=1 TargetDir=`"$($ini['Settings']['Python3Dir'])`" Include_doc=0 Include_tcltk=0 Include_test=0 Include_launcher=0 PrependPath=1 Shortcuts=0" -Wait -NoNewWindow -PassThru } #------------------------------------------------------------------------------ @@ -247,7 +247,7 @@ Start_Process_and_test_exitcode "$($ini['Settings']['Scripts3Dir'])\pip.exe" "i # Move DLL's to Python Root Write-Output " - $script_name :: Moving PyWin32 DLLs . . ." -Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['Python3Dir'])" -Force +Move-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32\*.dll" "$($ini['Settings']['SitePkgs3Dir'])\win32" -Force # Remove pywin32_system32 directory Write-Output " - $script_name :: Removing pywin32_system32 Directory . . ." @@ -257,6 +257,10 @@ Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pywin32_system32" Write-Output " - $script_name :: Removing pythonwin Directory . . ." Remove-Item "$($ini['Settings']['SitePkgs3Dir'])\pythonwin" -Force -Recurse +# Remove PyWin32 PostInstall and testall Scripts +Write-Output " - $script_name :: Removing PyWin32 scripts . . ." 
+Remove-Item "$($ini['Settings']['Scripts3Dir'])\pywin32_*" -Force -Recurse + #============================================================================== # Fix PyCrypto #============================================================================== diff --git a/pkg/windows/build_pkg.bat b/pkg/windows/build_pkg.bat index 0d30f047ac..95b185bfa7 100644 --- a/pkg/windows/build_pkg.bat +++ b/pkg/windows/build_pkg.bat @@ -56,7 +56,7 @@ if %Python%==2 ( Set "PyVerMajor=2" Set "PyVerMinor=7" ) else ( - Set "PyDir=C:\Program Files\Python35" + Set "PyDir=C:\Python35" Set "PyVerMajor=3" Set "PyVerMinor=5" ) diff --git a/pkg/windows/clean_env.bat b/pkg/windows/clean_env.bat index fb6e63a661..d474264a53 100644 --- a/pkg/windows/clean_env.bat +++ b/pkg/windows/clean_env.bat @@ -17,7 +17,7 @@ if %errorLevel%==0 ( echo. if exist "\Python27" goto RemovePython2 -if exist "\Program Files\Python35" goto RemovePython3 +if exist "\Python35" goto RemovePython3 goto eof :RemovePython2 @@ -53,13 +53,13 @@ goto eof :: 64 bit if exist "%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}" ( echo %0 :: - 3.5.3 64bit - "%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall + "%LOCALAPPDATA%\Package Cache\{b94f45d6-8461-440c-aa4d-bf197b2c2499}\python-3.5.3-amd64.exe" /uninstall /passive ) :: 32 bit if exist "%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}" ( echo %0 :: - 3.5.3 32bit - "%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall + "%LOCALAPPDATA%\Package Cache\{a10037e1-4247-47c9-935b-c5ca049d0299}\python-3.5.3" /uninstall /passive ) rem wipe the Python directory diff --git a/pkg/windows/modules/get-settings.psm1 b/pkg/windows/modules/get-settings.psm1 index 292732cb83..5c57738fd3 100644 --- a/pkg/windows/modules/get-settings.psm1 +++ b/pkg/windows/modules/get-settings.psm1 @@ -19,9 +19,9 @@ Function Get-Settings { "Python2Dir" = "C:\Python27" "Scripts2Dir" = 
"C:\Python27\Scripts" "SitePkgs2Dir" = "C:\Python27\Lib\site-packages" - "Python3Dir" = "C:\Program Files\Python35" - "Scripts3Dir" = "C:\Program Files\Python35\Scripts" - "SitePkgs3Dir" = "C:\Program Files\Python35\Lib\site-packages" + "Python3Dir" = "C:\Python35" + "Scripts3Dir" = "C:\Python35\Scripts" + "SitePkgs3Dir" = "C:\Python35\Lib\site-packages" "DownloadDir" = "$env:Temp\DevSalt" } # The script deletes the DownLoadDir (above) for each install. From d4214ca283cf7b69548816ea6bd5513977b47505 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Thu, 31 Aug 2017 13:44:12 -0500 Subject: [PATCH 317/639] file.py docs: specify absolute paths --- salt/states/file.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/states/file.py b/salt/states/file.py index 128fc7165a..3acea8f129 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -1154,7 +1154,7 @@ def managed(name, the salt master and potentially run through a templating system. name - The location of the file to manage + The location of the file to manage, as an absolute path. 
source The source file to download to the minion, this source file can be @@ -2041,7 +2041,7 @@ def directory(name, Ensure that a named directory is present and has the right perms name - The location to create or manage a directory + The location to create or manage a directory, as an absolute path user The user to own the directory; this defaults to the user salt is From 14a45918549cc17b6c4edc938448c33a5045fbf5 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Thu, 31 Aug 2017 13:59:07 -0500 Subject: [PATCH 318/639] file.py docs: correct group and mode --- salt/states/file.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/salt/states/file.py b/salt/states/file.py index 3acea8f129..ffc6f51c35 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -1324,13 +1324,15 @@ def managed(name, group The group ownership set for the file, this defaults to the group salt - is running as on the minion On Windows, this is ignored + is running as on the minion. On Windows, this is ignored mode - The permissions to set on this file, e.g. ``644``, ``0775``, or ``4664``. + The permissions to set on this file, e.g. ``644``, ``0775``, or + ``4664``. - The default mode for new files and directories corresponds umask of salt - process. For existing files and directories it's not enforced. + The default mode for new files and directories corresponds to the + umask of the salt process. The mode of existing files and directories + will only be changed if ``mode`` is specified. .. note:: This option is **not** supported on Windows. 
From 9979ccb613f32a3277caed5f21437c9a8eac5e88 Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 31 Aug 2017 14:15:06 -0600 Subject: [PATCH 319/639] Remove Py2 and Py3 in the same run --- pkg/windows/clean_env.bat | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/pkg/windows/clean_env.bat b/pkg/windows/clean_env.bat index d474264a53..7c5e497802 100644 --- a/pkg/windows/clean_env.bat +++ b/pkg/windows/clean_env.bat @@ -16,9 +16,10 @@ if %errorLevel%==0 ( ) echo. +:CheckPython2 if exist "\Python27" goto RemovePython2 -if exist "\Python35" goto RemovePython3 -goto eof + +goto CheckPython3 :RemovePython2 rem Uninstall Python 2.7 @@ -47,6 +48,11 @@ goto eof goto eof +:CheckPython3 +if exist "\Python35" goto RemovePython3 + +goto eof + :RemovePython3 echo %0 :: Uninstalling Python 3 ... echo --------------------------------------------------------------------- @@ -63,9 +69,9 @@ goto eof ) rem wipe the Python directory - echo %0 :: Removing the C:\Program Files\Python35 Directory ... + echo %0 :: Removing the C:\Python35 Directory ... 
echo --------------------------------------------------------------------- - rd /s /q "C:\Program Files\Python35" + rd /s /q "C:\Python35" if %errorLevel%==0 ( echo Successful ) else ( From 3fbf24b91a1f2d4613fe951b05f14ec5552873a8 Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 31 Aug 2017 16:56:58 -0600 Subject: [PATCH 320/639] Use os.sep instead of '/' --- salt/netapi/rest_cherrypy/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/netapi/rest_cherrypy/__init__.py b/salt/netapi/rest_cherrypy/__init__.py index 6285de289c..d974cda6c1 100644 --- a/salt/netapi/rest_cherrypy/__init__.py +++ b/salt/netapi/rest_cherrypy/__init__.py @@ -20,7 +20,7 @@ try: except ImportError as exc: cpy_error = exc -__virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] or 'rest_cherrypy' +__virtualname__ = os.path.abspath(__file__).rsplit(os.sep)[-2] or 'rest_cherrypy' logger = logging.getLogger(__virtualname__) cpy_min = '3.2.2' From c93d2ed386182dbe85d6d62ae17d985930db4858 Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 31 Aug 2017 17:01:14 -0600 Subject: [PATCH 321/639] Use os.sep instead of '/' --- salt/netapi/rest_tornado/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/netapi/rest_tornado/__init__.py b/salt/netapi/rest_tornado/__init__.py index fc547a02a3..2437714073 100644 --- a/salt/netapi/rest_tornado/__init__.py +++ b/salt/netapi/rest_tornado/__init__.py @@ -10,7 +10,7 @@ import os import salt.auth from salt.utils.versions import StrictVersion as _StrictVersion -__virtualname__ = os.path.abspath(__file__).rsplit('/')[-2] or 'rest_tornado' +__virtualname__ = os.path.abspath(__file__).rsplit(os.sep)[-2] or 'rest_tornado' logger = logging.getLogger(__virtualname__) From ec94a137506f0289bbf0d9c2db4352dc57cb7d2b Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Fri, 1 Sep 2017 10:20:46 -0500 Subject: [PATCH 322/639] cron docs: Remind user to use quotes for special strings Otherwise @ signs are stripped. 
https://github.com/saltstack/salt/issues/2896 --- salt/states/cron.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/salt/states/cron.py b/salt/states/cron.py index 381bb173ab..efd1039baf 100644 --- a/salt/states/cron.py +++ b/salt/states/cron.py @@ -116,7 +116,7 @@ entry on the minion already contains a numeric value, then using the ``random`` keyword will not modify it. Added the opportunity to set a job with a special keyword like '@reboot' or -'@hourly'. +'@hourly'. Quotes must be used, otherwise PyYAML will strip the '@' sign. .. code-block:: yaml @@ -302,7 +302,8 @@ def present(name, edits. This defaults to the state id special - A special keyword to specify periodicity (eg. @reboot, @hourly...) + A special keyword to specify periodicity (eg. @reboot, @hourly...). + Quotes must be used, otherwise PyYAML will strip the '@' sign. .. versionadded:: 2016.3.0 ''' @@ -388,7 +389,8 @@ def absent(name, edits. This defaults to the state id special - The special keyword used in the job (eg. @reboot, @hourly...) + The special keyword used in the job (eg. @reboot, @hourly...). + Quotes must be used, otherwise PyYAML will strip the '@' sign. ''' ### NOTE: The keyword arguments in **kwargs are ignored in this state, but ### cannot be removed from the function definition, otherwise the use From a8de71f7350428c82df23d26365da0deef58a86a Mon Sep 17 00:00:00 2001 From: rajvidhimar Date: Fri, 1 Sep 2017 18:29:54 +0530 Subject: [PATCH 323/639] Bug fix for keep_alive feature of junos. --- salt/proxy/junos.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/proxy/junos.py b/salt/proxy/junos.py index 54ac4c4ec7..d8136e9af0 100644 --- a/salt/proxy/junos.py +++ b/salt/proxy/junos.py @@ -128,6 +128,11 @@ def alive(opts): ''' dev = conn() + + # Check that the underlying netconf connection still exists. + if dev._conn is None: + return False + # call rpc only if ncclient queue is empty. If not empty that means other # rpc call is going on. 
if hasattr(dev._conn, '_session'): From f36efbd6a7bed64c28b7c525ef382e219621e2af Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 31 Aug 2017 17:10:38 -0600 Subject: [PATCH 324/639] Fix `unit.test_spm` for Windows This only fixes the test... I don't think it fixes SPM on Windows --- salt/spm/__init__.py | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/salt/spm/__init__.py b/salt/spm/__init__.py index 2bd2646b61..dc7e1009d5 100644 --- a/salt/spm/__init__.py +++ b/salt/spm/__init__.py @@ -14,9 +14,12 @@ import shutil import msgpack import hashlib import logging -import pwd -import grp import sys +try: + import pwd + import grp +except ImportError: + pass # Import Salt libs import salt.client @@ -491,10 +494,20 @@ class SPMClient(object): # No defaults for this in config.py; default to the current running # user and group - uid = self.opts.get('spm_uid', os.getuid()) - gid = self.opts.get('spm_gid', os.getgid()) - uname = pwd.getpwuid(uid)[0] - gname = grp.getgrgid(gid)[0] + import salt.utils + if salt.utils.is_windows(): + import salt.utils.win_functions + cur_user = salt.utils.win_functions.get_current_user() + cur_user_sid = salt.utils.win_functions.get_sid_from_name(cur_user) + uid = self.opts.get('spm_uid', cur_user_sid) + gid = self.opts.get('spm_gid', cur_user_sid) + uname = cur_user + gname = cur_user + else: + uid = self.opts.get('spm_uid', os.getuid()) + gid = self.opts.get('spm_gid', os.getgid()) + uname = pwd.getpwuid(uid)[0] + gname = grp.getgrgid(gid)[0] # Second pass: install the files for member in pkg_files: @@ -710,7 +723,7 @@ class SPMClient(object): raise SPMInvocationError('A path to a directory must be specified') if args[1] == '.': - repo_path = os.environ['PWD'] + repo_path = os.getcwd() else: repo_path = args[1] From b8da04c04da6bb26dbbe905cdf26cf5961f22fb6 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 1 Sep 2017 11:27:58 -0600 Subject: [PATCH 325/639] Add Mike's changes Remove unnecessary 
assignment Use getcwdu() --- salt/spm/__init__.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/salt/spm/__init__.py b/salt/spm/__init__.py index dc7e1009d5..47b4defab8 100644 --- a/salt/spm/__init__.py +++ b/salt/spm/__init__.py @@ -497,12 +497,10 @@ class SPMClient(object): import salt.utils if salt.utils.is_windows(): import salt.utils.win_functions - cur_user = salt.utils.win_functions.get_current_user() - cur_user_sid = salt.utils.win_functions.get_sid_from_name(cur_user) - uid = self.opts.get('spm_uid', cur_user_sid) - gid = self.opts.get('spm_gid', cur_user_sid) - uname = cur_user - gname = cur_user + uname = gname = salt.utils.win_functions.get_current_user() + uname_sid = salt.utils.win_functions.get_sid_from_name(uname) + uid = self.opts.get('spm_uid', uname_sid) + gid = self.opts.get('spm_gid', uname_sid) else: uid = self.opts.get('spm_uid', os.getuid()) gid = self.opts.get('spm_gid', os.getgid()) @@ -723,7 +721,7 @@ class SPMClient(object): raise SPMInvocationError('A path to a directory must be specified') if args[1] == '.': - repo_path = os.getcwd() + repo_path = os.getcwdu() else: repo_path = args[1] From 4a86f23bf11315eba93ea8c0fd798ff207581315 Mon Sep 17 00:00:00 2001 From: rallytime Date: Wed, 30 Aug 2017 17:31:23 -0400 Subject: [PATCH 326/639] Move state-related utils to salt.utils.state.py Moves the following functions from `salt.utils` to `salt.utils.state`: - gen_state_tag - search_onfail_requisites - check_onfail_requisites - check_state_result Original functions have a deprecation warning added and all references in Salt code were updated to use the new function path, including moving some unit tests. 
--- doc/ref/states/aggregate.rst | 2 +- salt/client/mixins.py | 6 +- salt/modules/dockermod.py | 5 +- salt/modules/state.py | 5 +- salt/runners/state.py | 3 +- salt/states/iptables.py | 4 +- salt/states/pkg.py | 4 +- salt/states/saltmod.py | 4 +- salt/utils/__init__.py | 234 +++++++---------- salt/utils/state.py | 155 ++++++++++++ tests/unit/utils/test_state.py | 445 +++++++++++++++++++++++++++++++++ tests/unit/utils/test_utils.py | 423 ------------------------------- 12 files changed, 705 insertions(+), 585 deletions(-) create mode 100644 salt/utils/state.py create mode 100644 tests/unit/utils/test_state.py diff --git a/doc/ref/states/aggregate.rst b/doc/ref/states/aggregate.rst index e8aa61f689..39bdf2ba0f 100644 --- a/doc/ref/states/aggregate.rst +++ b/doc/ref/states/aggregate.rst @@ -122,7 +122,7 @@ This example, simplified from the pkg state, shows how to create mod_aggregate f for chunk in chunks: # The state runtime uses "tags" to track completed jobs, it may # look familiar with the _|- - tag = salt.utils.gen_state_tag(chunk) + tag = salt.utils.state.gen_state_tag(chunk) if tag in running: # Already ran the pkg state, skip aggregation continue diff --git a/salt/client/mixins.py b/salt/client/mixins.py index 2e4214e52b..0d7641d2e5 100644 --- a/salt/client/mixins.py +++ b/salt/client/mixins.py @@ -16,7 +16,8 @@ import copy as pycopy # Import Salt libs import salt.exceptions import salt.minion -import salt.utils +import salt.utils # Can be removed once daemonize, get_specific_user, format_call are moved +import salt.utils.args import salt.utils.doc import salt.utils.error import salt.utils.event @@ -25,6 +26,7 @@ import salt.utils.job import salt.utils.lazy import salt.utils.platform import salt.utils.process +import salt.utils.state import salt.utils.versions import salt.transport import salt.log.setup @@ -396,7 +398,7 @@ class SyncClientMixin(object): data[u'success'] = True if isinstance(data[u'return'], dict) and u'data' in data[u'return']: # some 
functions can return boolean values - data[u'success'] = salt.utils.check_state_result(data[u'return'][u'data']) + data[u'success'] = salt.utils.state.check_state_result(data[u'return'][u'data']) except (Exception, SystemExit) as ex: if isinstance(ex, salt.exceptions.NotImplemented): data[u'return'] = str(ex) diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py index 31b7c83422..a0f27a9870 100644 --- a/salt/modules/dockermod.py +++ b/salt/modules/dockermod.py @@ -207,6 +207,7 @@ import salt.utils.decorators import salt.utils.docker import salt.utils.files import salt.utils.path +import salt.utils.state import salt.utils.stringutils import salt.utils.thin import salt.pillar @@ -5420,7 +5421,7 @@ def sls(name, mods=None, saltenv='base', **kwargs): ) if not isinstance(ret, dict): __context__['retcode'] = 1 - elif not salt.utils.check_state_result(ret): + elif not salt.utils.state.check_state_result(ret): __context__['retcode'] = 2 else: __context__['retcode'] = 0 @@ -5494,7 +5495,7 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base', # Now execute the state into the container ret = sls(id_, mods, saltenv, **kwargs) # fail if the state was not successful - if not dryrun and not salt.utils.check_state_result(ret): + if not dryrun and not salt.utils.state.check_state_result(ret): raise CommandExecutionError(ret) if dryrun is False: ret = commit(id_, name) diff --git a/salt/modules/state.py b/salt/modules/state.py index c585489419..7394a84733 100644 --- a/salt/modules/state.py +++ b/salt/modules/state.py @@ -33,6 +33,7 @@ import salt.utils.event import salt.utils.files import salt.utils.jid import salt.utils.platform +import salt.utils.state import salt.utils.url import salt.utils.versions from salt.exceptions import CommandExecutionError, SaltInvocationError @@ -98,7 +99,7 @@ def _set_retcode(ret, highstate=None): if isinstance(ret, list): __context__['retcode'] = 1 return - if not salt.utils.check_state_result(ret, 
highstate=highstate): + if not salt.utils.state.check_state_result(ret, highstate=highstate): __context__['retcode'] = 2 @@ -316,7 +317,7 @@ def low(data, queue=False, **kwargs): ret = st_.call(data) if isinstance(ret, list): __context__['retcode'] = 1 - if salt.utils.check_state_result(ret): + if salt.utils.state.check_state_result(ret): __context__['retcode'] = 2 return ret diff --git a/salt/runners/state.py b/salt/runners/state.py index e27298c2b6..61de5070d6 100644 --- a/salt/runners/state.py +++ b/salt/runners/state.py @@ -10,6 +10,7 @@ import logging import salt.loader import salt.utils import salt.utils.event +import salt.utils.state from salt.exceptions import SaltInvocationError LOGGER = logging.getLogger(__name__) @@ -81,7 +82,7 @@ def orchestrate(mods, pillar_enc=pillar_enc, orchestration_jid=orchestration_jid) ret = {'data': {minion.opts['id']: running}, 'outputter': 'highstate'} - res = salt.utils.check_state_result(ret['data']) + res = salt.utils.state.check_state_result(ret['data']) if res: ret['retcode'] = 0 else: diff --git a/salt/states/iptables.py b/salt/states/iptables.py index ec4eec4e96..35c6cd7465 100644 --- a/salt/states/iptables.py +++ b/salt/states/iptables.py @@ -194,7 +194,7 @@ at some point be deprecated in favor of a more generic ``firewall`` state. 
from __future__ import absolute_import # Import salt libs -import salt.utils +import salt.utils.state from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS @@ -810,7 +810,7 @@ def mod_aggregate(low, chunks, running): if low.get('fun') not in agg_enabled: return low for chunk in chunks: - tag = salt.utils.gen_state_tag(chunk) + tag = salt.utils.state.gen_state_tag(chunk) if tag in running: # Already ran the iptables state, skip aggregation continue diff --git a/salt/states/pkg.py b/salt/states/pkg.py index 103907eab8..16c404f1de 100644 --- a/salt/states/pkg.py +++ b/salt/states/pkg.py @@ -81,9 +81,9 @@ import os import re # Import Salt libs -import salt.utils # Can be removed once gen_state_tag is moved import salt.utils.pkg import salt.utils.platform +import salt.utils.state import salt.utils.versions from salt.output import nested from salt.utils import namespaced_function as _namespaced_function @@ -3071,7 +3071,7 @@ def mod_aggregate(low, chunks, running): if low.get('fun') not in agg_enabled: return low for chunk in chunks: - tag = salt.utils.gen_state_tag(chunk) + tag = salt.utils.state.gen_state_tag(chunk) if tag in running: # Already ran the pkg state, skip aggregation continue diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index ae48a7e334..3c00acde27 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -30,8 +30,8 @@ import time # Import salt libs import salt.syspaths -import salt.utils # Can be removed once check_state_result is moved import salt.utils.event +import salt.utils.state import salt.utils.versions from salt.ext import six @@ -342,7 +342,7 @@ def state(name, except KeyError: m_state = False if m_state: - m_state = salt.utils.check_state_result(m_ret, recurse=True) + m_state = salt.utils.state.check_state_result(m_ret, recurse=True) if not m_state: if minion not in fail_minions: diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 902bd6d015..8c3e3c1274 100644 --- 
a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -19,9 +19,7 @@ import fnmatch import hashlib import json import logging -import numbers import os -import posixpath import random import re import shlex @@ -29,10 +27,8 @@ import shutil import socket import sys import pstats -import tempfile import time import types -import warnings import string import subprocess import getpass @@ -40,7 +36,6 @@ import getpass # Import 3rd-party libs from salt.ext import six # pylint: disable=import-error -from salt.ext.six.moves.urllib.parse import urlparse # pylint: disable=no-name-in-module # pylint: disable=redefined-builtin from salt.ext.six.moves import range from salt.ext.six.moves import zip @@ -134,7 +129,6 @@ from salt.exceptions import ( log = logging.getLogger(__name__) -_empty = object() def get_context(template, line, num_lines=5, marker=None): @@ -1376,148 +1370,6 @@ def check_include_exclude(path_str, include_pat=None, exclude_pat=None): return ret -def gen_state_tag(low): - ''' - Generate the running dict tag string from the low data structure - ''' - return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low) - - -def search_onfail_requisites(sid, highstate): - """ - For a particular low chunk, search relevant onfail related - states - """ - onfails = [] - if '_|-' in sid: - st = salt.state.split_low_tag(sid) - else: - st = {'__id__': sid} - for fstate, fchunks in six.iteritems(highstate): - if fstate == st['__id__']: - continue - else: - for mod_, fchunk in six.iteritems(fchunks): - if ( - not isinstance(mod_, six.string_types) or - mod_.startswith('__') - ): - continue - else: - if not isinstance(fchunk, list): - continue - else: - # bydefault onfail will fail, but you can - # set onfail_stop: False to prevent the highstate - # to stop if you handle it - onfail_handled = False - for fdata in fchunk: - if not isinstance(fdata, dict): - continue - onfail_handled = (fdata.get('onfail_stop', True) - is False) - if onfail_handled: - break - if not 
onfail_handled: - continue - for fdata in fchunk: - if not isinstance(fdata, dict): - continue - for knob, fvalue in six.iteritems(fdata): - if knob != 'onfail': - continue - for freqs in fvalue: - for fmod, fid in six.iteritems(freqs): - if not ( - fid == st['__id__'] and - fmod == st.get('state', fmod) - ): - continue - onfails.append((fstate, mod_, fchunk)) - return onfails - - -def check_onfail_requisites(state_id, state_result, running, highstate): - ''' - When a state fail and is part of a highstate, check - if there is onfail requisites. - When we find onfail requisites, we will consider the state failed - only if at least one of those onfail requisites also failed - - Returns: - - True: if onfail handlers suceeded - False: if one on those handler failed - None: if the state does not have onfail requisites - - ''' - nret = None - if ( - state_id and state_result and - highstate and isinstance(highstate, dict) - ): - onfails = search_onfail_requisites(state_id, highstate) - if onfails: - for handler in onfails: - fstate, mod_, fchunk = handler - ofresult = True - for rstateid, rstate in six.iteritems(running): - if '_|-' in rstateid: - st = salt.state.split_low_tag(rstateid) - # in case of simple state, try to guess - else: - id_ = rstate.get('__id__', rstateid) - if not id_: - raise ValueError('no state id') - st = {'__id__': id_, 'state': mod_} - if mod_ == st['state'] and fstate == st['__id__']: - ofresult = rstate.get('result', _empty) - if ofresult in [False, True]: - nret = ofresult - if ofresult is False: - # as soon as we find an errored onfail, we stop - break - # consider that if we parsed onfailes without changing - # the ret, that we have failed - if nret is None: - nret = False - return nret - - -def check_state_result(running, recurse=False, highstate=None): - ''' - Check the total return value of the run and determine if the running - dict has any issues - ''' - if not isinstance(running, dict): - return False - - if not running: - return False 
- - ret = True - for state_id, state_result in six.iteritems(running): - if not recurse and not isinstance(state_result, dict): - ret = False - if ret and isinstance(state_result, dict): - result = state_result.get('result', _empty) - if result is False: - ret = False - # only override return value if we are not already failed - elif result is _empty and isinstance(state_result, dict) and ret: - ret = check_state_result( - state_result, recurse=True, highstate=highstate) - # if we detect a fail, check for onfail requisites - if not ret: - # ret can be None in case of no onfail reqs, recast it to bool - ret = bool(check_onfail_requisites(state_id, state_result, - running, highstate)) - # return as soon as we got a failure - if not ret: - break - return ret - - def st_mode_to_octal(mode): ''' Convert the st_mode value from a stat(2) call (as returned from os.stat()) @@ -3403,3 +3255,89 @@ def get_colors(use=True, theme=None): 'Oxygen. This warning will be removed in Salt Neon.' ) return salt.utils.color.get_colors(use=use, theme=theme) + + +def gen_state_tag(low): + ''' + Generate the running dict tag string from the low data structure + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.state + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.gen_state_tag\' detected. This function has been ' + 'moved to \'salt.utils.state.gen_state_tag\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' + ) + return salt.utils.state.gen_state_tag(low) + + +def search_onfail_requisites(sid, highstate): + ''' + For a particular low chunk, search relevant onfail related states + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.state + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.search_onfail_requisites\' detected. 
This function' + 'has been moved to \'salt.utils.state.search_onfail_requisites\' as of ' + 'Salt Oxygen. This warning will be removed in Salt Neon.' + ) + return salt.utils.state.search_onfail_requisites(sid, highstate) + + +def check_onfail_requisites(state_id, state_result, running, highstate): + ''' + When a state fail and is part of a highstate, check + if there is onfail requisites. + When we find onfail requisites, we will consider the state failed + only if at least one of those onfail requisites also failed + + Returns: + + True: if onfail handlers suceeded + False: if one on those handler failed + None: if the state does not have onfail requisites + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.state + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.check_onfail_requisites\' detected. This function' + 'has been moved to \'salt.utils.state.check_onfail_requisites\' as of ' + 'Salt Oxygen. This warning will be removed in Salt Neon.' + ) + return salt.utils.state.check_onfail_requisites( + state_id, state_result, running, highstate + ) + + +def check_state_result(running, recurse=False, highstate=None): + ''' + Check the total return value of the run and determine if the running + dict has any issues + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.state + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.check_state_result\' detected. This function' + 'has been moved to \'salt.utils.state.check_state_result\' as of ' + 'Salt Oxygen. This warning will be removed in Salt Neon.' 
+ ) + return salt.utils.state.check_state_result( + running, recurse=recurse, highstate=highstate + ) diff --git a/salt/utils/state.py b/salt/utils/state.py new file mode 100644 index 0000000000..c1721d41a2 --- /dev/null +++ b/salt/utils/state.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- +''' +Utility functions for state functions + +.. versionadded:: Oxygen +''' + +# Import Python Libs +from __future__ import absolute_import + +# Import Salt libs +from salt.ext import six +import salt.state + +_empty = object() + + +def gen_state_tag(low): + ''' + Generate the running dict tag string from the low data structure + ''' + return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low) + + +def search_onfail_requisites(sid, highstate): + ''' + For a particular low chunk, search relevant onfail related states + ''' + onfails = [] + if '_|-' in sid: + st = salt.state.split_low_tag(sid) + else: + st = {'__id__': sid} + for fstate, fchunks in six.iteritems(highstate): + if fstate == st['__id__']: + continue + else: + for mod_, fchunk in six.iteritems(fchunks): + if ( + not isinstance(mod_, six.string_types) or + mod_.startswith('__') + ): + continue + else: + if not isinstance(fchunk, list): + continue + else: + # bydefault onfail will fail, but you can + # set onfail_stop: False to prevent the highstate + # to stop if you handle it + onfail_handled = False + for fdata in fchunk: + if not isinstance(fdata, dict): + continue + onfail_handled = (fdata.get('onfail_stop', True) + is False) + if onfail_handled: + break + if not onfail_handled: + continue + for fdata in fchunk: + if not isinstance(fdata, dict): + continue + for knob, fvalue in six.iteritems(fdata): + if knob != 'onfail': + continue + for freqs in fvalue: + for fmod, fid in six.iteritems(freqs): + if not ( + fid == st['__id__'] and + fmod == st.get('state', fmod) + ): + continue + onfails.append((fstate, mod_, fchunk)) + return onfails + + +def check_onfail_requisites(state_id, state_result, running, 
highstate): + ''' + When a state fail and is part of a highstate, check + if there is onfail requisites. + When we find onfail requisites, we will consider the state failed + only if at least one of those onfail requisites also failed + + Returns: + + True: if onfail handlers suceeded + False: if one on those handler failed + None: if the state does not have onfail requisites + + ''' + nret = None + if ( + state_id and state_result and + highstate and isinstance(highstate, dict) + ): + onfails = search_onfail_requisites(state_id, highstate) + if onfails: + for handler in onfails: + fstate, mod_, fchunk = handler + for rstateid, rstate in six.iteritems(running): + if '_|-' in rstateid: + st = salt.state.split_low_tag(rstateid) + # in case of simple state, try to guess + else: + id_ = rstate.get('__id__', rstateid) + if not id_: + raise ValueError('no state id') + st = {'__id__': id_, 'state': mod_} + if mod_ == st['state'] and fstate == st['__id__']: + ofresult = rstate.get('result', _empty) + if ofresult in [False, True]: + nret = ofresult + if ofresult is False: + # as soon as we find an errored onfail, we stop + break + # consider that if we parsed onfailes without changing + # the ret, that we have failed + if nret is None: + nret = False + return nret + + +def check_state_result(running, recurse=False, highstate=None): + ''' + Check the total return value of the run and determine if the running + dict has any issues + ''' + if not isinstance(running, dict): + return False + + if not running: + return False + + ret = True + for state_id, state_result in six.iteritems(running): + if not recurse and not isinstance(state_result, dict): + ret = False + if ret and isinstance(state_result, dict): + result = state_result.get('result', _empty) + if result is False: + ret = False + # only override return value if we are not already failed + elif result is _empty and isinstance(state_result, dict) and ret: + ret = check_state_result( + state_result, recurse=True, 
highstate=highstate) + # if we detect a fail, check for onfail requisites + if not ret: + # ret can be None in case of no onfail reqs, recast it to bool + ret = bool(check_onfail_requisites(state_id, state_result, + running, highstate)) + # return as soon as we got a failure + if not ret: + break + return ret diff --git a/tests/unit/utils/test_state.py b/tests/unit/utils/test_state.py new file mode 100644 index 0000000000..c6578dc998 --- /dev/null +++ b/tests/unit/utils/test_state.py @@ -0,0 +1,445 @@ +# -*- coding: utf-8 -*- +''' +Unit Tests for functions located in salt.utils.state.py. +''' + +# Import python libs +from __future__ import absolute_import + +# Import Salt libs +from salt.ext import six +import salt.utils.odict +import salt.utils.state + +# Import Salt Testing libs +from tests.support.unit import TestCase + + +class StateUtilTestCase(TestCase): + ''' + Test case for state util. + ''' + def test_check_state_result(self): + self.assertFalse(salt.utils.state.check_state_result(None), + 'Failed to handle None as an invalid data type.') + self.assertFalse(salt.utils.state.check_state_result([]), + 'Failed to handle an invalid data type.') + self.assertFalse(salt.utils.state.check_state_result({}), + 'Failed to handle an empty dictionary.') + self.assertFalse(salt.utils.state.check_state_result({'host1': []}), + 'Failed to handle an invalid host data structure.') + test_valid_state = {'host1': {'test_state': {'result': 'We have liftoff!'}}} + self.assertTrue(salt.utils.state.check_state_result(test_valid_state)) + test_valid_false_states = { + 'test1': salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('test_state0', {'result': True}), + ('test_state', {'result': False}), + ])), + ]), + 'test2': salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('test_state0', {'result': True}), + ('test_state', {'result': True}), + ])), + ('host2', + salt.utils.odict.OrderedDict([ + ('test_state0', {'result': 
True}), + ('test_state', {'result': False}), + ])), + ]), + 'test3': ['a'], + 'test4': salt.utils.odict.OrderedDict([ + ('asup', salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('test_state0', {'result': True}), + ('test_state', {'result': True}), + ])), + ('host2', + salt.utils.odict.OrderedDict([ + ('test_state0', {'result': True}), + ('test_state', {'result': False}), + ])) + ])) + ]), + 'test5': salt.utils.odict.OrderedDict([ + ('asup', salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('test_state0', {'result': True}), + ('test_state', {'result': True}), + ])), + ('host2', salt.utils.odict.OrderedDict([])) + ])) + ]) + } + for test, data in six.iteritems(test_valid_false_states): + self.assertFalse( + salt.utils.state.check_state_result(data), + msg='{0} failed'.format(test)) + test_valid_true_states = { + 'test1': salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('test_state0', {'result': True}), + ('test_state', {'result': True}), + ])), + ]), + 'test3': salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('test_state0', {'result': True}), + ('test_state', {'result': True}), + ])), + ('host2', + salt.utils.odict.OrderedDict([ + ('test_state0', {'result': True}), + ('test_state', {'result': True}), + ])), + ]), + 'test4': salt.utils.odict.OrderedDict([ + ('asup', salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('test_state0', {'result': True}), + ('test_state', {'result': True}), + ])), + ('host2', + salt.utils.odict.OrderedDict([ + ('test_state0', {'result': True}), + ('test_state', {'result': True}), + ])) + ])) + ]), + 'test2': salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('test_state0', {'result': None}), + ('test_state', {'result': True}), + ])), + ('host2', + salt.utils.odict.OrderedDict([ + ('test_state0', {'result': True}), + ('test_state', {'result': 'abc'}), + ])) + ]) + } + 
for test, data in six.iteritems(test_valid_true_states): + self.assertTrue( + salt.utils.state.check_state_result(data), + msg='{0} failed'.format(test)) + test_invalid_true_ht_states = { + 'test_onfail_simple2': ( + salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('test_vstate0', {'result': False}), + ('test_vstate1', {'result': True}), + ])), + ]), + { + 'test_vstate0': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + 'run', + {'order': 10002}]}, + 'test_vstate1': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + salt.utils.odict.OrderedDict([ + ('onfail_stop', True), + ('onfail', + [salt.utils.odict.OrderedDict([('cmd', 'test_vstate0')])]) + ]), + 'run', + {'order': 10004}]}, + } + ), + 'test_onfail_integ2': ( + salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('t_|-test_ivstate0_|-echo_|-run', { + 'result': False}), + ('cmd_|-test_ivstate0_|-echo_|-run', { + 'result': False}), + ('cmd_|-test_ivstate1_|-echo_|-run', { + 'result': False}), + ])), + ]), + { + 'test_ivstate0': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + 'run', + {'order': 10002}], + 't': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + 'run', + {'order': 10002}]}, + 'test_ivstate1': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + salt.utils.odict.OrderedDict([ + ('onfail_stop', False), + ('onfail', + [salt.utils.odict.OrderedDict([('cmd', 'test_ivstate0')])]) + ]), + 'run', + {'order': 10004}]}, + } + ), + 'test_onfail_integ3': ( + salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('t_|-test_ivstate0_|-echo_|-run', { + 'result': True}), + ('cmd_|-test_ivstate0_|-echo_|-run', { + 'result': False}), + ('cmd_|-test_ivstate1_|-echo_|-run', { + 'result': False}), + 
])), + ]), + { + 'test_ivstate0': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + 'run', + {'order': 10002}], + 't': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + 'run', + {'order': 10002}]}, + 'test_ivstate1': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + salt.utils.odict.OrderedDict([ + ('onfail_stop', False), + ('onfail', + [salt.utils.odict.OrderedDict([('cmd', 'test_ivstate0')])]) + ]), + 'run', + {'order': 10004}]}, + } + ), + 'test_onfail_integ4': ( + salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('t_|-test_ivstate0_|-echo_|-run', { + 'result': False}), + ('cmd_|-test_ivstate0_|-echo_|-run', { + 'result': False}), + ('cmd_|-test_ivstate1_|-echo_|-run', { + 'result': True}), + ])), + ]), + { + 'test_ivstate0': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + 'run', + {'order': 10002}], + 't': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + 'run', + {'order': 10002}]}, + 'test_ivstate1': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + salt.utils.odict.OrderedDict([ + ('onfail_stop', False), + ('onfail', + [salt.utils.odict.OrderedDict([('cmd', 'test_ivstate0')])]) + ]), + 'run', + {'order': 10004}]}, + 'test_ivstate2': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + salt.utils.odict.OrderedDict([ + ('onfail_stop', True), + ('onfail', + [salt.utils.odict.OrderedDict([('cmd', 'test_ivstate0')])]) + ]), + 'run', + {'order': 10004}]}, + } + ), + 'test_onfail': ( + salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('test_state0', {'result': False}), + ('test_state', {'result': True}), + ])), + ]), + None + ), + 'test_onfail_d': ( + salt.utils.odict.OrderedDict([ + ('host1', + 
salt.utils.odict.OrderedDict([ + ('test_state0', {'result': False}), + ('test_state', {'result': True}), + ])), + ]), + {} + ) + } + for test, testdata in six.iteritems(test_invalid_true_ht_states): + data, ht = testdata + for t_ in [a for a in data['host1']]: + tdata = data['host1'][t_] + if '_|-' in t_: + t_ = t_.split('_|-')[1] + tdata['__id__'] = t_ + self.assertFalse( + salt.utils.state.check_state_result(data, highstate=ht), + msg='{0} failed'.format(test)) + + test_valid_true_ht_states = { + 'test_onfail_integ': ( + salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('cmd_|-test_ivstate0_|-echo_|-run', { + 'result': False}), + ('cmd_|-test_ivstate1_|-echo_|-run', { + 'result': True}), + ])), + ]), + { + 'test_ivstate0': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + 'run', + {'order': 10002}]}, + 'test_ivstate1': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + salt.utils.odict.OrderedDict([ + ('onfail_stop', False), + ('onfail', + [salt.utils.odict.OrderedDict([('cmd', 'test_ivstate0')])]) + ]), + 'run', + {'order': 10004}]}, + } + ), + 'test_onfail_intega3': ( + salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('t_|-test_ivstate0_|-echo_|-run', { + 'result': True}), + ('cmd_|-test_ivstate0_|-echo_|-run', { + 'result': False}), + ('cmd_|-test_ivstate1_|-echo_|-run', { + 'result': True}), + ])), + ]), + { + 'test_ivstate0': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + 'run', + {'order': 10002}], + 't': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + 'run', + {'order': 10002}]}, + 'test_ivstate1': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + salt.utils.odict.OrderedDict([ + ('onfail_stop', False), + ('onfail', + 
[salt.utils.odict.OrderedDict([('cmd', 'test_ivstate0')])]) + ]), + 'run', + {'order': 10004}]}, + } + ), + 'test_onfail_simple': ( + salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('test_vstate0', {'result': False}), + ('test_vstate1', {'result': True}), + ])), + ]), + { + 'test_vstate0': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + 'run', + {'order': 10002}]}, + 'test_vstate1': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + salt.utils.odict.OrderedDict([ + ('onfail_stop', False), + ('onfail', + [salt.utils.odict.OrderedDict([('cmd', 'test_vstate0')])]) + ]), + 'run', + {'order': 10004}]}, + } + ), # order is different + 'test_onfail_simple_rev': ( + salt.utils.odict.OrderedDict([ + ('host1', + salt.utils.odict.OrderedDict([ + ('test_vstate0', {'result': False}), + ('test_vstate1', {'result': True}), + ])), + ]), + { + 'test_vstate0': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + 'run', + {'order': 10002}]}, + 'test_vstate1': { + '__env__': 'base', + '__sls__': u'a', + 'cmd': [salt.utils.odict.OrderedDict([('name', '/bin/true')]), + salt.utils.odict.OrderedDict([ + ('onfail', + [salt.utils.odict.OrderedDict([('cmd', 'test_vstate0')])]) + ]), + salt.utils.odict.OrderedDict([('onfail_stop', False)]), + 'run', + {'order': 10004}]}, + } + ) + } + for test, testdata in six.iteritems(test_valid_true_ht_states): + data, ht = testdata + for t_ in [a for a in data['host1']]: + tdata = data['host1'][t_] + if '_|-' in t_: + t_ = t_.split('_|-')[1] + tdata['__id__'] = t_ + self.assertTrue( + salt.utils.state.check_state_result(data, highstate=ht), + msg='{0} failed'.format(test)) + test_valid_false_state = {'host1': {'test_state': {'result': False}}} + self.assertFalse(salt.utils.check_state_result(test_valid_false_state)) diff --git 
a/tests/unit/utils/test_utils.py b/tests/unit/utils/test_utils.py index 79efe49c9c..7ae386fe63 100644 --- a/tests/unit/utils/test_utils.py +++ b/tests/unit/utils/test_utils.py @@ -20,7 +20,6 @@ import salt.utils import salt.utils.jid import salt.utils.yamlencoding import salt.utils.zeromq -from salt.utils.odict import OrderedDict from salt.exceptions import (SaltInvocationError, SaltSystemExit, CommandNotFoundError) # Import Python libraries @@ -30,7 +29,6 @@ import zmq from collections import namedtuple # Import 3rd-party libs -from salt.ext import six try: import timelib # pylint: disable=import-error,unused-import HAS_TIMELIB = True @@ -290,427 +288,6 @@ class UtilsTestCase(TestCase): self.assertEqual(salt.utils.sanitize_win_path_string('\\windows\\system'), '\\windows\\system') self.assertEqual(salt.utils.sanitize_win_path_string('\\bo:g|us\\p?at*h>'), '\\bo_g_us\\p_at_h_') - def test_check_state_result(self): - self.assertFalse(salt.utils.check_state_result(None), "Failed to handle None as an invalid data type.") - self.assertFalse(salt.utils.check_state_result([]), "Failed to handle an invalid data type.") - self.assertFalse(salt.utils.check_state_result({}), "Failed to handle an empty dictionary.") - self.assertFalse(salt.utils.check_state_result({'host1': []}), "Failed to handle an invalid host data structure.") - test_valid_state = {'host1': {'test_state': {'result': 'We have liftoff!'}}} - self.assertTrue(salt.utils.check_state_result(test_valid_state)) - test_valid_false_states = { - 'test1': OrderedDict([ - ('host1', - OrderedDict([ - ('test_state0', {'result': True}), - ('test_state', {'result': False}), - ])), - ]), - 'test2': OrderedDict([ - ('host1', - OrderedDict([ - ('test_state0', {'result': True}), - ('test_state', {'result': True}), - ])), - ('host2', - OrderedDict([ - ('test_state0', {'result': True}), - ('test_state', {'result': False}), - ])), - ]), - 'test3': ['a'], - 'test4': OrderedDict([ - ('asup', OrderedDict([ - ('host1', - 
OrderedDict([ - ('test_state0', {'result': True}), - ('test_state', {'result': True}), - ])), - ('host2', - OrderedDict([ - ('test_state0', {'result': True}), - ('test_state', {'result': False}), - ])) - ])) - ]), - 'test5': OrderedDict([ - ('asup', OrderedDict([ - ('host1', - OrderedDict([ - ('test_state0', {'result': True}), - ('test_state', {'result': True}), - ])), - ('host2', OrderedDict([])) - ])) - ]) - } - for test, data in six.iteritems(test_valid_false_states): - self.assertFalse( - salt.utils.check_state_result(data), - msg='{0} failed'.format(test)) - test_valid_true_states = { - 'test1': OrderedDict([ - ('host1', - OrderedDict([ - ('test_state0', {'result': True}), - ('test_state', {'result': True}), - ])), - ]), - 'test3': OrderedDict([ - ('host1', - OrderedDict([ - ('test_state0', {'result': True}), - ('test_state', {'result': True}), - ])), - ('host2', - OrderedDict([ - ('test_state0', {'result': True}), - ('test_state', {'result': True}), - ])), - ]), - 'test4': OrderedDict([ - ('asup', OrderedDict([ - ('host1', - OrderedDict([ - ('test_state0', {'result': True}), - ('test_state', {'result': True}), - ])), - ('host2', - OrderedDict([ - ('test_state0', {'result': True}), - ('test_state', {'result': True}), - ])) - ])) - ]), - 'test2': OrderedDict([ - ('host1', - OrderedDict([ - ('test_state0', {'result': None}), - ('test_state', {'result': True}), - ])), - ('host2', - OrderedDict([ - ('test_state0', {'result': True}), - ('test_state', {'result': 'abc'}), - ])) - ]) - } - for test, data in six.iteritems(test_valid_true_states): - self.assertTrue( - salt.utils.check_state_result(data), - msg='{0} failed'.format(test)) - test_invalid_true_ht_states = { - 'test_onfail_simple2': ( - OrderedDict([ - ('host1', - OrderedDict([ - ('test_vstate0', {'result': False}), - ('test_vstate1', {'result': True}), - ])), - ]), - { - 'test_vstate0': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - 'run', - {'order': 10002}]}, - 
'test_vstate1': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - OrderedDict([ - ('onfail_stop', True), - ('onfail', - [OrderedDict([('cmd', 'test_vstate0')])]) - ]), - 'run', - {'order': 10004}]}, - } - ), - 'test_onfail_integ2': ( - OrderedDict([ - ('host1', - OrderedDict([ - ('t_|-test_ivstate0_|-echo_|-run', { - 'result': False}), - ('cmd_|-test_ivstate0_|-echo_|-run', { - 'result': False}), - ('cmd_|-test_ivstate1_|-echo_|-run', { - 'result': False}), - ])), - ]), - { - 'test_ivstate0': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - 'run', - {'order': 10002}], - 't': [OrderedDict([('name', '/bin/true')]), - 'run', - {'order': 10002}]}, - 'test_ivstate1': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - OrderedDict([ - ('onfail_stop', False), - ('onfail', - [OrderedDict([('cmd', 'test_ivstate0')])]) - ]), - 'run', - {'order': 10004}]}, - } - ), - 'test_onfail_integ3': ( - OrderedDict([ - ('host1', - OrderedDict([ - ('t_|-test_ivstate0_|-echo_|-run', { - 'result': True}), - ('cmd_|-test_ivstate0_|-echo_|-run', { - 'result': False}), - ('cmd_|-test_ivstate1_|-echo_|-run', { - 'result': False}), - ])), - ]), - { - 'test_ivstate0': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - 'run', - {'order': 10002}], - 't': [OrderedDict([('name', '/bin/true')]), - 'run', - {'order': 10002}]}, - 'test_ivstate1': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - OrderedDict([ - ('onfail_stop', False), - ('onfail', - [OrderedDict([('cmd', 'test_ivstate0')])]) - ]), - 'run', - {'order': 10004}]}, - } - ), - 'test_onfail_integ4': ( - OrderedDict([ - ('host1', - OrderedDict([ - ('t_|-test_ivstate0_|-echo_|-run', { - 'result': False}), - ('cmd_|-test_ivstate0_|-echo_|-run', { - 'result': False}), - ('cmd_|-test_ivstate1_|-echo_|-run', { - 'result': True}), - ])), - ]), 
- { - 'test_ivstate0': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - 'run', - {'order': 10002}], - 't': [OrderedDict([('name', '/bin/true')]), - 'run', - {'order': 10002}]}, - 'test_ivstate1': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - OrderedDict([ - ('onfail_stop', False), - ('onfail', - [OrderedDict([('cmd', 'test_ivstate0')])]) - ]), - 'run', - {'order': 10004}]}, - 'test_ivstate2': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - OrderedDict([ - ('onfail_stop', True), - ('onfail', - [OrderedDict([('cmd', 'test_ivstate0')])]) - ]), - 'run', - {'order': 10004}]}, - } - ), - 'test_onfail': ( - OrderedDict([ - ('host1', - OrderedDict([ - ('test_state0', {'result': False}), - ('test_state', {'result': True}), - ])), - ]), - None - ), - 'test_onfail_d': ( - OrderedDict([ - ('host1', - OrderedDict([ - ('test_state0', {'result': False}), - ('test_state', {'result': True}), - ])), - ]), - {} - ) - } - for test, testdata in six.iteritems(test_invalid_true_ht_states): - data, ht = testdata - for t_ in [a for a in data['host1']]: - tdata = data['host1'][t_] - if '_|-' in t_: - t_ = t_.split('_|-')[1] - tdata['__id__'] = t_ - self.assertFalse( - salt.utils.check_state_result(data, highstate=ht), - msg='{0} failed'.format(test)) - - test_valid_true_ht_states = { - 'test_onfail_integ': ( - OrderedDict([ - ('host1', - OrderedDict([ - ('cmd_|-test_ivstate0_|-echo_|-run', { - 'result': False}), - ('cmd_|-test_ivstate1_|-echo_|-run', { - 'result': True}), - ])), - ]), - { - 'test_ivstate0': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - 'run', - {'order': 10002}]}, - 'test_ivstate1': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - OrderedDict([ - ('onfail_stop', False), - ('onfail', - [OrderedDict([('cmd', 'test_ivstate0')])]) - ]), - 'run', - {'order': 
10004}]}, - } - ), - 'test_onfail_intega3': ( - OrderedDict([ - ('host1', - OrderedDict([ - ('t_|-test_ivstate0_|-echo_|-run', { - 'result': True}), - ('cmd_|-test_ivstate0_|-echo_|-run', { - 'result': False}), - ('cmd_|-test_ivstate1_|-echo_|-run', { - 'result': True}), - ])), - ]), - { - 'test_ivstate0': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - 'run', - {'order': 10002}], - 't': [OrderedDict([('name', '/bin/true')]), - 'run', - {'order': 10002}]}, - 'test_ivstate1': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - OrderedDict([ - ('onfail_stop', False), - ('onfail', - [OrderedDict([('cmd', 'test_ivstate0')])]) - ]), - 'run', - {'order': 10004}]}, - } - ), - 'test_onfail_simple': ( - OrderedDict([ - ('host1', - OrderedDict([ - ('test_vstate0', {'result': False}), - ('test_vstate1', {'result': True}), - ])), - ]), - { - 'test_vstate0': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - 'run', - {'order': 10002}]}, - 'test_vstate1': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - OrderedDict([ - ('onfail_stop', False), - ('onfail', - [OrderedDict([('cmd', 'test_vstate0')])]) - ]), - 'run', - {'order': 10004}]}, - } - ), # order is different - 'test_onfail_simple_rev': ( - OrderedDict([ - ('host1', - OrderedDict([ - ('test_vstate0', {'result': False}), - ('test_vstate1', {'result': True}), - ])), - ]), - { - 'test_vstate0': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - 'run', - {'order': 10002}]}, - 'test_vstate1': { - '__env__': 'base', - '__sls__': u'a', - 'cmd': [OrderedDict([('name', '/bin/true')]), - OrderedDict([ - ('onfail', - [OrderedDict([('cmd', 'test_vstate0')])]) - ]), - OrderedDict([('onfail_stop', False)]), - 'run', - {'order': 10004}]}, - } - ) - } - for test, testdata in six.iteritems(test_valid_true_ht_states): - data, ht = 
testdata - for t_ in [a for a in data['host1']]: - tdata = data['host1'][t_] - if '_|-' in t_: - t_ = t_.split('_|-')[1] - tdata['__id__'] = t_ - self.assertTrue( - salt.utils.check_state_result(data, highstate=ht), - msg='{0} failed'.format(test)) - test_valid_false_state = {'host1': {'test_state': {'result': False}}} - self.assertFalse(salt.utils.check_state_result(test_valid_false_state)) - @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(not hasattr(zmq, 'IPC_PATH_MAX_LEN'), "ZMQ does not have max length support.") def test_check_ipc_length(self): From 0b35a70be2f3b1823e492bb9300cd65fd3b43a5b Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 1 Sep 2017 13:22:34 -0400 Subject: [PATCH 327/639] Change function names to not be so redundant Now that the name of the util file is state.py, the function names can be shortened: - check_state_result --> check_result - gen_state_tag --> gen_tag --- salt/utils/__init__.py | 10 +++++----- salt/utils/state.py | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 8c3e3c1274..8cf709751c 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -3269,10 +3269,10 @@ def gen_state_tag(low): salt.utils.versions.warn_until( 'Neon', 'Use of \'salt.utils.gen_state_tag\' detected. This function has been ' - 'moved to \'salt.utils.state.gen_state_tag\' as of Salt Oxygen. This ' - 'warning will be removed in Salt Neon.' + 'moved to \'salt.utils.state.gen_tag\' as of Salt Oxygen. This warning ' + 'will be removed in Salt Neon.' ) - return salt.utils.state.gen_state_tag(low) + return salt.utils.state.gen_tag(low) def search_onfail_requisites(sid, highstate): @@ -3335,9 +3335,9 @@ def check_state_result(running, recurse=False, highstate=None): salt.utils.versions.warn_until( 'Neon', 'Use of \'salt.utils.check_state_result\' detected. 
This function' - 'has been moved to \'salt.utils.state.check_state_result\' as of ' + 'has been moved to \'salt.utils.state.check_result\' as of ' 'Salt Oxygen. This warning will be removed in Salt Neon.' ) - return salt.utils.state.check_state_result( + return salt.utils.state.check_result( running, recurse=recurse, highstate=highstate ) diff --git a/salt/utils/state.py b/salt/utils/state.py index c1721d41a2..292558a16b 100644 --- a/salt/utils/state.py +++ b/salt/utils/state.py @@ -15,7 +15,7 @@ import salt.state _empty = object() -def gen_state_tag(low): +def gen_tag(low): ''' Generate the running dict tag string from the low data structure ''' @@ -121,7 +121,7 @@ def check_onfail_requisites(state_id, state_result, running, highstate): return nret -def check_state_result(running, recurse=False, highstate=None): +def check_result(running, recurse=False, highstate=None): ''' Check the total return value of the run and determine if the running dict has any issues @@ -142,7 +142,7 @@ def check_state_result(running, recurse=False, highstate=None): ret = False # only override return value if we are not already failed elif result is _empty and isinstance(state_result, dict) and ret: - ret = check_state_result( + ret = check_result( state_result, recurse=True, highstate=highstate) # if we detect a fail, check for onfail requisites if not ret: From 8127bca9ed7fe98c726df706504129ab00771b9d Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 1 Sep 2017 13:33:22 -0400 Subject: [PATCH 328/639] Use __utils__ where possible for moved state utils And update all references to new state names in salt.utils.state.py --- doc/ref/states/aggregate.rst | 2 +- salt/client/mixins.py | 2 +- salt/modules/dockermod.py | 5 ++--- salt/modules/state.py | 6 ++---- salt/runners/state.py | 3 +-- salt/states/iptables.py | 3 +-- salt/states/pkg.py | 3 +-- salt/states/saltmod.py | 3 +-- tests/unit/utils/test_state.py | 22 +++++++++++----------- 9 files changed, 21 insertions(+), 28 deletions(-) diff 
--git a/doc/ref/states/aggregate.rst b/doc/ref/states/aggregate.rst index 39bdf2ba0f..e56357f34c 100644 --- a/doc/ref/states/aggregate.rst +++ b/doc/ref/states/aggregate.rst @@ -122,7 +122,7 @@ This example, simplified from the pkg state, shows how to create mod_aggregate f for chunk in chunks: # The state runtime uses "tags" to track completed jobs, it may # look familiar with the _|- - tag = salt.utils.state.gen_state_tag(chunk) + tag = salt.utils.state.gen_tag(chunk) if tag in running: # Already ran the pkg state, skip aggregation continue diff --git a/salt/client/mixins.py b/salt/client/mixins.py index 0d7641d2e5..4cdf9c92eb 100644 --- a/salt/client/mixins.py +++ b/salt/client/mixins.py @@ -398,7 +398,7 @@ class SyncClientMixin(object): data[u'success'] = True if isinstance(data[u'return'], dict) and u'data' in data[u'return']: # some functions can return boolean values - data[u'success'] = salt.utils.state.check_state_result(data[u'return'][u'data']) + data[u'success'] = salt.utils.state.check_result(data[u'return'][u'data']) except (Exception, SystemExit) as ex: if isinstance(ex, salt.exceptions.NotImplemented): data[u'return'] = str(ex) diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py index a0f27a9870..25bd968063 100644 --- a/salt/modules/dockermod.py +++ b/salt/modules/dockermod.py @@ -207,7 +207,6 @@ import salt.utils.decorators import salt.utils.docker import salt.utils.files import salt.utils.path -import salt.utils.state import salt.utils.stringutils import salt.utils.thin import salt.pillar @@ -5421,7 +5420,7 @@ def sls(name, mods=None, saltenv='base', **kwargs): ) if not isinstance(ret, dict): __context__['retcode'] = 1 - elif not salt.utils.state.check_state_result(ret): + elif not __utils__['state.check_result'](ret): __context__['retcode'] = 2 else: __context__['retcode'] = 0 @@ -5495,7 +5494,7 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base', # Now execute the state into the container ret = sls(id_, mods, 
saltenv, **kwargs) # fail if the state was not successful - if not dryrun and not salt.utils.state.check_state_result(ret): + if not dryrun and not __utils__['state.check_result'](ret): raise CommandExecutionError(ret) if dryrun is False: ret = commit(id_, name) diff --git a/salt/modules/state.py b/salt/modules/state.py index 7394a84733..2a714947ad 100644 --- a/salt/modules/state.py +++ b/salt/modules/state.py @@ -33,7 +33,6 @@ import salt.utils.event import salt.utils.files import salt.utils.jid import salt.utils.platform -import salt.utils.state import salt.utils.url import salt.utils.versions from salt.exceptions import CommandExecutionError, SaltInvocationError @@ -99,8 +98,7 @@ def _set_retcode(ret, highstate=None): if isinstance(ret, list): __context__['retcode'] = 1 return - if not salt.utils.state.check_state_result(ret, highstate=highstate): - + if not __utils__['state.check_result'](ret, highstate=highstate): __context__['retcode'] = 2 @@ -317,7 +315,7 @@ def low(data, queue=False, **kwargs): ret = st_.call(data) if isinstance(ret, list): __context__['retcode'] = 1 - if salt.utils.state.check_state_result(ret): + if __utils__['state.check_result'](ret): __context__['retcode'] = 2 return ret diff --git a/salt/runners/state.py b/salt/runners/state.py index 61de5070d6..25f12a814e 100644 --- a/salt/runners/state.py +++ b/salt/runners/state.py @@ -10,7 +10,6 @@ import logging import salt.loader import salt.utils import salt.utils.event -import salt.utils.state from salt.exceptions import SaltInvocationError LOGGER = logging.getLogger(__name__) @@ -82,7 +81,7 @@ def orchestrate(mods, pillar_enc=pillar_enc, orchestration_jid=orchestration_jid) ret = {'data': {minion.opts['id']: running}, 'outputter': 'highstate'} - res = salt.utils.state.check_state_result(ret['data']) + res = __utils__['state.check_result'](ret['data']) if res: ret['retcode'] = 0 else: diff --git a/salt/states/iptables.py b/salt/states/iptables.py index 35c6cd7465..6163e79950 100644 --- 
a/salt/states/iptables.py +++ b/salt/states/iptables.py @@ -194,7 +194,6 @@ at some point be deprecated in favor of a more generic ``firewall`` state. from __future__ import absolute_import # Import salt libs -import salt.utils.state from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS @@ -810,7 +809,7 @@ def mod_aggregate(low, chunks, running): if low.get('fun') not in agg_enabled: return low for chunk in chunks: - tag = salt.utils.state.gen_state_tag(chunk) + tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the iptables state, skip aggregation continue diff --git a/salt/states/pkg.py b/salt/states/pkg.py index 16c404f1de..159f110cbc 100644 --- a/salt/states/pkg.py +++ b/salt/states/pkg.py @@ -83,7 +83,6 @@ import re # Import Salt libs import salt.utils.pkg import salt.utils.platform -import salt.utils.state import salt.utils.versions from salt.output import nested from salt.utils import namespaced_function as _namespaced_function @@ -3071,7 +3070,7 @@ def mod_aggregate(low, chunks, running): if low.get('fun') not in agg_enabled: return low for chunk in chunks: - tag = salt.utils.state.gen_state_tag(chunk) + tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index 3c00acde27..e087ae0550 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -31,7 +31,6 @@ import time # Import salt libs import salt.syspaths import salt.utils.event -import salt.utils.state import salt.utils.versions from salt.ext import six @@ -342,7 +341,7 @@ def state(name, except KeyError: m_state = False if m_state: - m_state = salt.utils.state.check_state_result(m_ret, recurse=True) + m_state = __utils__['state.check_result'](m_ret, recurse=True) if not m_state: if minion not in fail_minions: diff --git a/tests/unit/utils/test_state.py b/tests/unit/utils/test_state.py index c6578dc998..2da5554f28 100644 --- 
a/tests/unit/utils/test_state.py +++ b/tests/unit/utils/test_state.py @@ -19,17 +19,17 @@ class StateUtilTestCase(TestCase): ''' Test case for state util. ''' - def test_check_state_result(self): - self.assertFalse(salt.utils.state.check_state_result(None), + def test_check_result(self): + self.assertFalse(salt.utils.state.check_result(None), 'Failed to handle None as an invalid data type.') - self.assertFalse(salt.utils.state.check_state_result([]), + self.assertFalse(salt.utils.state.check_result([]), 'Failed to handle an invalid data type.') - self.assertFalse(salt.utils.state.check_state_result({}), + self.assertFalse(salt.utils.state.check_result({}), 'Failed to handle an empty dictionary.') - self.assertFalse(salt.utils.state.check_state_result({'host1': []}), + self.assertFalse(salt.utils.state.check_result({'host1': []}), 'Failed to handle an invalid host data structure.') test_valid_state = {'host1': {'test_state': {'result': 'We have liftoff!'}}} - self.assertTrue(salt.utils.state.check_state_result(test_valid_state)) + self.assertTrue(salt.utils.state.check_result(test_valid_state)) test_valid_false_states = { 'test1': salt.utils.odict.OrderedDict([ ('host1', @@ -78,7 +78,7 @@ class StateUtilTestCase(TestCase): } for test, data in six.iteritems(test_valid_false_states): self.assertFalse( - salt.utils.state.check_state_result(data), + salt.utils.state.check_result(data), msg='{0} failed'.format(test)) test_valid_true_states = { 'test1': salt.utils.odict.OrderedDict([ @@ -129,7 +129,7 @@ class StateUtilTestCase(TestCase): } for test, data in six.iteritems(test_valid_true_states): self.assertTrue( - salt.utils.state.check_state_result(data), + salt.utils.state.check_result(data), msg='{0} failed'.format(test)) test_invalid_true_ht_states = { 'test_onfail_simple2': ( @@ -305,7 +305,7 @@ class StateUtilTestCase(TestCase): t_ = t_.split('_|-')[1] tdata['__id__'] = t_ self.assertFalse( - salt.utils.state.check_state_result(data, highstate=ht), + 
salt.utils.state.check_result(data, highstate=ht), msg='{0} failed'.format(test)) test_valid_true_ht_states = { @@ -439,7 +439,7 @@ class StateUtilTestCase(TestCase): t_ = t_.split('_|-')[1] tdata['__id__'] = t_ self.assertTrue( - salt.utils.state.check_state_result(data, highstate=ht), + salt.utils.state.check_result(data, highstate=ht), msg='{0} failed'.format(test)) test_valid_false_state = {'host1': {'test_state': {'result': False}}} - self.assertFalse(salt.utils.check_state_result(test_valid_false_state)) + self.assertFalse(salt.utils.state.check_result(test_valid_false_state)) From d89143321454b935efd439fd9ec2a83535924b41 Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 1 Sep 2017 16:03:11 -0400 Subject: [PATCH 329/639] Adjust test mocking to handle __utils__['state.check_result'] --- doc/ref/states/aggregate.rst | 2 +- tests/unit/modules/test_state.py | 10 +++++++++- tests/unit/states/test_saltmod.py | 9 ++++++++- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/doc/ref/states/aggregate.rst b/doc/ref/states/aggregate.rst index e56357f34c..ce25507a1c 100644 --- a/doc/ref/states/aggregate.rst +++ b/doc/ref/states/aggregate.rst @@ -122,7 +122,7 @@ This example, simplified from the pkg state, shows how to create mod_aggregate f for chunk in chunks: # The state runtime uses "tags" to track completed jobs, it may # look familiar with the _|- - tag = salt.utils.state.gen_tag(chunk) + tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue diff --git a/tests/unit/modules/test_state.py b/tests/unit/modules/test_state.py index 26162a8705..caa89fdf43 100644 --- a/tests/unit/modules/test_state.py +++ b/tests/unit/modules/test_state.py @@ -19,6 +19,8 @@ from tests.support.mock import ( ) # Import Salt Libs +import salt.config +import salt.loader import salt.utils import salt.utils.odict import salt.utils.platform @@ -345,6 +347,10 @@ class StateTestCase(TestCase, LoaderModuleMockMixin): ''' def 
setup_loader_modules(self): + utils = salt.loader.utils( + salt.config.DEFAULT_MINION_OPTS, + whitelist=['state'] + ) patcher = patch('salt.modules.state.salt.state', MockState()) patcher.start() self.addCleanup(patcher.stop) @@ -355,6 +361,7 @@ class StateTestCase(TestCase, LoaderModuleMockMixin): 'environment': None, '__cli': 'salt', }, + '__utils__': utils, }, } @@ -977,6 +984,7 @@ class StateTestCase(TestCase, LoaderModuleMockMixin): MockTarFile.path = "" MockJson.flag = False - with patch('salt.utils.files.fopen', mock_open()): + with patch('salt.utils.files.fopen', mock_open()), \ + patch.dict(state.__utils__, {'state.check_result': MagicMock(return_value=True)}): self.assertTrue(state.pkg("/tmp/state_pkg.tgz", 0, "md5")) diff --git a/tests/unit/states/test_saltmod.py b/tests/unit/states/test_saltmod.py index ecfd891476..43bd3e897b 100644 --- a/tests/unit/states/test_saltmod.py +++ b/tests/unit/states/test_saltmod.py @@ -20,6 +20,8 @@ from tests.support.mock import ( ) # Import Salt Libs +import salt.config +import salt.loader import salt.utils.jid import salt.utils.event import salt.states.saltmod as saltmod @@ -31,6 +33,10 @@ class SaltmodTestCase(TestCase, LoaderModuleMockMixin): Test cases for salt.states.saltmod ''' def setup_loader_modules(self): + utils = salt.loader.utils( + salt.config.DEFAULT_MINION_OPTS, + whitelist=['state'] + ) return { saltmod: { '__env__': 'base', @@ -41,7 +47,8 @@ class SaltmodTestCase(TestCase, LoaderModuleMockMixin): 'transport': 'tcp' }, '__salt__': {'saltutil.cmd': MagicMock()}, - '__orchestration_jid__': salt.utils.jid.gen_jid() + '__orchestration_jid__': salt.utils.jid.gen_jid(), + '__utils__': utils, } } From 5bd5ea042a83a265b77f3ac8073c3a6e53137e73 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 1 Sep 2017 14:49:50 -0600 Subject: [PATCH 330/639] Fix `unit.modules.test_chef` for Windows Mocks the __opts__ to contain cachedir --- tests/unit/modules/test_chef.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 
deletions(-) diff --git a/tests/unit/modules/test_chef.py b/tests/unit/modules/test_chef.py index 0899ce3a47..03a892a241 100644 --- a/tests/unit/modules/test_chef.py +++ b/tests/unit/modules/test_chef.py @@ -36,7 +36,8 @@ class ChefTestCase(TestCase, LoaderModuleMockMixin): ''' Test if it execute a chef client run and return a dict ''' - self.assertDictEqual(chef.client(), {}) + with patch.dict(chef.__opts__, {'cachedir': r'c:\salt\var\cache\salt\minion'}): + self.assertDictEqual(chef.client(), {}) # 'solo' function tests: 1 @@ -44,4 +45,5 @@ class ChefTestCase(TestCase, LoaderModuleMockMixin): ''' Test if it execute a chef solo run and return a dict ''' - self.assertDictEqual(chef.solo('/dev/sda1'), {}) + with patch.dict(chef.__opts__, {'cachedir': r'c:\salt\var\cache\salt\minion'}): + self.assertDictEqual(chef.solo('/dev/sda1'), {}) From 9ff03c2d4348b054780ffa0d848c70edb2905edc Mon Sep 17 00:00:00 2001 From: Ken Jordan Date: Fri, 1 Sep 2017 14:57:10 -0600 Subject: [PATCH 331/639] Update Salt Mine documentation to show that the mine_interval option is configured in minutes. --- conf/minion | 2 +- doc/ref/configuration/minion.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/conf/minion b/conf/minion index ed2cfde7dc..7977894217 100644 --- a/conf/minion +++ b/conf/minion @@ -373,7 +373,7 @@ # interface: eth0 # cidr: '10.0.0.0/8' -# The number of seconds a mine update runs. +# The number of minutes between when a mine update runs. #mine_interval: 60 # Windows platforms lack posix IPC and must rely on slower TCP based inter- diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst index 087f41ef7e..655e7bc1f7 100644 --- a/doc/ref/configuration/minion.rst +++ b/doc/ref/configuration/minion.rst @@ -674,7 +674,7 @@ Note these can be defined in the pillar for a minion as well. Default: ``60`` -The number of seconds a mine update runs. +The number of minutes between when a mine update runs. .. 
code-block:: yaml From efef4f6a9b6e40eb172d134c2246a9dda45b3be6 Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 1 Sep 2017 17:05:16 -0400 Subject: [PATCH 332/639] Reduce the number of days an issue is stale by 15 --- .github/stale.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/stale.yml b/.github/stale.yml index 0a5be0ea46..e67b536243 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -1,8 +1,8 @@ # Probot Stale configuration file # Number of days of inactivity before an issue becomes stale -# 1075 is approximately 2 years and 11 months -daysUntilStale: 1075 +# 1060 is approximately 2 years and 11 months +daysUntilStale: 1060 # Number of days of inactivity before a stale issue is closed daysUntilClose: 7 From ba0cdd453692a3c1ef1ee283fd9a101716749b89 Mon Sep 17 00:00:00 2001 From: Ken Jordan Date: Fri, 1 Sep 2017 15:13:20 -0600 Subject: [PATCH 333/639] Fix phrasing for mine_interval description --- conf/minion | 2 +- doc/ref/configuration/minion.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/conf/minion b/conf/minion index 7977894217..34a6abeda4 100644 --- a/conf/minion +++ b/conf/minion @@ -373,7 +373,7 @@ # interface: eth0 # cidr: '10.0.0.0/8' -# The number of minutes between when a mine update runs. +# The number of minutes between mine updates. #mine_interval: 60 # Windows platforms lack posix IPC and must rely on slower TCP based inter- diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst index 655e7bc1f7..ded0b72699 100644 --- a/doc/ref/configuration/minion.rst +++ b/doc/ref/configuration/minion.rst @@ -674,7 +674,7 @@ Note these can be defined in the pillar for a minion as well. Default: ``60`` -The number of minutes between when a mine update runs. +The number of minutes between mine updates. .. 
code-block:: yaml From ae5796a075c5c0c8799178ce5b6abf4b83467e3e Mon Sep 17 00:00:00 2001 From: Brady Catherman Date: Thu, 31 Aug 2017 21:57:52 -0600 Subject: [PATCH 334/639] Ensure that this command doesn't fail randomly. Sometimes when running win_pki.py.import_cert will return a help message asserting that there was an argument error. This is because the underlying json coming out of PowerShell might actually return nil (None) so the .get() with a default return doesn't properly protect against a None getting iterated. --- salt/modules/win_pki.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/modules/win_pki.py b/salt/modules/win_pki.py index ef277f2baf..329da531f0 100644 --- a/salt/modules/win_pki.py +++ b/salt/modules/win_pki.py @@ -170,7 +170,11 @@ def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE): if key not in blacklist_keys: cert_info[key.lower()] = item[key] - cert_info['dnsnames'] = [name.get('Unicode') for name in item.get('DnsNameList', {})] + names = item.get('DnsNameList', None) + if isinstance(names, list): + cert_info['dnsnames'] = [name.get('Unicode') for name in names] + else: + cert_info['dnsnames'] = [] ret[item['Thumbprint']] = cert_info return ret From 827802f111fbe5c2f9a50b74a46afdef5eae2a2d Mon Sep 17 00:00:00 2001 From: Petr Michalec Date: Thu, 29 Jun 2017 16:32:17 +0200 Subject: [PATCH 335/639] Decrypt NACL passwords on ext_pillar --- salt/pillar/nacl.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 salt/pillar/nacl.py diff --git a/salt/pillar/nacl.py b/salt/pillar/nacl.py new file mode 100644 index 0000000000..912becfc2b --- /dev/null +++ b/salt/pillar/nacl.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- + +''' +Decrypt pillar data through the builtin NACL renderer + +In most cases, you'll want to make this the last external pillar used. For +example, to pair with the builtin stack pillar you could do something like +this: + +.. 
code:: yaml + + nacl.config: + keyfile: /root/.nacl + + ext_pillar: + - stack: /path/to/stack.cfg + - nacl: {} + +Set ``nacl.config`` in your config. + +''' + +from __future__ import absolute_import +import salt + + +def ext_pillar(minion_id, pillar, *args, **kwargs): + render_function = salt.loader.render(__opts__, __salt__).get("nacl") + return render_function(pillar) From cb3af2bbbd1df60ffd624329d5d638a7c5f7d3b6 Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Sun, 3 Sep 2017 17:23:44 +1000 Subject: [PATCH 336/639] Docs are wrong cache_dir (bool) and cache_file (str) cannot be passed on the cli (#2) --- salt/modules/win_pkg.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index f66bd762ee..1f85f49fcd 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -913,18 +913,6 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # Version 1.2.3 will apply to packages foo and bar salt '*' pkg.install foo,bar version=1.2.3 - cache_file (str): - A single file to copy down for use with the installer. Copied to the - same location as the installer. Use this over ``cache_dir`` if there - are many files in the directory and you only need a specific file - and don't want to cache additional files that may reside in the - installer directory. Only applies to files on ``salt://`` - - cache_dir (bool): - True will copy the contents of the installer directory. This is - useful for installations that are not a single file. Only applies to - directories on ``salt://`` - extra_install_flags (str): Additional install flags that will be appended to the ``install_flags`` defined in the software definition file. 
Only From eb526c93ca03b7594139fbb518063ff7f5017cd5 Mon Sep 17 00:00:00 2001 From: Joaquin Veira Date: Mon, 4 Sep 2017 14:13:40 +0200 Subject: [PATCH 337/639] Update zabbix_return.py test ServerActive IP addresses before using them as it may be that your host is not being monitored by one of them and current returner fails to complete this action --- salt/returners/zabbix_return.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/salt/returners/zabbix_return.py b/salt/returners/zabbix_return.py index 6470fe31ff..2415f37514 100644 --- a/salt/returners/zabbix_return.py +++ b/salt/returners/zabbix_return.py @@ -54,9 +54,22 @@ def zbx(): return False -def zabbix_send(key, host, output): - cmd = zbx()['sender'] + " -c " + zbx()['config'] + " -s " + host + " -k " + key + " -o \"" + output +"\"" - __salt__['cmd.shell'](cmd) +def zabbix_send(key, host, output): + f = open('/etc/zabbix/zabbix_agentd.conf','r') + for line in f: + if "ServerActive" in line: + flag = "true" + server = line.rsplit('=') + server = server[1].rsplit(',') + for s in server: + cmd = zbx()['sender'] + " -z " + s.replace('\n','') + " -s " + host + " -k " + key + " -o \"" + output +"\"" + __salt__['cmd.shell'](cmd) + break + else: + flag = "false" + if flag == 'false': + cmd = zbx()['sender'] + " -c " + zbx()['config'] + " -s " + host + " -k " + key + " -o \"" + output +"\"" + f.close() def returner(ret): From 012bccf0eb17e3f0d391838f4d84cdb76362d799 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Sun, 16 Jul 2017 19:58:25 +0000 Subject: [PATCH 338/639] New execution module: TextFSM --- salt/modules/textfsm_mod.py | 171 ++++++++++++++++++++++++++++++++++++ 1 file changed, 171 insertions(+) create mode 100644 salt/modules/textfsm_mod.py diff --git a/salt/modules/textfsm_mod.py b/salt/modules/textfsm_mod.py new file mode 100644 index 0000000000..578f897ead --- /dev/null +++ b/salt/modules/textfsm_mod.py @@ -0,0 +1,171 @@ +# -*- coding: utf-8 -*- +''' +TextFSM 
+======== + +.. versionadded:: Oxygen + +Execution module that processes plain text and extracts data +using TextFSM templates. The output is presented in JSON serializable +data, and can be easily re-used in other modules, or directly +inside the renderer (Jinja, Mako, Genshi, etc.). +''' +from __future__ import absolute_import + +# Import python libs +import logging +log = logging.getLogger(__name__) + +# Import third party modules +import textfsm + +# Import salt modules +import salt.utils + +__virtualname__ = 'textfsm' +__proxyenabled__ = ['*'] + + +def __virtual__(): + return __virtualname__ + + +def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): + ''' + Extracts the data entities from the unstructured + raw text sent as input and returns the data + mapping, processing using the TextFSM template. + + template_path + The path to the TextFSM template. + This can be specified using the absolute path + to the file, or using one of the following URL schemes: + + - ``salt://``, to fetch the template from the Salt fileserver. + - ``http://`` or ``https://`` + - ``ftp://`` + - ``s3://`` + - ``swift://`` + + raw_text: ``None`` + The unstructured text to be parsed. + + raw_text_file: ``None`` + Text file to read, having the raw text to be parsed using the TextFSM template. + Supports the same URL schemes as the ``template_path`` argument. + + saltenv: ``base`` + Salt fileserver envrionment from which to retrieve the file. + Ignored if ``template_path`` is not a ``salt://`` URL. + + CLI Example: + + .. code-block:: bash + + salt '*' textfsm.extract salt://bgp.textfsm raw_text_file=s3://bgp.txt + salt '*' textfsm.extract http://bgp.textfsm raw_text='Groups: 3 Peers: 3 Down peers: 0 ... snip ...' + + .. code-block:: jinja + + {%- set raw_text = 'Groups: 3 Peers: 3 Down peers: 0 ... snip ...' -%} + {%- set textfsm_extract = salt.textfsm.extract('https://bgp.textfsm', raw_text) -%} + + Output example: + + .. 
code-block:: json + + { + "comment": "", + "result": true, + "out": [ + { + "status": "", + "uptime": "6w3d17h", + "received_v6": "0", + "accepted_v6": "", + "remoteas": "65550", + "received_v4": "5", + "damped_v4": "1", + "active_v6": "0", + "remoteip": "10.247.68.182", + "active_v4": "4", + "accepted_v4": "", + "damped_v6": "0" + }, + { + "status": "", + "uptime": "6w5d6h", + "received_v6": "8", + "accepted_v6": "", + "remoteas": "65550", + "received_v4": "0", + "damped_v4": "0", + "active_v6": "7", + "remoteip": "10.254.166.246", + "active_v4": "0", + "accepted_v4": "", + "damped_v6": "1" + }, + { + "status": "", + "uptime": "9w5d6h", + "received_v6": "0", + "accepted_v6": "", + "remoteas": "65551", + "received_v4": "3", + "damped_v4": "0", + "active_v6": "0", + "remoteip": "192.0.2.100", + "active_v4": "2", + "accepted_v4": "", + "damped_v6": "0" + } + ] + } + ''' + ret = { + 'result': False, + 'comment': '', + 'out': None + } + tpl_cached_path = __salt__['cp.cache_file'](template_path, saltenv=saltenv) + if tpl_cached_path is False: + ret['comment'] = 'Unable to read the TextFSM template from {}'.format(template_path) + log.error(ret['comment']) + return ret + try: + log.debug('Reading TextFSM template from cache path: {}'.format(tpl_cached_path)) + tpl_file_handle = salt.utils.fopen(tpl_cached_path, 'r') + fsm_handler = textfsm.TextFSM(tpl_file_handle) + except textfsm.TextFSMTemplateError as tfte: + log.error('Unable to parse the TextFSM template', exc_info=True) + log.error(tpl_file_handle.read()) + ret['comment'] = 'Unable to parse the TextFSM template from {}. Please check the logs.'.format(template_path) + return ret + if not raw_text and raw_text_file: + log.debug('Trying to read the raw input from {}'.format(raw_text_file)) + raw_text = __salt__['cp.get_file_str'](raw_text_file, saltenv=saltenv) + log.debug('Raw text input read from file:') + log.debug(raw_text) + if raw_text is False: + ret['comment'] = 'Unable to read from {}. 
Please specify a valid input file or text.'.format(raw_text_file) + log.error(ret['comment']) + return ret + else: + ret['comment'] = 'Please specify a valid input file or text.' + log.error(ret['comment']) + return ret + objects = fsm_handler.ParseText(raw_text) + textfsm_data = [] + for obj in objects: + index = 0 + entry = {} + for entry_value in obj: + entry[fsm_handler.header[index].lower()] = entry_value + index += 1 + textfsm_data.append(entry) + ret.update({ + 'result': True, + 'out': textfsm_data + }) + return ret From 20ee34da5035d716dc28859c9e7a5e6c556a7e67 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Mon, 17 Jul 2017 09:40:00 +0000 Subject: [PATCH 339/639] Define index function and avoid redundant code --- salt/modules/textfsm_mod.py | 222 ++++++++++++++++++++++++++---------- 1 file changed, 163 insertions(+), 59 deletions(-) diff --git a/salt/modules/textfsm_mod.py b/salt/modules/textfsm_mod.py index 578f897ead..a2bc267c09 100644 --- a/salt/modules/textfsm_mod.py +++ b/salt/modules/textfsm_mod.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- ''' TextFSM -======== +======= .. versionadded:: Oxygen @@ -13,15 +13,26 @@ inside the renderer (Jinja, Mako, Genshi, etc.). from __future__ import absolute_import # Import python libs +import os import logging -log = logging.getLogger(__name__) # Import third party modules -import textfsm +try: + import textfsm + HAS_TEXTFSM = True +except ImportError: + HAS_TEXTFSM = False +try: + import clitable + HAS_CLITABLE = True +except ImportError: + HAS_CLITABLE = False # Import salt modules import salt.utils +log = logging.getLogger(__name__) + __virtualname__ = 'textfsm' __proxyenabled__ = ['*'] @@ -30,6 +41,25 @@ def __virtual__(): return __virtualname__ +def _clitable_to_dict(objects, fsm_handler): + ''' + Converts TextFSM cli_table object to list of dictionaries. 
+ ''' + objs = [] + log.debug('Cli Table:') + log.debug(objects) + log.debug('FSM handler:') + log.debug(fsm_handler) + for row in objects: + temp_dict = {} + for index, element in enumerate(row): + temp_dict[fsm_handler.header[index].lower()] = element + objs.append(temp_dict) + log.debug('Extraction result:') + log.debug(objs) + return objs + + def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): ''' Extracts the data entities from the unstructured @@ -62,13 +92,54 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): .. code-block:: bash - salt '*' textfsm.extract salt://bgp.textfsm raw_text_file=s3://bgp.txt - salt '*' textfsm.extract http://bgp.textfsm raw_text='Groups: 3 Peers: 3 Down peers: 0 ... snip ...' + salt '*' textfsm.extract salt://junos_ver.textfsm raw_text_file=s3://junos_ver.txt + salt '*' textfsm.extract http://junos_ver.textfsm raw_text='Hostname: router.abc ... snip ...' .. code-block:: jinja - {%- set raw_text = 'Groups: 3 Peers: 3 Down peers: 0 ... snip ...' -%} - {%- set textfsm_extract = salt.textfsm.extract('https://bgp.textfsm', raw_text) -%} + {%- set raw_text = 'Hostname: router.abc ... snip ...' -%} + {%- set textfsm_extract = salt.textfsm.extract('https://junos_ver.textfsm', raw_text) -%} + + Raw text example: + + .. code-block:: text + + Hostname: router.abc + Model: mx960 + JUNOS Base OS boot [9.1S3.5] + JUNOS Base OS Software Suite [9.1S3.5] + JUNOS Kernel Software Suite [9.1S3.5] + JUNOS Crypto Software Suite [9.1S3.5] + JUNOS Packet Forwarding Engine Support (M/T Common) [9.1S3.5] + JUNOS Packet Forwarding Engine Support (MX Common) [9.1S3.5] + JUNOS Online Documentation [9.1S3.5] + JUNOS Routing Software Suite [9.1S3.5] + + TextFSM Example: + + .. 
code-block:: text + + Value Chassis (\S+) + Value Required Model (\S+) + Value Boot (.*) + Value Base (.*) + Value Kernel (.*) + Value Crypto (.*) + Value Documentation (.*) + Value Routing (.*) + + Start + # Support multiple chassis systems. + ^\S+:$$ -> Continue.Record + ^${Chassis}:$$ + ^Model: ${Model} + ^JUNOS Base OS boot \[${Boot}\] + ^JUNOS Software Release \[${Base}\] + ^JUNOS Base OS Software Suite \[${Base}\] + ^JUNOS Kernel Software Suite \[${Kernel}\] + ^JUNOS Crypto Software Suite \[${Crypto}\] + ^JUNOS Online Documentation \[${Documentation}\] + ^JUNOS Routing Software Suite \[${Routing}\] Output example: @@ -79,46 +150,14 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): "result": true, "out": [ { - "status": "", - "uptime": "6w3d17h", - "received_v6": "0", - "accepted_v6": "", - "remoteas": "65550", - "received_v4": "5", - "damped_v4": "1", - "active_v6": "0", - "remoteip": "10.247.68.182", - "active_v4": "4", - "accepted_v4": "", - "damped_v6": "0" - }, - { - "status": "", - "uptime": "6w5d6h", - "received_v6": "8", - "accepted_v6": "", - "remoteas": "65550", - "received_v4": "0", - "damped_v4": "0", - "active_v6": "7", - "remoteip": "10.254.166.246", - "active_v4": "0", - "accepted_v4": "", - "damped_v6": "1" - }, - { - "status": "", - "uptime": "9w5d6h", - "received_v6": "0", - "accepted_v6": "", - "remoteas": "65551", - "received_v4": "3", - "damped_v4": "0", - "active_v6": "0", - "remoteip": "192.0.2.100", - "active_v4": "2", - "accepted_v4": "", - "damped_v6": "0" + "kernel": "9.1S3.5", + "documentation": "9.1S3.5", + "boot": "9.1S3.5", + "crypto": "9.1S3.5", + "chassis": "", + "routing": "9.1S3.5", + "base": "9.1S3.5", + "model": "mx960" } ] } @@ -156,16 +195,81 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): log.error(ret['comment']) return ret objects = fsm_handler.ParseText(raw_text) - textfsm_data = [] - for obj in objects: - index = 0 - entry = {} - for entry_value in 
obj: - entry[fsm_handler.header[index].lower()] = entry_value - index += 1 - textfsm_data.append(entry) - ret.update({ - 'result': True, - 'out': textfsm_data - }) + ret['out'] = _clitable_to_dict(objects, fsm_handler) + ret['result'] = True + return ret + + +def index(command, + platform, + output=None, + output_file=None, + textfsm_path=None, + index_file=None, + saltenv='base', + include_empty=False, + include_pat=None, + exclude_pat=None): + ret = { + 'out': None, + 'result': False, + 'comment': '' + } + if not HAS_CLITABLE: + ret['comment'] = 'TextFSM doesnt seem that has clitable embedded.' + log.error(ret['comment']) + return ret + if not textfsm_path: + log.debug('No TextFSM templates path specified, trying to look into the opts and pillar') + textfsm_path = __opts__.get('textfsm_path') or __pillar__.get('textfsm_path') + if not textfsm_path: + ret['comment'] = 'No TextFSM templates path specified. Please configure in opts/pillar/function args.' + log.error(ret['comment']) + return ret + log.debug('Caching {} using the Salt fileserver'.format(textfsm_path)) + textfsm_cachedir_ret = __salt__['cp.cache_dir'](textfsm_path, + saltenv=saltenv, + include_empty=include_empty, + include_pat=include_pat, + exclude_pat=exclude_pat) + log.debug('Cache fun return:') + log.debug(textfsm_cachedir_ret) + if not textfsm_cachedir_ret: + ret['comment'] = 'Unable to fetch from {}. 
Is the TextFSM path correctly specified?'.format(textfsm_path) + log.error(ret['comment']) + return ret + textfsm_cachedir = os.path.dirname(textfsm_cachedir_ret[0]) # first item + index_file = __opts__.get('textfsm_index', 'index') + index_file_path = os.path.join(textfsm_cachedir, index_file) + log.debug('Using the cached index file: {}'.format(index_file_path)) + log.debug('TextFSM templates cached under: {}'.format(textfsm_cachedir)) + textfsm_obj = clitable.CliTable(index_file_path, textfsm_cachedir) + attrs = { + 'Command': command, + 'Platform': platform + } + log.debug('Command: {Command}, Platform: {Platform}'.format(**attrs)) + if not output and output_file: + log.debug('Processing the output from {}'.format(output_file)) + output = __salt__['cp.get_file_str'](output_file, saltenv=saltenv) + if output is False: + ret['comment'] = 'Unable to read from {}. Please specify a valid file or text.'.format(output_file) + log.error(ret['comment']) + return ret + log.debug('Raw text input read from file:') + log.debug(output) + else: + ret['comment'] = 'Please specify a valid output text or file' + log.error(ret['comment']) + return ret + try: + # Parse output through template + log.debug('Processing the output:') + log.debug(output) + textfsm_obj.ParseCmd(output, attrs) + ret['out'] = _clitable_to_dict(textfsm_obj, textfsm_obj) + ret['result'] = True + except clitable.CliTableError as cterr: + log.error('Unable to proces the CliTable', exc_info=True) + ret['comment'] = 'Unable to process the output through the CliTable. Please see logs for more details.' 
return ret From 3e0d675e8c2f75fcfe3b7bccf173da8b0fe77d37 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Mon, 17 Jul 2017 09:46:22 +0000 Subject: [PATCH 340/639] More verbose debug logs --- salt/modules/textfsm_mod.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/salt/modules/textfsm_mod.py b/salt/modules/textfsm_mod.py index a2bc267c09..06ab5330fd 100644 --- a/salt/modules/textfsm_mod.py +++ b/salt/modules/textfsm_mod.py @@ -167,6 +167,8 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): 'comment': '', 'out': None } + log.debug('Using the saltenv: {}'.format(saltenv)) + log.debug('Caching {} using the Salt fileserver'.format(template_path)) tpl_cached_path = __salt__['cp.cache_file'](template_path, saltenv=saltenv) if tpl_cached_path is False: ret['comment'] = 'Unable to read the TextFSM template from {}'.format(template_path) @@ -175,17 +177,15 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): try: log.debug('Reading TextFSM template from cache path: {}'.format(tpl_cached_path)) tpl_file_handle = salt.utils.fopen(tpl_cached_path, 'r') + log.debug(tpl_file_handle.read()) fsm_handler = textfsm.TextFSM(tpl_file_handle) except textfsm.TextFSMTemplateError as tfte: log.error('Unable to parse the TextFSM template', exc_info=True) - log.error(tpl_file_handle.read()) ret['comment'] = 'Unable to parse the TextFSM template from {}. Please check the logs.'.format(template_path) return ret if not raw_text and raw_text_file: log.debug('Trying to read the raw input from {}'.format(raw_text_file)) raw_text = __salt__['cp.get_file_str'](raw_text_file, saltenv=saltenv) - log.debug('Raw text input read from file:') - log.debug(raw_text) if raw_text is False: ret['comment'] = 'Unable to read from {}. 
Please specify a valid input file or text.'.format(raw_text_file) log.error(ret['comment']) @@ -194,6 +194,8 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): ret['comment'] = 'Please specify a valid input file or text.' log.error(ret['comment']) return ret + log.debug('Processing the raw text:') + log.debug(raw_text) objects = fsm_handler.ParseText(raw_text) ret['out'] = _clitable_to_dict(objects, fsm_handler) ret['result'] = True @@ -226,6 +228,7 @@ def index(command, ret['comment'] = 'No TextFSM templates path specified. Please configure in opts/pillar/function args.' log.error(ret['comment']) return ret + log.debug('Using the saltenv: {}'.format(saltenv)) log.debug('Caching {} using the Salt fileserver'.format(textfsm_path)) textfsm_cachedir_ret = __salt__['cp.cache_dir'](textfsm_path, saltenv=saltenv, @@ -256,16 +259,14 @@ def index(command, ret['comment'] = 'Unable to read from {}. Please specify a valid file or text.'.format(output_file) log.error(ret['comment']) return ret - log.debug('Raw text input read from file:') - log.debug(output) else: ret['comment'] = 'Please specify a valid output text or file' log.error(ret['comment']) return ret + log.debug('Processing the raw text:') + log.debug(output) try: # Parse output through template - log.debug('Processing the output:') - log.debug(output) textfsm_obj.ParseCmd(output, attrs) ret['out'] = _clitable_to_dict(textfsm_obj, textfsm_obj) ret['result'] = True From ed54a341b07431950e8db7b677db556eb9427ba9 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Mon, 17 Jul 2017 11:12:31 +0000 Subject: [PATCH 341/639] Improve opts and provide doc --- salt/modules/textfsm_mod.py | 181 ++++++++++++++++++++++++++++++++++-- 1 file changed, 172 insertions(+), 9 deletions(-) diff --git a/salt/modules/textfsm_mod.py b/salt/modules/textfsm_mod.py index 06ab5330fd..63486f5c83 100644 --- a/salt/modules/textfsm_mod.py +++ b/salt/modules/textfsm_mod.py @@ -22,6 +22,7 @@ try: HAS_TEXTFSM = True except 
ImportError: HAS_TEXTFSM = False + try: import clitable HAS_CLITABLE = True @@ -92,13 +93,15 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): .. code-block:: bash - salt '*' textfsm.extract salt://junos_ver.textfsm raw_text_file=s3://junos_ver.txt - salt '*' textfsm.extract http://junos_ver.textfsm raw_text='Hostname: router.abc ... snip ...' + salt '*' textfsm.extract salt://textfsm/juniper_version_template raw_text_file=s3://junos_ver.txt + salt '*' textfsm.extract http://some-server/textfsm/juniper_version_template raw_text='Hostname: router.abc ... snip ...' + + Jinja template example: .. code-block:: jinja {%- set raw_text = 'Hostname: router.abc ... snip ...' -%} - {%- set textfsm_extract = salt.textfsm.extract('https://junos_ver.textfsm', raw_text) -%} + {%- set textfsm_extract = salt.textfsm.extract('https://some-server/textfsm/juniper_version_template', raw_text) -%} Raw text example: @@ -203,7 +206,9 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): def index(command, - platform, + platform=None, + platform_grain_name=None, + platform_column_name=None, output=None, output_file=None, textfsm_path=None, @@ -212,6 +217,147 @@ def index(command, include_empty=False, include_pat=None, exclude_pat=None): + ''' + Dynamically identify the template required to extract the + information from the unstructured raw text. + + The output has the same structure as the ``extract`` execution + function, the difference being that ``index`` is capable + to identify what template to use, based on the platform + details and the ``command``. + + command + The command executed on the device, to get the output. + + platform + The platform name, as defined in the TextFSM index file. + + .. note:: + For ease of use, it is recommended to define the TextFSM + indexfile with values that can be matches using the grains. 
+ + platform_grain_name + The name of the grain used to identify the platform name + in the TextFSM index file. + + .. note:: + This option can be also specified in the minion configuration + file or pillar as ``textfsm_platform_grain``. + + .. note:: + This option is ignored when ``platform`` is specified. + + platform_column_name: ``Platform`` + The column name used to identify the platform, + exactly as specified in the TextFSM index file. + Default: ``Platform``. + + .. note:: + This is field is case sensitive, make sure + to assign the correct value to this option, + exactly as defined in the index file. + + .. note:: + This option can be also specified in the minion configuration + file or pillar as ``textfsm_platform_column_name``. + + output + The raw output from the device, to be parsed + and extract the structured data. + + output_file + The path to a file that contains the raw output from the device, + used to extract the structured data. + This option supports the usual Salt-specific schemes: ``file://``, + ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://``. + + textfsm_path + The path where the TextFSM templates can be found. This can be either + absolute path on the server, either specified using the following URL + schemes: ``file://``, ``salt://``, ``http://``, ``https://``, ``ftp://``, + ``s3://``, ``swift://``. + + .. note:: + This needs to be a directory with a flat structure, having an + index file (whose name can be specified using the ``index_file`` option) + and a number of TextFSM templates. + + .. note:: + This option can be also specified in the minion configuration + file or pillar as ``textfsm_path``. + + index_file: ``index`` + The name of the TextFSM index file, under the ``textfsm_path``. Default: ``index``. + + .. note:: + This option can be also specified in the minion configuration + file or pillar as ``textfsm_index_file``. 
+ + saltenv: ``base`` + Salt fileserver environment from which to retrieve the file. + Ignored if ``textfsm_path`` is not a ``salt://`` URL. + + include_empty: ``False`` + Include empty files under the ``textfsm_path``. + + include_pat + Glob or regex to narrow down the files cached from the given path. + If matching with a regex, the regex must be prefixed with ``E@``, + otherwise the expression will be interpreted as a glob. + + exclude_pat + Glob or regex to exclude certain files from being cached from the given path. + If matching with a regex, the regex must be prefixed with ``E@``, + otherwise the expression will be interpreted as a glob. + + .. note:: + If used with ``include_pat``, files matching this pattern will be + excluded from the subset of files defined by ``include_pat``. + + CLI Example: + + .. code-block:: bash + + salt '*' textfsm.index 'sh ver' platform=Juniper output_file=salt://textfsm/juniper_version_example textfsm_path=salt://textfsm/ + salt '*' textfsm.index 'sh ver' output_file=salt://textfsm/juniper_version_example textfsm_path=ftp://textfsm/ platform_textfsm_key=Vendor + salt '*' textfsm.index 'sh ver' output_file=salt://textfsm/juniper_version_example textfsm_path=https://some-server/textfsm/ platform_textfsm_key=Vendor platform_grain_name=vendor + + TextFSM index file example: + + ``salt://textfsm/index`` + + .. code-block:: text + + Template, Hostname, Vendor, Command + juniper_version_template, .*, Juniper, sh[[ow]] ve[[rsion]] + + The usage can be simplified, + by defining (some of) the following options: ``textfsm_platform_grain``, + ``textfsm_path``, ``textfsm_platform_column_name``, or ``textfsm_index_file``, + in the (proxy) minion configuration file or pillar. + + Configuration example: + + .. code-block:: yaml + + textfsm_platform_grain: vendor + textfsm_path: salt://textfsm/ + textfsm_platform_column_name: Vendor + + And the CLI usage becomes as simple as: + + ..
code-block:: bash + + salt '*' textfsm.index 'sh ver' output_file=salt://textfsm/juniper_version_example + + Usage inside a Jinja template: + + .. code-block:: jinja + + {%- set command = 'sh ver' -%} + {%- set output = salt.net.cli(command) -%} + {%- set textfsm_extract = salt.textfsm.index(command, output=output) -%} + ''' ret = { 'out': None, 'result': False, @@ -221,6 +367,20 @@ def index(command, ret['comment'] = 'TextFSM doesnt seem that has clitable embedded.' log.error(ret['comment']) return ret + if not platform: + platform_grain_name = __opts__.get('textfsm_platform_grain') or\ + __pillar__.get('textfsm_platform_grain', platform_grain_name) + if platform_grain_name: + log.debug('Using the {} grain to identify the platform name'.format(platform_grain_name)) + platform = __grains__.get(platform_grain_name) + if not platform: + ret['comment'] = 'Unable to identify the platform name using the {} grain.'.format(platform_grain_name) + return ret + log.info('Using platform: {}'.format(platform)) + else: + ret['comment'] = 'No platform specified, no platform grain identifier configured.'
+ log.error(ret['comment']) + return ret if not textfsm_path: log.debug('No TextFSM templates path specified, trying to look into the opts and pillar') textfsm_path = __opts__.get('textfsm_path') or __pillar__.get('textfsm_path') @@ -242,16 +402,19 @@ def index(command, log.error(ret['comment']) return ret textfsm_cachedir = os.path.dirname(textfsm_cachedir_ret[0]) # first item - index_file = __opts__.get('textfsm_index', 'index') + index_file = __opts__.get('textfsm_index_file') or __pillar__.get('textfsm_index_file', 'index') index_file_path = os.path.join(textfsm_cachedir, index_file) log.debug('Using the cached index file: {}'.format(index_file_path)) log.debug('TextFSM templates cached under: {}'.format(textfsm_cachedir)) textfsm_obj = clitable.CliTable(index_file_path, textfsm_cachedir) attrs = { - 'Command': command, - 'Platform': platform + 'Command': command } - log.debug('Command: {Command}, Platform: {Platform}'.format(**attrs)) + platform_column_name = __opts__.get('textfsm_platform_column_name') or\ + __pillar__.get('textfsm_platform_column_name'. 'Platform') + log.info('Using the TextFSM platform idenfiticator: {}'.format(platform_column_name)) + attrs[platform_column_name] = platform + log.debug('Processing the TextFSM index file using the attributes: {}'.format(attrs)) if not output and output_file: log.debug('Processing the output from {}'.format(output_file)) output = __salt__['cp.get_file_str'](output_file, saltenv=saltenv) @@ -272,5 +435,5 @@ def index(command, ret['result'] = True except clitable.CliTableError as cterr: log.error('Unable to proces the CliTable', exc_info=True) - ret['comment'] = 'Unable to process the output through the CliTable. Please see logs for more details.' 
+ ret['comment'] = 'Unable to process the output: {}'.format(cterr) return ret From 8d968fa886b3d194c0461aa2de06e9e5db752c25 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Mon, 17 Jul 2017 11:35:33 +0000 Subject: [PATCH 342/639] Deps: documentation note and load condition --- salt/modules/textfsm_mod.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/salt/modules/textfsm_mod.py b/salt/modules/textfsm_mod.py index 63486f5c83..3e964b8527 100644 --- a/salt/modules/textfsm_mod.py +++ b/salt/modules/textfsm_mod.py @@ -9,6 +9,13 @@ Execution module that processes plain text and extracts data using TextFSM templates. The output is presented in JSON serializable data, and can be easily re-used in other modules, or directly inside the renderer (Jinja, Mako, Genshi, etc.). + +:depends: - textfsm Python library + +.. note:: + + For Python 2/3 compatibility, it is more recommended to + install the ``jtextfsm`` library: ``pip install jtextfsm``. ''' from __future__ import absolute_import @@ -39,7 +46,12 @@ __proxyenabled__ = ['*'] def __virtual__(): - return __virtualname__ + ''' + Only load this execution module if TextFSM is installed. 
+ ''' + if HAS_TEXTFSM: + return __virtualname__ + return (False, 'The textfsm execution module failed to load: requires the textfsm library.') def _clitable_to_dict(objects, fsm_handler): From 80c5479dfaa85b1e0ea3d7e37b23f2e39fa0c113 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Mon, 17 Jul 2017 11:38:26 +0000 Subject: [PATCH 343/639] Textfsm execution module autodoc --- doc/ref/modules/all/index.rst | 1 + doc/ref/modules/all/salt.modules.textfsm_mod.rst | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 doc/ref/modules/all/salt.modules.textfsm_mod.rst diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst index 1365f38453..8af1268a50 100644 --- a/doc/ref/modules/all/index.rst +++ b/doc/ref/modules/all/index.rst @@ -417,6 +417,7 @@ execution modules test testinframod test_virtual + textfsm_mod timezone tls tomcat diff --git a/doc/ref/modules/all/salt.modules.textfsm_mod.rst b/doc/ref/modules/all/salt.modules.textfsm_mod.rst new file mode 100644 index 0000000000..7b2c64b956 --- /dev/null +++ b/doc/ref/modules/all/salt.modules.textfsm_mod.rst @@ -0,0 +1,5 @@ +salt.modules.textfsm_mod module +=============================== + +.. automodule:: salt.modules.textfsm_mod + :members: From 07ea62419720a896715cf19202f9274131d60552 Mon Sep 17 00:00:00 2001 From: Mike Place Date: Mon, 17 Jul 2017 07:16:17 -0600 Subject: [PATCH 344/639] Fix obvious typo --- salt/modules/textfsm_mod.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/textfsm_mod.py b/salt/modules/textfsm_mod.py index 3e964b8527..768985c712 100644 --- a/salt/modules/textfsm_mod.py +++ b/salt/modules/textfsm_mod.py @@ -423,7 +423,7 @@ def index(command, 'Command': command } platform_column_name = __opts__.get('textfsm_platform_column_name') or\ - __pillar__.get('textfsm_platform_column_name'. 
'Platform') + __pillar__.get('textfsm_platform_column_name', 'Platform') log.info('Using the TextFSM platform idenfiticator: {}'.format(platform_column_name)) attrs[platform_column_name] = platform log.debug('Processing the TextFSM index file using the attributes: {}'.format(attrs)) From ec2a83baded21fde1821d5009d3e323bbdf4fce2 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Mon, 17 Jul 2017 13:23:52 +0000 Subject: [PATCH 345/639] More doc typos and correct tests --- salt/modules/textfsm_mod.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/salt/modules/textfsm_mod.py b/salt/modules/textfsm_mod.py index 768985c712..60aa0ecc5b 100644 --- a/salt/modules/textfsm_mod.py +++ b/salt/modules/textfsm_mod.py @@ -196,7 +196,8 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): fsm_handler = textfsm.TextFSM(tpl_file_handle) except textfsm.TextFSMTemplateError as tfte: log.error('Unable to parse the TextFSM template', exc_info=True) - ret['comment'] = 'Unable to parse the TextFSM template from {}. Please check the logs.'.format(template_path) + ret['comment'] = 'Unable to parse the TextFSM template from {}: {}. Please check the logs.'.format( + template_path, tfte) return ret if not raw_text and raw_text_file: log.debug('Trying to read the raw input from {}'.format(raw_text_file)) @@ -205,7 +206,7 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): ret['comment'] = 'Unable to read from {}. Please specify a valid input file or text.'.format(raw_text_file) log.error(ret['comment']) return ret - else: + if not raw_text: ret['comment'] = 'Please specify a valid input file or text.' log.error(ret['comment']) return ret @@ -331,8 +332,8 @@ def index(command, .. 
code-block:: bash salt '*' textfsm.index 'sh ver' platform=Juniper output_file=salt://textfsm/juniper_version_example textfsm_path=salt://textfsm/ - salt '*' textfsm.index 'sh ver' output_file=salt://textfsm/juniper_version_example textfsm_path=ftp://textfsm/ platform_textfsm_key=Vendor - salt '*' textfsm.index 'sh ver' output_file=salt://textfsm/juniper_version_example textfsm_path=https://some-server/textfsm/ platform_textfsm_key=Vendor platform_grain_name=vendor + salt '*' textfsm.index 'sh ver' output_file=salt://textfsm/juniper_version_example textfsm_path=ftp://textfsm/ platform_column_name=Vendor + salt '*' textfsm.index 'sh ver' output_file=salt://textfsm/juniper_version_example textfsm_path=https://some-server/textfsm/ platform_column_name=Vendor platform_grain_name=vendor TextFSM index file example: @@ -434,7 +435,7 @@ def index(command, ret['comment'] = 'Unable to read from {}. Please specify a valid file or text.'.format(output_file) log.error(ret['comment']) return ret - else: + if not output: ret['comment'] = 'Please specify a valid output text or file' log.error(ret['comment']) return ret From 38f85a6d2fc46ba26c740b9e5f02b0ce34082d79 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Mon, 17 Jul 2017 14:12:27 +0000 Subject: [PATCH 346/639] Lint --- salt/modules/textfsm_mod.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/modules/textfsm_mod.py b/salt/modules/textfsm_mod.py index 60aa0ecc5b..873d12b020 100644 --- a/salt/modules/textfsm_mod.py +++ b/salt/modules/textfsm_mod.py @@ -74,7 +74,7 @@ def _clitable_to_dict(objects, fsm_handler): def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): - ''' + r''' Extracts the data entities from the unstructured raw text sent as input and returns the data mapping, processing using the TextFSM template. 
@@ -191,7 +191,11 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): return ret try: log.debug('Reading TextFSM template from cache path: {}'.format(tpl_cached_path)) + # Disabling pylint W8470 to nto complain about fopen. + # Unfortunately textFSM needs the file handle rather than the content... + # pylint: disable=W8470 tpl_file_handle = salt.utils.fopen(tpl_cached_path, 'r') + # pylint: disable=W8470 log.debug(tpl_file_handle.read()) fsm_handler = textfsm.TextFSM(tpl_file_handle) except textfsm.TextFSMTemplateError as tfte: From 81939063081c383f4600c1a1377127b490e979e1 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Fri, 21 Jul 2017 15:42:45 +0000 Subject: [PATCH 347/639] Fix file handler bug --- salt/modules/textfsm_mod.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/modules/textfsm_mod.py b/salt/modules/textfsm_mod.py index 873d12b020..74c0b3765f 100644 --- a/salt/modules/textfsm_mod.py +++ b/salt/modules/textfsm_mod.py @@ -37,7 +37,7 @@ except ImportError: HAS_CLITABLE = False # Import salt modules -import salt.utils +import salt.utils.files log = logging.getLogger(__name__) @@ -194,9 +194,10 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): # Disabling pylint W8470 to nto complain about fopen. # Unfortunately textFSM needs the file handle rather than the content... 
# pylint: disable=W8470 - tpl_file_handle = salt.utils.fopen(tpl_cached_path, 'r') + tpl_file_handle = salt.utils.files.fopen(tpl_cached_path, 'r') # pylint: disable=W8470 log.debug(tpl_file_handle.read()) + tpl_file_handle.seek(0) # move the object position back at the top of the file fsm_handler = textfsm.TextFSM(tpl_file_handle) except textfsm.TextFSMTemplateError as tfte: log.error('Unable to parse the TextFSM template', exc_info=True) From d90ed050ba0744192cfb50a1e6d3292f6e5cc437 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Mon, 4 Sep 2017 13:44:51 +0000 Subject: [PATCH 348/639] Import fopen function from salt.utils.files or salt.utils The fopen function does not seem to be available in the existing Salt releases, under the __utils__ dunder (It will be in Oxygen as the `files.fopen` key). For backwards compatibility and this module to be able to be ported as extension modules when running earlier releases than Oxygen, we can import the fopen function in a try-except block, looking firstly to import it from the new salt.utils.files module. --- salt/modules/textfsm_mod.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/salt/modules/textfsm_mod.py b/salt/modules/textfsm_mod.py index 74c0b3765f..760f27d3a0 100644 --- a/salt/modules/textfsm_mod.py +++ b/salt/modules/textfsm_mod.py @@ -36,8 +36,10 @@ try: except ImportError: HAS_CLITABLE = False -# Import salt modules -import salt.utils.files +try: + from salt.utils.files import fopen +except ImportError: + from salt.utils import fopen log = logging.getLogger(__name__) @@ -194,7 +196,7 @@ def extract(template_path, raw_text=None, raw_text_file=None, saltenv='base'): # Disabling pylint W8470 to nto complain about fopen. # Unfortunately textFSM needs the file handle rather than the content... 
# pylint: disable=W8470 - tpl_file_handle = salt.utils.files.fopen(tpl_cached_path, 'r') + tpl_file_handle = fopen(tpl_cached_path, 'r') # pylint: disable=W8470 log.debug(tpl_file_handle.read()) tpl_file_handle.seek(0) # move the object position back at the top of the file From abab6fd91c4080d596930445a56566918b9e411d Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Thu, 1 Jun 2017 12:55:01 +0000 Subject: [PATCH 349/639] Override minion opts with pillar data --- salt/minion.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/minion.py b/salt/minion.py index c9cfc6cb1f..428b84bb85 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -3118,6 +3118,9 @@ class ProxyMinion(Minion): if 'proxy' not in self.opts: self.opts['proxy'] = self.opts['pillar']['proxy'] + # update opts, to override data with pillar data + self.opts.update(self.opts['pillar']) + fq_proxyname = self.opts['proxy']['proxytype'] # Need to load the modules so they get all the dunder variables From fd499887f9903ddf530063bdf3c0ec4c2d185110 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Wed, 5 Jul 2017 10:14:50 +0000 Subject: [PATCH 350/639] Define new proxy merge pillar in opts... opts --- salt/config/__init__.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 0f06f9ccca..c100741347 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -569,6 +569,20 @@ VALID_OPTS = { # False in 2016.3.0 'add_proxymodule_to_opts': bool, + # Merge pillar data into configuration opts. + # As multiple proxies can run on the same server, we may need different + # configuration options for each, while there's one single configuration file. + # The solution is merging the pillar data of each proxy minion into the opts. + 'proxy_merge_pillar_in_opts': bool, + + # Deep merge of pillar data into configuration opts. + # Evaluated only when `proxy_merge_pillar_in_opts` is True. 
+ 'proxy_deep_merge_pillar_in_opts': bool, + + # The strategy used when merging pillar into opts. + # Considered only when `proxy_merge_pillar_in_opts` is True. + 'proxy_merge_pillar_in_opts_strategy': str, + # In some particular cases, always alive proxies are not beneficial. # This option can be used in those less dynamic environments: # the user can request the connection @@ -1637,6 +1651,10 @@ DEFAULT_PROXY_MINION_OPTS = { 'append_minionid_config_dirs': ['cachedir', 'pidfile', 'default_include', 'extension_modules'], 'default_include': 'proxy.d/*.conf', + 'proxy_merge_pillar_in_opts': False, + 'proxy_deep_merge_pillar_in_opts': False, + 'proxy_merge_pillar_in_opts_strategy': 'smart', + # By default, proxies will preserve the connection. # If this option is set to False, # the connection with the remote dumb device From 96b31d5643bb9bffc9f2c913c356c8643664555c Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Wed, 5 Jul 2017 10:27:49 +0000 Subject: [PATCH 351/639] Override proxy opts with pillar data when required --- salt/minion.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/salt/minion.py b/salt/minion.py index 428b84bb85..f57e61f2f3 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -100,6 +100,7 @@ import salt.defaults.exitcodes import salt.cli.daemons import salt.log.setup +import salt.utils.dictupdate from salt.config import DEFAULT_MINION_OPTS from salt.defaults import DEFAULT_TARGET_DELIM from salt.executors import FUNCTION_EXECUTORS @@ -3118,8 +3119,11 @@ class ProxyMinion(Minion): if 'proxy' not in self.opts: self.opts['proxy'] = self.opts['pillar']['proxy'] - # update opts, to override data with pillar data - self.opts.update(self.opts['pillar']) + if self.opts.get('proxy_merge_pillar_in_opts'): + self.opts = salt.utils.dictupdate.merge(self.opts, + self.opts['pillar'], + strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'), + merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False)) fq_proxyname = 
self.opts['proxy']['proxytype'] From 732b63b0b945430b759431831bde7b716d2633a8 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Wed, 5 Jul 2017 10:52:27 +0000 Subject: [PATCH 352/639] Merge mine details whenever possible --- salt/minion.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/salt/minion.py b/salt/minion.py index f57e61f2f3..a6fb6a2797 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -3120,10 +3120,24 @@ class ProxyMinion(Minion): self.opts['proxy'] = self.opts['pillar']['proxy'] if self.opts.get('proxy_merge_pillar_in_opts'): + # Override proxy opts with pillar data when the user required. self.opts = salt.utils.dictupdate.merge(self.opts, self.opts['pillar'], strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'), merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False)) + else: + # Even when not required, some details such as mine configuration + # should be merged anyway whenever possible. + if 'mine_interval' in self.opts['pillar']: + self.opts['mine_interval'] = self.opts['pillar']['mine_interval'] + if 'mine_functions' in self.opts['pillar']: + general_proxy_mines = self.opts.get('mine_functions', []) + specific_proxy_mines = self.opts['pillar']['mine_functions'] + try: + self.opts['mine_functions'] = general_proxy_mines + specific_proxy_mines + except TypeError as terr: + log.error('Unable to merge mine functions from the pillar in the opts, for proxy {}'.format( + self.opts['id'])) fq_proxyname = self.opts['proxy']['proxytype'] From cdc0d9674a3d2bd9e40bbe5d0fda39686c5f8f93 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Wed, 5 Jul 2017 10:54:45 +0000 Subject: [PATCH 353/639] Allow disabling the mines details merge --- salt/config/__init__.py | 5 +++++ salt/minion.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index c100741347..5216323b5c 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -583,6 +583,9 @@ 
VALID_OPTS = { # Considered only when `proxy_merge_pillar_in_opts` is True. 'proxy_merge_pillar_in_opts_strategy': str, + # Allow enabling mine details using pillar data. + 'proxy_mines_pillar': bool, + # In some particular cases, always alive proxies are not beneficial. # This option can be used in those less dynamic environments: # the user can request the connection @@ -1655,6 +1658,8 @@ DEFAULT_PROXY_MINION_OPTS = { 'proxy_deep_merge_pillar_in_opts': False, 'proxy_merge_pillar_in_opts_strategy': 'smart', + 'proxy_mines_pillar': True, + # By default, proxies will preserve the connection. # If this option is set to False, # the connection with the remote dumb device diff --git a/salt/minion.py b/salt/minion.py index a6fb6a2797..6b7c82a8d7 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -3125,7 +3125,7 @@ class ProxyMinion(Minion): self.opts['pillar'], strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'), merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False)) - else: + elif self.opts.get('proxy_mines_pillar'): # Even when not required, some details such as mine configuration # should be merged anyway whenever possible. if 'mine_interval' in self.opts['pillar']: From aad39ba665b7bd85c4ed54d3ee8f896ed5b9b008 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Mon, 4 Sep 2017 14:17:14 +0000 Subject: [PATCH 354/639] Document the new opts --- doc/ref/configuration/proxy.rst | 50 +++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/doc/ref/configuration/proxy.rst b/doc/ref/configuration/proxy.rst index 974e0af890..e55f3fc01b 100644 --- a/doc/ref/configuration/proxy.rst +++ b/doc/ref/configuration/proxy.rst @@ -118,3 +118,53 @@ has to be closed after every command. .. code-block:: yaml proxy_always_alive: False + +``proxy_merge_pillar_in_opts`` +------------------------------ + +.. versionadded:: 2017.7.3 + +Default: ``False``. + +Wheter the pillar data to be merged into the proxy configuration options. 
+As multiple proxies can run on the same server, we may need different +configuration options for each, while there's one single configuration file. +The solution is merging the pillar data of each proxy minion into the opts. + +.. code-block:: yaml + + proxy_merge_pillar_in_opts: True + +``proxy_deep_merge_pillar_in_opts`` +----------------------------------- + +.. versionadded:: 2017.7.3 + +Default: ``False``. + +Deep merge of pillar data into configuration opts. +This option is evaluated only when :conf_proxy:`proxy_merge_pillar_in_opts` is +enabled. + +``proxy_merge_pillar_in_opts_strategy`` +--------------------------------------- + +.. versionadded:: 2017.7.3 + +Default: ``smart``. + +The strategy used when merging pillar configuration into opts. +This option is evaluated only when :conf_proxy:`proxy_merge_pillar_in_opts` is +enabled. + +``proxy_mines_pillar`` +---------------------- + +.. versionadded:: 2017.7.3 + +Default: ``True``. + +Allow enabling mine details using pillar data. This evaluates the mine +configuration under the pillar, for the following regular minion options that +are also equally available on the proxy minion: :conf_minion:`mine_interval`, +and :conf_minion:`mine_functions`. From b046e2a08b4ee3ae6abd9fbdad3e0c0a4e0d521f Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Fri, 2 Jun 2017 16:00:39 +0000 Subject: [PATCH 355/639] New beacon: NAPALM --- salt/beacons/napalm_beacon.py | 158 ++++++++++++++++++++++++++++++++++ 1 file changed, 158 insertions(+) create mode 100644 salt/beacons/napalm_beacon.py diff --git a/salt/beacons/napalm_beacon.py b/salt/beacons/napalm_beacon.py new file mode 100644 index 0000000000..789128496c --- /dev/null +++ b/salt/beacons/napalm_beacon.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +''' +Watch napalm function and fire events. + +:depends: - napalm_base Python module >= 0.9.5 + +:note: The ``napalm`` beacon only works on (proxy) minions. 
+''' +# Import Python libs +from __future__ import absolute_import +import re + +# Import salt libs +from salt.ext import six + +# Import third party libs +try: + import napalm_base + HAS_NAPALM_BASE = True +except ImportError: + HAS_NAPALM_BASE = False + +__virtualname__ = 'napalm' + +import logging +log = logging.getLogger(__name__) + + +def __virtual__(): + if HAS_NAPALM_BASE: + return __virtualname__ + return False + + +def _compare(cur_cmp, cur_struct): + ''' + Compares two obejcts and return a boolean value + when there's a match. + ''' + if isinstance(cur_cmp, dict) and isinstance(cur_struct, dict): + for cmp_key, cmp_value in six.iteritems(cur_cmp): + if cmp_key == '*': + # matches any key from the source dictionary + if isinstance(cmp_value, dict): + found = False + for _, cur_struct_val in six.iteritems(cur_struct): + found |= _compare(cmp_value, cur_struct_val) + return found + else: + found = False + if isinstance(cur_struct, (list, tuple)): + for cur_ele in cur_struct: + found |= _compare(cmp_value, cur_ele) + elif isinstance(cur_struct, dict): + for _, cur_ele in six.iteritems(cur_struct): + found |= _compare(cmp_value, cur_ele) + return found + else: + if isinstance(cmp_value, dict): + if cmp_key not in cur_struct: + return False + return _compare(cmp_value, cur_struct[cmp_key]) + if isinstance(cmp_value, list): + found = False + for _, cur_struct_val in six.iteritems(cur_struct): + found |= _compare(cmp_value, cur_struct_val) + return found + else: + return _compare(cmp_value, cur_struct[cmp_key]) + elif isinstance(cur_cmp, (list, tuple)) and isinstance(cur_struct, (list, tuple)): + found = False + for cur_cmp_ele in cur_cmp: + for cur_struct_ele in cur_struct: + found |= _compare(cur_cmp_ele, cur_struct_ele) + return found + elif isinstance(cur_cmp, bool) and isinstance(cur_struct, bool): + return cur_cmp == cur_struct + elif isinstance(cur_cmp, (six.string_types, six.text_type)) and \ + isinstance(cur_struct, (six.string_types, six.text_type)): + 
matched = re.match(cur_cmp, cur_struct, re.I) + if matched: + return True + # we can enhance this to allow mathematical operations + return False + return False + + +def beacon(config): + ''' + Watch napalm function and fire events. + + Example Config + + .. code-block:: yaml + + beacons: + napalm: + - net.interfaces: + '*': + is_up: False + - bgp.neighbors: + _args: + - 172.17.17.1 + global: + '*': + - up: False + ''' + log.debug('Executing napalm beacon with config:') + log.debug(config) + ret = [] + for mod in config: + if not mod: + continue + event = {} + fun = mod.keys()[0] + fun_cfg = mod.values()[0] + args = fun_cfg.pop('_args', []) + kwargs = fun_cfg.pop('_kwargs', {}) + log.debug('Executing {fun} with {args} and {kwargs}'.format( + fun=fun, + args=args, + kwargs=kwargs + )) + fun_ret = __salt__[fun](*args, **kwargs) + log.debug('Got the reply from the minion:') + log.debug(fun_ret) + if not fun_ret.get('result', False): + log.error('Error whilst executing {fun}'.format(fun)) + log.error(fun_ret) + continue + fun_ret_out = fun_ret['out'] + log.debug('Comparing to:') + log.debug(fun_cfg) + try: + fun_cmp_result = _compare(fun_cfg, fun_ret_out) + except Exception as err: + log.error(err, exc_info=True) + # catch any exception and continue + # to not jeopardise the execution of the next function in the list + continue + log.debug('Result of comparison: {res}'.format(res=fun_cmp_result)) + if fun_cmp_result: + log.info('Matched {fun} with {cfg}'.format( + fun=fun, + cfg=fun_cfg + )) + event['tag'] = '{fun}'.format(fun=fun) + event['fun'] = fun + event['args'] = args + event['kwargs'] = kwargs + event['data'] = fun_ret + event['match'] = fun_cfg + log.debug('Queueing event:') + log.debug(event) + ret.append(event) + log.debug('NAPALM beacon generated the events:') + log.debug(ret) + return ret From 8002ee81126ec2dbdf47dfeffc296886d04cd93e Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Mon, 4 Sep 2017 16:41:35 +0000 Subject: [PATCH 356/639] Mathematical 
comparison and better documentation --- salt/beacons/napalm_beacon.py | 134 +++++++++++++++++++++++++--------- 1 file changed, 99 insertions(+), 35 deletions(-) diff --git a/salt/beacons/napalm_beacon.py b/salt/beacons/napalm_beacon.py index 789128496c..8e89561b4b 100644 --- a/salt/beacons/napalm_beacon.py +++ b/salt/beacons/napalm_beacon.py @@ -1,35 +1,90 @@ # -*- coding: utf-8 -*- ''' -Watch napalm function and fire events. +NAPALM functions +================ -:depends: - napalm_base Python module >= 0.9.5 +Watch napalm functions and fire events on specific triggers. -:note: The ``napalm`` beacon only works on (proxy) minions. +.. note:: + The ``napalm`` beacon only work only when running under + a regular or a proxy minion. + +The configuration accepts a list of Salt functions to be +invoked, and the corresponding output hierarchy that should +be matched against. When we want to match on any element +at a certain level, we can have ``*`` to match anything. + +To invoke a certain function with certain arguments, +they can be specified using the ``_args`` key, or +``_kwargs`` to configure more specific key-value arguments. + +The right operand can also accept mathematical comparisons +when applicable (i.e., ``<``, ``<=``, ``!=``, ``>``, ``>=`` etc.). + +Configuration Example: + +.. 
code-block:: yaml + + beacons: + napalm: + - net.interfaces: + # fire events when any interfaces is down + '*': + is_up: false + - net.interfaces: + # fire events only when the xe-0/0/0 interface is down + 'xe-0/0/0': + is_up: false + - bgp.neighbors: + # fire events only when the 172.17.17.1 BGP neighbor is down + _args: + - 172.17.17.1 + global: + '*': + up: false + - ntp.stats: + # fire when there's any NTP peer unsynchornized + synchronized: false + - ntp.stats: + # fire only when the synchronization + # with with the 172.17.17.2 NTP server is lost + _args: + - 172.17.17.2 + synchronized: false + - ntp.stats: + # fire only when there's a NTP peer with + # synchronization stratum > 5 + stratum: '> 5' ''' -# Import Python libs from __future__ import absolute_import + +# Import Python std lib import re +import logging -# Import salt libs +# Import Salt modules from salt.ext import six +import salt.utils.napalm -# Import third party libs -try: - import napalm_base - HAS_NAPALM_BASE = True -except ImportError: - HAS_NAPALM_BASE = False +log = logging.getLogger(__name__) +_numeric_regex = re.compile('^(<|>|<=|>=|==|!=)\s*(\d+(\.\d+){0,1})$') +_numeric_operand = { + '<': '__lt__', + '>': '__gt__', + '>=': '__ge__', + '<=': '__le__', + '==': '__eq__', + '!=': '__ne__', +} __virtualname__ = 'napalm' -import logging -log = logging.getLogger(__name__) - def __virtual__(): - if HAS_NAPALM_BASE: - return __virtualname__ - return False + ''' + This beacon can only work when running under a regular or a proxy minion. + ''' + return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__) def _compare(cur_cmp, cur_struct): @@ -38,6 +93,7 @@ def _compare(cur_cmp, cur_struct): when there's a match. 
''' if isinstance(cur_cmp, dict) and isinstance(cur_struct, dict): + log.debug('Comparing dict to dict') for cmp_key, cmp_value in six.iteritems(cur_cmp): if cmp_key == '*': # matches any key from the source dictionary @@ -68,19 +124,43 @@ def _compare(cur_cmp, cur_struct): else: return _compare(cmp_value, cur_struct[cmp_key]) elif isinstance(cur_cmp, (list, tuple)) and isinstance(cur_struct, (list, tuple)): + log.debug('Comparing list to list') found = False for cur_cmp_ele in cur_cmp: for cur_struct_ele in cur_struct: found |= _compare(cur_cmp_ele, cur_struct_ele) return found + elif isinstance(cur_cmp, dict) and isinstance(cur_struct, (list, tuple)): + log.debug('Comparing dict to list (of dicts?)') + found = False + for cur_struct_ele in cur_struct: + found |= _compare(cur_cmp, cur_struct_ele) + return found elif isinstance(cur_cmp, bool) and isinstance(cur_struct, bool): + log.debug('Comparing booleans') return cur_cmp == cur_struct elif isinstance(cur_cmp, (six.string_types, six.text_type)) and \ isinstance(cur_struct, (six.string_types, six.text_type)): + log.debug('Comparing strings (and regex?)') + # Trying literal match matched = re.match(cur_cmp, cur_struct, re.I) if matched: return True - # we can enhance this to allow mathematical operations + return False + elif isinstance(cur_cmp, (six.integer_types, float)) and \ + isinstance(cur_struct, (six.integer_types, float)): + log.debug('Comparing numeric values') + # numeric compare + return cur_cmp == cur_struct + elif isinstance(cur_struct, (six.integer_types, float)) and \ + isinstance(cur_cmp, (six.string_types, six.text_type)): + # Comapring the numerical value agains a presumably mathematical value + log.debug('Comparing a numeric value (%d) with a string (%s)', cur_struct, cur_cmp) + numeric_compare = _numeric_regex.match(cur_cmp) + # determine if the value to compare agains is a mathematical operand + if numeric_compare: + compare_value = numeric_compare.group(2) + return getattr(float(cur_struct), 
_numeric_operand[numeric_compare.group(1)])(float(compare_value)) return False return False @@ -88,22 +168,6 @@ def _compare(cur_cmp, cur_struct): def beacon(config): ''' Watch napalm function and fire events. - - Example Config - - .. code-block:: yaml - - beacons: - napalm: - - net.interfaces: - '*': - is_up: False - - bgp.neighbors: - _args: - - 172.17.17.1 - global: - '*': - - up: False ''' log.debug('Executing napalm beacon with config:') log.debug(config) @@ -144,7 +208,7 @@ def beacon(config): fun=fun, cfg=fun_cfg )) - event['tag'] = '{fun}'.format(fun=fun) + event['tag'] = '{os}/{fun}'.format(os=__grains__['os'], fun=fun) event['fun'] = fun event['args'] = args event['kwargs'] = kwargs From bfc374abd1f70484d1f2036ef1bbeb37c3571cf1 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Mon, 4 Sep 2017 16:43:29 +0000 Subject: [PATCH 357/639] Index napalm beacon to autodoc --- doc/ref/beacons/all/index.rst | 1 + doc/ref/beacons/all/salt.beacons.napalm_beacon.rst | 6 ++++++ 2 files changed, 7 insertions(+) create mode 100644 doc/ref/beacons/all/salt.beacons.napalm_beacon.rst diff --git a/doc/ref/beacons/all/index.rst b/doc/ref/beacons/all/index.rst index c0970f4f6c..7fccfc5b15 100644 --- a/doc/ref/beacons/all/index.rst +++ b/doc/ref/beacons/all/index.rst @@ -22,6 +22,7 @@ beacon modules load log memusage + napalm_beacon network_info network_settings pkg diff --git a/doc/ref/beacons/all/salt.beacons.napalm_beacon.rst b/doc/ref/beacons/all/salt.beacons.napalm_beacon.rst new file mode 100644 index 0000000000..ff5bbc4b01 --- /dev/null +++ b/doc/ref/beacons/all/salt.beacons.napalm_beacon.rst @@ -0,0 +1,6 @@ +========================== +salt.beacons.napalm_beacon +========================== + +.. 
automodule:: salt.beacons.napalm_beacon + :members: From 649cf31da40275cc0575c7deaba3d194d5152633 Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Mon, 4 Sep 2017 16:45:23 +0000 Subject: [PATCH 358/639] Doc version added --- salt/beacons/napalm_beacon.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/beacons/napalm_beacon.py b/salt/beacons/napalm_beacon.py index 8e89561b4b..d0631fc911 100644 --- a/salt/beacons/napalm_beacon.py +++ b/salt/beacons/napalm_beacon.py @@ -5,6 +5,8 @@ NAPALM functions Watch napalm functions and fire events on specific triggers. +.. versionadded:: Oxygen + .. note:: The ``napalm`` beacon only work only when running under a regular or a proxy minion. From 42fd04f8dd3d86cac2f6bfa59a7e7fa93cd1deb7 Mon Sep 17 00:00:00 2001 From: Andreas Thienemann Date: Mon, 4 Sep 2017 20:21:22 +0200 Subject: [PATCH 359/639] Fix rh_ip template use for Fedora Salt assumes that all Fedora releases conform to the RHEL6 version. This means that on current Fedora releases an older interface template is used resulting in non-working routes. The logic could probably use a bit of rework as I could see future Fedora releases diverging more from RHEL but for now an additional check was added that handles Fedora 18 and higher as RHEL7 (which was branched off from F18). 
--- salt/modules/rh_ip.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/salt/modules/rh_ip.py b/salt/modules/rh_ip.py index 33d0955041..d29392f07a 100644 --- a/salt/modules/rh_ip.py +++ b/salt/modules/rh_ip.py @@ -1013,7 +1013,10 @@ def build_interface(iface, iface_type, enabled, **settings): salt '*' ip.build_interface eth0 eth ''' if __grains__['os'] == 'Fedora': - rh_major = '6' + if __grains__['osmajorrelease'] >= 18: + rh_major = '7' + else: + rh_major = '6' else: rh_major = __grains__['osrelease'][:1] From ca22f4f3b4a0a0bb21fe5df211668bb3dc5fb433 Mon Sep 17 00:00:00 2001 From: Lukas Erlacher Date: Fri, 1 Sep 2017 17:03:47 +1000 Subject: [PATCH 360/639] Add `replace` parameter to file.recurse state Simply add `replace` parameter and pass it through to `managed`. Closes #16313 --- salt/states/file.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/salt/states/file.py b/salt/states/file.py index c8a8a185f1..6a1429da7b 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -3060,6 +3060,7 @@ def recurse(name, sym_mode=None, template=None, context=None, + replace=True, defaults=None, include_empty=False, backup='', @@ -3148,6 +3149,11 @@ def recurse(name, The template option is required when recursively applying templates. + replace : True + If set to ``False`` and the file already exists, the file will not be + modified even if changes would otherwise be made. Permissions and + ownership will still be enforced, however. + context Overrides default context variables passed to the template. 
@@ -3337,8 +3343,8 @@ def recurse(name, if _ret['changes']: ret['changes'][path] = _ret['changes'] - def manage_file(path, source): - if clean and os.path.exists(path) and os.path.isdir(path): + def manage_file(path, source, replace): + if clean and os.path.exists(path) and os.path.isdir(path) and replace: _ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if __opts__['test']: _ret['comment'] = u'Replacing directory {0} with a ' \ @@ -3368,6 +3374,7 @@ def recurse(name, attrs=None, template=template, makedirs=True, + replace=replace, context=context, defaults=defaults, backup=backup, @@ -3424,7 +3431,7 @@ def recurse(name, for dirname in mng_dirs: manage_directory(dirname) for dest, src in mng_files: - manage_file(dest, src) + manage_file(dest, src, replace) if clean: # TODO: Use directory(clean=True) instead From ba7d00f58ef10dd441dbc90ea1d92f28eb92d7d1 Mon Sep 17 00:00:00 2001 From: Robin Lutz Date: Tue, 5 Sep 2017 09:34:54 +0200 Subject: [PATCH 361/639] use dep_getkey for atom extraction to allow non existing versions and packages, added unit test to verify behaviour --- salt/modules/ebuild.py | 30 +++++-- salt/modules/portage_config.py | 28 +++--- tests/unit/modules/test_portage_config.py | 102 +++++++++++++++++++--- 3 files changed, 131 insertions(+), 29 deletions(-) diff --git a/salt/modules/ebuild.py b/salt/modules/ebuild.py index ea41656eaf..5295c2652c 100644 --- a/salt/modules/ebuild.py +++ b/salt/modules/ebuild.py @@ -75,9 +75,20 @@ def _porttree(): def _p_to_cp(p): - ret = _porttree().dbapi.xmatch("match-all", p) - if ret: - return portage.cpv_getkey(ret[0]) + try: + ret = portage.dep_getkey(p) + if ret: + return ret + except portage.exception.InvalidAtom: + pass + + try: + ret = _porttree().dbapi.xmatch('bestmatch-visible', p) + if ret: + return portage.dep_getkey(ret) + except portage.exception.InvalidAtom: + pass + return None @@ -91,11 +102,14 @@ def _allnodes(): def _cpv_to_cp(cpv): - ret = portage.cpv_getkey(cpv) - if ret: - return 
ret - else: - return cpv + try: + ret = portage.dep_getkey(cpv) + if ret: + return ret + except portage.exception.InvalidAtom: + pass + + return cpv def _cpv_to_version(cpv): diff --git a/salt/modules/portage_config.py b/salt/modules/portage_config.py index 2a05ebb451..8441956bc3 100644 --- a/salt/modules/portage_config.py +++ b/salt/modules/portage_config.py @@ -75,6 +75,8 @@ def _get_config_file(conf, atom): if parts.cp == '*/*': # parts.repo will be empty if there is no repo part relative_path = parts.repo or "gentoo" + elif str(parts.cp).endswith('/*'): + relative_path = str(parts.cp).split("/")[0] + "_" else: relative_path = os.path.join(*[x for x in os.path.split(parts.cp) if x != '*']) else: @@ -92,9 +94,20 @@ def _p_to_cp(p): Convert a package name or a DEPEND atom to category/package format. Raises an exception if program name is ambiguous. ''' - ret = _porttree().dbapi.xmatch("match-all", p) - if ret: - return portage.cpv_getkey(ret[0]) + try: + ret = portage.dep_getkey(p) + if ret: + return ret + except portage.exception.InvalidAtom: + pass + + try: + ret = _porttree().dbapi.xmatch('bestmatch-visible', p) + if ret: + return portage.dep_getkey(ret) + except portage.exception.InvalidAtom: + pass + return None @@ -188,12 +201,7 @@ def _package_conf_file_to_dir(file_name): else: os.rename(path, path + '.tmpbak') os.mkdir(path, 0o755) - with salt.utils.files.fopen(path + '.tmpbak') as fh_: - for line in fh_: - line = line.strip() - if line and not line.startswith('#'): - append_to_package_conf(file_name, string=line) - os.remove(path + '.tmpbak') + os.rename(path + '.tmpbak', os.path.join(path, 'tmp')) return True else: os.mkdir(path, 0o755) @@ -218,7 +226,7 @@ def _package_conf_ordering(conf, clean=True, keep_backup=False): shutil.copy(file_path, file_path + '.bak') backup_files.append(file_path + '.bak') - if cp[0] == '/' or cp.split('/') > 2: + if cp[0] == '/' or len(cp.split('/')) > 2: with salt.utils.files.fopen(file_path) as fp_: 
rearrange.extend(fp_.readlines()) os.remove(file_path) diff --git a/tests/unit/modules/test_portage_config.py b/tests/unit/modules/test_portage_config.py index a0d0119954..bdff520f45 100644 --- a/tests/unit/modules/test_portage_config.py +++ b/tests/unit/modules/test_portage_config.py @@ -7,11 +7,14 @@ ''' # Import Python libs from __future__ import absolute_import +import re # Import Salt Testing libs from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import skipIf, TestCase from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch +from tests.support.paths import TMP +import salt.utils.files # Import salt libs import salt.modules.portage_config as portage_config @@ -19,27 +22,104 @@ import salt.modules.portage_config as portage_config @skipIf(NO_MOCK, NO_MOCK_REASON) class PortageConfigTestCase(TestCase, LoaderModuleMockMixin): - class DummyAtom(object): - def __init__(self, atom): - self.cp, self.repo = atom.split("::") if "::" in atom else (atom, None) + + + def __init__(self): + self.cp = None + self.repo = None + + def __call__(self, atom, *_, **__): + if atom == '#' or isinstance(atom, MagicMock): + self.repo = None + self.cp = None + return self + + # extract (and remove) repo + atom, self.repo = atom.split('::') if '::' in atom else (atom, None) + + # remove '>, >=, <=, =, ~' etc. 
+ atom = re.sub('[<>~+=]', '', atom) + # remove slots + atom = re.sub(':[0-9][^:]*', '', atom) + # remove version + atom = re.sub('-[0-9][\.0-9]*', '', atom) + + self.cp = atom + return self def setup_loader_modules(self): - self.portage = MagicMock() - self.addCleanup(delattr, self, 'portage') - return {portage_config: {'portage': self.portage}} + try: + import portage + return {} + except: + dummy_atom = self.DummyAtom() + self.portage = MagicMock() + self.portage.dep.Atom = MagicMock(side_effect=dummy_atom) + self.portage.dep_getkey = MagicMock(side_effect=lambda x: dummy_atom(x).cp) + self.portage.exception.InvalidAtom = Exception + self.addCleanup(delattr, self, 'portage') + return {portage_config: {'portage': self.portage}} def test_get_config_file_wildcards(self): pairs = [ ('*/*::repo', '/etc/portage/package.mask/repo'), ('*/pkg::repo', '/etc/portage/package.mask/pkg'), - ('cat/*', '/etc/portage/package.mask/cat'), + ('cat/*', '/etc/portage/package.mask/cat_'), ('cat/pkg', '/etc/portage/package.mask/cat/pkg'), ('cat/pkg::repo', '/etc/portage/package.mask/cat/pkg'), ] for (atom, expected) in pairs: - dummy_atom = self.DummyAtom(atom) - self.portage.dep.Atom = MagicMock(return_value=dummy_atom) - with patch.object(portage_config, '_p_to_cp', MagicMock(return_value=dummy_atom.cp)): - self.assertEqual(portage_config._get_config_file('mask', atom), expected) + self.assertEqual(portage_config._get_config_file('mask', atom), expected) + + def test_enforce_nice_config(self): + atoms = [ + ('*/*::repo', 'repo'), + ('*/pkg1::repo', 'pkg1'), + ('cat/*', 'cat_'), + ('cat/pkg2', 'cat/pkg2'), + ('cat/pkg3::repo', 'cat/pkg3'), + ('cat/pkg5-0.0.0.0:0', 'cat/pkg5'), + ('>cat/pkg6-0.0.0.0:0::repo', 'cat/pkg6'), + ('<=cat/pkg7-0.0.0.0', 'cat/pkg7'), + ('=cat/pkg8-0.0.0.0', 'cat/pkg8'), + ] + + supported = [ + ('accept_keywords', ['~amd64']), + ('env', ['glibc.conf']), + ('license', ['LICENCE1', 'LICENCE2']), + ('mask', ['']), + ('properties', ['* -interactive']), + ('unmask', 
['']), + ('use', ['apple', '-banana', 'ananas', 'orange']), + ] + + base_path = TMP + '/package.{0}' + + def make_line(atom, addition): + return atom + (' ' + addition if addition != '' else '') + '\n' + + for typ, additions in supported: + path = base_path.format(typ) + with salt.utils.files.fopen(path, 'a') as fh: + for atom, _ in atoms: + for addition in additions: + line = make_line(atom, addition) + fh.write('# comment for: ' + line) + fh.write(line) + + with patch.object(portage_config, 'BASE_PATH', base_path): + with patch.object(portage_config, '_merge_flags', lambda l1, l2, _: list(set(l1 + l2))): + portage_config.enforce_nice_config() + + for typ, additions in supported: + for atom, file_name in atoms: + with salt.utils.files.fopen(base_path.format(typ) + "/" + file_name, 'r') as fh: + for line in fh: + self.assertTrue(atom in line, msg="'{}' not in '{}'".format(addition, line)) + for addition in additions: + self.assertTrue(addition in line, msg="'{}' not in '{}'".format(addition, line)) + From 2701346a18e5cb5caac16868f011e9c33deb07bc Mon Sep 17 00:00:00 2001 From: Kunal Ajay Bajpai Date: Tue, 5 Sep 2017 14:23:41 +0530 Subject: [PATCH 362/639] Fix save_load to add doc if not exists --- salt/returners/couchbase_return.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/returners/couchbase_return.py b/salt/returners/couchbase_return.py index 24c3a9105a..b299eb2dff 100644 --- a/salt/returners/couchbase_return.py +++ b/salt/returners/couchbase_return.py @@ -213,8 +213,8 @@ def save_load(jid, clear_load, minion=None): try: jid_doc = cb_.get(str(jid)) except couchbase.exceptions.NotFoundError: - log.warning('Could not write job cache file for jid: {0}'.format(jid)) - return False + cb_.add(str(jid), {}, ttl=_get_ttl()) + jid_doc = cb_.get(str(jid)) jid_doc.value['load'] = clear_load cb_.replace(str(jid), jid_doc.value, cas=jid_doc.cas, ttl=_get_ttl()) From a0f9903788c2dfc8f3eee04e491478e5e959249a Mon Sep 17 00:00:00 2001 From: Mircea 
Ulinic Date: Tue, 5 Sep 2017 10:04:44 +0000 Subject: [PATCH 363/639] Lint and better documentation --- salt/beacons/napalm_beacon.py | 154 +++++++++++++++++++++++++++++----- 1 file changed, 133 insertions(+), 21 deletions(-) diff --git a/salt/beacons/napalm_beacon.py b/salt/beacons/napalm_beacon.py index d0631fc911..827a80ba39 100644 --- a/salt/beacons/napalm_beacon.py +++ b/salt/beacons/napalm_beacon.py @@ -3,25 +3,104 @@ NAPALM functions ================ -Watch napalm functions and fire events on specific triggers. - .. versionadded:: Oxygen +Watch NAPALM functions and fire events on specific triggers. + .. note:: - The ``napalm`` beacon only work only when running under - a regular or a proxy minion. + + The ``NAPALM`` beacon only works only when running under + a regular Minion or a Proxy Minion, managed via NAPALM_. + Check the documentation for the + :mod:`NAPALM proxy module `. + + _NAPALM: http://napalm.readthedocs.io/en/latest/index.html The configuration accepts a list of Salt functions to be invoked, and the corresponding output hierarchy that should -be matched against. When we want to match on any element -at a certain level, we can have ``*`` to match anything. +be matched against. To invoke a function with certain +arguments, they can be specified using the ``_args`` key, or +``_kwargs`` for more specific key-value arguments. -To invoke a certain function with certain arguments, -they can be specified using the ``_args`` key, or -``_kwargs`` to configure more specific key-value arguments. +The match structure follows the output hierarchy of the NAPALM +functions, under the ``out`` key. -The right operand can also accept mathematical comparisons -when applicable (i.e., ``<``, ``<=``, ``!=``, ``>``, ``>=`` etc.). +For example, the following is normal structure returned by the +:mod:`ntp.stats ` execution function: + +.. 
code-block:: json + + { + "comment": "", + "result": true, + "out": [ + { + "referenceid": ".GPSs.", + "remote": "172.17.17.1", + "synchronized": true, + "reachability": 377, + "offset": 0.461, + "when": "860", + "delay": 143.606, + "hostpoll": 1024, + "stratum": 1, + "jitter": 0.027, + "type": "-" + }, + { + "referenceid": ".INIT.", + "remote": "172.17.17.2", + "synchronized": false, + "reachability": 0, + "offset": 0.0, + "when": "-", + "delay": 0.0, + "hostpoll": 1024, + "stratum": 16, + "jitter": 4000.0, + "type": "-" + } + ] + } + +In order to fire events when the synchronization is lost with +one of the NTP peers, e.g., ``172.17.17.2``, we can match it explicitly as: + +.. code-block:: yaml + + ntp.stats: + remote: 172.17.17.2 + synchronized: false + +There is one single nesting level, as the output of ``ntp.stats`` is +just a list of dictionaries, and this beacon will compare each dictionary +from the list with the structure examplified above. + +.. note:: + + When we want to match on any element at a certain level, we can + configure ``*`` to match anything. + +Considering a more complex structure consisting on multiple nested levels, +e.g., the output of the :mod:`bgp.neighbors ` +execution function, to check when any neighbor from the ``global`` +routing table is down, the match structure would have the format: + +.. code-block:: yaml + + bgp.neighbors: + global: + '*': + up: false + +The match structure above will match any BGP neighbor, with +any network (``*`` matches any AS number), under the ``global`` VRF. +In other words, this beacon will push an event on the Salt bus +when there's a BGP neighbor down. + +The right operand can also accept mathematical operations +(i.e., ``<``, ``<=``, ``!=``, ``>``, ``>=`` etc.) when comparing +numerical values. 
Configuration Example: @@ -37,13 +116,6 @@ Configuration Example: # fire events only when the xe-0/0/0 interface is down 'xe-0/0/0': is_up: false - - bgp.neighbors: - # fire events only when the 172.17.17.1 BGP neighbor is down - _args: - - 172.17.17.1 - global: - '*': - up: false - ntp.stats: # fire when there's any NTP peer unsynchornized synchronized: false @@ -57,6 +129,44 @@ Configuration Example: # fire only when there's a NTP peer with # synchronization stratum > 5 stratum: '> 5' + +Event structure example: + +.. code-block:: json + + salt/beacon/edge01.bjm01/napalm/junos/ntp.stats { + "_stamp": "2017-09-05T09:51:09.377202", + "args": [], + "data": { + "comment": "", + "out": [ + { + "delay": 0.0, + "hostpoll": 1024, + "jitter": 4000.0, + "offset": 0.0, + "reachability": 0, + "referenceid": ".INIT.", + "remote": "172.17.17.1", + "stratum": 16, + "synchronized": false, + "type": "-", + "when": "-" + } + ], + "result": true + }, + "fun": "ntp.stats", + "id": "edge01.bjm01", + "kwargs": {}, + "match": { + "stratum": "> 5" + } + } + +The event examplified above has been fired when the device +identified by the Minion id ``edge01.bjm01`` has been synchronized +with a NTP server at a stratum level greater than 5. ''' from __future__ import absolute_import @@ -69,7 +179,8 @@ from salt.ext import six import salt.utils.napalm log = logging.getLogger(__name__) -_numeric_regex = re.compile('^(<|>|<=|>=|==|!=)\s*(\d+(\.\d+){0,1})$') +_numeric_regex = re.compile(r'^(<|>|<=|>=|==|!=)\s*(\d+(\.\d+){0,1})$') +# the numeric regex will match the right operand, e.g '>= 20', '< 100', '!= 20', '< 1000.12' etc. 
_numeric_operand = { '<': '__lt__', '>': '__gt__', @@ -77,7 +188,8 @@ _numeric_operand = { '<=': '__le__', '==': '__eq__', '!=': '__ne__', -} +} # mathematical operand - private method map + __virtualname__ = 'napalm' @@ -191,7 +303,7 @@ def beacon(config): log.debug('Got the reply from the minion:') log.debug(fun_ret) if not fun_ret.get('result', False): - log.error('Error whilst executing {fun}'.format(fun)) + log.error('Error whilst executing {}'.format(fun)) log.error(fun_ret) continue fun_ret_out = fun_ret['out'] From ed9b45ceaaafedf09f52570ed865ce71eff3077f Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 5 Sep 2017 10:11:11 -0400 Subject: [PATCH 364/639] Adjust a couple more tests to handle __utils__['state.check_result'] mocking --- salt/utils/__init__.py | 6 +++--- tests/unit/modules/test_dockermod.py | 9 ++++++++- tests/unit/modules/test_state.py | 14 ++++++++++---- 3 files changed, 21 insertions(+), 8 deletions(-) diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 8cf709751c..85a70a3396 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -3286,7 +3286,7 @@ def search_onfail_requisites(sid, highstate): import salt.utils.state salt.utils.versions.warn_until( 'Neon', - 'Use of \'salt.utils.search_onfail_requisites\' detected. This function' + 'Use of \'salt.utils.search_onfail_requisites\' detected. This function ' 'has been moved to \'salt.utils.state.search_onfail_requisites\' as of ' 'Salt Oxygen. This warning will be removed in Salt Neon.' ) @@ -3313,7 +3313,7 @@ def check_onfail_requisites(state_id, state_result, running, highstate): import salt.utils.state salt.utils.versions.warn_until( 'Neon', - 'Use of \'salt.utils.check_onfail_requisites\' detected. This function' + 'Use of \'salt.utils.check_onfail_requisites\' detected. This function ' 'has been moved to \'salt.utils.state.check_onfail_requisites\' as of ' 'Salt Oxygen. This warning will be removed in Salt Neon.' 
) @@ -3334,7 +3334,7 @@ def check_state_result(running, recurse=False, highstate=None): import salt.utils.state salt.utils.versions.warn_until( 'Neon', - 'Use of \'salt.utils.check_state_result\' detected. This function' + 'Use of \'salt.utils.check_state_result\' detected. This function ' 'has been moved to \'salt.utils.state.check_result\' as of ' 'Salt Oxygen. This warning will be removed in Salt Neon.' ) diff --git a/tests/unit/modules/test_dockermod.py b/tests/unit/modules/test_dockermod.py index 00fad0c3c5..acae7259f4 100644 --- a/tests/unit/modules/test_dockermod.py +++ b/tests/unit/modules/test_dockermod.py @@ -18,6 +18,8 @@ from tests.support.mock import ( ) # Import Salt Libs +import salt.config +import salt.loader from salt.ext.six.moves import range from salt.exceptions import CommandExecutionError import salt.modules.dockermod as docker_mod @@ -39,7 +41,12 @@ class DockerTestCase(TestCase, LoaderModuleMockMixin): Validate docker module ''' def setup_loader_modules(self): - return {docker_mod: {'__context__': {'docker.docker_version': ''}}} + utils = salt.loader.utils( + salt.config.DEFAULT_MINION_OPTS, + whitelist=['state'] + ) + return {docker_mod: {'__context__': {'docker.docker_version': ''}, + '__utils__': utils}} try: docker_version = docker_mod.docker.version_info diff --git a/tests/unit/modules/test_state.py b/tests/unit/modules/test_state.py index caa89fdf43..b6dc97af05 100644 --- a/tests/unit/modules/test_state.py +++ b/tests/unit/modules/test_state.py @@ -26,6 +26,7 @@ import salt.utils.odict import salt.utils.platform import salt.modules.state as state from salt.exceptions import SaltInvocationError +from salt.ext import six class MockState(object): @@ -984,7 +985,12 @@ class StateTestCase(TestCase, LoaderModuleMockMixin): MockTarFile.path = "" MockJson.flag = False - with patch('salt.utils.files.fopen', mock_open()), \ - patch.dict(state.__utils__, {'state.check_result': MagicMock(return_value=True)}): - 
self.assertTrue(state.pkg("/tmp/state_pkg.tgz", - 0, "md5")) + if six.PY2: + with patch('salt.utils.files.fopen', mock_open()), \ + patch.dict(state.__utils__, {'state.check_result': MagicMock(return_value=True)}): + self.assertTrue(state.pkg("/tmp/state_pkg.tgz", + 0, "md5")) + else: + with patch('salt.utils.files.fopen', mock_open()): + self.assertTrue(state.pkg("/tmp/state_pkg.tgz", + 0, "md5")) From 9e64ea814c56660640b8a0f25416907097c2eeb6 Mon Sep 17 00:00:00 2001 From: Robert Mader Date: Tue, 5 Sep 2017 18:16:15 +0200 Subject: [PATCH 365/639] Add quiet mode to lvdisplay --- salt/modules/linux_lvm.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/salt/modules/linux_lvm.py b/salt/modules/linux_lvm.py index ed6411d899..1001879b34 100644 --- a/salt/modules/linux_lvm.py +++ b/salt/modules/linux_lvm.py @@ -159,7 +159,7 @@ def vgdisplay(vgname=''): return ret -def lvdisplay(lvname=''): +def lvdisplay(lvname='', quiet=False): ''' Return information about the logical volume(s) @@ -174,7 +174,10 @@ def lvdisplay(lvname=''): cmd = ['lvdisplay', '-c'] if lvname: cmd.append(lvname) - cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False) + if quiet: + cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') + else: + cmd_ret = __salt__['cmd.run_all'](cmd, python_shell=False) if cmd_ret['retcode'] != 0: return {} From a4c693bf2f58722391aa4be43fa7fd4c331c9bc4 Mon Sep 17 00:00:00 2001 From: Robert Mader Date: Tue, 5 Sep 2017 18:18:58 +0200 Subject: [PATCH 366/639] Use quiet mode of lvdisplay in lvpresent --- salt/states/lvm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/lvm.py b/salt/states/lvm.py index ffc9054aab..9a2378fe46 100644 --- a/salt/states/lvm.py +++ b/salt/states/lvm.py @@ -268,7 +268,7 @@ def lv_present(name, else: lvpath = '/dev/{0}/{1}'.format(vgname, name) - if __salt__['lvm.lvdisplay'](lvpath): + if __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 
'Logical Volume {0} already present'.format(name) elif __opts__['test']: ret['comment'] = 'Logical Volume {0} is set to be created'.format(name) From fc9c61d12e565c73a8e273a787c72c5f9c233061 Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Wed, 6 Sep 2017 02:28:16 +1000 Subject: [PATCH 367/639] Update win_pkg.py --- salt/modules/win_pkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py index 1f85f49fcd..1f6f20b8a3 100644 --- a/salt/modules/win_pkg.py +++ b/salt/modules/win_pkg.py @@ -1204,7 +1204,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): if use_msiexec: cmd = msiexec arguments = ['/i', cached_pkg] - if pkginfo['version_num'].get('allusers', True): + if pkginfo[version_num].get('allusers', True): arguments.append('ALLUSERS="1"') arguments.extend(salt.utils.shlex_split(install_flags)) else: From 3c6ae99a775e15e00f6d612cff797f8c6f3c4596 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Tue, 5 Sep 2017 10:35:05 -0600 Subject: [PATCH 368/639] never-download got readded https://github.com/pypa/virtualenv/pull/840 --- salt/modules/virtualenv_mod.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/salt/modules/virtualenv_mod.py b/salt/modules/virtualenv_mod.py index 492c5bd3d9..6fbe94d647 100644 --- a/salt/modules/virtualenv_mod.py +++ b/salt/modules/virtualenv_mod.py @@ -198,12 +198,10 @@ def create(path, for entry in extra_search_dir: cmd.append('--extra-search-dir={0}'.format(entry)) if never_download is True: - if virtualenv_version_info >= (1, 10): + if virtualenv_version_info >= (1, 10) and virtualenv_version_info < (14, 0, 0): log.info( - 'The virtualenv \'--never-download\' option has been ' - 'deprecated in virtualenv(>=1.10), as such, the ' - '\'never_download\' option to `virtualenv.create()` has ' - 'also been deprecated and it\'s not necessary anymore.' + '--never-download was deprecated in 1.10.0, but reimplemented in 14.0.0. 
' + 'If this feature is needed, please install a supported virtualenv version.' ) else: cmd.append('--never-download') From 1b4583b42599dce473b5df76babd42c9c6d779d2 Mon Sep 17 00:00:00 2001 From: John Jawed Date: Fri, 1 Sep 2017 18:33:28 -0700 Subject: [PATCH 369/639] Fix #43295, better handling of consul initialization issues --- salt/cache/consul.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/salt/cache/consul.py b/salt/cache/consul.py index b545c96ead..0148c73bf0 100644 --- a/salt/cache/consul.py +++ b/salt/cache/consul.py @@ -4,6 +4,8 @@ Minion data cache plugin for Consul key/value data store. .. versionadded:: 2016.11.2 +:depends: python-consul >= 0.2.0 + It is up to the system administrator to set up and configure the Consul infrastructure. All is needed for this plugin is a working Consul agent with a read-write access to the key-value store. @@ -81,8 +83,11 @@ def __virtual__(): 'verify': __opts__.get('consul.verify', True), } - global api - api = consul.Consul(**consul_kwargs) + try: + global api + api = consul.Consul(**consul_kwargs) + except AttributeError: + return (False, "Failed to invoke consul.Consul, please make sure you have python-consul >= 0.2.0 installed") return __virtualname__ From 23d9abb5606d770ad1b3a663f7d38b93aff1a771 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Tue, 5 Sep 2017 11:20:47 -0600 Subject: [PATCH 370/639] ipaddr_start ipaddr_end for el7 --- salt/templates/rh_ip/rh7_eth.jinja | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/templates/rh_ip/rh7_eth.jinja b/salt/templates/rh_ip/rh7_eth.jinja index 62c871ae63..d60932dd20 100644 --- a/salt/templates/rh_ip/rh7_eth.jinja +++ b/salt/templates/rh_ip/rh7_eth.jinja @@ -15,6 +15,8 @@ DEVICE="{{name}}" {%endif%}{% if onparent %}ONPARENT={{onparent}} {%endif%}{% if ipv4_failure_fatal %}IPV4_FAILURE_FATAL="{{ipv4_failure_fatal}}" {%endif%}{% if ipaddr %}IPADDR="{{ipaddr}}" +{%endif%}{% if ipaddr_start %}IPADDR_START="{{ipaddr_start}}" 
+{%endif%}{% if ipaddr_end %}IPADDR_END="{{ipaddr_end}}" {%endif%}{% if netmask %}NETMASK="{{netmask}}" {%endif%}{% if prefix %}PREFIX="{{prefix}}" {%endif%}{% if gateway %}GATEWAY="{{gateway}}" From 9911b04208e1b4561128f79e7701110d76b33dbf Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Tue, 5 Sep 2017 13:52:02 -0600 Subject: [PATCH 371/639] fix test --- tests/unit/modules/virtualenv_test.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tests/unit/modules/virtualenv_test.py b/tests/unit/modules/virtualenv_test.py index 5f686266c1..3122a7c343 100644 --- a/tests/unit/modules/virtualenv_test.py +++ b/tests/unit/modules/virtualenv_test.py @@ -112,11 +112,9 @@ class VirtualenvTestCase(TestCase): # Are we logging the deprecation information? self.assertIn( - 'INFO:The virtualenv \'--never-download\' option has been ' - 'deprecated in virtualenv(>=1.10), as such, the ' - '\'never_download\' option to `virtualenv.create()` has ' - 'also been deprecated and it\'s not necessary anymore.', - handler.messages + 'INFO:--never-download was deprecated in 1.10.0, ' + 'but reimplemented in 14.0.0. If this feature is needed, ' + 'please install a supported virtualenv version.' ) def test_issue_6031_multiple_extra_search_dirs(self): From ba63920874481d41e8735e2b2cc263e881fe5644 Mon Sep 17 00:00:00 2001 From: Aneesh Agrawal Date: Sat, 19 Aug 2017 00:06:15 +0000 Subject: [PATCH 372/639] Support states returning a list for ret['comment'] Some states are complicated and multiple subparts, or maybe cross-call into __states__ if they manage subresources. In these cases, they will have multiple comments. Make this more ergonomic by supporting a list of strings as the value for ret['comment'] in state returns and documenting this. By joining comments on newlines, it is possible to combine single-line and multi-line comments cleanly, as opposed to e.g. commas. The driving impetus for this is some of the boto modules. 
An update to the boto_sqs module is included as an example. Add a check that outgoing state return data has the right shape, and add a testcase as well. Fix the NPM state tests and the saltmod runner & wheel state functions to comply with the prescribed format. --- doc/ref/states/writing.rst | 7 ++- doc/topics/releases/oxygen.rst | 6 +++ salt/state.py | 59 ++++++++++++++++++++++--- salt/states/boto_sqs.py | 64 ++++++++++++++-------------- salt/states/saltmod.py | 22 +++++++--- tests/integration/states/test_npm.py | 4 +- tests/unit/states/test_boto_sqs.py | 18 ++++---- tests/unit/states/test_saltmod.py | 8 ++-- tests/unit/test_state.py | 28 +++++++++++- 9 files changed, 154 insertions(+), 62 deletions(-) diff --git a/doc/ref/states/writing.rst b/doc/ref/states/writing.rst index f278df5294..5e94c1ccc3 100644 --- a/doc/ref/states/writing.rst +++ b/doc/ref/states/writing.rst @@ -153,7 +153,12 @@ A State Module must return a dict containing the following keys/values: However, if a state is going to fail and this can be determined in test mode without applying the change, ``False`` can be returned. -- **comment:** A string containing a summary of the result. +- **comment:** A list of strings or a single string summarizing the result. + Note that support for lists of strings is available as of Salt Oxygen. + Lists of strings will be joined with newlines to form the final comment; + this is useful to allow multiple comments from subparts of a state. + Prefer to keep line lengths short (use multiple lines as needed), + and end with punctuation (e.g. a period) to delimit multiple comments. The return data can also, include the **pchanges** key, this stands for `predictive changes`. 
The **pchanges** key informs the State system what diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index 9396a1d74b..7227184477 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -110,6 +110,12 @@ Support has been added to the ``virtual`` grain for detecting Solaris LDOMs running on T-Series SPARC hardware. The ``virtual_subtype`` grain is populated as a list of domain roles. +Lists of comments in state returns +---------------------------------- + +State functions can now return a list of strings for the ``comment`` field, +as opposed to only a single string. +This is meant to ease writing states with multiple or multi-part comments. Beacon configuration changes ---------------------------------------- diff --git a/salt/state.py b/salt/state.py index 004ccdaedc..c46da64578 100644 --- a/salt/state.py +++ b/salt/state.py @@ -969,22 +969,65 @@ class State(object): elif data[u'state'] in (u'pkg', u'ports'): self.module_refresh() - def verify_ret(self, ret): + @staticmethod + def verify_ret(ret): ''' - Verify the state return data + Perform basic verification of the raw state return data ''' if not isinstance(ret, dict): raise SaltException( - u'Malformed state return, return must be a dict' - ) + u'Malformed state return, return must be a dict' + ) bad = [] for val in [u'name', u'result', u'changes', u'comment']: if val not in ret: bad.append(val) if bad: - raise SaltException( - u'The following keys were not present in the state ' - u'return: {0}'.format(u','.join(bad))) + m = u'The following keys were not present in the state return: {0}' + raise SaltException(m.format(u','.join(bad))) + + @staticmethod + def munge_ret_for_export(ret): + ''' + Process raw state return data to make it suitable for export, + to ensure consistency of the data format seen by external systems + ''' + # We support lists of strings for ret['comment'] internal + # to the state system for improved ergonomics. 
+ # However, to maintain backwards compatability with external tools, + # the list representation is not allowed to leave the state system, + # and should be converted like this at external boundaries. + if isinstance(ret[u'comment'], list): + ret[u'comment'] = u'\n'.join(ret[u'comment']) + + @staticmethod + def verify_ret_for_export(ret): + ''' + Verify the state return data for export outside the state system + ''' + State.verify_ret(ret) + + for key in [u'name', u'comment']: + if not isinstance(ret[key], six.string_types): + msg = ( + u'The value for the {0} key in the state return ' + u'must be a string, found {1}' + ) + raise SaltException(msg.format(key, repr(ret[key]))) + + if ret[u'result'] not in [True, False, None]: + msg = ( + u'The value for the result key in the state return ' + u'must be True, False, or None, found {0}' + ) + raise SaltException(msg.format(repr(ret[u'result']))) + + if not isinstance(ret[u'changes'], dict): + msg = ( + u'The value for the changes key in the state return ' + u'must be a dict, found {0}' + ) + raise SaltException(msg.format(repr(ret[u'changes']))) def verify_data(self, data): ''' @@ -1847,6 +1890,7 @@ class State(object): if u'check_cmd' in low and u'{0[state]}.mod_run_check_cmd'.format(low) not in self.states: ret.update(self._run_check_cmd(low)) self.verify_ret(ret) + self.munge_ret_for_export(ret) except Exception: trb = traceback.format_exc() # There are a number of possibilities to not have the cdata @@ -1874,6 +1918,7 @@ class State(object): self.state_con.pop('runas') self.state_con.pop('runas_password') + self.verify_ret_for_export(ret) # If format_call got any warnings, let's show them to the user if u'warnings' in cdata: diff --git a/salt/states/boto_sqs.py b/salt/states/boto_sqs.py index 8a33e078ee..e9b142f864 100644 --- a/salt/states/boto_sqs.py +++ b/salt/states/boto_sqs.py @@ -108,8 +108,12 @@ def present( A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and 
keyid. ''' - comments = [] - ret = {'name': name, 'result': True, 'changes': {}} + ret = { + 'name': name, + 'result': True, + 'comment': [], + 'changes': {}, + } r = __salt__['boto_sqs.exists']( name, @@ -120,17 +124,18 @@ def present( ) if 'error' in r: ret['result'] = False - ret['comment'] = '\n'.join(comments + [str(r['error'])]) + ret['comment'].append(r['error']) return ret if r['result']: - comments.append('SQS queue {0} present.'.format(name)) + ret['comment'].append('SQS queue {0} present.'.format(name)) else: if __opts__['test']: ret['result'] = None - comments.append('SQS queue {0} is set to be created.'.format(name)) + ret['comment'].append( + 'SQS queue {0} is set to be created.'.format(name), + ) ret['pchanges'] = {'old': None, 'new': name} - ret['comment'] = '\n'.join(comments) return ret r = __salt__['boto_sqs.create']( @@ -143,22 +148,18 @@ def present( ) if 'error' in r: ret['result'] = False - comments.append('Failed to create SQS queue {0}: {1}'.format( - name, - str(r['error']), - )) - ret['comment'] = '\n'.join(comments) + ret['comment'].append( + 'Failed to create SQS queue {0}: {1}'.format(name, r['error']), + ) return ret - comments.append('SQS queue {0} created.'.format(name)) + ret['comment'].append('SQS queue {0} created.'.format(name)) ret['changes']['old'] = None ret['changes']['new'] = name # Return immediately, as the create call also set all attributes - ret['comment'] = '\n'.join(comments) return ret if not attributes: - ret['comment'] = '\n'.join(comments) return ret r = __salt__['boto_sqs.get_attributes']( @@ -170,10 +171,9 @@ def present( ) if 'error' in r: ret['result'] = False - comments.append('Failed to get queue attributes: {0}'.format( - str(r['error']), - )) - ret['comment'] = '\n'.join(comments) + ret['comment'].append( + 'Failed to get queue attributes: {0}'.format(r['error']), + ) return ret current_attributes = r['result'] @@ -195,8 +195,7 @@ def present( attr_names = ', '.join(attrs_to_set) if not attrs_to_set: - 
comments.append('Queue attributes already set correctly.') - ret['comment'] = '\n'.join(comments) + ret['comment'].append('Queue attributes already set correctly.') return ret final_attributes = current_attributes.copy() @@ -218,12 +217,13 @@ def present( if __opts__['test']: ret['result'] = None - comments.append('Attribute(s) {0} set to be updated:'.format( - attr_names, - )) - comments.append(attributes_diff) + ret['comment'].append( + 'Attribute(s) {0} set to be updated:\n{1}'.format( + attr_names, + attributes_diff, + ) + ) ret['pchanges'] = {'attributes': {'diff': attributes_diff}} - ret['comment'] = '\n'.join(comments) return ret r = __salt__['boto_sqs.set_attributes']( @@ -236,15 +236,15 @@ def present( ) if 'error' in r: ret['result'] = False - comments.append('Failed to set queue attributes: {0}'.format( - str(r['error']), - )) - ret['comment'] = '\n'.join(comments) + ret['comment'].append( + 'Failed to set queue attributes: {0}'.format(r['error']), + ) return ret - comments.append('Updated SQS queue attribute(s) {0}.'.format(attr_names)) + ret['comment'].append( + 'Updated SQS queue attribute(s) {0}.'.format(attr_names), + ) ret['changes']['attributes'] = {'diff': attributes_diff} - ret['comment'] = '\n'.join(comments) return ret @@ -291,7 +291,7 @@ def absent( if not r['result']: ret['comment'] = 'SQS queue {0} does not exist in {1}.'.format( name, - region + region, ) return ret diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index ae48a7e334..6d9315ac77 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -742,18 +742,26 @@ def runner(name, **kwargs): if isinstance(runner_return, dict) and 'Error' in runner_return: out['success'] = False if not out.get('success', True): + cmt = "Runner function '{0}' failed{1}.".format( + name, + ' with return {0}'.format(runner_return) if runner_return else '', + ) ret = { 'name': name, 'result': False, 'changes': {}, - 'comment': runner_return if runner_return else "Runner function '{0}' 
failed without comment.".format(name) + 'comment': cmt, } else: + cmt = "Runner function '{0}' executed{1}.".format( + name, + ' with return {0}'.format(runner_return) if runner_return else '', + ) ret = { 'name': name, 'result': True, - 'changes': runner_return if runner_return else {}, - 'comment': "Runner function '{0}' executed.".format(name) + 'changes': {}, + 'comment': cmt, } ret['__orchestration__'] = True @@ -802,14 +810,14 @@ def wheel(name, **kwargs): **kwargs) ret['result'] = True - ret['comment'] = "Wheel function '{0}' executed.".format(name) - ret['__orchestration__'] = True if 'jid' in out: ret['__jid__'] = out['jid'] runner_return = out.get('return') - if runner_return: - ret['changes'] = runner_return + ret['comment'] = "Wheel function '{0}' executed{1}.".format( + name, + ' with return {0}'.format(runner_return) if runner_return else '', + ) return ret diff --git a/tests/integration/states/test_npm.py b/tests/integration/states/test_npm.py index 13b002d78d..d93825b593 100644 --- a/tests/integration/states/test_npm.py +++ b/tests/integration/states/test_npm.py @@ -54,7 +54,7 @@ class NpmStateTest(ModuleCase, SaltReturnAssertsMixin): Basic test to determine if NPM module successfully installs multiple packages. ''' - ret = self.run_state('npm.installed', name=None, pkgs=['pm2', 'grunt']) + ret = self.run_state('npm.installed', name='unused', pkgs=['pm2', 'grunt']) self.assertSaltTrueReturn(ret) @skipIf(salt.utils.path.which('npm') and LooseVersion(cmd.run('npm -v')) >= LooseVersion(MAX_NPM_VERSION), @@ -64,5 +64,5 @@ class NpmStateTest(ModuleCase, SaltReturnAssertsMixin): ''' Basic test to determine if NPM successfully cleans its cached packages. 
''' - ret = self.run_state('npm.cache_cleaned', name=None, force=True) + ret = self.run_state('npm.cache_cleaned', name='unused', force=True) self.assertSaltTrueReturn(ret) diff --git a/tests/unit/states/test_boto_sqs.py b/tests/unit/states/test_boto_sqs.py index 4c50a5449f..80672e87f4 100644 --- a/tests/unit/states/test_boto_sqs.py +++ b/tests/unit/states/test_boto_sqs.py @@ -62,15 +62,15 @@ class BotoSqsTestCase(TestCase, LoaderModuleMockMixin): 'boto_sqs.create': mock_bool, 'boto_sqs.get_attributes': mock_attr}): with patch.dict(boto_sqs.__opts__, {'test': False}): - comt = 'Failed to create SQS queue {0}: create error'.format( + comt = ['Failed to create SQS queue {0}: create error'.format( name, - ) + )] ret = base_ret.copy() ret.update({'result': False, 'comment': comt}) self.assertDictEqual(boto_sqs.present(name), ret) with patch.dict(boto_sqs.__opts__, {'test': True}): - comt = 'SQS queue {0} is set to be created.'.format(name) + comt = ['SQS queue {0} is set to be created.'.format(name)] ret = base_ret.copy() ret.update({ 'result': None, @@ -85,17 +85,19 @@ class BotoSqsTestCase(TestCase, LoaderModuleMockMixin): -{} +DelaySeconds: 20 ''') - comt = textwrap.dedent('''\ - SQS queue mysqs present. 
- Attribute(s) DelaySeconds set to be updated: - ''') + diff + comt = [ + 'SQS queue mysqs present.', + 'Attribute(s) DelaySeconds set to be updated:\n{0}'.format( + diff, + ), + ] ret.update({ 'comment': comt, 'pchanges': {'attributes': {'diff': diff}}, }) self.assertDictEqual(boto_sqs.present(name, attributes), ret) - comt = ('SQS queue mysqs present.') + comt = ['SQS queue mysqs present.'] ret = base_ret.copy() ret.update({'result': True, 'comment': comt}) self.assertDictEqual(boto_sqs.present(name), ret) diff --git a/tests/unit/states/test_saltmod.py b/tests/unit/states/test_saltmod.py index ecfd891476..ad8a5d7216 100644 --- a/tests/unit/states/test_saltmod.py +++ b/tests/unit/states/test_saltmod.py @@ -251,8 +251,8 @@ class SaltmodTestCase(TestCase, LoaderModuleMockMixin): ''' name = 'state' - ret = {'changes': True, 'name': 'state', 'result': True, - 'comment': 'Runner function \'state\' executed.', + ret = {'changes': {}, 'name': 'state', 'result': True, + 'comment': 'Runner function \'state\' executed with return True.', '__orchestration__': True} runner_mock = MagicMock(return_value={'return': True}) @@ -267,8 +267,8 @@ class SaltmodTestCase(TestCase, LoaderModuleMockMixin): ''' name = 'state' - ret = {'changes': True, 'name': 'state', 'result': True, - 'comment': 'Wheel function \'state\' executed.', + ret = {'changes': {}, 'name': 'state', 'result': True, + 'comment': 'Wheel function \'state\' executed with return True.', '__orchestration__': True} wheel_mock = MagicMock(return_value={'return': True}) diff --git a/tests/unit/test_state.py b/tests/unit/test_state.py index 3d36e70d62..df2cd70259 100644 --- a/tests/unit/test_state.py +++ b/tests/unit/test_state.py @@ -16,8 +16,9 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch from tests.support.mixins import AdaptedConfigurationTestCaseMixin # Import Salt libs -import salt.state import salt.exceptions +from salt.ext import six +import salt.state from salt.utils.odict import OrderedDict, 
DefaultOrderedDict @@ -472,3 +473,28 @@ class TopFileMergeTestCase(TestCase, AdaptedConfigurationTestCaseMixin): expected_merge = DefaultOrderedDict(OrderedDict) self.assertEqual(merged_tops, expected_merge) + + +class StateReturnsTestCase(TestCase): + ''' + TestCase for code handling state returns. + ''' + + def test_comment_lists_are_converted_to_string(self): + ''' + Tests that states returning a list of comments + have that converted to a single string + ''' + ret = { + 'name': 'myresource', + 'result': True, + 'comment': ['comment 1', 'comment 2'], + 'changes': {}, + } + salt.state.State.verify_ret(ret) # sanity check + with self.assertRaises(salt.exceptions.SaltException): + # Not suitable for export as is + salt.state.State.verify_ret_for_export(ret) + salt.state.State.munge_ret_for_export(ret) + self.assertIsInstance(ret[u'comment'], six.string_types) + salt.state.State.verify_ret_for_export(ret) From f6c16935d897085203d17e3d144ac258ef1c2491 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Tue, 5 Sep 2017 15:10:05 -0500 Subject: [PATCH 373/639] Move --showduplicates before repository-packages As of Fedora 26, dnf interprets arguments after "repository-packages" as arguments to the repository-package subcommand, which breaks pkg.list_repo_pkgs on that Fedora release. Moving this argument earlier in the command allows pkg.list_repo_pkgs to work. 
--- salt/modules/yumpkg.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index 14cdf0d899..c675a56608 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -764,8 +764,8 @@ def list_repo_pkgs(*args, **kwargs): _parse_output(out['stdout'], strict=True) else: for repo in repos: - cmd = [_yum(), '--quiet', 'repository-packages', repo, - 'list', '--showduplicates'] + cmd = [_yum(), '--quiet', '--showduplicates', + 'repository-packages', repo, 'list'] # Can't concatenate because args is a tuple, using list.extend() cmd.extend(args) From 433bca14b17d94501a4d94d0a318eeb22a24e63a Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Tue, 5 Sep 2017 15:13:22 -0500 Subject: [PATCH 374/639] Fix KeyError in yumpkg configparser code on Python 3 It looks like the configparser in Python 3 does not insert the `__name__` key in each section. Popping it without a default causes a KeyError on Python 3. This commit fixes that KeyError. 
--- salt/modules/yumpkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index c675a56608..9b2c541953 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -2539,7 +2539,7 @@ def _parse_repo_file(filename): for section in parsed._sections: section_dict = dict(parsed._sections[section]) - section_dict.pop('__name__') + section_dict.pop('__name__', None) config[section] = section_dict # Try to extract leading comments From b09e5b43796caff82d03d53470b35c857ce7be59 Mon Sep 17 00:00:00 2001 From: John Jawed Date: Fri, 1 Sep 2017 18:33:28 -0700 Subject: [PATCH 375/639] Fix #43295, better handling of consul initialization issues --- salt/cache/consul.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/salt/cache/consul.py b/salt/cache/consul.py index b545c96ead..0148c73bf0 100644 --- a/salt/cache/consul.py +++ b/salt/cache/consul.py @@ -4,6 +4,8 @@ Minion data cache plugin for Consul key/value data store. .. versionadded:: 2016.11.2 +:depends: python-consul >= 0.2.0 + It is up to the system administrator to set up and configure the Consul infrastructure. All is needed for this plugin is a working Consul agent with a read-write access to the key-value store. 
@@ -81,8 +83,11 @@ def __virtual__(): 'verify': __opts__.get('consul.verify', True), } - global api - api = consul.Consul(**consul_kwargs) + try: + global api + api = consul.Consul(**consul_kwargs) + except AttributeError: + return (False, "Failed to invoke consul.Consul, please make sure you have python-consul >= 0.2.0 installed") return __virtualname__ From b93392dfb7b2c94f06d5caa320f2e5e5c19330d8 Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 25 Aug 2017 15:43:34 -0400 Subject: [PATCH 376/639] Return a SaltInvocationError when passing incorrect function pattern When calling an incorrectly formatted wheel or runner function, we should be raising a SaltInvocationError with a hint to check function syntax rather than raising an Eauth authentication error. This PR does several things: - Adds a dictionary error return when the function syntax passed through to `utils.minions.CkMinions.spec_check` does not match the expected `module.function` syntax - Handles the return of this new dictionary error (instead of previous `False` return) wherever the spec_check function is called. This is handled up the stack in `master.py` and `masterapi.py`. - Reworks the runner and wheel functions in `master.py` and `masterapi.py` to help make those functions more DRY (see `salt.auth.check_authentication` function). - Adds tests for all of these changes (written before the runner and wheel functions were moved to use the new salt.auth.check_authentication function) to help prevent regressions.
- Adjusts one previous unit test concerning the dictionary error change from spec_check --- salt/auth/__init__.py | 92 ++++++++++-- salt/daemons/masterapi.py | 110 ++++++++------ salt/master.py | 135 ++++++++--------- salt/utils/minions.py | 6 +- tests/unit/daemons/__init__.py | 1 + tests/unit/daemons/test_masterapi.py | 201 ++++++++++++++++++++++++++ tests/unit/test_master.py | 209 +++++++++++++++++++++++++++ tests/unit/utils/test_minions.py | 4 +- 8 files changed, 633 insertions(+), 125 deletions(-) create mode 100644 tests/unit/daemons/__init__.py create mode 100644 tests/unit/daemons/test_masterapi.py create mode 100644 tests/unit/test_master.py diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py index 2b91e3d9e1..396f3b7b20 100644 --- a/salt/auth/__init__.py +++ b/salt/auth/__init__.py @@ -293,29 +293,34 @@ class LoadAuth(object): def authenticate_key(self, load, key): ''' Authenticate a user by the key passed in load. - Return the effective user id (name) if it's differ from the specified one (for sudo). - If the effective user id is the same as passed one return True on success or False on + Return the effective user id (name) if it's different from the specified one (for sudo). + If the effective user id is the same as the passed one, return True on success or False on failure. ''' + error_msg = 'Authentication failure of type "user" occurred.' 
+ if 'key' not in load: + log.warning(error_msg) + return False + auth_key = load.pop('key') if not auth_key: - log.warning('Authentication failure of type "user" occurred.') + log.warning(error_msg) return False if 'user' in load: auth_user = AuthUser(load['user']) if auth_user.is_sudo(): # If someone sudos check to make sure there is no ACL's around their username if auth_key != key[self.opts.get('user', 'root')]: - log.warning('Authentication failure of type "user" occurred.') + log.warning(error_msg) return False return auth_user.sudo_name() elif load['user'] == self.opts.get('user', 'root') or load['user'] == 'root': if auth_key != key[self.opts.get('user', 'root')]: - log.warning('Authentication failure of type "user" occurred.') + log.warning(error_msg) return False elif auth_user.is_running_user(): if auth_key != key.get(load['user']): - log.warning('Authentication failure of type "user" occurred.') + log.warning(error_msg) return False elif auth_key == key.get('root'): pass @@ -323,15 +328,15 @@ class LoadAuth(object): if load['user'] in key: # User is authorised, check key and check perms if auth_key != key[load['user']]: - log.warning('Authentication failure of type "user" occurred.') + log.warning(error_msg) return False return load['user'] else: - log.warning('Authentication failure of type "user" occurred.') + log.warning(error_msg) return False else: if auth_key != key[salt.utils.get_user()]: - log.warning('Authentication failure of type "other" occurred.') + log.warning(error_msg) return False return True @@ -413,6 +418,64 @@ class LoadAuth(object): return auth_list + def check_authentication(self, load, auth_type, key=None, show_username=False): + ''' + .. versionadded:: Oxygen + + Go through various checks to see if the token/eauth/user can be authenticated. 
+ + Returns a dictionary containing the following keys: + + - auth_list + - username + - error + + If an error is encountered, return immediately with the relevant error dictionary + as authentication has failed. Otherwise, return the username and valid auth_list. + ''' + auth_list = [] + username = load.get('username', 'UNKNOWN') + ret = {'auth_list': auth_list, + 'username': username, + 'error': {}} + + # Authenticate + if auth_type == 'token': + token = self.authenticate_token(load) + if not token: + ret['error'] = {'name': 'TokenAuthenticationError', + 'message': 'Authentication failure of type "token" occurred.'} + return ret + + # Update username for token + username = token['name'] + ret['username'] = username + auth_list = self.get_auth_list(load, token=token) + elif auth_type == 'eauth': + if not self.authenticate_eauth(load): + ret['error'] = {'name': 'EauthAuthenticationError', + 'message': 'Authentication failure of type "eauth" occurred for ' + 'user {0}.'.format(username)} + return ret + + auth_list = self.get_auth_list(load) + elif auth_type == 'user': + if not self.authenticate_key(load, key): + if show_username: + msg = 'Authentication failure of type "user" occurred for user {0}.'.format(username) + else: + msg = 'Authentication failure of type "user" occurred' + ret['error'] = {'name': 'UserAuthenticationError', 'message': msg} + return ret + else: + ret['error'] = {'name': 'SaltInvocationError', + 'message': 'Authentication type not supported.'} + return ret + + # Authentication checks passed + ret['auth_list'] = auth_list + return ret + class Authorize(object): ''' @@ -558,6 +621,15 @@ class Authorize(object): load.get('arg', None), load.get('tgt', None), load.get('tgt_type', 'glob')) + + # Handle possible return of dict data structure from any_auth call to + # avoid a stacktrace. As mentioned in PR #43181, this entire class is + # dead code and is marked for removal in Salt Neon. 
But until then, we + # should handle the dict return, which is an error and should return + # False until this class is removed. + if isinstance(good, dict): + return False + if not good: # Accept find_job so the CLI will function cleanly if load.get('fun', '') != 'saltutil.find_job': @@ -570,7 +642,7 @@ class Authorize(object): authorization Note: this will check that the user has at least one right that will let - him execute "load", this does not deal with conflicting rules + the user execute "load", this does not deal with conflicting rules ''' adata = self.auth_data diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index 3b0438f3b8..c7e18340f0 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -1011,30 +1011,33 @@ class LocalFuncs(object): ''' Send a master control function back to the runner system ''' - if 'token' in load: - auth_type = 'token' - err_name = 'TokenAuthenticationError' - token = self.loadauth.authenticate_token(load) - if not token: - return dict(error=dict(name=err_name, - message='Authentication failure of type "token" occurred.')) - username = token['name'] - auth_list = self.loadauth.get_auth_list(load, token) - else: - auth_type = 'eauth' - err_name = 'EauthAuthenticationError' - username = load.get('username', 'UNKNOWN') - if not self.loadauth.authenticate_eauth(load): - return dict(error=dict(name=err_name, - message=('Authentication failure of type "eauth" occurred ' - 'for user {0}.').format(username))) - auth_list = self.loadauth.get_auth_list(load) + # All runner opts pass through eauth + auth_type, err_name, key = self._prep_auth_info(load) - if not self.ckminions.runner_check(auth_list, load['fun'], load['kwarg']): + # Authenticate + auth_check = self.loadauth.check_authentication(load, auth_type) + error = auth_check.get('error') + + if error: + # Authentication error occurred: do not continue. 
+ return dict(error=error) + + # Authorize + runner_check = self.ckminions.runner_check( + auth_check.get('auth_list', []), + load['fun'], + load['kwarg'] + ) + username = auth_check.get('username') + if not runner_check: return dict(error=dict(name=err_name, message=('Authentication failure of type "{0}" occurred ' 'for user {1}.').format(auth_type, username))) + elif isinstance(runner_check, dict) and 'error' in runner_check: + # A dictionary with an error name/message was handled by ckminions.runner_check + return runner_check + # Authorized. Do the job! try: fun = load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) @@ -1053,38 +1056,36 @@ class LocalFuncs(object): Send a master control function back to the wheel system ''' # All wheel ops pass through eauth - if 'token' in load: - auth_type = 'token' - err_name = 'TokenAuthenticationError' - token = self.loadauth.authenticate_token(load) - if not token: - return dict(error=dict(name=err_name, - message='Authentication failure of type "token" occurred.')) - username = token['name'] - auth_list = self.loadauth.get_auth_list(load, token) - elif 'eauth' in load: - auth_type = 'eauth' - err_name = 'EauthAuthenticationError' - username = load.get('username', 'UNKNOWN') - if not self.loadauth.authenticate_eauth(load): - return dict(error=dict(name=err_name, - message=('Authentication failure of type "eauth" occurred for ' - 'user {0}.').format(username))) - auth_list = self.loadauth.get_auth_list(load) - else: - auth_type = 'user' - err_name = 'UserAuthenticationError' - username = load.get('username', 'UNKNOWN') - if not self.loadauth.authenticate_key(load, self.key): - return dict(error=dict(name=err_name, - message=('Authentication failure of type "user" occurred for ' - 'user {0}.').format(username))) + auth_type, err_name, key = self._prep_auth_info(load) + # Authenticate + auth_check = self.loadauth.check_authentication( + load, + auth_type, + key=key, + show_username=True + ) + error = 
auth_check.get('error') + + if error: + # Authentication error occurred: do not continue. + return dict(error=error) + + # Authorize + username = auth_check.get('username') if auth_type != 'user': - if not self.ckminions.wheel_check(auth_list, load['fun'], load['kwarg']): + wheel_check = self.ckminions.wheel_check( + auth_check.get('auth_list', []), + load['fun'], + load['kwarg'] + ) + if not wheel_check: return dict(error=dict(name=err_name, message=('Authentication failure of type "{0}" occurred for ' 'user {1}.').format(auth_type, username))) + elif isinstance(wheel_check, dict) and 'error' in wheel_check: + # A dictionary with an error name/message was handled by ckminions.wheel_check + return wheel_check # Authenticated. Do the job. jid = salt.utils.jid.gen_jid() @@ -1104,7 +1105,7 @@ class LocalFuncs(object): 'data': data} except Exception as exc: log.error('Exception occurred while ' - 'introspecting {0}: {1}'.format(fun, exc)) + 'introspecting {0}: {1}'.format(fun, exc)) data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format( fun, exc.__class__.__name__, @@ -1368,3 +1369,18 @@ class LocalFuncs(object): }, 'pub': pub_load } + + def _prep_auth_info(self, load): + key = None + if 'token' in load: + auth_type = 'token' + err_name = 'TokenAuthenticationError' + elif 'eauth' in load: + auth_type = 'eauth' + err_name = 'EauthAuthenticationError' + else: + auth_type = 'user' + err_name = 'UserAuthenticationError' + key = self.key + + return auth_type, err_name, key diff --git a/salt/master.py b/salt/master.py index 9869ef37ed..15d329bd77 100644 --- a/salt/master.py +++ b/salt/master.py @@ -1667,44 +1667,36 @@ class ClearFuncs(object): Send a master control function back to the runner system ''' # All runner ops pass through eauth - if u'token' in clear_load: - # Authenticate - token = self.loadauth.authenticate_token(clear_load) + auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load) - if not token: - return 
dict(error=dict(name=u'TokenAuthenticationError', - message=u'Authentication failure of type "token" occurred.')) + # Authenticate + auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key) + error = auth_check.get('error') - # Authorize - auth_list = self.loadauth.get_auth_list(clear_load, token) + if error: + # Authentication error occurred: do not continue. + return dict(error=error) - if not self.ckminions.runner_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})): - return dict(error=dict(name=u'TokenAuthenticationError', - message=(u'Authentication failure of type "token" occurred for ' - u'user {0}.').format(token[u'name']))) - clear_load.pop(u'token') - username = token[u'name'] - elif u'eauth' in clear_load: - if not self.loadauth.authenticate_eauth(clear_load): - return dict(error=dict(name=u'EauthAuthenticationError', - message=(u'Authentication failure of type "eauth" occurred for ' - u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN')))) + # Authorize + username = auth_check.get('username') + if auth_type != u'user': + runner_check = self.ckminions.runner_check( + auth_check.get('auth_list', []), + clear_load[u'fun'], + clear_load.get(u'kwarg', {}) + ) + if not runner_check: + return dict(error=dict(name=err_name, + message=(u'Authentication failure of type "{0}" occurred for ' + u'user {1}.').format(auth_type, username))) + elif isinstance(runner_check, dict) and u'error' in runner_check: + # A dictionary with an error name/message was handled by ckminions.runner_check + return runner_check - auth_list = self.loadauth.get_auth_list(clear_load) - if not self.ckminions.runner_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})): - return dict(error=dict(name=u'EauthAuthenticationError', - message=(u'Authentication failure of type "eauth" occurred for ' - u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN')))) - - # No error occurred, consume the password from the clear_load if - # 
passed - username = clear_load.pop(u'username', u'UNKNOWN') - clear_load.pop(u'password', None) + # No error occurred, consume sensitive settings from the clear_load if passed. + for item in sensitive_load_keys: + clear_load.pop(item, None) else: - if not self.loadauth.authenticate_key(clear_load, self.key): - return dict(error=dict(name=u'UserAuthenticationError', - message=u'Authentication failure of type "user" occurred')) - if u'user' in clear_load: username = clear_load[u'user'] if salt.auth.AuthUser(username).is_sudo(): @@ -1730,43 +1722,36 @@ class ClearFuncs(object): Send a master control function back to the wheel system ''' # All wheel ops pass through eauth - username = None - if u'token' in clear_load: - # Authenticate - token = self.loadauth.authenticate_token(clear_load) - if not token: - return dict(error=dict(name=u'TokenAuthenticationError', - message=u'Authentication failure of type "token" occurred.')) + auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load) - # Authorize - auth_list = self.loadauth.get_auth_list(clear_load, token) - if not self.ckminions.wheel_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})): - return dict(error=dict(name=u'TokenAuthenticationError', - message=(u'Authentication failure of type "token" occurred for ' - u'user {0}.').format(token[u'name']))) - clear_load.pop(u'token') - username = token[u'name'] - elif u'eauth' in clear_load: - if not self.loadauth.authenticate_eauth(clear_load): - return dict(error=dict(name=u'EauthAuthenticationError', - message=(u'Authentication failure of type "eauth" occurred for ' - u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN')))) + # Authenticate + auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key) + error = auth_check.get('error') - auth_list = self.loadauth.get_auth_list(clear_load) - if not self.ckminions.wheel_check(auth_list, clear_load[u'fun'], clear_load.get(u'kwarg', {})): - return 
dict(error=dict(name=u'EauthAuthenticationError', - message=(u'Authentication failure of type "eauth" occurred for ' - u'user {0}.').format(clear_load.get(u'username', u'UNKNOWN')))) + if error: + # Authentication error occurred: do not continue. + return dict(error=error) - # No error occurred, consume the password from the clear_load if - # passed - clear_load.pop(u'password', None) - username = clear_load.pop(u'username', u'UNKNOWN') + # Authorize + username = auth_check.get('username') + if auth_type != u'user': + wheel_check = self.ckminions.wheel_check( + auth_check.get('auth_list', []), + clear_load[u'fun'], + clear_load.get(u'kwarg', {}) + ) + if not wheel_check: + return dict(error=dict(name=err_name, + message=(u'Authentication failure of type "{0}" occurred for ' + u'user {1}.').format(auth_type, username))) + elif isinstance(wheel_check, dict) and u'error' in wheel_check: + # A dictionary with an error name/message was handled by ckminions.wheel_check + return wheel_check + + # No error occurred, consume sensitive settings from the clear_load if passed. 
+ for item in sensitive_load_keys: + clear_load.pop(item, None) else: - if not self.loadauth.authenticate_key(clear_load, self.key): - return dict(error=dict(name=u'UserAuthenticationError', - message=u'Authentication failure of type "user" occurred')) - if u'user' in clear_load: username = clear_load[u'user'] if salt.auth.AuthUser(username).is_sudo(): @@ -1959,6 +1944,24 @@ class ClearFuncs(object): } } + def _prep_auth_info(self, clear_load): + sensitive_load_keys = [] + key = None + if u'token' in clear_load: + auth_type = u'token' + err_name = u'TokenAuthenticationError' + sensitive_load_keys = [u'token'] + elif u'eauth' in clear_load: + auth_type = u'eauth' + err_name = u'EauthAuthenticationError' + sensitive_load_keys = [u'username', u'password'] + else: + auth_type = u'user' + err_name = u'UserAuthenticationError' + key = self.key + + return auth_type, err_name, key, sensitive_load_keys + def _prep_jid(self, clear_load, extra): ''' Return a jid for this publication diff --git a/salt/utils/minions.py b/salt/utils/minions.py index 43f875dc24..ff3d0cbcc7 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -956,7 +956,11 @@ class CkMinions(object): if form != 'cloud': comps = fun.split('.') if len(comps) != 2: - return False + # Hint at a syntax error when command is passed improperly, + # rather than returning an authentication error of some kind. + # See Issue #21969 for more information. 
+ return {'error': {'name': 'SaltInvocationError', + 'message': 'A command invocation error occurred: Check syntax.'}} mod_name = comps[0] fun_name = comps[1] else: diff --git a/tests/unit/daemons/__init__.py b/tests/unit/daemons/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/tests/unit/daemons/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/tests/unit/daemons/test_masterapi.py b/tests/unit/daemons/test_masterapi.py new file mode 100644 index 0000000000..29ea37ecd4 --- /dev/null +++ b/tests/unit/daemons/test_masterapi.py @@ -0,0 +1,201 @@ +# -*- coding: utf-8 -*- + +# Import Python libs +from __future__ import absolute_import + +# Import Salt libs +import salt.config +import salt.daemons.masterapi as masterapi + +# Import Salt Testing Libs +from tests.support.unit import TestCase +from tests.support.mock import ( + patch, + MagicMock, +) + + +class LocalFuncsTestCase(TestCase): + ''' + TestCase for salt.daemons.masterapi.LocalFuncs class + ''' + + def setUp(self): + opts = salt.config.master_config(None) + self.local_funcs = masterapi.LocalFuncs(opts, 'test-key') + + def test_runner_token_not_authenticated(self): + ''' + Asserts that a TokenAuthenticationError is returned when the token can't authenticate. + ''' + mock_ret = {u'error': {u'name': u'TokenAuthenticationError', + u'message': u'Authentication failure of type "token" occurred.'}} + ret = self.local_funcs.runner({u'token': u'asdfasdfasdfasdf'}) + self.assertDictEqual(mock_ret, ret) + + def test_runner_token_authorization_error(self): + ''' + Asserts that a TokenAuthenticationError is returned when the token authenticates, but is + not authorized. 
+ ''' + token = u'asdfasdfasdfasdf' + load = {u'token': token, u'fun': u'test.arg', u'kwarg': {}} + mock_token = {u'token': token, u'eauth': u'foo', u'name': u'test'} + mock_ret = {u'error': {u'name': u'TokenAuthenticationError', + u'message': u'Authentication failure of type "token" occurred ' + u'for user test.'}} + + with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.local_funcs.runner(load) + + self.assertDictEqual(mock_ret, ret) + + def test_runner_token_salt_invocation_error(self): + ''' + Asserts that a SaltInvocationError is returned when the token authenticates, but the + command is malformed. + ''' + token = u'asdfasdfasdfasdf' + load = {u'token': token, u'fun': u'badtestarg', u'kwarg': {}} + mock_token = {u'token': token, u'eauth': u'foo', u'name': u'test'} + mock_ret = {u'error': {u'name': u'SaltInvocationError', + u'message': u'A command invocation error occurred: Check syntax.'}} + + with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.local_funcs.runner(load) + + self.assertDictEqual(mock_ret, ret) + + def test_runner_eauth_not_authenticated(self): + ''' + Asserts that an EauthAuthenticationError is returned when the user can't authenticate. + ''' + mock_ret = {u'error': {u'name': u'EauthAuthenticationError', + u'message': u'Authentication failure of type "eauth" occurred for ' + u'user UNKNOWN.'}} + ret = self.local_funcs.runner({u'eauth': u'foo'}) + self.assertDictEqual(mock_ret, ret) + + def test_runner_eauth_authorization_error(self): + ''' + Asserts that an EauthAuthenticationError is returned when the user authenticates, but is + not authorized. 
+ ''' + load = {u'eauth': u'foo', u'username': u'test', u'fun': u'test.arg', u'kwarg': {}} + mock_ret = {u'error': {u'name': u'EauthAuthenticationError', + u'message': u'Authentication failure of type "eauth" occurred for ' + u'user test.'}} + with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.local_funcs.runner(load) + + self.assertDictEqual(mock_ret, ret) + + def test_runner_eauth_salt_invocation_errpr(self): + ''' + Asserts that an EauthAuthenticationError is returned when the user authenticates, but the + command is malformed. + ''' + load = {u'eauth': u'foo', u'username': u'test', u'fun': u'bad.test.arg.func', u'kwarg': {}} + mock_ret = {u'error': {u'name': u'SaltInvocationError', + u'message': u'A command invocation error occurred: Check syntax.'}} + with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.local_funcs.runner(load) + + self.assertDictEqual(mock_ret, ret) + + def test_wheel_token_not_authenticated(self): + ''' + Asserts that a TokenAuthenticationError is returned when the token can't authenticate. + ''' + mock_ret = {u'error': {u'name': u'TokenAuthenticationError', + u'message': u'Authentication failure of type "token" occurred.'}} + ret = self.local_funcs.wheel({u'token': u'asdfasdfasdfasdf'}) + self.assertDictEqual(mock_ret, ret) + + def test_wheel_token_authorization_error(self): + ''' + Asserts that a TokenAuthenticationError is returned when the token authenticates, but is + not authorized. 
+ ''' + token = u'asdfasdfasdfasdf' + load = {u'token': token, u'fun': u'test.arg', u'kwarg': {}} + mock_token = {u'token': token, u'eauth': u'foo', u'name': u'test'} + mock_ret = {u'error': {u'name': u'TokenAuthenticationError', + u'message': u'Authentication failure of type "token" occurred ' + u'for user test.'}} + + with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.local_funcs.wheel(load) + + self.assertDictEqual(mock_ret, ret) + + def test_wheel_token_salt_invocation_error(self): + ''' + Asserts that a SaltInvocationError is returned when the token authenticates, but the + command is malformed. + ''' + token = u'asdfasdfasdfasdf' + load = {u'token': token, u'fun': u'badtestarg', u'kwarg': {}} + mock_token = {u'token': token, u'eauth': u'foo', u'name': u'test'} + mock_ret = {u'error': {u'name': u'SaltInvocationError', + u'message': u'A command invocation error occurred: Check syntax.'}} + + with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.local_funcs.wheel(load) + + self.assertDictEqual(mock_ret, ret) + + def test_wheel_eauth_not_authenticated(self): + ''' + Asserts that an EauthAuthenticationError is returned when the user can't authenticate. + ''' + mock_ret = {u'error': {u'name': u'EauthAuthenticationError', + u'message': u'Authentication failure of type "eauth" occurred for ' + u'user UNKNOWN.'}} + ret = self.local_funcs.wheel({u'eauth': u'foo'}) + self.assertDictEqual(mock_ret, ret) + + def test_wheel_eauth_authorization_error(self): + ''' + Asserts that an EauthAuthenticationError is returned when the user authenticates, but is + not authorized. 
+ ''' + load = {u'eauth': u'foo', u'username': u'test', u'fun': u'test.arg', u'kwarg': {}} + mock_ret = {u'error': {u'name': u'EauthAuthenticationError', + u'message': u'Authentication failure of type "eauth" occurred for ' + u'user test.'}} + with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.local_funcs.wheel(load) + + self.assertDictEqual(mock_ret, ret) + + def test_wheel_eauth_salt_invocation_errpr(self): + ''' + Asserts that an EauthAuthenticationError is returned when the user authenticates, but the + command is malformed. + ''' + load = {u'eauth': u'foo', u'username': u'test', u'fun': u'bad.test.arg.func', u'kwarg': {}} + mock_ret = {u'error': {u'name': u'SaltInvocationError', + u'message': u'A command invocation error occurred: Check syntax.'}} + with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.local_funcs.wheel(load) + + self.assertDictEqual(mock_ret, ret) + + def test_wheel_user_not_authenticated(self): + ''' + Asserts that an UserAuthenticationError is returned when the user can't authenticate. 
+ ''' + mock_ret = {u'error': {u'name': u'UserAuthenticationError', + u'message': u'Authentication failure of type "user" occurred for ' + u'user UNKNOWN.'}} + ret = self.local_funcs.wheel({}) + self.assertDictEqual(mock_ret, ret) diff --git a/tests/unit/test_master.py b/tests/unit/test_master.py new file mode 100644 index 0000000000..c663d2c45c --- /dev/null +++ b/tests/unit/test_master.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- + +# Import Python libs +from __future__ import absolute_import + +# Import Salt libs +import salt.config +import salt.master + +# Import Salt Testing Libs +from tests.support.unit import TestCase +from tests.support.mock import ( + patch, + MagicMock, +) + + +class ClearFuncsTestCase(TestCase): + ''' + TestCase for salt.master.ClearFuncs class + ''' + + def setUp(self): + opts = salt.config.master_config(None) + self.clear_funcs = salt.master.ClearFuncs(opts, {}) + + def test_runner_token_not_authenticated(self): + ''' + Asserts that a TokenAuthenticationError is returned when the token can't authenticate. + ''' + mock_ret = {u'error': {u'name': u'TokenAuthenticationError', + u'message': u'Authentication failure of type "token" occurred.'}} + ret = self.clear_funcs.runner({u'token': u'asdfasdfasdfasdf'}) + self.assertDictEqual(mock_ret, ret) + + def test_runner_token_authorization_error(self): + ''' + Asserts that a TokenAuthenticationError is returned when the token authenticates, but is + not authorized. 
+ ''' + token = u'asdfasdfasdfasdf' + clear_load = {u'token': token, u'fun': u'test.arg'} + mock_token = {u'token': token, u'eauth': u'foo', u'name': u'test'} + mock_ret = {u'error': {u'name': u'TokenAuthenticationError', + u'message': u'Authentication failure of type "token" occurred ' + u'for user test.'}} + + with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.clear_funcs.runner(clear_load) + + self.assertDictEqual(mock_ret, ret) + + def test_runner_token_salt_invocation_error(self): + ''' + Asserts that a SaltInvocationError is returned when the token authenticates, but the + command is malformed. + ''' + token = u'asdfasdfasdfasdf' + clear_load = {u'token': token, u'fun': u'badtestarg'} + mock_token = {u'token': token, u'eauth': u'foo', u'name': u'test'} + mock_ret = {u'error': {u'name': u'SaltInvocationError', + u'message': u'A command invocation error occurred: Check syntax.'}} + + with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.clear_funcs.runner(clear_load) + + self.assertDictEqual(mock_ret, ret) + + def test_runner_eauth_not_authenticated(self): + ''' + Asserts that an EauthAuthenticationError is returned when the user can't authenticate. + ''' + mock_ret = {u'error': {u'name': u'EauthAuthenticationError', + u'message': u'Authentication failure of type "eauth" occurred for ' + u'user UNKNOWN.'}} + ret = self.clear_funcs.runner({u'eauth': u'foo'}) + self.assertDictEqual(mock_ret, ret) + + def test_runner_eauth_authorization_error(self): + ''' + Asserts that an EauthAuthenticationError is returned when the user authenticates, but is + not authorized. 
+ ''' + clear_load = {u'eauth': u'foo', u'username': u'test', u'fun': u'test.arg'} + mock_ret = {u'error': {u'name': u'EauthAuthenticationError', + u'message': u'Authentication failure of type "eauth" occurred for ' + u'user test.'}} + with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.clear_funcs.runner(clear_load) + + self.assertDictEqual(mock_ret, ret) + + def test_runner_eauth_salt_invocation_errpr(self): + ''' + Asserts that an EauthAuthenticationError is returned when the user authenticates, but the + command is malformed. + ''' + clear_load = {u'eauth': u'foo', u'username': u'test', u'fun': u'bad.test.arg.func'} + mock_ret = {u'error': {u'name': u'SaltInvocationError', + u'message': u'A command invocation error occurred: Check syntax.'}} + with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.clear_funcs.runner(clear_load) + + self.assertDictEqual(mock_ret, ret) + + def test_runner_user_not_authenticated(self): + ''' + Asserts that an UserAuthenticationError is returned when the user can't authenticate. + ''' + mock_ret = {u'error': {u'name': u'UserAuthenticationError', + u'message': u'Authentication failure of type "user" occurred'}} + ret = self.clear_funcs.runner({}) + self.assertDictEqual(mock_ret, ret) + + def test_wheel_token_not_authenticated(self): + ''' + Asserts that a TokenAuthenticationError is returned when the token can't authenticate. 
+ ''' + mock_ret = {u'error': {u'name': u'TokenAuthenticationError', + u'message': u'Authentication failure of type "token" occurred.'}} + ret = self.clear_funcs.wheel({u'token': u'asdfasdfasdfasdf'}) + self.assertDictEqual(mock_ret, ret) + + def test_wheel_token_authorization_error(self): + ''' + Asserts that a TokenAuthenticationError is returned when the token authenticates, but is + not authorized. + ''' + token = u'asdfasdfasdfasdf' + clear_load = {u'token': token, u'fun': u'test.arg'} + mock_token = {u'token': token, u'eauth': u'foo', u'name': u'test'} + mock_ret = {u'error': {u'name': u'TokenAuthenticationError', + u'message': u'Authentication failure of type "token" occurred ' + u'for user test.'}} + + with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.clear_funcs.wheel(clear_load) + + self.assertDictEqual(mock_ret, ret) + + def test_wheel_token_salt_invocation_error(self): + ''' + Asserts that a SaltInvocationError is returned when the token authenticates, but the + command is malformed. + ''' + token = u'asdfasdfasdfasdf' + clear_load = {u'token': token, u'fun': u'badtestarg'} + mock_token = {u'token': token, u'eauth': u'foo', u'name': u'test'} + mock_ret = {u'error': {u'name': u'SaltInvocationError', + u'message': u'A command invocation error occurred: Check syntax.'}} + + with patch('salt.auth.LoadAuth.authenticate_token', MagicMock(return_value=mock_token)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.clear_funcs.wheel(clear_load) + + self.assertDictEqual(mock_ret, ret) + + def test_wheel_eauth_not_authenticated(self): + ''' + Asserts that an EauthAuthenticationError is returned when the user can't authenticate. 
+ ''' + mock_ret = {u'error': {u'name': u'EauthAuthenticationError', + u'message': u'Authentication failure of type "eauth" occurred for ' + u'user UNKNOWN.'}} + ret = self.clear_funcs.wheel({u'eauth': u'foo'}) + self.assertDictEqual(mock_ret, ret) + + def test_wheel_eauth_authorization_error(self): + ''' + Asserts that an EauthAuthenticationError is returned when the user authenticates, but is + not authorized. + ''' + clear_load = {u'eauth': u'foo', u'username': u'test', u'fun': u'test.arg'} + mock_ret = {u'error': {u'name': u'EauthAuthenticationError', + u'message': u'Authentication failure of type "eauth" occurred for ' + u'user test.'}} + with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.clear_funcs.wheel(clear_load) + + self.assertDictEqual(mock_ret, ret) + + def test_wheel_eauth_salt_invocation_errpr(self): + ''' + Asserts that an EauthAuthenticationError is returned when the user authenticates, but the + command is malformed. + ''' + clear_load = {u'eauth': u'foo', u'username': u'test', u'fun': u'bad.test.arg.func'} + mock_ret = {u'error': {u'name': u'SaltInvocationError', + u'message': u'A command invocation error occurred: Check syntax.'}} + with patch('salt.auth.LoadAuth.authenticate_eauth', MagicMock(return_value=True)), \ + patch('salt.auth.LoadAuth.get_auth_list', MagicMock(return_value=[])): + ret = self.clear_funcs.wheel(clear_load) + + self.assertDictEqual(mock_ret, ret) + + def test_wheel_user_not_authenticated(self): + ''' + Asserts that an UserAuthenticationError is returned when the user can't authenticate. 
+ ''' + mock_ret = {u'error': {u'name': u'UserAuthenticationError', + u'message': u'Authentication failure of type "user" occurred'}} + ret = self.clear_funcs.wheel({}) + self.assertDictEqual(mock_ret, ret) diff --git a/tests/unit/utils/test_minions.py b/tests/unit/utils/test_minions.py index d6e4df652b..fbec4e2c8b 100644 --- a/tests/unit/utils/test_minions.py +++ b/tests/unit/utils/test_minions.py @@ -57,7 +57,9 @@ class CkMinionsTestCase(TestCase): ret = self.ckminions.spec_check(auth_list, 'test.arg', {}, 'wheel') self.assertFalse(ret) ret = self.ckminions.spec_check(auth_list, 'testarg', {}, 'runner') - self.assertFalse(ret) + mock_ret = {'error': {'name': 'SaltInvocationError', + 'message': 'A command invocation error occurred: Check syntax.'}} + self.assertDictEqual(mock_ret, ret) # Test spec in plural form auth_list = ['@runners'] From ca0b41f74a6b34a290004dfc7aa7c8ec14ce37c5 Mon Sep 17 00:00:00 2001 From: Colton Myers Date: Tue, 5 Sep 2017 15:35:06 -0600 Subject: [PATCH 377/639] Document lack of support for multiple default gateways --- salt/grains/core.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/grains/core.py b/salt/grains/core.py index a9f2f4e19d..be3f318863 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -2434,6 +2434,9 @@ def default_gateway(): If the `ip` command is unavailable, no grains will be populated. + Currently does not support multiple default gateways. The grains will be + set to the first default gateway found. 
+ List of grains: ip4_gw: True # ip/True/False if default ipv4 gateway @@ -2463,5 +2466,3 @@ def default_gateway(): except Exception as exc: pass return grains - -# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 From 79c4ff77412e8ef8f04dd7ac3403f78ba68b7562 Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 5 Sep 2017 17:46:40 -0400 Subject: [PATCH 378/639] Simplify check for auth_key in auth.authenticate_key --- salt/auth/__init__.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py index 396f3b7b20..ee9cf88c5a 100644 --- a/salt/auth/__init__.py +++ b/salt/auth/__init__.py @@ -298,14 +298,11 @@ class LoadAuth(object): failure. ''' error_msg = 'Authentication failure of type "user" occurred.' - if 'key' not in load: + auth_key = load.pop('key', None) + if auth_key is None: log.warning(error_msg) return False - auth_key = load.pop('key') - if not auth_key: - log.warning(error_msg) - return False if 'user' in load: auth_user = AuthUser(load['user']) if auth_user.is_sudo(): From f438e707b89663a60c53847ea73e6d4282edeb48 Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 5 Sep 2017 18:07:03 -0400 Subject: [PATCH 379/639] Use `{}` instead of `dict()` in error returns --- salt/daemons/masterapi.py | 24 ++++++++++++------------ salt/master.py | 22 +++++++++++----------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index c7e18340f0..7b2163bd0b 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -1020,7 +1020,7 @@ class LocalFuncs(object): if error: # Authentication error occurred: do not continue. 
- return dict(error=error) + return {'error': error} # Authorize runner_check = self.ckminions.runner_check( @@ -1030,9 +1030,9 @@ class LocalFuncs(object): ) username = auth_check.get('username') if not runner_check: - return dict(error=dict(name=err_name, - message=('Authentication failure of type "{0}" occurred ' - 'for user {1}.').format(auth_type, username))) + return {'error': {'name': err_name, + 'message': 'Authentication failure of type "{0}" occurred ' + 'for user {1}.'.format(auth_type, username)}} elif isinstance(runner_check, dict) and 'error' in runner_check: # A dictionary with an error name/message was handled by ckminions.runner_check return runner_check @@ -1046,10 +1046,10 @@ class LocalFuncs(object): username) except Exception as exc: log.error('Exception occurred while ' - 'introspecting {0}: {1}'.format(fun, exc)) - return dict(error=dict(name=exc.__class__.__name__, - args=exc.args, - message=str(exc))) + 'introspecting {0}: {1}'.format(fun, exc)) + return {'error': {'name': exc.__class__.__name__, + 'args': exc.args, + 'message': str(exc)}} def wheel(self, load): ''' @@ -1069,7 +1069,7 @@ class LocalFuncs(object): if error: # Authentication error occurred: do not continue. 
- return dict(error=error) + return {'error': error} # Authorize username = auth_check.get('username') @@ -1080,9 +1080,9 @@ class LocalFuncs(object): load['kwarg'] ) if not wheel_check: - return dict(error=dict(name=err_name, - message=('Authentication failure of type "{0}" occurred for ' - 'user {1}.').format(auth_type, username))) + return {'error': {'name': err_name, + 'message': 'Authentication failure of type "{0}" occurred for ' + 'user {1}.'.format(auth_type, username)}} elif isinstance(wheel_check, dict) and 'error' in wheel_check: # A dictionary with an error name/message was handled by ckminions.wheel_check return wheel_check diff --git a/salt/master.py b/salt/master.py index 15d329bd77..8f821a9fd0 100644 --- a/salt/master.py +++ b/salt/master.py @@ -1675,7 +1675,7 @@ class ClearFuncs(object): if error: # Authentication error occurred: do not continue. - return dict(error=error) + return {'error': error} # Authorize username = auth_check.get('username') @@ -1686,9 +1686,9 @@ class ClearFuncs(object): clear_load.get(u'kwarg', {}) ) if not runner_check: - return dict(error=dict(name=err_name, - message=(u'Authentication failure of type "{0}" occurred for ' - u'user {1}.').format(auth_type, username))) + return {'error': {'name': err_name, + 'message': u'Authentication failure of type "{0}" occurred for ' + u'user {1}.'.format(auth_type, username)}} elif isinstance(runner_check, dict) and u'error' in runner_check: # A dictionary with an error name/message was handled by ckminions.runner_check return runner_check @@ -1713,9 +1713,9 @@ class ClearFuncs(object): username) except Exception as exc: log.error(u'Exception occurred while introspecting %s: %s', fun, exc) - return dict(error=dict(name=exc.__class__.__name__, - args=exc.args, - message=str(exc))) + return {'error': {'name': exc.__class__.__name__, + 'args': exc.args, + 'message': str(exc)}} def wheel(self, clear_load): ''' @@ -1730,7 +1730,7 @@ class ClearFuncs(object): if error: # Authentication 
error occurred: do not continue. - return dict(error=error) + return {'error': error} # Authorize username = auth_check.get('username') @@ -1741,9 +1741,9 @@ class ClearFuncs(object): clear_load.get(u'kwarg', {}) ) if not wheel_check: - return dict(error=dict(name=err_name, - message=(u'Authentication failure of type "{0}" occurred for ' - u'user {1}.').format(auth_type, username))) + return {'error': {'name': err_name, + 'message': u'Authentication failure of type "{0}" occurred for ' + u'user {1}.'.format(auth_type, username)}} elif isinstance(wheel_check, dict) and u'error' in wheel_check: # A dictionary with an error name/message was handled by ckminions.wheel_check return wheel_check From b4c0516cd5d370f60a682a64d2a3802adaea9c13 Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 5 Sep 2017 18:14:22 -0400 Subject: [PATCH 380/639] Change strings in master.py to have `u''` --- salt/master.py | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/salt/master.py b/salt/master.py index 8f821a9fd0..61d0fb15ea 100644 --- a/salt/master.py +++ b/salt/master.py @@ -1671,23 +1671,23 @@ class ClearFuncs(object): # Authenticate auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key) - error = auth_check.get('error') + error = auth_check.get(u'error') if error: # Authentication error occurred: do not continue. 
- return {'error': error} + return {u'error': error} # Authorize - username = auth_check.get('username') + username = auth_check.get(u'username') if auth_type != u'user': runner_check = self.ckminions.runner_check( - auth_check.get('auth_list', []), + auth_check.get(u'auth_list', []), clear_load[u'fun'], clear_load.get(u'kwarg', {}) ) if not runner_check: - return {'error': {'name': err_name, - 'message': u'Authentication failure of type "{0}" occurred for ' + return {u'error': {u'name': err_name, + u'message': u'Authentication failure of type "{0}" occurred for ' u'user {1}.'.format(auth_type, username)}} elif isinstance(runner_check, dict) and u'error' in runner_check: # A dictionary with an error name/message was handled by ckminions.runner_check @@ -1713,9 +1713,9 @@ class ClearFuncs(object): username) except Exception as exc: log.error(u'Exception occurred while introspecting %s: %s', fun, exc) - return {'error': {'name': exc.__class__.__name__, - 'args': exc.args, - 'message': str(exc)}} + return {u'error': {u'name': exc.__class__.__name__, + u'args': exc.args, + u'message': str(exc)}} def wheel(self, clear_load): ''' @@ -1726,24 +1726,24 @@ class ClearFuncs(object): # Authenticate auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key) - error = auth_check.get('error') + error = auth_check.get(u'error') if error: # Authentication error occurred: do not continue. 
- return {'error': error} + return {u'error': error} # Authorize - username = auth_check.get('username') + username = auth_check.get(u'username') if auth_type != u'user': wheel_check = self.ckminions.wheel_check( - auth_check.get('auth_list', []), + auth_check.get(u'auth_list', []), clear_load[u'fun'], clear_load.get(u'kwarg', {}) ) if not wheel_check: - return {'error': {'name': err_name, - 'message': u'Authentication failure of type "{0}" occurred for ' - u'user {1}.'.format(auth_type, username)}} + return {u'error': {u'name': err_name, + u'message': u'Authentication failure of type "{0}" occurred for ' + u'user {1}.'.format(auth_type, username)}} elif isinstance(wheel_check, dict) and u'error' in wheel_check: # A dictionary with an error name/message was handled by ckminions.wheel_check return wheel_check From 85997391f1cd9f15280ebdc6d50f343ab84c6266 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 5 Sep 2017 16:32:43 -0600 Subject: [PATCH 381/639] Is this handled the same on Linux and Windows --- salt/modules/ini_manage.py | 9 ++- tests/unit/modules/test_ini_manage.py | 112 ++++++++++++++------------ 2 files changed, 64 insertions(+), 57 deletions(-) diff --git a/salt/modules/ini_manage.py b/salt/modules/ini_manage.py index c553c97992..53ff8488e0 100644 --- a/salt/modules/ini_manage.py +++ b/salt/modules/ini_manage.py @@ -318,17 +318,18 @@ class _Section(OrderedDict): yield '{0}[{1}]{0}'.format(os.linesep, self.name) sections_dict = OrderedDict() for name, value in six.iteritems(self): + # Handle Comment Lines if com_regx.match(name): yield '{0}{1}'.format(value, os.linesep) + # Handle Sections elif isinstance(value, _Section): sections_dict.update({name: value}) + # Key / Value pairs + # Adds spaces between the separator else: yield '{0}{1}{2}{3}'.format( name, - ( - ' {0} '.format(self.sep) if self.sep != ' ' - else self.sep - ), + ' {0} '.format(self.sep) if self.sep != ' ' else self.sep, value, os.linesep ) diff --git 
a/tests/unit/modules/test_ini_manage.py b/tests/unit/modules/test_ini_manage.py index 6edce9b602..01f9a481d8 100644 --- a/tests/unit/modules/test_ini_manage.py +++ b/tests/unit/modules/test_ini_manage.py @@ -15,38 +15,42 @@ import salt.modules.ini_manage as ini class IniManageTestCase(TestCase): - TEST_FILE_CONTENT = '''\ -# Comment on the first line - -# First main option -option1=main1 - -# Second main option -option2=main2 - - -[main] -# Another comment -test1=value 1 - -test2=value 2 - -[SectionB] -test1=value 1B - -# Blank line should be above -test3 = value 3B - -[SectionC] -# The following option is empty -empty_option= -''' + TEST_FILE_CONTENT = os.linesep.join([ + '# Comment on the first line', + '', + '# First main option', + 'option1 = main1', + '', + '# Second main option', + 'option2 = main2', + '', + '', + '[main]', + '# Another comment', + 'test1 = value 1', + '', + 'test2 = value 2', + '', + '[SectionB]', + 'test1 = value 1B', + '', + '# Blank line should be above', + 'test3 = value 3B', + '', + '[SectionC]', + '# The following option is empty', + 'empty_option =' + ]) + print('*' * 68) + print('original') + print(repr(salt.utils.to_bytes(TEST_FILE_CONTENT))) + print('*' * 68) maxDiff = None def setUp(self): - self.tfile = tempfile.NamedTemporaryFile(delete=False, mode='w+') - self.tfile.write(self.TEST_FILE_CONTENT) + self.tfile = tempfile.NamedTemporaryFile(delete=False, mode='w+b') + self.tfile.write(salt.utils.to_bytes(self.TEST_FILE_CONTENT)) self.tfile.close() def tearDown(self): @@ -128,33 +132,35 @@ empty_option= ini.set_option(self.tfile.name, { 'SectionB': {'test3': 'new value 3B'}, }) + expected = os.linesep.join([ + '# Comment on the first line', + '', + '# First main option', + 'option1 = main1', + '', + '# Second main option', + 'option2 = main2', + '', + '[main]', + '# Another comment', + 'test1 = value 1', + '', + 'test2 = value 2', + '', + '[SectionB]', + 'test1 = value 1B', + '', + '# Blank line should be above', + 'test3 = new 
value 3B', + '', + '[SectionC]', + '# The following option is empty', + 'empty_option = ', + '' + ]) with salt.utils.fopen(self.tfile.name, 'r') as fp: file_content = fp.read() - self.assertEqual('''\ -# Comment on the first line - -# First main option -option1 = main1 - -# Second main option -option2 = main2 - -[main] -# Another comment -test1 = value 1 - -test2 = value 2 - -[SectionB] -test1 = value 1B - -# Blank line should be above -test3 = new value 3B - -[SectionC] -# The following option is empty -empty_option = -''', file_content) + self.assertEqual(expected, file_content) def test_empty_lines_preserved_after_multiple_edits(self): ini.set_option(self.tfile.name, { From 79cd3831ae332956560c9ecc5e466f829d5a1549 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 5 Sep 2017 16:44:12 -0600 Subject: [PATCH 382/639] Fix empty value preserved test --- tests/unit/modules/test_ini_manage.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/modules/test_ini_manage.py b/tests/unit/modules/test_ini_manage.py index 01f9a481d8..e5fb9cb669 100644 --- a/tests/unit/modules/test_ini_manage.py +++ b/tests/unit/modules/test_ini_manage.py @@ -125,8 +125,8 @@ class IniManageTestCase(TestCase): }) with salt.utils.fopen(self.tfile.name, 'r') as fp: file_content = fp.read() - self.assertIn('\nempty_option = \n', file_content, - 'empty_option was not preserved') + expected = '{0}{1}{0}'.format(os.linesep, 'empty_option = ') + self.assertIn(expected, file_content, 'empty_option was not preserved') def test_empty_lines_preserved_after_edit(self): ini.set_option(self.tfile.name, { From 6263bc89836fe813ac2f859d2ccbd7b35ae5af1f Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 5 Sep 2017 16:48:58 -0600 Subject: [PATCH 383/639] Remove print statement --- tests/unit/modules/test_ini_manage.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/unit/modules/test_ini_manage.py b/tests/unit/modules/test_ini_manage.py index e5fb9cb669..56e3670940 100644 
--- a/tests/unit/modules/test_ini_manage.py +++ b/tests/unit/modules/test_ini_manage.py @@ -41,10 +41,6 @@ class IniManageTestCase(TestCase): '# The following option is empty', 'empty_option =' ]) - print('*' * 68) - print('original') - print(repr(salt.utils.to_bytes(TEST_FILE_CONTENT))) - print('*' * 68) maxDiff = None From a94319a082e89be87ee326f41570dfa66a3a4f5e Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 5 Sep 2017 17:03:50 -0600 Subject: [PATCH 384/639] Make sure formatting of TEST_FILE_CONTENT matches original --- tests/unit/modules/test_ini_manage.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/unit/modules/test_ini_manage.py b/tests/unit/modules/test_ini_manage.py index 56e3670940..2398c85959 100644 --- a/tests/unit/modules/test_ini_manage.py +++ b/tests/unit/modules/test_ini_manage.py @@ -19,27 +19,27 @@ class IniManageTestCase(TestCase): '# Comment on the first line', '', '# First main option', - 'option1 = main1', + 'option1=main1', '', '# Second main option', - 'option2 = main2', + 'option2=main2', '', '', '[main]', '# Another comment', - 'test1 = value 1', + 'test1=value 1', '', - 'test2 = value 2', + 'test2=value 2', '', '[SectionB]', - 'test1 = value 1B', + 'test1=value 1B', '', '# Blank line should be above', 'test3 = value 3B', '', '[SectionC]', '# The following option is empty', - 'empty_option =' + 'empty_option=' ]) maxDiff = None From 6aefa5c3f8d706e5bebb1d3901fa8ef5ecb5104f Mon Sep 17 00:00:00 2001 From: Andreas Thienemann Date: Wed, 6 Sep 2017 03:46:00 +0200 Subject: [PATCH 385/639] Update unit test to provide osmajorrelease grain. 
--- tests/unit/modules/test_rh_ip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/modules/test_rh_ip.py b/tests/unit/modules/test_rh_ip.py index 08b3367a73..f115ccf5ba 100644 --- a/tests/unit/modules/test_rh_ip.py +++ b/tests/unit/modules/test_rh_ip.py @@ -58,7 +58,7 @@ class RhipTestCase(TestCase, LoaderModuleMockMixin): ''' Test to build an interface script for a network interface. ''' - with patch.dict(rh_ip.__grains__, {'os': 'Fedora'}): + with patch.dict(rh_ip.__grains__, {'os': 'Fedora', 'osmajorrelease': 26}): with patch.object(rh_ip, '_raise_error_iface', return_value=None): self.assertRaises(AttributeError, From 92de2bb498e8da427879c4eff0bfbca8ae6f58c6 Mon Sep 17 00:00:00 2001 From: Damon Atkins Date: Wed, 6 Sep 2017 12:31:51 +1000 Subject: [PATCH 386/639] Update doco --- doc/topics/windows/windows-package-manager.rst | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/doc/topics/windows/windows-package-manager.rst b/doc/topics/windows/windows-package-manager.rst index 063c8b44eb..cea071e888 100644 --- a/doc/topics/windows/windows-package-manager.rst +++ b/doc/topics/windows/windows-package-manager.rst @@ -480,11 +480,17 @@ Alternatively the ``uninstaller`` can also simply repeat the URL of the msi file :param bool allusers: This parameter is specific to `.msi` installations. It tells `msiexec` to install the software for all users. The default is True. -:param bool cache_dir: If true, the entire directory where the installer resides - will be recursively cached. This is useful for installers that depend on - other files in the same directory for installation. +:param bool cache_dir: If true when installer URL begins with salt://, the + entire directory where the installer resides will be recursively cached. + This is useful for installers that depend on other files in the same + directory for installation. -.. note:: Only applies to salt: installer URLs. 
+:param str cache_file: + When installer URL begins with salt://, this indicates single file to copy + down for use with the installer. Copied to the same location as the + installer. Use this over ``cache_dir`` if there are many files in the + directory and you only need a specific file and don't want to cache + additional files that may reside in the installer directory. Here's an example for a software package that has dependent files: From 43d8c65bf4dd331f6ce91a7a670e611f4aff98cd Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Mon, 21 Aug 2017 16:44:50 -0700 Subject: [PATCH 387/639] Swapping slack engine over to using a class for easier passing around for bits. General cleanup and fixing of messages when things go wrong. --- salt/engines/slack.py | 1096 +++++++++++++++++++++-------------------- 1 file changed, 558 insertions(+), 538 deletions(-) diff --git a/salt/engines/slack.py b/salt/engines/slack.py index c3da20f683..e57f6fc6f4 100644 --- a/salt/engines/slack.py +++ b/salt/engines/slack.py @@ -92,6 +92,7 @@ In addition, other groups are being loaded from pillars. 
# Import python libraries from __future__ import absolute_import +import datetime import json import itertools import logging @@ -129,600 +130,618 @@ def __virtual__(): return __virtualname__ -def get_slack_users(token): - ''' - Get all users from Slack - ''' +class SlackClient(object): + def __init__(self, token): + self.master_minion = salt.minion.MasterMinion(__opts__) - ret = salt.utils.slack.query(function='users', - api_key=token, - opts=__opts__) - users = {} - if 'message' in ret: - for item in ret['message']: - if 'is_bot' in item: - if not item['is_bot']: - users[item['name']] = item['id'] - users[item['id']] = item['name'] - return users + self.sc = slackclient.SlackClient(token) + self.slack_connect = self.sc.rtm_connect() + def get_slack_users(self, token): + ''' + Get all users from Slack + ''' -def get_slack_channels(token): - ''' - Get all channel names from Slack - ''' + ret = salt.utils.slack.query(function='users', + api_key=token, + opts=__opts__) + users = {} + if 'message' in ret: + for item in ret['message']: + if 'is_bot' in item: + if not item['is_bot']: + users[item['name']] = item['id'] + users[item['id']] = item['name'] + return users - ret = salt.utils.slack.query( - function='rooms', - api_key=token, - # These won't be honored until https://github.com/saltstack/salt/pull/41187/files is merged - opts={ - 'exclude_archived': True, - 'exclude_members': True - }) - channels = {} - if 'message' in ret: - for item in ret['message']: - channels[item["id"]] = item["name"] - return channels + def get_slack_channels(self, token): + ''' + Get all channel names from Slack + ''' + ret = salt.utils.slack.query( + function='rooms', + api_key=token, + # These won't be honored until https://github.com/saltstack/salt/pull/41187/files is merged + opts={ + 'exclude_archived': True, + 'exclude_members': True + }) + channels = {} + if 'message' in ret: + for item in ret['message']: + channels[item["id"]] = item["name"] + return channels -def 
get_config_groups(groups_conf, groups_pillar_name): - """ - get info from groups in config, and from the named pillar + def get_config_groups(self, groups_conf, groups_pillar_name): + """ + get info from groups in config, and from the named pillar - todo: add specification for the minion to use to recover pillar - """ - # Get groups - # Default to returning something that'll never match - ret_groups = { - "default": { - "users": set(), - "commands": set(), - "aliases": dict(), - "default_target": dict(), - "targets": dict() + todo: add specification for the minion to use to recover pillar + """ + # Get groups + # Default to returning something that'll never match + ret_groups = { + "default": { + "users": set(), + "commands": set(), + "aliases": dict(), + "default_target": dict(), + "targets": dict() + } } - } - # allow for empty groups in the config file, and instead let some/all of this come - # from pillar data. - if not groups_conf: - use_groups = {} - else: - use_groups = groups_conf - # First obtain group lists from pillars, then in case there is any overlap, iterate over the groups - # that come from pillars. The configuration in files on disk/from startup - # will override any configs from pillars. They are meant to be complementary not to provide overrides. 
- try: - groups_gen = itertools.chain(_groups_from_pillar(groups_pillar_name).items(), use_groups.items()) - except AttributeError: - log.warn("Failed to get groups from {}: {}".format(groups_pillar_name, _groups_from_pillar(groups_pillar_name))) - log.warn("or from config: {}".format(use_groups)) - groups_gen = [] - for name, config in groups_gen: - log.info("Trying to get {} and {} to be useful".format(name, config)) - ret_groups.setdefault(name, { - "users": set(), "commands": set(), "aliases": dict(), "default_target": dict(), "targets": dict() - }) - try: - ret_groups[name]['users'].update(set(config.get('users', []))) - ret_groups[name]['commands'].update(set(config.get('commands', []))) - ret_groups[name]['aliases'].update(config.get('aliases', {})) - ret_groups[name]['default_target'].update(config.get('default_target', {})) - ret_groups[name]['targets'].update(config.get('targets', {})) - except IndexError: - log.warn("Couldn't use group {}. Check that targets is a dict and not a list".format(name)) - - log.debug("Got the groups: {}".format(ret_groups)) - return ret_groups - - -def _groups_from_pillar(pillar_name): - """pillar_prefix is the pillar.get syntax for the pillar to be queried. - Group name is gotten via the equivalent of using - ``salt['pillar.get']('{}:{}'.format(pillar_prefix, group_name))`` - in a jinja template. - - returns a dictionary (unless the pillar is mis-formatted) - XXX: instead of using Caller, make the minion to use configurable so there could be some - restrictions placed on what pillars can be used. 
- """ - caller = salt.client.Caller() - pillar_groups = caller.cmd('pillar.get', pillar_name) - # pillar_groups = __salt__['pillar.get'](pillar_name, {}) - log.info("Got pillar groups {} from pillar {}".format(pillar_groups, pillar_name)) - log.info("pillar groups type is {}".format(type(pillar_groups))) - return pillar_groups - - -def fire(tag, msg): - """ - This replaces a function in main called "fire" - - It fires an event into the salt bus. - """ - if __opts__.get('__role') == 'master': - fire_master = salt.utils.event.get_master_event( - __opts__, - __opts__['sock_dir']).fire_event - else: - fire_master = None - - if fire_master: - fire_master(msg, tag) - else: - __salt__['event.send'](tag, msg) - - -def can_user_run(user, command, groups): - """ - Break out the permissions into the folowing: - - Check whether a user is in any group, including whether a group has the '*' membership - - :type user: str - :param user: The username being checked against - - :type command: str - :param command: The command that is being invoked (e.g. test.ping) - - :type groups: dict - :param groups: the dictionary with groups permissions structure. - - :rtype: tuple - :returns: On a successful permitting match, returns 2-element tuple that contains - the name of the group that successfuly matched, and a dictionary containing - the configuration of the group so it can be referenced. 
- - On failure it returns an empty tuple - - """ - log.info("{} wants to run {} with groups {}".format(user, command, groups)) - for key, val in groups.items(): - if user not in val['users']: - if '*' not in val['users']: - continue # this doesn't grant permissions, pass - if (command not in val['commands']) and (command not in val.get('aliases', {}).keys()): - if '*' not in val['commands']: - continue # again, pass - log.info("Slack user {} permitted to run {}".format(user, command)) - return (key, val,) # matched this group, return the group - log.info("Slack user {} denied trying to run {}".format(user, command)) - return () - - -def commandline_to_list(cmdline_str, trigger_string): - """ - cmdline_str is the string of the command line - trigger_string is the trigger string, to be removed - """ - cmdline = salt.utils.args.shlex_split(cmdline_str[len(trigger_string):]) - # Remove slack url parsing - # Translate target= - # to target=host.domain.net - cmdlist = [] - for cmditem in cmdline: - pattern = r'(?P.*)(<.*\|)(?P.*)(>)(?P.*)' - mtch = re.match(pattern, cmditem) - if mtch: - origtext = mtch.group('begin') + mtch.group('url') + mtch.group('remainder') - cmdlist.append(origtext) + # allow for empty groups in the config file, and instead let some/all of this come + # from pillar data. + if not groups_conf: + use_groups = {} else: - cmdlist.append(cmditem) - return cmdlist + use_groups = groups_conf + # First obtain group lists from pillars, then in case there is any overlap, iterate over the groups + # that come from pillars. The configuration in files on disk/from startup + # will override any configs from pillars. They are meant to be complementary not to provide overrides. 
+ log.debug('use_groups {}'.format(use_groups)) + try: + groups_gen = itertools.chain(self._groups_from_pillar(groups_pillar_name).items(), use_groups.items()) + except AttributeError: + log.warn("Failed to get groups from {}: {}".format(groups_pillar_name, self._groups_from_pillar(groups_pillar_name))) + log.warn("or from config: {}".format(use_groups)) + groups_gen = [] + for name, config in groups_gen: + log.info("Trying to get {} and {} to be useful".format(name, config)) + ret_groups.setdefault(name, { + "users": set(), "commands": set(), "aliases": dict(), "default_target": dict(), "targets": dict() + }) + try: + ret_groups[name]['users'].update(set(config.get('users', []))) + ret_groups[name]['commands'].update(set(config.get('commands', []))) + ret_groups[name]['aliases'].update(config.get('aliases', {})) + ret_groups[name]['default_target'].update(config.get('default_target', {})) + ret_groups[name]['targets'].update(config.get('targets', {})) + except IndexError: + log.warn("Couldn't use group {}. Check that targets is a dict and not a list".format(name)) + + log.debug("Got the groups: {}".format(ret_groups)) + return ret_groups + + + def _groups_from_pillar(self, pillar_name): + """pillar_prefix is the pillar.get syntax for the pillar to be queried. + Group name is gotten via the equivalent of using + ``salt['pillar.get']('{}:{}'.format(pillar_prefix, group_name))`` + in a jinja template. + + returns a dictionary (unless the pillar is mis-formatted) + XXX: instead of using Caller, make the minion to use configurable so there could be some + restrictions placed on what pillars can be used. 
+ """ + caller = salt.client.Caller() + pillar_groups = caller.cmd('pillar.get', pillar_name) + # pillar_groups = __salt__['pillar.get'](pillar_name, {}) + log.info("Got pillar groups {} from pillar {}".format(pillar_groups, pillar_name)) + log.info("pillar groups is {}".format(pillar_groups)) + log.info("pillar groups type is {}".format(type(pillar_groups))) + if pillar_groups: + return pillar_groups + else: + return {} + + + def fire(self, tag, msg): + """ + This replaces a function in main called "fire" + + It fires an event into the salt bus. + """ + if __opts__.get('__role') == 'master': + fire_master = salt.utils.event.get_master_event( + __opts__, + __opts__['sock_dir']).fire_master + else: + fire_master = None + + if fire_master: + fire_master(msg, tag) + else: + __salt__['event.send'](tag, msg) + + + def can_user_run(self, user, command, groups): + """ + Break out the permissions into the folowing: + + Check whether a user is in any group, including whether a group has the '*' membership + + :type user: str + :param user: The username being checked against + + :type command: str + :param command: The command that is being invoked (e.g. test.ping) + + :type groups: dict + :param groups: the dictionary with groups permissions structure. + + :rtype: tuple + :returns: On a successful permitting match, returns 2-element tuple that contains + the name of the group that successfuly matched, and a dictionary containing + the configuration of the group so it can be referenced. 
+ + On failure it returns an empty tuple + + """ + log.info("{} wants to run {} with groups {}".format(user, command, groups)) + for key, val in groups.items(): + log.debug('==== key {} val {} ===='.format(key, val)) + if user not in val['users']: + if '*' not in val['users']: + log.debug('==== user validation failed ====') + continue # this doesn't grant permissions, pass + if (command not in val['commands']) and (command not in val.get('aliases', {}).keys()): + if '*' not in val['commands']: + continue # again, pass + log.info("Slack user {} permitted to run {}".format(user, command)) + return (key, val,) # matched this group, return the group + log.info("Slack user {} denied trying to run {}".format(user, command)) + return () + + + def commandline_to_list(self, cmdline_str, trigger_string): + """ + cmdline_str is the string of the command line + trigger_string is the trigger string, to be removed + """ + cmdline = salt.utils.args.shlex_split(cmdline_str[len(trigger_string):]) + # Remove slack url parsing + # Translate target= + # to target=host.domain.net + cmdlist = [] + for cmditem in cmdline: + pattern = r'(?P.*)(<.*\|)(?P.*)(>)(?P.*)' + mtch = re.match(pattern, cmditem) + if mtch: + origtext = mtch.group('begin') + mtch.group('url') + mtch.group('remainder') + cmdlist.append(origtext) + else: + cmdlist.append(cmditem) + return cmdlist # m_data -> m_data, _text -> test, all_slack_users -> all_slack_users, -def control_message_target(slack_user_name, text, loaded_groups, trigger_string): - """Returns a tuple of (target, cmdline,) for the response + def control_message_target(self, slack_user_name, text, loaded_groups, trigger_string): + """Returns a tuple of (target, cmdline,) for the response - Raises IndexError if a user can't be looked up from all_slack_users + Raises IndexError if a user can't be looked up from all_slack_users - Returns (False, False) if the user doesn't have permission + Returns (False, False) if the user doesn't have permission - These 
are returned together because the commandline and the targeting - interact with the group config (specifically aliases and targeting configuration) - so taking care of them together works out. + These are returned together because the commandline and the targeting + interact with the group config (specifically aliases and targeting configuration) + so taking care of them together works out. - The cmdline that is returned is the actual list that should be - processed by salt, and not the alias. + The cmdline that is returned is the actual list that should be + processed by salt, and not the alias. - """ + """ - # Trim the trigger string from the front - # cmdline = _text[1:].split(' ', 1) - cmdline = commandline_to_list(text, trigger_string) - permitted_group = can_user_run(slack_user_name, cmdline[0], loaded_groups) - log.debug("slack_user_name is {} and the permitted group is {}".format(slack_user_name, permitted_group)) - if not permitted_group: - return (False, False) - if not slack_user_name: - return (False, False) + # Trim the trigger string from the front + # cmdline = _text[1:].split(' ', 1) + cmdline = self.commandline_to_list(text, trigger_string) + permitted_group = self.can_user_run(slack_user_name, cmdline[0], loaded_groups) + log.debug("slack_user_name is {} and the permitted group is {}".format(slack_user_name, permitted_group)) - # maybe there are aliases, so check on that - if cmdline[0] in permitted_group[1].get('aliases', {}).keys(): - use_cmdline = commandline_to_list(permitted_group[1]['aliases'][cmdline[0]], "") - else: - use_cmdline = cmdline - target = get_target(permitted_group, cmdline, use_cmdline) - return (target, use_cmdline,) + if not permitted_group: + return (False, None, cmdline[0]) + if not slack_user_name: + return (False, None, cmdline[0]) - -def message_text(m_data): - """ - Raises ValueError if a value doesn't work out, and TypeError if - this isn't a message type - """ - if m_data.get('type') != 'message': - raise 
TypeError("This isn't a message") - # Edited messages have text in message - _text = m_data.get('text', None) or m_data.get('message', {}).get('text', None) - try: - log.info("Message is {}".format(_text)) # this can violate the ascii codec - except UnicodeEncodeError as uee: - log.warn("Got a message that I couldn't log. The reason is: {}".format(uee)) - - # Convert UTF to string - _text = json.dumps(_text) - _text = yaml.safe_load(_text) - - if not _text: - raise ValueError("_text has no value") - return _text - - -def generate_triggered_messages(token, trigger_string, groups, groups_pillar_name): - """slack_token = string - trigger_string = string - input_valid_users = set - input_valid_commands = set - - When the trigger_string prefixes the message text, yields a dictionary of { - "message_data": m_data, - "cmdline": cmdline_list, # this is a list - "channel": channel, - "user": m_data['user'], - "slack_client": sc - } - - else yields {"message_data": m_data} and the caller can handle that - - When encountering an error (e.g. invalid message), yields {}, the caller can proceed to the next message - - When the websocket being read from has given up all its messages, yields {"done": True} to - indicate that the caller has read all of the relevent data for now, and should continue - its own processing and check back for more data later. 
- - This relies on the caller sleeping between checks, otherwise this could flood - """ - sc = slackclient.SlackClient(token) - slack_connect = sc.rtm_connect() - all_slack_users = get_slack_users(token) # re-checks this if we have an negative lookup result - all_slack_channels = get_slack_channels(token) # re-checks this if we have an negative lookup result - - def just_data(m_data): - """Always try to return the user and channel anyway""" - user_id = m_data.get('user') - channel_id = m_data.get('channel') - if channel_id.startswith('D'): # private chate with bot user - channel_name = "private chat" + # maybe there are aliases, so check on that + if cmdline[0] in permitted_group[1].get('aliases', {}).keys(): + use_cmdline = self.commandline_to_list(permitted_group[1]['aliases'][cmdline[0]], "") else: - channel_name = all_slack_channels.get(channel_id) - data = { + use_cmdline = cmdline + target = self.get_target(permitted_group, cmdline, use_cmdline) + + return (True, target, use_cmdline) + + def message_text(self, m_data): + """ + Raises ValueError if a value doesn't work out, and TypeError if + this isn't a message type + """ + if m_data.get('type') != 'message': + raise TypeError("This isn't a message") + # Edited messages have text in message + _text = m_data.get('text', None) or m_data.get('message', {}).get('text', None) + try: + log.info("Message is {}".format(_text)) # this can violate the ascii codec + except UnicodeEncodeError as uee: + log.warn("Got a message that I couldn't log. 
The reason is: {}".format(uee)) + + # Convert UTF to string + _text = json.dumps(_text) + _text = yaml.safe_load(_text) + + if not _text: + raise ValueError("_text has no value") + return _text + + + def generate_triggered_messages(self, token, trigger_string, groups, groups_pillar_name): + """slack_token = string + trigger_string = string + input_valid_users = set + input_valid_commands = set + + When the trigger_string prefixes the message text, yields a dictionary of { "message_data": m_data, - "user_name": all_slack_users.get(user_id), - "channel_name": channel_name + "cmdline": cmdline_list, # this is a list + "channel": channel, + "user": m_data['user'], + "slack_client": sc } - if not data["user_name"]: - all_slack_users.clear() - all_slack_users.update(get_slack_users(token)) - data["user_name"] = all_slack_users.get(user_id) - if not data["channel_name"]: - all_slack_channels.clear() - all_slack_channels.update(get_slack_channels(token)) - data["channel_name"] = all_slack_channels.get(channel_id) - return data - for sleeps in (5, 10, 30, 60): - if slack_connect: - break + else yields {"message_data": m_data} and the caller can handle that + + When encountering an error (e.g. invalid message), yields {}, the caller can proceed to the next message + + When the websocket being read from has given up all its messages, yields {"done": True} to + indicate that the caller has read all of the relevent data for now, and should continue + its own processing and check back for more data later. 
+ + This relies on the caller sleeping between checks, otherwise this could flood + """ + all_slack_users = self.get_slack_users(token) # re-checks this if we have an negative lookup result + all_slack_channels = self.get_slack_channels(token) # re-checks this if we have an negative lookup result + + def just_data(m_data): + """Always try to return the user and channel anyway""" + user_id = m_data.get('user') + channel_id = m_data.get('channel') + if channel_id.startswith('D'): # private chate with bot user + channel_name = "private chat" + else: + channel_name = all_slack_channels.get(channel_id) + data = { + "message_data": m_data, + "user_name": all_slack_users.get(user_id), + "channel_name": channel_name + } + if not data["user_name"]: + all_slack_users.clear() + all_slack_users.update(self.get_slack_users(token)) + data["user_name"] = all_slack_users.get(user_id) + if not data["channel_name"]: + all_slack_channels.clear() + all_slack_channels.update(self.get_slack_channels(token)) + data["channel_name"] = all_slack_channels.get(channel_id) + return data + + for sleeps in (5, 10, 30, 60): + if self.slack_connect: + break + else: + # see https://api.slack.com/docs/rate-limits + log.warning("Slack connection is invalid. Server: {}, sleeping {}".format(self.sc.server, sleeps)) + time.sleep(sleeps) # respawning too fast makes the slack API unhappy about the next reconnection else: - # see https://api.slack.com/docs/rate-limits - log.warning("Slack connection is invalid. Server: {}, sleeping {}".format(sc.server, sleeps)) - time.sleep(sleeps) # respawning too fast makes the slack API unhappy about the next reconnection - else: - raise UserWarning("Connection to slack is still invalid, giving up: {}".format(slack_connect)) # Boom! 
- while True: - msg = sc.rtm_read() - for m_data in msg: - try: - msg_text = message_text(m_data) - except (ValueError, TypeError) as msg_err: - log.debug("Got an error from trying to get the message text {}".format(msg_err)) - yield {"message_data": m_data} # Not a message type from the API? - continue + raise UserWarning("Connection to slack is still invalid, giving up: {}".format(self.slack_connect)) # Boom! + while True: + msg = self.sc.rtm_read() + for m_data in msg: + try: + msg_text = self.message_text(m_data) + except (ValueError, TypeError) as msg_err: + log.debug("Got an error from trying to get the message text {}".format(msg_err)) + yield {"message_data": m_data} # Not a message type from the API? + continue - # Find the channel object from the channel name - channel = sc.server.channels.find(m_data['channel']) - data = just_data(m_data) - if msg_text.startswith(trigger_string): - loaded_groups = get_config_groups(groups, groups_pillar_name) - user_id = m_data.get('user') # slack user ID, e.g. 'U11011' - if not data.get('user_name'): - log.error("The user {} can't be looked up via slack. What has happened here?".format( - m_data.get('user'))) - channel.send_message("The user {} can't be looked up via slack. 
Not running {}".format( - user_id, msg_text)) - yield {"message_data": m_data} - continue - (target, cmdline) = control_message_target( - data['user_name'], msg_text, loaded_groups, trigger_string) - log.debug("Got target: {}, cmdline: {}".format(target, cmdline)) - if target and cmdline: - yield { - "message_data": m_data, - "slack_client": sc, - "channel": channel, - "user": user_id, - "user_name": all_slack_users[user_id], - "cmdline": cmdline, - "target": target - } - continue + # Find the channel object from the channel name + channel = self.sc.server.channels.find(m_data['channel']) + data = just_data(m_data) + if msg_text.startswith(trigger_string): + loaded_groups = self.get_config_groups(groups, groups_pillar_name) + user_id = m_data.get('user') # slack user ID, e.g. 'U11011' + if not data.get('user_name'): + log.error("The user {} can't be looked up via slack. What has happened here?".format( + m_data.get('user'))) + channel.send_message("The user {} can't be looked up via slack. 
Not running {}".format( + user_id, msg_text)) + yield {"message_data": m_data} + continue + (allowed, target, cmdline) = self.control_message_target( + data['user_name'], msg_text, loaded_groups, trigger_string) + log.debug("Got target: {}, cmdline: {}".format(target, cmdline)) + if allowed: + yield { + "message_data": m_data, + "channel": m_data['channel'], + "user": user_id, + "user_name": all_slack_users[user_id], + "cmdline": cmdline, + "target": target + } + continue + else: + channel.send_message('{} is not allowed to use command {}.'.format( + all_slack_users[user_id], cmdline)) + yield data + continue else: - channel.send_message('{}, {} is not allowed to use command {}.'.format( - user_id, all_slack_users[user_id], cmdline)) yield data continue - else: - yield data - continue - yield {"done": True} + yield {"done": True} -def get_target(permitted_group, cmdline, alias_cmdline): - """When we are permitted to run a command on a target, look to see - what the default targeting is for that group, and for that specific - command (if provided). + def get_target(self, permitted_group, cmdline, alias_cmdline): + """When we are permitted to run a command on a target, look to see + what the default targeting is for that group, and for that specific + command (if provided). - It's possible for None or False to be the result of either, which means - that it's expected that the caller provide a specific target. + It's possible for None or False to be the result of either, which means + that it's expected that the caller provide a specific target. 
- If no configured target is provided, the command line will be parsed - for target=foo and tgt_type=bar + If no configured target is provided, the command line will be parsed + for target=foo and tgt_type=bar - Test for this: - h = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, - 'default_target': {'target': '*', 'tgt_type': 'glob'}, - 'targets': {'pillar.get': {'target': 'you_momma', 'tgt_type': 'list'}}, - 'users': {'dmangot', 'jmickle', 'pcn'}} - f = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, - 'default_target': {}, 'targets': {},'users': {'dmangot', 'jmickle', 'pcn'}} + Test for this: + h = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, + 'default_target': {'target': '*', 'tgt_type': 'glob'}, + 'targets': {'pillar.get': {'target': 'you_momma', 'tgt_type': 'list'}}, + 'users': {'dmangot', 'jmickle', 'pcn'}} + f = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, + 'default_target': {}, 'targets': {},'users': {'dmangot', 'jmickle', 'pcn'}} - g = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, - 'default_target': {'target': '*', 'tgt_type': 'glob'}, - 'targets': {}, 'users': {'dmangot', 'jmickle', 'pcn'}} + g = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'}, + 'default_target': {'target': '*', 'tgt_type': 'glob'}, + 'targets': {}, 'users': {'dmangot', 'jmickle', 'pcn'}} - Run each of them through ``get_configured_target(("foo", f), "pillar.get")`` and confirm a valid target + Run each of them through ``get_configured_target(("foo", f), "pillar.get")`` and confirm a valid target - """ - null_target = {"target": None, "tgt_type": None} + """ + # Default to targetting all minions with a type of glob + null_target = {"target": '*', "tgt_type": 'glob'} - def check_cmd_against_group(cmd): - """Validate cmd against the group to return the target, or a null target""" - name, group_config = permitted_group - target = group_config.get('default_target') - if not target: # Empty, None, or False - target = null_target - if 
group_config.get('targets'): - if group_config['targets'].get(cmd): - target = group_config['targets'][cmd] - if not target.get("target"): - log.debug("Group {} is not configured to have a target for cmd {}.".format(name, cmd)) - return target + def check_cmd_against_group(cmd): + """Validate cmd against the group to return the target, or a null target""" + name, group_config = permitted_group + target = group_config.get('default_target') + if not target: # Empty, None, or False + target = null_target + if group_config.get('targets'): + if group_config['targets'].get(cmd): + target = group_config['targets'][cmd] + if not target.get("target"): + log.debug("Group {} is not configured to have a target for cmd {}.".format(name, cmd)) + return target - for this_cl in cmdline, alias_cmdline: - _, kwargs = parse_args_and_kwargs(this_cl) - if 'target' in kwargs: - log.debug("target is in kwargs {}.".format(kwargs)) - if 'tgt_type' in kwargs: - log.debug("tgt_type is in kwargs {}.".format(kwargs)) - return {"target": kwargs['target'], "tgt_type": kwargs['tgt_type']} - return {"target": kwargs['target'], "tgt_type": 'glob'} + for this_cl in cmdline, alias_cmdline: + _, kwargs = self.parse_args_and_kwargs(this_cl) + if 'target' in kwargs: + log.debug("target is in kwargs {}.".format(kwargs)) + if 'tgt_type' in kwargs: + log.debug("tgt_type is in kwargs {}.".format(kwargs)) + return {"target": kwargs['target'], "tgt_type": kwargs['tgt_type']} + return {"target": kwargs['target'], "tgt_type": 'glob'} - for this_cl in cmdline, alias_cmdline: - checked = check_cmd_against_group(this_cl[0]) - log.debug("this cmdline has target {}.".format(this_cl)) - if checked.get("target"): - return checked - return null_target + for this_cl in cmdline, alias_cmdline: + checked = check_cmd_against_group(this_cl[0]) + log.debug("this cmdline has target {}.".format(this_cl)) + if checked.get("target"): + return checked + return null_target # emulate the yaml_out output formatter. 
It relies on a global __opts__ object which we can't # obviously pass in -def format_return_text(data, **kwargs): # pylint: disable=unused-argument - ''' - Print out YAML using the block mode - ''' - params = dict(Dumper=OrderedDumper) - if 'output_indent' not in __opts__: - # default indentation - params.update(default_flow_style=False) - elif __opts__['output_indent'] >= 0: - # custom indent - params.update(default_flow_style=False, - indent=__opts__['output_indent']) - else: # no indentation - params.update(default_flow_style=True, - indent=0) - try: - return yaml.dump(data, **params).replace("\n\n", "\n") - # pylint: disable=broad-except - except Exception as exc: - import pprint - log.exception('Exception {0} encountered when trying to serialize {1}'.format( - exc, pprint.pformat(data))) - return "Got an error trying to serialze/clean up the response" + def format_return_text(self, data, **kwargs): # pylint: disable=unused-argument + ''' + Print out YAML using the block mode + ''' + params = dict(Dumper=OrderedDumper) + if 'output_indent' not in __opts__: + # default indentation + params.update(default_flow_style=False) + elif __opts__['output_indent'] >= 0: + # custom indent + params.update(default_flow_style=False, + indent=__opts__['output_indent']) + else: # no indentation + params.update(default_flow_style=True, + indent=0) + try: + #return yaml.dump(data, **params).replace("\n\n", "\n") + return json.dumps(data, sort_keys=True, indent=1) + # pylint: disable=broad-except + except Exception as exc: + import pprint + log.exception('Exception {0} encountered when trying to serialize {1}'.format( + exc, pprint.pformat(data))) + return "Got an error trying to serialze/clean up the response" -def parse_args_and_kwargs(cmdline): - """ - cmdline: list + def parse_args_and_kwargs(self, cmdline): + """ + cmdline: list - returns tuple of: args (list), kwargs (dict) - """ - # Parse args and kwargs - args = [] - kwargs = {} + returns tuple of: args (list), kwargs 
(dict) + """ + # Parse args and kwargs + args = [] + kwargs = {} - if len(cmdline) > 1: - for item in cmdline[1:]: - if '=' in item: - (key, value) = item.split('=', 1) - kwargs[key] = value - else: - args.append(item) - return (args, kwargs) + if len(cmdline) > 1: + for item in cmdline[1:]: + if '=' in item: + (key, value) = item.split('=', 1) + kwargs[key] = value + else: + args.append(item) + return (args, kwargs) -def get_jobs_from_runner(outstanding_jids): - """ - Given a list of job_ids, return a dictionary of those job_ids that have completed and their results. + def get_jobs_from_runner(self, outstanding_jids): + """ + Given a list of job_ids, return a dictionary of those job_ids that have completed and their results. - Query the salt event bus via the jobs runner. jobs.list_job will show a job in progress, - jobs.lookup_jid will return a job that has completed. + Query the salt event bus via the jobs runner. jobs.list_job will show a job in progress, + jobs.lookup_jid will return a job that has completed. 
- returns a dictionary of job id: result - """ - # Can't use the runner because of https://github.com/saltstack/salt/issues/40671 - runner = salt.runner.RunnerClient(__opts__) - # log.debug("Getting job IDs {} will run via runner jobs.lookup_jid".format(outstanding_jids)) - mm = salt.minion.MasterMinion(__opts__) - source = __opts__.get('ext_job_cache') - if not source: - source = __opts__.get('master_job_cache') + returns a dictionary of job id: result + """ + # Can't use the runner because of https://github.com/saltstack/salt/issues/40671 + runner = salt.runner.RunnerClient(__opts__) + # log.debug("Getting job IDs {} will run via runner jobs.lookup_jid".format(outstanding_jids)) + #mm = salt.minion.MasterMinion(__opts__) + source = __opts__.get('ext_job_cache') + if not source: + source = __opts__.get('master_job_cache') - results = dict() - for jid in outstanding_jids: - # results[jid] = runner.cmd('jobs.lookup_jid', [jid]) - if mm.returners['{}.get_jid'.format(source)](jid): - jid_result = runner.cmd('jobs.list_job', [jid]).get('Result', {}) - # emulate lookup_jid's return, which is just minion:return + results = dict() + for jid in outstanding_jids: + # results[jid] = runner.cmd('jobs.lookup_jid', [jid]) + if self.master_minion.returners['{}.get_jid'.format(source)](jid): + jid_result = runner.cmd('jobs.list_job', [jid]).get('Result', {}) + # emulate lookup_jid's return, which is just minion:return + # pylint is tripping + # pylint: disable=missing-whitespace-after-comma + job_data = json.dumps({key:val['return'] for key, val in jid_result.items()}) + results[jid] = yaml.load(job_data) + + return results + + + def run_commands_from_slack_async(self, message_generator, fire_all, tag, control, interval=1): + """Pull any pending messages from the message_generator, sending each + one to either the event bus, the command_async or both, depending on + the values of fire_all and command + """ + + outstanding = dict() # set of job_id that we need to check for + + 
while True: + log.trace("Sleeping for interval of {}".format(interval)) + time.sleep(interval) + # Drain the slack messages, up to 10 messages at a clip + count = 0 + for msg in message_generator: + # The message_generator yields dicts. Leave this loop + # on a dict that looks like {"done": True} or when we've done it + # 10 times without taking a break. + log.debug("Got a message from the generator: {}".format(msg.keys())) + if count > 10: + log.warn("Breaking in getting messages because count is exceeded") + break + if len(msg) == 0: + count += 1 + log.warn("len(msg) is zero") + continue # This one is a dud, get the next message + if msg.get("done"): + log.debug("msg is done") + break + if fire_all: + log.debug("Firing message to the bus with tag: {}".format(tag)) + log.debug("{} {}".format(tag, msg)) + self.fire('{0}/{1}'.format(tag, msg['message_data'].get('type')), msg) + if control and (len(msg) > 1) and msg.get('cmdline'): + channel = self.sc.server.channels.find(msg['channel']) + jid = self.run_command_async(msg) + log.debug("Submitted a job and got jid: {}".format(jid)) + outstanding[jid] = msg # record so we can return messages to the caller + channel.send_message("@{}'s job is submitted as salt jid {}".format(msg['user_name'], jid)) + count += 1 + start_time = time.time() + job_status = self.get_jobs_from_runner(outstanding.keys()) # dict of job_ids:results are returned + log.trace("Getting {} jobs status took {} seconds".format(len(job_status), time.time() - start_time)) + for jid, result in job_status.items(): + if result: + log.debug("ret to send back is {}".format(result)) + # formatting function? 
+ this_job = outstanding[jid] + channel = self.sc.server.channels.find(this_job['channel']) + return_text = self.format_return_text(result) + return_prefix = "@{}'s job `{}` (id: {}) (target: {}) returned".format( + this_job["user_name"], this_job["cmdline"], jid, this_job["target"]) + channel.send_message(return_prefix) + ts = time.time() + st = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M%S%f') + filename = 'salt-results-{0}.yaml'.format(st) + r = self.sc.api_call( + "files.upload", channels=channel.id, filename=filename, + content=return_text) + # Handle unicode return + log.debug("Got back {} via the slack client".format(r)) + resp = yaml.safe_load(json.dumps(r)) + if 'ok' in resp and resp['ok'] is False: + this_job['channel'].send_message('Error: {0}'.format(resp['error'])) + del outstanding[jid] + + + def run_command_async(self, msg): + + """ + :type message_generator: generator of dict + :param message_generator: Generates messages from slack that should be run + + :type fire_all: bool + :param fire_all: Whether to also fire messages to the event bus + + :type tag: str + :param tag: The tag to send to use to send to the event bus + + :type interval: int + :param interval: time to wait between ending a loop and beginning the next + + """ + log.debug("Going to run a command async") + runner_functions = sorted(salt.runner.Runner(__opts__).functions) + # Parse args and kwargs + cmd = msg['cmdline'][0] + + args, kwargs = self.parse_args_and_kwargs(msg['cmdline']) + # Check for target. Otherwise assume None + target = msg["target"]["target"] + # Check for tgt_type. 
Otherwise assume glob + tgt_type = msg["target"]['tgt_type'] + log.debug("target_type is: {}".format(tgt_type)) + + if cmd in runner_functions: + runner = salt.runner.RunnerClient(__opts__) + log.debug("Command {} will run via runner_functions".format(cmd)) # pylint is tripping # pylint: disable=missing-whitespace-after-comma - job_data = json.dumps({key:val['return'] for key, val in jid_result.items()}) - results[jid] = yaml.load(job_data) + job_id_dict = runner.async(cmd, {"args": args, "kwargs": kwargs}) + job_id = job_id_dict['jid'] - return results - - -def run_commands_from_slack_async(message_generator, fire_all, tag, control, interval=1): - """Pull any pending messages from the message_generator, sending each - one to either the event bus, the command_async or both, depending on - the values of fire_all and command - """ - - outstanding = dict() # set of job_id that we need to check for - - while True: - log.debug("Sleeping for interval of {}".format(interval)) - time.sleep(interval) - # Drain the slack messages, up to 10 messages at a clip - count = 0 - for msg in message_generator: - # The message_generator yields dicts. Leave this loop - # on a dict that looks like {"done": True} or when we've done it - # 10 times without taking a break. 
- log.debug("Got a message from the generator: {}".format(msg.keys())) - if count > 10: - log.warn("Breaking in getting messages because count is exceeded") - break - if len(msg) == 0: - count += 1 - log.warn("len(msg) is zero") - continue # This one is a dud, get the next message - if msg.get("done"): - log.debug("msg is done") - break - if fire_all: - log.debug("Firing message to the bus with tag: {}".format(tag)) - fire('{0}/{1}'.format(tag, msg['message_data'].get('type')), msg) - if control and (len(msg) > 1) and msg.get('cmdline'): - jid = run_command_async(msg) - log.debug("Submitted a job and got jid: {}".format(jid)) - outstanding[jid] = msg # record so we can return messages to the caller - msg['channel'].send_message("@{}'s job is submitted as salt jid {}".format(msg['user_name'], jid)) - count += 1 - start_time = time.time() - job_status = get_jobs_from_runner(outstanding.keys()) # dict of job_ids:results are returned - log.debug("Getting {} jobs status took {} seconds".format(len(job_status), time.time() - start_time)) - for jid, result in job_status.items(): - if result: - log.debug("ret to send back is {}".format(result)) - # formatting function? 
- this_job = outstanding[jid] - return_text = format_return_text(result) - return_prefix = "@{}'s job `{}` (id: {}) (target: {}) returned".format( - this_job["user_name"], this_job["cmdline"], jid, this_job["target"]) - this_job['channel'].send_message(return_prefix) - r = this_job["slack_client"].api_call( - "files.upload", channels=this_job['channel'].id, files=None, - content=return_text) - # Handle unicode return - log.debug("Got back {} via the slack client".format(r)) - resp = yaml.safe_load(json.dumps(r)) - if 'ok' in resp and resp['ok'] is False: - this_job['channel'].send_message('Error: {0}'.format(resp['error'])) - del outstanding[jid] - - -def run_command_async(msg): - - """ - :type message_generator: generator of dict - :param message_generator: Generates messages from slack that should be run - - :type fire_all: bool - :param fire_all: Whether to also fire messages to the event bus - - :type tag: str - :param tag: The tag to send to use to send to the event bus - - :type interval: int - :param interval: time to wait between ending a loop and beginning the next - - """ - log.debug("Going to run a command async") - runner_functions = sorted(salt.runner.Runner(__opts__).functions) - # Parse args and kwargs - cmd = msg['cmdline'][0] - - args, kwargs = parse_args_and_kwargs(msg['cmdline']) - # Check for target. Otherwise assume None - target = msg["target"]["target"] - # Check for tgt_type. Otherwise assume glob - tgt_type = msg["target"]['tgt_type'] - log.debug("target_type is: {}".format(tgt_type)) - - if cmd in runner_functions: - runner = salt.runner.RunnerClient(__opts__) - log.debug("Command {} will run via runner_functions".format(cmd)) - # pylint is tripping - # pylint: disable=missing-whitespace-after-comma - job_id_dict = runner.async(cmd, {"args": args, "kwargs": kwargs}) - job_id = job_id_dict['jid'] - - # Default to trying to run as a client module. 
- else: - local = salt.client.LocalClient() - log.debug("Command {} will run via local.cmd_async, targeting {}".format(cmd, target)) - log.debug("Running {}, {}, {}, {}, {}".format(str(target), cmd, args, kwargs, str(tgt_type))) - # according to https://github.com/saltstack/salt-api/issues/164, tgt_type has changed to expr_form - job_id = local.cmd_async(str(target), cmd, arg=args, kwargs=kwargs, tgt_type=str(tgt_type)) - log.info("ret from local.cmd_async is {}".format(job_id)) - return job_id + # Default to trying to run as a client module. + else: + local = salt.client.LocalClient() + log.debug("Command {} will run via local.cmd_async, targeting {}".format(cmd, target)) + log.debug("Running {}, {}, {}, {}, {}".format(str(target), cmd, args, kwargs, str(tgt_type))) + # according to https://github.com/saltstack/salt-api/issues/164, tgt_type has changed to expr_form + job_id = local.cmd_async(str(target), cmd, arg=args, kwargs=kwargs, tgt_type=str(tgt_type)) + log.info("ret from local.cmd_async is {}".format(job_id)) + return job_id def start(token, @@ -742,7 +761,8 @@ def start(token, raise UserWarning('Slack Engine bot token not configured') try: - message_generator = generate_triggered_messages(token, trigger, groups, groups_pillar_name) - run_commands_from_slack_async(message_generator, fire_all, tag, control) + client = SlackClient(token=token) + message_generator = client.generate_triggered_messages(token, trigger, groups, groups_pillar_name) + client.run_commands_from_slack_async(message_generator, fire_all, tag, control) except Exception: raise Exception("{}".format(traceback.format_exc())) From c228cb490cba24d15d378b54351acb574f0964ca Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Tue, 22 Aug 2017 11:51:06 -0700 Subject: [PATCH 388/639] Allow editing previous messages and having them resubmitted to the Slack engine. Fixing passing kwargs. Fixing passing pillar values. 
--- salt/engines/slack.py | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/salt/engines/slack.py b/salt/engines/slack.py index e57f6fc6f4..19ba087aca 100644 --- a/salt/engines/slack.py +++ b/salt/engines/slack.py @@ -92,6 +92,7 @@ In addition, other groups are being loaded from pillars. # Import python libraries from __future__ import absolute_import +import ast import datetime import json import itertools @@ -291,10 +292,8 @@ class SlackClient(object): """ log.info("{} wants to run {} with groups {}".format(user, command, groups)) for key, val in groups.items(): - log.debug('==== key {} val {} ===='.format(key, val)) if user not in val['users']: if '*' not in val['users']: - log.debug('==== user validation failed ====') continue # this doesn't grant permissions, pass if (command not in val['commands']) and (command not in val.get('aliases', {}).keys()): if '*' not in val['commands']: @@ -415,7 +414,14 @@ class SlackClient(object): def just_data(m_data): """Always try to return the user and channel anyway""" - user_id = m_data.get('user') + if 'user' not in m_data: + if 'message' in m_data and 'user' in m_data['message']: + log.debug('Message was edited, ' + 'so we look for user in ' + 'the original message.') + user_id = m_data['message']['user'] + else: + user_id = m_data.get('user') channel_id = m_data.get('channel') if channel_id.startswith('D'): # private chate with bot user channel_name = "private chat" @@ -423,6 +429,7 @@ class SlackClient(object): channel_name = all_slack_channels.get(channel_id) data = { "message_data": m_data, + "user_id": user_id, "user_name": all_slack_users.get(user_id), "channel_name": channel_name } @@ -460,7 +467,6 @@ class SlackClient(object): data = just_data(m_data) if msg_text.startswith(trigger_string): loaded_groups = self.get_config_groups(groups, groups_pillar_name) - user_id = m_data.get('user') # slack user ID, e.g. 
'U11011' if not data.get('user_name'): log.error("The user {} can't be looked up via slack. What has happened here?".format( m_data.get('user'))) @@ -475,8 +481,8 @@ class SlackClient(object): yield { "message_data": m_data, "channel": m_data['channel'], - "user": user_id, - "user_name": all_slack_users[user_id], + "user": data['user_id'], + "user_name": data['user_name'], "cmdline": cmdline, "target": target } @@ -648,7 +654,7 @@ class SlackClient(object): # The message_generator yields dicts. Leave this loop # on a dict that looks like {"done": True} or when we've done it # 10 times without taking a break. - log.debug("Got a message from the generator: {}".format(msg.keys())) + log.trace("Got a message from the generator: {}".format(msg.keys())) if count > 10: log.warn("Breaking in getting messages because count is exceeded") break @@ -657,7 +663,7 @@ class SlackClient(object): log.warn("len(msg) is zero") continue # This one is a dud, get the next message if msg.get("done"): - log.debug("msg is done") + log.trace("msg is done") break if fire_all: log.debug("Firing message to the bus with tag: {}".format(tag)) @@ -719,6 +725,11 @@ class SlackClient(object): cmd = msg['cmdline'][0] args, kwargs = self.parse_args_and_kwargs(msg['cmdline']) + + # Check for pillar string representation of dict and convert it to dict + if 'pillar' in kwargs: + kwargs.update(pillar=ast.literal_eval(kwargs['pillar'])) + # Check for target. Otherwise assume None target = msg["target"]["target"] # Check for tgt_type. 
Otherwise assume glob @@ -739,7 +750,7 @@ class SlackClient(object): log.debug("Command {} will run via local.cmd_async, targeting {}".format(cmd, target)) log.debug("Running {}, {}, {}, {}, {}".format(str(target), cmd, args, kwargs, str(tgt_type))) # according to https://github.com/saltstack/salt-api/issues/164, tgt_type has changed to expr_form - job_id = local.cmd_async(str(target), cmd, arg=args, kwargs=kwargs, tgt_type=str(tgt_type)) + job_id = local.cmd_async(str(target), cmd, arg=args, kwarg=kwargs, tgt_type=str(tgt_type)) log.info("ret from local.cmd_async is {}".format(job_id)) return job_id From ace483f57a9718c5d99487a26d623561b4ad8ba5 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Wed, 23 Aug 2017 09:44:27 -0700 Subject: [PATCH 389/639] Fixing various lint issues --- salt/engines/slack.py | 48 +++++++++++++++++-------------------------- 1 file changed, 19 insertions(+), 29 deletions(-) diff --git a/salt/engines/slack.py b/salt/engines/slack.py index 19ba087aca..e4508bfbbf 100644 --- a/salt/engines/slack.py +++ b/salt/engines/slack.py @@ -225,7 +225,6 @@ class SlackClient(object): log.debug("Got the groups: {}".format(ret_groups)) return ret_groups - def _groups_from_pillar(self, pillar_name): """pillar_prefix is the pillar.get syntax for the pillar to be queried. 
Group name is gotten via the equivalent of using @@ -247,7 +246,6 @@ class SlackClient(object): else: return {} - def fire(self, tag, msg): """ This replaces a function in main called "fire" @@ -303,7 +301,6 @@ class SlackClient(object): log.info("Slack user {} denied trying to run {}".format(user, command)) return () - def commandline_to_list(self, cmdline_str, trigger_string): """ cmdline_str is the string of the command line @@ -324,7 +321,6 @@ class SlackClient(object): cmdlist.append(cmditem) return cmdlist - # m_data -> m_data, _text -> test, all_slack_users -> all_slack_users, def control_message_target(self, slack_user_name, text, loaded_groups, trigger_string): """Returns a tuple of (target, cmdline,) for the response @@ -384,7 +380,6 @@ class SlackClient(object): raise ValueError("_text has no value") return _text - def generate_triggered_messages(self, token, trigger_string, groups, groups_pillar_name): """slack_token = string trigger_string = string @@ -424,23 +419,23 @@ class SlackClient(object): user_id = m_data.get('user') channel_id = m_data.get('channel') if channel_id.startswith('D'): # private chate with bot user - channel_name = "private chat" + channel_name = 'private chat' else: channel_name = all_slack_channels.get(channel_id) data = { - "message_data": m_data, - "user_id": user_id, - "user_name": all_slack_users.get(user_id), - "channel_name": channel_name + 'message_data': m_data, + 'user_id': user_id, + 'user_name': all_slack_users.get(user_id), + 'channel_name': channel_name } - if not data["user_name"]: + if not data['user_name']: all_slack_users.clear() all_slack_users.update(self.get_slack_users(token)) - data["user_name"] = all_slack_users.get(user_id) - if not data["channel_name"]: + data['user_name'] = all_slack_users.get(user_id) + if not data['channel_name']: all_slack_channels.clear() all_slack_channels.update(self.get_slack_channels(token)) - data["channel_name"] = all_slack_channels.get(channel_id) + data['channel_name'] = 
all_slack_channels.get(channel_id) return data for sleeps in (5, 10, 30, 60): @@ -471,7 +466,7 @@ class SlackClient(object): log.error("The user {} can't be looked up via slack. What has happened here?".format( m_data.get('user'))) channel.send_message("The user {} can't be looked up via slack. Not running {}".format( - user_id, msg_text)) + data['user_id'], msg_text)) yield {"message_data": m_data} continue (allowed, target, cmdline) = self.control_message_target( @@ -479,24 +474,23 @@ class SlackClient(object): log.debug("Got target: {}, cmdline: {}".format(target, cmdline)) if allowed: yield { - "message_data": m_data, - "channel": m_data['channel'], - "user": data['user_id'], - "user_name": data['user_name'], - "cmdline": cmdline, - "target": target + 'message_data': m_data, + 'channel': m_data['channel'], + 'user': data['user_id'], + 'user_name': data['user_name'], + 'cmdline': cmdline, + 'target': target } continue else: - channel.send_message('{} is not allowed to use command {}.'.format( - all_slack_users[user_id], cmdline)) + channel.send_message('{0} is not allowed to use command {1}.'.format( + data['user_name'], cmdline)) yield data continue else: yield data continue - yield {"done": True} - + yield {'done': True} def get_target(self, permitted_group, cmdline, alias_cmdline): """When we are permitted to run a command on a target, look to see @@ -585,7 +579,6 @@ class SlackClient(object): exc, pprint.pformat(data))) return "Got an error trying to serialze/clean up the response" - def parse_args_and_kwargs(self, cmdline): """ cmdline: list @@ -605,7 +598,6 @@ class SlackClient(object): args.append(item) return (args, kwargs) - def get_jobs_from_runner(self, outstanding_jids): """ Given a list of job_ids, return a dictionary of those job_ids that have completed and their results. 
@@ -636,7 +628,6 @@ class SlackClient(object): return results - def run_commands_from_slack_async(self, message_generator, fire_all, tag, control, interval=1): """Pull any pending messages from the message_generator, sending each one to either the event bus, the command_async or both, depending on @@ -702,7 +693,6 @@ class SlackClient(object): this_job['channel'].send_message('Error: {0}'.format(resp['error'])) del outstanding[jid] - def run_command_async(self, msg): """ From b9b51810466c3fa3762b1a70b950f40026cc2037 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Wed, 23 Aug 2017 12:18:16 -0700 Subject: [PATCH 390/639] Fixing lint, too many spaces. --- salt/engines/slack.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/engines/slack.py b/salt/engines/slack.py index e4508bfbbf..cbceae060f 100644 --- a/salt/engines/slack.py +++ b/salt/engines/slack.py @@ -264,7 +264,6 @@ class SlackClient(object): else: __salt__['event.send'](tag, msg) - def can_user_run(self, user, command, groups): """ Break out the permissions into the folowing: From 4cd38d7cd5855fd1669152e7c57254b1dc222df2 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 25 Aug 2017 13:43:51 -0700 Subject: [PATCH 391/639] Updating per requests. 
--- salt/engines/slack.py | 60 +++++++++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 28 deletions(-) diff --git a/salt/engines/slack.py b/salt/engines/slack.py index cbceae060f..fddfa056d8 100644 --- a/salt/engines/slack.py +++ b/salt/engines/slack.py @@ -175,11 +175,11 @@ class SlackClient(object): return channels def get_config_groups(self, groups_conf, groups_pillar_name): - """ + ''' get info from groups in config, and from the named pillar todo: add specification for the minion to use to recover pillar - """ + ''' # Get groups # Default to returning something that'll never match ret_groups = { @@ -226,7 +226,7 @@ class SlackClient(object): return ret_groups def _groups_from_pillar(self, pillar_name): - """pillar_prefix is the pillar.get syntax for the pillar to be queried. + '''pillar_prefix is the pillar.get syntax for the pillar to be queried. Group name is gotten via the equivalent of using ``salt['pillar.get']('{}:{}'.format(pillar_prefix, group_name))`` in a jinja template. @@ -234,7 +234,7 @@ class SlackClient(object): returns a dictionary (unless the pillar is mis-formatted) XXX: instead of using Caller, make the minion to use configurable so there could be some restrictions placed on what pillars can be used. - """ + ''' caller = salt.client.Caller() pillar_groups = caller.cmd('pillar.get', pillar_name) # pillar_groups = __salt__['pillar.get'](pillar_name, {}) @@ -247,11 +247,11 @@ class SlackClient(object): return {} def fire(self, tag, msg): - """ + ''' This replaces a function in main called "fire" It fires an event into the salt bus. 
- """ + ''' if __opts__.get('__role') == 'master': fire_master = salt.utils.event.get_master_event( __opts__, @@ -265,7 +265,7 @@ class SlackClient(object): __salt__['event.send'](tag, msg) def can_user_run(self, user, command, groups): - """ + ''' Break out the permissions into the folowing: Check whether a user is in any group, including whether a group has the '*' membership @@ -286,7 +286,7 @@ class SlackClient(object): On failure it returns an empty tuple - """ + ''' log.info("{} wants to run {} with groups {}".format(user, command, groups)) for key, val in groups.items(): if user not in val['users']: @@ -301,10 +301,10 @@ class SlackClient(object): return () def commandline_to_list(self, cmdline_str, trigger_string): - """ + ''' cmdline_str is the string of the command line trigger_string is the trigger string, to be removed - """ + ''' cmdline = salt.utils.args.shlex_split(cmdline_str[len(trigger_string):]) # Remove slack url parsing # Translate target= @@ -322,7 +322,7 @@ class SlackClient(object): # m_data -> m_data, _text -> test, all_slack_users -> all_slack_users, def control_message_target(self, slack_user_name, text, loaded_groups, trigger_string): - """Returns a tuple of (target, cmdline,) for the response + '''Returns a tuple of (target, cmdline,) for the response Raises IndexError if a user can't be looked up from all_slack_users @@ -335,7 +335,7 @@ class SlackClient(object): The cmdline that is returned is the actual list that should be processed by salt, and not the alias. 
- """ + ''' # Trim the trigger string from the front # cmdline = _text[1:].split(' ', 1) @@ -358,10 +358,10 @@ class SlackClient(object): return (True, target, use_cmdline) def message_text(self, m_data): - """ + ''' Raises ValueError if a value doesn't work out, and TypeError if this isn't a message type - """ + ''' if m_data.get('type') != 'message': raise TypeError("This isn't a message") # Edited messages have text in message @@ -380,7 +380,7 @@ class SlackClient(object): return _text def generate_triggered_messages(self, token, trigger_string, groups, groups_pillar_name): - """slack_token = string + '''slack_token = string trigger_string = string input_valid_users = set input_valid_commands = set @@ -402,12 +402,12 @@ class SlackClient(object): its own processing and check back for more data later. This relies on the caller sleeping between checks, otherwise this could flood - """ + ''' all_slack_users = self.get_slack_users(token) # re-checks this if we have an negative lookup result all_slack_channels = self.get_slack_channels(token) # re-checks this if we have an negative lookup result def just_data(m_data): - """Always try to return the user and channel anyway""" + '''Always try to return the user and channel anyway''' if 'user' not in m_data: if 'message' in m_data and 'user' in m_data['message']: log.debug('Message was edited, ' @@ -492,7 +492,8 @@ class SlackClient(object): yield {'done': True} def get_target(self, permitted_group, cmdline, alias_cmdline): - """When we are permitted to run a command on a target, look to see + ''' + When we are permitted to run a command on a target, look to see what the default targeting is for that group, and for that specific command (if provided). 
@@ -516,12 +517,14 @@ class SlackClient(object): Run each of them through ``get_configured_target(("foo", f), "pillar.get")`` and confirm a valid target - """ + ''' # Default to targetting all minions with a type of glob null_target = {"target": '*', "tgt_type": 'glob'} def check_cmd_against_group(cmd): - """Validate cmd against the group to return the target, or a null target""" + ''' + Validate cmd against the group to return the target, or a null target + ''' name, group_config = permitted_group target = group_config.get('default_target') if not target: # Empty, None, or False @@ -579,11 +582,11 @@ class SlackClient(object): return "Got an error trying to serialze/clean up the response" def parse_args_and_kwargs(self, cmdline): - """ + ''' cmdline: list returns tuple of: args (list), kwargs (dict) - """ + ''' # Parse args and kwargs args = [] kwargs = {} @@ -598,14 +601,14 @@ class SlackClient(object): return (args, kwargs) def get_jobs_from_runner(self, outstanding_jids): - """ + ''' Given a list of job_ids, return a dictionary of those job_ids that have completed and their results. Query the salt event bus via the jobs runner. jobs.list_job will show a job in progress, jobs.lookup_jid will return a job that has completed. 
returns a dictionary of job id: result - """ + ''' # Can't use the runner because of https://github.com/saltstack/salt/issues/40671 runner = salt.runner.RunnerClient(__opts__) # log.debug("Getting job IDs {} will run via runner jobs.lookup_jid".format(outstanding_jids)) @@ -628,10 +631,11 @@ class SlackClient(object): return results def run_commands_from_slack_async(self, message_generator, fire_all, tag, control, interval=1): - """Pull any pending messages from the message_generator, sending each + ''' + Pull any pending messages from the message_generator, sending each one to either the event bus, the command_async or both, depending on the values of fire_all and command - """ + ''' outstanding = dict() # set of job_id that we need to check for @@ -694,7 +698,7 @@ class SlackClient(object): def run_command_async(self, msg): - """ + ''' :type message_generator: generator of dict :param message_generator: Generates messages from slack that should be run @@ -707,7 +711,7 @@ class SlackClient(object): :type interval: int :param interval: time to wait between ending a loop and beginning the next - """ + ''' log.debug("Going to run a command async") runner_functions = sorted(salt.runner.Runner(__opts__).functions) # Parse args and kwargs From 3104bde2e4bb44ecfa35b6c8fea94804eb799d0a Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Wed, 6 Sep 2017 07:08:58 -0700 Subject: [PATCH 392/639] Replacing double quotes with single quotes. --- salt/engines/slack.py | 170 +++++++++++++++++++++--------------------- 1 file changed, 86 insertions(+), 84 deletions(-) diff --git a/salt/engines/slack.py b/salt/engines/slack.py index fddfa056d8..00452f6e3a 100644 --- a/salt/engines/slack.py +++ b/salt/engines/slack.py @@ -18,7 +18,7 @@ the saltmaster's minion pillar. .. versionadded: 2016.3.0 -:configuration: Example configuration using only a "default" group. The default group is not special. +:configuration: Example configuration using only a 'default' group. 
The default group is not special. In addition, other groups are being loaded from pillars. .. code-block:: yaml @@ -28,7 +28,7 @@ In addition, other groups are being loaded from pillars. token: 'xoxb-xxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxx' control: True fire_all: False - groups_pillar_name: "slack_engine:groups_pillar" + groups_pillar_name: 'slack_engine:groups_pillar' groups: default: users: @@ -54,7 +54,7 @@ In addition, other groups are being loaded from pillars. target: saltmaster tgt_type: list -:configuration: Example configuration using the "default" group and a non-default group and a pillar that will be merged in +:configuration: Example configuration using the 'default' group and a non-default group and a pillar that will be merged in If the user is '*' (without the quotes) then the group's users or commands will match all users as appropriate .. versionadded: 2017.7.0 @@ -68,7 +68,7 @@ In addition, other groups are being loaded from pillars. control: True fire_all: True tag: salt/engines/slack - groups_pillar_name: "slack_engine:groups_pillar" + groups_pillar_name: 'slack_engine:groups_pillar' groups: default: valid_users: @@ -171,7 +171,7 @@ class SlackClient(object): channels = {} if 'message' in ret: for item in ret['message']: - channels[item["id"]] = item["name"] + channels[item['id']] = item['name'] return channels def get_config_groups(self, groups_conf, groups_pillar_name): @@ -183,12 +183,12 @@ class SlackClient(object): # Get groups # Default to returning something that'll never match ret_groups = { - "default": { - "users": set(), - "commands": set(), - "aliases": dict(), - "default_target": dict(), - "targets": dict() + 'default': { + 'users': set(), + 'commands': set(), + 'aliases': dict(), + 'default_target': dict(), + 'targets': dict() } } @@ -205,13 +205,13 @@ class SlackClient(object): try: groups_gen = itertools.chain(self._groups_from_pillar(groups_pillar_name).items(), use_groups.items()) except AttributeError: - log.warn("Failed to get 
groups from {}: {}".format(groups_pillar_name, self._groups_from_pillar(groups_pillar_name))) - log.warn("or from config: {}".format(use_groups)) + log.warn('Failed to get groups from {}: {}'.format(groups_pillar_name, self._groups_from_pillar(groups_pillar_name))) + log.warn('or from config: {}'.format(use_groups)) groups_gen = [] for name, config in groups_gen: - log.info("Trying to get {} and {} to be useful".format(name, config)) + log.info('Trying to get {} and {} to be useful'.format(name, config)) ret_groups.setdefault(name, { - "users": set(), "commands": set(), "aliases": dict(), "default_target": dict(), "targets": dict() + 'users': set(), 'commands': set(), 'aliases': dict(), 'default_target': dict(), 'targets': dict() }) try: ret_groups[name]['users'].update(set(config.get('users', []))) @@ -222,11 +222,12 @@ class SlackClient(object): except IndexError: log.warn("Couldn't use group {}. Check that targets is a dict and not a list".format(name)) - log.debug("Got the groups: {}".format(ret_groups)) + log.debug('Got the groups: {}'.format(ret_groups)) return ret_groups def _groups_from_pillar(self, pillar_name): - '''pillar_prefix is the pillar.get syntax for the pillar to be queried. + ''' + pillar_prefix is the pillar.get syntax for the pillar to be queried. Group name is gotten via the equivalent of using ``salt['pillar.get']('{}:{}'.format(pillar_prefix, group_name))`` in a jinja template. 
@@ -238,9 +239,9 @@ class SlackClient(object): caller = salt.client.Caller() pillar_groups = caller.cmd('pillar.get', pillar_name) # pillar_groups = __salt__['pillar.get'](pillar_name, {}) - log.info("Got pillar groups {} from pillar {}".format(pillar_groups, pillar_name)) - log.info("pillar groups is {}".format(pillar_groups)) - log.info("pillar groups type is {}".format(type(pillar_groups))) + log.debug('Got pillar groups %s from pillar %s', pillar_groups, pillar_name) + log.debug('pillar groups is %s', pillar_groups) + log.debug('pillar groups type is %s', type(pillar_groups)) if pillar_groups: return pillar_groups else: @@ -248,7 +249,7 @@ class SlackClient(object): def fire(self, tag, msg): ''' - This replaces a function in main called "fire" + This replaces a function in main called 'fire' It fires an event into the salt bus. ''' @@ -287,7 +288,7 @@ class SlackClient(object): On failure it returns an empty tuple ''' - log.info("{} wants to run {} with groups {}".format(user, command, groups)) + log.info('{} wants to run {} with groups {}'.format(user, command, groups)) for key, val in groups.items(): if user not in val['users']: if '*' not in val['users']: @@ -295,9 +296,9 @@ class SlackClient(object): if (command not in val['commands']) and (command not in val.get('aliases', {}).keys()): if '*' not in val['commands']: continue # again, pass - log.info("Slack user {} permitted to run {}".format(user, command)) + log.info('Slack user {} permitted to run {}'.format(user, command)) return (key, val,) # matched this group, return the group - log.info("Slack user {} denied trying to run {}".format(user, command)) + log.info('Slack user {} denied trying to run {}'.format(user, command)) return () def commandline_to_list(self, cmdline_str, trigger_string): @@ -341,7 +342,7 @@ class SlackClient(object): # cmdline = _text[1:].split(' ', 1) cmdline = self.commandline_to_list(text, trigger_string) permitted_group = self.can_user_run(slack_user_name, cmdline[0], 
loaded_groups) - log.debug("slack_user_name is {} and the permitted group is {}".format(slack_user_name, permitted_group)) + log.debug('slack_user_name is {} and the permitted group is {}'.format(slack_user_name, permitted_group)) if not permitted_group: return (False, None, cmdline[0]) @@ -350,7 +351,7 @@ class SlackClient(object): # maybe there are aliases, so check on that if cmdline[0] in permitted_group[1].get('aliases', {}).keys(): - use_cmdline = self.commandline_to_list(permitted_group[1]['aliases'][cmdline[0]], "") + use_cmdline = self.commandline_to_list(permitted_group[1]['aliases'][cmdline[0]], '') else: use_cmdline = cmdline target = self.get_target(permitted_group, cmdline, use_cmdline) @@ -363,41 +364,42 @@ class SlackClient(object): this isn't a message type ''' if m_data.get('type') != 'message': - raise TypeError("This isn't a message") + raise TypeError('This is not a message') # Edited messages have text in message _text = m_data.get('text', None) or m_data.get('message', {}).get('text', None) try: - log.info("Message is {}".format(_text)) # this can violate the ascii codec + log.info('Message is {}'.format(_text)) # this can violate the ascii codec except UnicodeEncodeError as uee: - log.warn("Got a message that I couldn't log. The reason is: {}".format(uee)) + log.warn('Got a message that I could not log. 
The reason is: {}'.format(uee)) # Convert UTF to string _text = json.dumps(_text) _text = yaml.safe_load(_text) if not _text: - raise ValueError("_text has no value") + raise ValueError('_text has no value') return _text def generate_triggered_messages(self, token, trigger_string, groups, groups_pillar_name): - '''slack_token = string + ''' + slack_token = string trigger_string = string input_valid_users = set input_valid_commands = set When the trigger_string prefixes the message text, yields a dictionary of { - "message_data": m_data, - "cmdline": cmdline_list, # this is a list - "channel": channel, - "user": m_data['user'], - "slack_client": sc + 'message_data': m_data, + 'cmdline': cmdline_list, # this is a list + 'channel': channel, + 'user': m_data['user'], + 'slack_client': sc } - else yields {"message_data": m_data} and the caller can handle that + else yields {'message_data': m_data} and the caller can handle that When encountering an error (e.g. invalid message), yields {}, the caller can proceed to the next message - When the websocket being read from has given up all its messages, yields {"done": True} to + When the websocket being read from has given up all its messages, yields {'done': True} to indicate that the caller has read all of the relevent data for now, and should continue its own processing and check back for more data later. @@ -442,18 +444,18 @@ class SlackClient(object): break else: # see https://api.slack.com/docs/rate-limits - log.warning("Slack connection is invalid. Server: {}, sleeping {}".format(self.sc.server, sleeps)) + log.warning('Slack connection is invalid. Server: {}, sleeping {}'.format(self.sc.server, sleeps)) time.sleep(sleeps) # respawning too fast makes the slack API unhappy about the next reconnection else: - raise UserWarning("Connection to slack is still invalid, giving up: {}".format(self.slack_connect)) # Boom! 
+ raise UserWarning('Connection to slack is still invalid, giving up: {}'.format(self.slack_connect)) # Boom! while True: msg = self.sc.rtm_read() for m_data in msg: try: msg_text = self.message_text(m_data) except (ValueError, TypeError) as msg_err: - log.debug("Got an error from trying to get the message text {}".format(msg_err)) - yield {"message_data": m_data} # Not a message type from the API? + log.debug('Got an error from trying to get the message text {}'.format(msg_err)) + yield {'message_data': m_data} # Not a message type from the API? continue # Find the channel object from the channel name @@ -462,15 +464,15 @@ class SlackClient(object): if msg_text.startswith(trigger_string): loaded_groups = self.get_config_groups(groups, groups_pillar_name) if not data.get('user_name'): - log.error("The user {} can't be looked up via slack. What has happened here?".format( + log.error('The user {} can not be looked up via slack. What has happened here?'.format( m_data.get('user'))) - channel.send_message("The user {} can't be looked up via slack. Not running {}".format( + channel.send_message('The user {} can not be looked up via slack. 
Not running {}'.format( data['user_id'], msg_text)) - yield {"message_data": m_data} + yield {'message_data': m_data} continue (allowed, target, cmdline) = self.control_message_target( data['user_name'], msg_text, loaded_groups, trigger_string) - log.debug("Got target: {}, cmdline: {}".format(target, cmdline)) + log.debug('Got target: {}, cmdline: {}'.format(target, cmdline)) if allowed: yield { 'message_data': m_data, @@ -515,11 +517,11 @@ class SlackClient(object): 'default_target': {'target': '*', 'tgt_type': 'glob'}, 'targets': {}, 'users': {'dmangot', 'jmickle', 'pcn'}} - Run each of them through ``get_configured_target(("foo", f), "pillar.get")`` and confirm a valid target + Run each of them through ``get_configured_target(('foo', f), 'pillar.get')`` and confirm a valid target ''' # Default to targetting all minions with a type of glob - null_target = {"target": '*', "tgt_type": 'glob'} + null_target = {'target': '*', 'tgt_type': 'glob'} def check_cmd_against_group(cmd): ''' @@ -532,23 +534,23 @@ class SlackClient(object): if group_config.get('targets'): if group_config['targets'].get(cmd): target = group_config['targets'][cmd] - if not target.get("target"): - log.debug("Group {} is not configured to have a target for cmd {}.".format(name, cmd)) + if not target.get('target'): + log.debug('Group {} is not configured to have a target for cmd {}.'.format(name, cmd)) return target for this_cl in cmdline, alias_cmdline: _, kwargs = self.parse_args_and_kwargs(this_cl) if 'target' in kwargs: - log.debug("target is in kwargs {}.".format(kwargs)) + log.debug('target is in kwargs {}.'.format(kwargs)) if 'tgt_type' in kwargs: - log.debug("tgt_type is in kwargs {}.".format(kwargs)) - return {"target": kwargs['target'], "tgt_type": kwargs['tgt_type']} - return {"target": kwargs['target'], "tgt_type": 'glob'} + log.debug('tgt_type is in kwargs {}.'.format(kwargs)) + return {'target': kwargs['target'], 'tgt_type': kwargs['tgt_type']} + return {'target': kwargs['target'], 
'tgt_type': 'glob'} for this_cl in cmdline, alias_cmdline: checked = check_cmd_against_group(this_cl[0]) - log.debug("this cmdline has target {}.".format(this_cl)) - if checked.get("target"): + log.debug('this cmdline has target {}.'.format(this_cl)) + if checked.get('target'): return checked return null_target @@ -579,7 +581,7 @@ class SlackClient(object): import pprint log.exception('Exception {0} encountered when trying to serialize {1}'.format( exc, pprint.pformat(data))) - return "Got an error trying to serialze/clean up the response" + return 'Got an error trying to serialze/clean up the response' def parse_args_and_kwargs(self, cmdline): ''' @@ -640,57 +642,57 @@ class SlackClient(object): outstanding = dict() # set of job_id that we need to check for while True: - log.trace("Sleeping for interval of {}".format(interval)) + log.trace('Sleeping for interval of {}'.format(interval)) time.sleep(interval) # Drain the slack messages, up to 10 messages at a clip count = 0 for msg in message_generator: # The message_generator yields dicts. Leave this loop - # on a dict that looks like {"done": True} or when we've done it + # on a dict that looks like {'done': True} or when we've done it # 10 times without taking a break. 
- log.trace("Got a message from the generator: {}".format(msg.keys())) + log.trace('Got a message from the generator: {}'.format(msg.keys())) if count > 10: - log.warn("Breaking in getting messages because count is exceeded") + log.warn('Breaking in getting messages because count is exceeded') break if len(msg) == 0: count += 1 - log.warn("len(msg) is zero") + log.warn('len(msg) is zero') continue # This one is a dud, get the next message - if msg.get("done"): - log.trace("msg is done") + if msg.get('done'): + log.trace('msg is done') break if fire_all: - log.debug("Firing message to the bus with tag: {}".format(tag)) - log.debug("{} {}".format(tag, msg)) + log.debug('Firing message to the bus with tag: {}'.format(tag)) + log.debug('{} {}'.format(tag, msg)) self.fire('{0}/{1}'.format(tag, msg['message_data'].get('type')), msg) if control and (len(msg) > 1) and msg.get('cmdline'): channel = self.sc.server.channels.find(msg['channel']) jid = self.run_command_async(msg) - log.debug("Submitted a job and got jid: {}".format(jid)) + log.debug('Submitted a job and got jid: {}'.format(jid)) outstanding[jid] = msg # record so we can return messages to the caller channel.send_message("@{}'s job is submitted as salt jid {}".format(msg['user_name'], jid)) count += 1 start_time = time.time() job_status = self.get_jobs_from_runner(outstanding.keys()) # dict of job_ids:results are returned - log.trace("Getting {} jobs status took {} seconds".format(len(job_status), time.time() - start_time)) + log.trace('Getting {} jobs status took {} seconds'.format(len(job_status), time.time() - start_time)) for jid, result in job_status.items(): if result: - log.debug("ret to send back is {}".format(result)) + log.debug('ret to send back is {}'.format(result)) # formatting function? 
this_job = outstanding[jid] channel = self.sc.server.channels.find(this_job['channel']) return_text = self.format_return_text(result) return_prefix = "@{}'s job `{}` (id: {}) (target: {}) returned".format( - this_job["user_name"], this_job["cmdline"], jid, this_job["target"]) + this_job['user_name'], this_job['cmdline'], jid, this_job['target']) channel.send_message(return_prefix) ts = time.time() st = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M%S%f') filename = 'salt-results-{0}.yaml'.format(st) r = self.sc.api_call( - "files.upload", channels=channel.id, filename=filename, + 'files.upload', channels=channel.id, filename=filename, content=return_text) # Handle unicode return - log.debug("Got back {} via the slack client".format(r)) + log.debug('Got back {} via the slack client'.format(r)) resp = yaml.safe_load(json.dumps(r)) if 'ok' in resp and resp['ok'] is False: this_job['channel'].send_message('Error: {0}'.format(resp['error'])) @@ -712,7 +714,7 @@ class SlackClient(object): :param interval: time to wait between ending a loop and beginning the next ''' - log.debug("Going to run a command async") + log.debug('Going to run a command async') runner_functions = sorted(salt.runner.Runner(__opts__).functions) # Parse args and kwargs cmd = msg['cmdline'][0] @@ -724,33 +726,33 @@ class SlackClient(object): kwargs.update(pillar=ast.literal_eval(kwargs['pillar'])) # Check for target. Otherwise assume None - target = msg["target"]["target"] + target = msg['target']['target'] # Check for tgt_type. 
Otherwise assume glob - tgt_type = msg["target"]['tgt_type'] - log.debug("target_type is: {}".format(tgt_type)) + tgt_type = msg['target']['tgt_type'] + log.debug('target_type is: {}'.format(tgt_type)) if cmd in runner_functions: runner = salt.runner.RunnerClient(__opts__) - log.debug("Command {} will run via runner_functions".format(cmd)) + log.debug('Command {} will run via runner_functions'.format(cmd)) # pylint is tripping # pylint: disable=missing-whitespace-after-comma - job_id_dict = runner.async(cmd, {"args": args, "kwargs": kwargs}) + job_id_dict = runner.async(cmd, {'args': args, 'kwargs': kwargs}) job_id = job_id_dict['jid'] # Default to trying to run as a client module. else: local = salt.client.LocalClient() - log.debug("Command {} will run via local.cmd_async, targeting {}".format(cmd, target)) - log.debug("Running {}, {}, {}, {}, {}".format(str(target), cmd, args, kwargs, str(tgt_type))) + log.debug('Command {} will run via local.cmd_async, targeting {}'.format(cmd, target)) + log.debug('Running {}, {}, {}, {}, {}'.format(str(target), cmd, args, kwargs, str(tgt_type))) # according to https://github.com/saltstack/salt-api/issues/164, tgt_type has changed to expr_form job_id = local.cmd_async(str(target), cmd, arg=args, kwarg=kwargs, tgt_type=str(tgt_type)) - log.info("ret from local.cmd_async is {}".format(job_id)) + log.info('ret from local.cmd_async is {}'.format(job_id)) return job_id def start(token, control=False, - trigger="!", + trigger='!', groups=None, groups_pillar_name=None, fire_all=False, @@ -761,7 +763,7 @@ def start(token, if (not token) or (not token.startswith('xoxb')): time.sleep(2) # don't respawn too quickly - log.error("Slack bot token not found, bailing...") + log.error('Slack bot token not found, bailing...') raise UserWarning('Slack Engine bot token not configured') try: @@ -769,4 +771,4 @@ def start(token, message_generator = client.generate_triggered_messages(token, trigger, groups, groups_pillar_name) 
client.run_commands_from_slack_async(message_generator, fire_all, tag, control) except Exception: - raise Exception("{}".format(traceback.format_exc())) + raise Exception('{}'.format(traceback.format_exc())) From 0acf0ffd69cac1f0b00d51f5f54005891110081f Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Wed, 6 Sep 2017 07:17:16 -0700 Subject: [PATCH 393/639] Swapping list() for []. --- salt/utils/minions.py | 54 +++++++++++++++++++++---------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/salt/utils/minions.py b/salt/utils/minions.py index 83d1b9af9f..2349699025 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -202,7 +202,7 @@ class CkMinions(object): Return the minions found by looking via globs ''' return {'minions': fnmatch.filter(self._pki_minions(), expr), - 'missing': list()} + 'missing': []} def _check_list_minions(self, expr, greedy): # pylint: disable=unused-argument ''' @@ -220,7 +220,7 @@ class CkMinions(object): ''' reg = re.compile(expr) return {'minions': [m for m in self._pki_minions() if reg.match(m)], - 'missing': list()} + 'missing': []} def _pki_minions(self): ''' @@ -268,8 +268,8 @@ class CkMinions(object): elif cache_enabled: minions = list_cached_minions() else: - return {'minions': list(), - 'missing': list()} + return {'minions': [], + 'missing': []} if cache_enabled: if greedy: @@ -278,7 +278,7 @@ class CkMinions(object): cminions = minions if not cminions: return {'minions': minions, - 'missing': list()} + 'missing': []} minions = set(minions) for id_ in cminions: if greedy and id_ not in minions: @@ -297,7 +297,7 @@ class CkMinions(object): minions.remove(id_) minions = list(minions) return {'minions': minions, - 'missing': list()} + 'missing': []} def _check_grain_minions(self, expr, delimiter, greedy): ''' @@ -352,8 +352,8 @@ class CkMinions(object): elif cache_enabled: minions = self.cache.list('minions') else: - return {'minions': list(), - 'missing': list()} + return {'minions': 
[], + 'missing': []} if cache_enabled: if greedy: @@ -362,7 +362,7 @@ class CkMinions(object): cminions = minions if cminions is None: return {'minions': minions, - 'missing': list()} + 'missing': []} tgt = expr try: @@ -374,8 +374,8 @@ class CkMinions(object): tgt = ipaddress.ip_network(tgt) except: # pylint: disable=bare-except log.error('Invalid IP/CIDR target: {0}'.format(tgt)) - return {'minions': list(), - 'missing': list()} + return {'minions': [], + 'missing': []} proto = 'ipv{0}'.format(tgt.version) minions = set(minions) @@ -397,7 +397,7 @@ class CkMinions(object): minions.remove(id_) return {'minions': list(minions), - 'missing': list()} + 'missing': []} def _check_range_minions(self, expr, greedy): ''' @@ -423,13 +423,13 @@ class CkMinions(object): if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_)): mlist.append(fn_) return {'minions': mlist, - 'missing': list()} + 'missing': []} elif cache_enabled: return {'minions': self.cache.list('minions'), - 'missing': list()} + 'missing': []} else: - return {'minions': list(), - 'missing': list()} + return {'minions': [], + 'missing': []} def _check_compound_pillar_exact_minions(self, expr, delimiter, greedy): ''' @@ -452,7 +452,7 @@ class CkMinions(object): ''' if not isinstance(expr, six.string_types) and not isinstance(expr, (list, tuple)): log.error('Compound target that is neither string, list nor tuple') - return {'minions': list(), 'missing': list()} + return {'minions': [], 'missing': []} minions = set(self._pki_minions()) log.debug('minions: {0}'.format(minions)) @@ -488,7 +488,7 @@ class CkMinions(object): if results: if results[-1] == '(' and word in ('and', 'or'): log.error('Invalid beginning operator after "(": {0}'.format(word)) - return {'minions': list(), 'missing': list()} + return {'minions': [], 'missing': []} if word == 'not': if not results[-1] in ('&', '|', '('): results.append('&') @@ -508,7 +508,7 @@ class CkMinions(object): log.error('Invalid 
compound expr (unexpected ' 'right parenthesis): {0}' .format(expr)) - return {'minions': list(), 'missing': list()} + return {'minions': [], 'missing': []} results.append(word) unmatched.pop() if unmatched and unmatched[-1] == '-': @@ -517,7 +517,7 @@ class CkMinions(object): else: # Won't get here, unless oper is added log.error('Unhandled oper in compound expr: {0}' .format(expr)) - return {'minions': list(), 'missing': list()} + return {'minions': [], 'missing': []} else: # seq start with oper, fail if word == 'not': @@ -533,13 +533,13 @@ class CkMinions(object): 'Expression may begin with' ' binary operator: {0}'.format(word) ) - return {'minions': list(), 'missing': list()} + return {'minions': [], 'missing': []} elif target_info and target_info['engine']: if 'N' == target_info['engine']: # Nodegroups should already be expanded/resolved to other engines log.error('Detected nodegroup expansion failure of "{0}"'.format(word)) - return {'minions': list(), 'missing': list()} + return {'minions': [], 'missing': []} engine = ref.get(target_info['engine']) if not engine: # If an unknown engine is called at any time, fail out @@ -550,7 +550,7 @@ class CkMinions(object): word, ) ) - return {'minions': list(), 'missing': list()} + return {'minions': [], 'missing': []} engine_args = [target_info['pattern']] if target_info['engine'] in ('G', 'P', 'I', 'J'): @@ -583,10 +583,10 @@ class CkMinions(object): return {'minions': minions, 'missing': missing} except Exception: log.error('Invalid compound target: {0}'.format(expr)) - return {'minions': list(), 'missing': list()} + return {'minions': [], 'missing': []} return {'minions': list(minions), - 'missing': list()} + 'missing': []} def connected_ids(self, subset=None, show_ipv4=False, include_localhost=False): ''' @@ -638,7 +638,7 @@ class CkMinions(object): for fn_ in salt.utils.isorted(os.listdir(os.path.join(self.opts['pki_dir'], self.acc))): if not fn_.startswith('.') and 
os.path.isfile(os.path.join(self.opts['pki_dir'], self.acc, fn_)): mlist.append(fn_) - return {'minions': mlist, 'missing': list()} + return {'minions': mlist, 'missing': []} def check_minions(self, expr, @@ -670,7 +670,7 @@ class CkMinions(object): log.exception( 'Failed matching available minions with {0} pattern: {1}' .format(tgt_type, expr)) - _res = {'minions': list(), 'missing': list()} + _res = {'minions': [], 'missing': []} return _res def _expand_matching(self, auth_entry): From 889a556e85cedfcf11555ee19ade4fc48c725105 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 6 Sep 2017 10:40:37 -0500 Subject: [PATCH 394/639] modules/nfs3: linting --- salt/modules/nfs3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/nfs3.py b/salt/modules/nfs3.py index 996dfec88a..0d69202210 100644 --- a/salt/modules/nfs3.py +++ b/salt/modules/nfs3.py @@ -147,6 +147,6 @@ def reload_exports(): ret['stderr'] = output['stderr'] # exportfs always returns 0, so retcode is useless # We will consider it an error if stderr is nonempty - ret['result'] = output['stderr'] == "" + ret['result'] = output['stderr'] == '' return ret From 0ce48eecd7b59f454e4fb676f8fc4f32077bb329 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Wed, 6 Sep 2017 11:03:02 -0500 Subject: [PATCH 395/639] states/nfs_export: add __virtual__() --- salt/states/nfs_export.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 33bf129cf2..52237c32d6 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -55,8 +55,23 @@ To ensure an NFS export is absent: - name: '/srv/nfs' ''' +import salt.utils.path +def __virtual__(): + ''' + Only work with nfs tools installed + ''' + cmd = 'exportfs' + if salt.utils.path.which(cmd): + return bool(cmd) + + return( + False, + 'The nfs_exports state module failed to load: ' + 'the exportfs binary is not in the path' + ) + def present(name, clients=None, 
hosts=None, From ec9f29cb564d0743c0a566f5637f70018511192e Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 6 Sep 2017 11:46:03 -0400 Subject: [PATCH 396/639] Changed inspect.getargspec to salt.utils.args.get_function_argspec --- salt/pillar/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index 76ae1c41de..2840df935e 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -25,6 +25,7 @@ import salt.utils.url import salt.utils.cache import salt.utils.crypt import salt.utils.dictupdate +import salt.utils.args from salt.exceptions import SaltClientError from salt.template import compile_template from salt.utils.dictupdate import merge @@ -841,7 +842,7 @@ class Pillar(object): Builds actual pillar data structure and updates the ``pillar`` variable ''' ext = None - args = inspect.getargspec(self.ext_pillars[key]).args + args = salt.utils.args.get_function_argspec(self.ext_pillars[key]).args if isinstance(val, dict): if ('extra_minion_data' in args) and self.extra_minion_data: From 33feda7a5b145653d1b112431cc2ccb017915577 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 6 Sep 2017 12:09:54 -0400 Subject: [PATCH 397/639] Mocked salt.utils.args.get_function_argspec instead of inspect.getargspec in all external pillar data tests --- tests/unit/test_pillar.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/unit/test_pillar.py b/tests/unit/test_pillar.py index e48dce0923..4479164232 100644 --- a/tests/unit/test_pillar.py +++ b/tests/unit/test_pillar.py @@ -81,7 +81,7 @@ class PillarTestCase(TestCase): mock_ext_pillar_func})): pillar = salt.pillar.Pillar(opts, {}, 'mocked-minion', 'dev') # ext pillar function doesn't have the extra_minion_data arg - with patch('inspect.getargspec', + with patch('salt.utils.args.get_function_argspec', MagicMock(return_value=MagicMock(args=[]))): 
pillar._external_pillar_data('fake_pillar', {'arg': 'foo'}, 'fake_ext_pillar') @@ -90,7 +90,7 @@ class PillarTestCase(TestCase): arg='foo') # ext pillar function has the extra_minion_data arg mock_ext_pillar_func.reset_mock() - with patch('inspect.getargspec', + with patch('salt.utils.args.get_function_argspec', MagicMock(return_value=MagicMock(args=['extra_minion_data']))): pillar._external_pillar_data('fake_pillar', {'arg': 'foo'}, 'fake_ext_pillar') @@ -121,7 +121,7 @@ class PillarTestCase(TestCase): mock_ext_pillar_func})): pillar = salt.pillar.Pillar(opts, {}, 'mocked-minion', 'dev') # ext pillar function doesn't have the extra_minion_data arg - with patch('inspect.getargspec', + with patch('salt.utils.args.get_function_argspec', MagicMock(return_value=MagicMock(args=[]))): pillar._external_pillar_data('fake_pillar', ['foo'], 'fake_ext_pillar') @@ -130,7 +130,7 @@ class PillarTestCase(TestCase): 'foo') # ext pillar function has the extra_minion_data arg mock_ext_pillar_func.reset_mock() - with patch('inspect.getargspec', + with patch('salt.utils.args.get_function_argspec', MagicMock(return_value=MagicMock(args=['extra_minion_data']))): pillar._external_pillar_data('fake_pillar', ['foo'], 'fake_ext_pillar') @@ -161,7 +161,7 @@ class PillarTestCase(TestCase): mock_ext_pillar_func})): pillar = salt.pillar.Pillar(opts, {}, 'mocked-minion', 'dev') # ext pillar function doesn't have the extra_minion_data arg - with patch('inspect.getargspec', + with patch('salt.utils.args.get_function_argspec', MagicMock(return_value=MagicMock(args=[]))): pillar._external_pillar_data('fake_pillar', 'fake_val', 'fake_ext_pillar') @@ -169,7 +169,7 @@ class PillarTestCase(TestCase): 'fake_pillar', 'fake_val') # ext pillar function has the extra_minion_data arg mock_ext_pillar_func.reset_mock() - with patch('inspect.getargspec', + with patch('salt.utils.args.get_function_argspec', MagicMock(return_value=MagicMock(args=['extra_minion_data']))): pillar._external_pillar_data('fake_pillar', 
'fake_val', 'fake_ext_pillar') @@ -200,7 +200,7 @@ class PillarTestCase(TestCase): pillar = salt.pillar.Pillar(opts, {}, 'mocked-minion', 'dev', extra_minion_data={'fake_key': 'foo'}) # ext pillar function doesn't have the extra_minion_data arg - with patch('inspect.getargspec', + with patch('salt.utils.args.get_function_argspec', MagicMock(return_value=MagicMock(args=[]))): pillar._external_pillar_data('fake_pillar', {'arg': 'foo'}, 'fake_ext_pillar') @@ -208,7 +208,7 @@ class PillarTestCase(TestCase): 'mocked-minion', 'fake_pillar', arg='foo') # ext pillar function has the extra_minion_data arg mock_ext_pillar_func.reset_mock() - with patch('inspect.getargspec', + with patch('salt.utils.args.get_function_argspec', MagicMock(return_value=MagicMock(args=['extra_minion_data']))): pillar._external_pillar_data('fake_pillar', {'arg': 'foo'}, 'fake_ext_pillar') @@ -240,7 +240,7 @@ class PillarTestCase(TestCase): pillar = salt.pillar.Pillar(opts, {}, 'mocked-minion', 'dev', extra_minion_data={'fake_key': 'foo'}) # ext pillar function doesn't have the extra_minion_data arg - with patch('inspect.getargspec', + with patch('salt.utils.args.get_function_argspec', MagicMock(return_value=MagicMock(args=[]))): pillar._external_pillar_data('fake_pillar', ['bar'], 'fake_ext_pillar') @@ -248,7 +248,7 @@ class PillarTestCase(TestCase): 'mocked-minion', 'fake_pillar', 'bar') # ext pillar function has the extra_minion_data arg mock_ext_pillar_func.reset_mock() - with patch('inspect.getargspec', + with patch('salt.utils.args.get_function_argspec', MagicMock(return_value=MagicMock(args=['extra_minion_data']))): pillar._external_pillar_data('fake_pillar', ['bar'], 'fake_ext_pillar') @@ -280,7 +280,7 @@ class PillarTestCase(TestCase): pillar = salt.pillar.Pillar(opts, {}, 'mocked-minion', 'dev', extra_minion_data={'fake_key': 'foo'}) # ext pillar function doesn't have the extra_minion_data arg - with patch('inspect.getargspec', + with patch('salt.utils.args.get_function_argspec', 
MagicMock(return_value=MagicMock(args=[]))): pillar._external_pillar_data('fake_pillar', 'bar', 'fake_ext_pillar') @@ -288,7 +288,7 @@ class PillarTestCase(TestCase): 'mocked-minion', 'fake_pillar', 'bar') # ext pillar function has the extra_minion_data arg mock_ext_pillar_func.reset_mock() - with patch('inspect.getargspec', + with patch('salt.utils.args.get_function_argspec', MagicMock(return_value=MagicMock(args=['extra_minion_data']))): pillar._external_pillar_data('fake_pillar', 'bar', 'fake_ext_pillar') From 281e47185398b781cf63d192e25ab989d2b8ee4a Mon Sep 17 00:00:00 2001 From: Sergey Kizunov Date: Wed, 6 Sep 2017 11:25:25 -0500 Subject: [PATCH 398/639] Fix system.set_system_time when no hw clock is present If a hardware clock is not present, then the `cmd.run_all` call in `has_settable_hwclock` will return a non-zero retcode. If `ignore_retcode` is not set to True in that call, then it will set `__context__['retcode']` to that code which will cause the job to appear as if it failed (due to non-zero retcode) even though it didn't really fail. 
Signed-off-by: Sergey Kizunov --- salt/modules/system.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/salt/modules/system.py b/salt/modules/system.py index c397ad2f27..fe8fc0c699 100644 --- a/salt/modules/system.py +++ b/salt/modules/system.py @@ -168,7 +168,10 @@ def has_settable_hwclock(): salt '*' system.has_settable_hwclock ''' if salt.utils.which_bin(['hwclock']) is not None: - res = __salt__['cmd.run_all'](['hwclock', '--test', '--systohc'], python_shell=False) + res = __salt__['cmd.run_all']( + ['hwclock', '--test', '--systohc'], python_shell=False, + output_loglevel='quiet', ignore_retcode=True + ) return res['retcode'] == 0 return False From 60d80e7cda85bed6660f5c4ee2ee91d769c20746 Mon Sep 17 00:00:00 2001 From: Aneesh Agrawal Date: Mon, 21 Aug 2017 20:52:35 +0000 Subject: [PATCH 399/639] Add utils function to merge state returns This makes it possible to merge the state return for a subresource into a main state return. This is useful for configuring e.g. alarms to go along with a resource. --- salt/utils/state.py | 58 ++++++++ tests/unit/utils/test_state.py | 243 +++++++++++++++++++++++++++++++++ 2 files changed, 301 insertions(+) diff --git a/salt/utils/state.py b/salt/utils/state.py index 292558a16b..3251e6b3bd 100644 --- a/salt/utils/state.py +++ b/salt/utils/state.py @@ -153,3 +153,61 @@ def check_result(running, recurse=False, highstate=None): if not ret: break return ret + + +def merge_subreturn(original_return, sub_return, subkey=None): + ''' + Update an existing state return (`original_return`) in place + with another state return (`sub_return`), i.e. for a subresource. + + Returns: + dict: The updated state return. + + The existing state return does not need to have all the required fields, + as this is meant to be called from the internals of a state function, + but any existing data will be kept and respected. 
+ + It is important after using this function to check the return value + to see if it is False, in which case the main state should return. + Prefer to check `_ret['result']` instead of `ret['result']`, + as the latter field may not yet be populated. + + Code Example: + + .. code-block:: python + def state_func(name, config, alarm=None): + ret = {'name': name, 'comment': '', 'changes': {}} + if alarm: + _ret = __states__['subresource.managed'](alarm) + __utils__['state.merge_subreturn'](ret, _ret) + if _ret['result'] is False: + return ret + ''' + if not subkey: + subkey = sub_return['name'] + + if sub_return['result'] is False: + # True or None stay the same + original_return['result'] = sub_return['result'] + + sub_comment = sub_return['comment'] + if not isinstance(sub_comment, list): + sub_comment = [sub_comment] + original_return.setdefault('comment', []) + if isinstance(original_return['comment'], list): + original_return['comment'].extend(sub_comment) + else: + if original_return['comment']: + # Skip for empty original comments + original_return['comment'] += u'\n' + original_return['comment'] += u'\n'.join(sub_comment) + + if sub_return['changes']: # changes always exists + original_return.setdefault('changes', {}) + original_return['changes'][subkey] = sub_return['changes'] + + if sub_return.get('pchanges'): # pchanges may or may not exist + original_return.setdefault('pchanges', {}) + original_return['pchanges'][subkey] = sub_return['pchanges'] + + return original_return diff --git a/tests/unit/utils/test_state.py b/tests/unit/utils/test_state.py index 2da5554f28..18a7c2c9af 100644 --- a/tests/unit/utils/test_state.py +++ b/tests/unit/utils/test_state.py @@ -5,6 +5,8 @@ Unit Tests for functions located in salt.utils.state.py. 
# Import python libs from __future__ import absolute_import +import copy +import textwrap # Import Salt libs from salt.ext import six @@ -443,3 +445,244 @@ class StateUtilTestCase(TestCase): msg='{0} failed'.format(test)) test_valid_false_state = {'host1': {'test_state': {'result': False}}} self.assertFalse(salt.utils.state.check_result(test_valid_false_state)) + + +class UtilStateMergeSubreturnTestcase(TestCase): + ''' + Test cases for salt.utils.state.merge_subreturn function. + ''' + main_ret = { + 'name': 'primary', + # result may be missing, as primarysalt.utils.state is still in progress + 'comment': '', + 'changes': {}, + } + sub_ret = { + 'name': 'secondary', + 'result': True, + 'comment': '', + 'changes': {}, + } + + def test_merge_result(self): + # result not created if not needed + for no_effect_result in [True, None]: + m = copy.deepcopy(self.main_ret) + s = copy.deepcopy(self.sub_ret) + s['result'] = no_effect_result + res = salt.utils.state.merge_subreturn(m, s) + self.assertNotIn('result', res) + + # False subresult is propagated to existing result + for original_result in [True, None, False]: + m = copy.deepcopy(self.main_ret) + m['result'] = original_result + s = copy.deepcopy(self.sub_ret) + s['result'] = False + res = salt.utils.state.merge_subreturn(m, s) + self.assertFalse(res['result']) + + # False result cannot be overriden + for any_result in [True, None, False]: + m = copy.deepcopy(self.main_ret) + m['result'] = False + s = copy.deepcopy(self.sub_ret) + s['result'] = any_result + res = salt.utils.state.merge_subreturn(m, s) + self.assertFalse(res['result']) + + def test_merge_changes(self): + # The main changes dict should always already exist, + # and there should always be a changes dict in the secondary. 
+ primary_changes = {'old': None, 'new': 'my_resource'} + secondary_changes = {'old': None, 'new': ['alarm-1', 'alarm-2']} + + # No changes case + m = copy.deepcopy(self.main_ret) + s = copy.deepcopy(self.sub_ret) + res = salt.utils.state.merge_subreturn(m, s) + self.assertDictEqual(res['changes'], {}) + + # New changes don't get rid of existing changes + m = copy.deepcopy(self.main_ret) + m['changes'] = copy.deepcopy(primary_changes) + s = copy.deepcopy(self.sub_ret) + s['changes'] = copy.deepcopy(secondary_changes) + res = salt.utils.state.merge_subreturn(m, s) + self.assertDictEqual(res['changes'], { + 'old': None, + 'new': 'my_resource', + 'secondary': secondary_changes, + }) + + # The subkey parameter is respected + m = copy.deepcopy(self.main_ret) + m['changes'] = copy.deepcopy(primary_changes) + s = copy.deepcopy(self.sub_ret) + s['changes'] = copy.deepcopy(secondary_changes) + res = salt.utils.state.merge_subreturn(m, s, subkey='alarms') + self.assertDictEqual(res['changes'], { + 'old': None, + 'new': 'my_resource', + 'alarms': secondary_changes, + }) + + def test_merge_pchanges(self): + primary_pchanges = {'old': None, 'new': 'my_resource'} + secondary_pchanges = {'old': None, 'new': ['alarm-1', 'alarm-2']} + + # Neither main nor sub pchanges case + m = copy.deepcopy(self.main_ret) + s = copy.deepcopy(self.sub_ret) + res = salt.utils.state.merge_subreturn(m, s) + self.assertNotIn('pchanges', res) + + # No main pchanges, sub pchanges + m = copy.deepcopy(self.main_ret) + s = copy.deepcopy(self.sub_ret) + s['pchanges'] = copy.deepcopy(secondary_pchanges) + res = salt.utils.state.merge_subreturn(m, s) + self.assertDictEqual(res['pchanges'], { + 'secondary': secondary_pchanges + }) + + # Main pchanges, no sub pchanges + m = copy.deepcopy(self.main_ret) + m['pchanges'] = copy.deepcopy(primary_pchanges) + s = copy.deepcopy(self.sub_ret) + res = salt.utils.state.merge_subreturn(m, s) + self.assertDictEqual(res['pchanges'], primary_pchanges) + + # Both main and sub 
pchanges, new pchanges don't affect existing ones + m = copy.deepcopy(self.main_ret) + m['pchanges'] = copy.deepcopy(primary_pchanges) + s = copy.deepcopy(self.sub_ret) + s['pchanges'] = copy.deepcopy(secondary_pchanges) + res = salt.utils.state.merge_subreturn(m, s) + self.assertDictEqual(res['pchanges'], { + 'old': None, + 'new': 'my_resource', + 'secondary': secondary_pchanges, + }) + + # The subkey parameter is respected + m = copy.deepcopy(self.main_ret) + m['pchanges'] = copy.deepcopy(primary_pchanges) + s = copy.deepcopy(self.sub_ret) + s['pchanges'] = copy.deepcopy(secondary_pchanges) + res = salt.utils.state.merge_subreturn(m, s, subkey='alarms') + self.assertDictEqual(res['pchanges'], { + 'old': None, + 'new': 'my_resource', + 'alarms': secondary_pchanges, + }) + + def test_merge_comments(self): + main_comment_1 = 'First primary comment.' + main_comment_2 = 'Second primary comment.' + sub_comment_1 = 'First secondary comment,\nwhich spans two lines.' + sub_comment_2 = 'Second secondary comment: {0}'.format( + 'some error\n And a traceback', + ) + final_comment = textwrap.dedent('''\ + First primary comment. + Second primary comment. + First secondary comment, + which spans two lines. 
+ Second secondary comment: some error + And a traceback + '''.rstrip()) + + # Joining two strings + m = copy.deepcopy(self.main_ret) + m['comment'] = main_comment_1 + u'\n' + main_comment_2 + s = copy.deepcopy(self.sub_ret) + s['comment'] = sub_comment_1 + u'\n' + sub_comment_2 + res = salt.utils.state.merge_subreturn(m, s) + self.assertMultiLineEqual(res['comment'], final_comment) + + # Joining string and a list + m = copy.deepcopy(self.main_ret) + m['comment'] = main_comment_1 + u'\n' + main_comment_2 + s = copy.deepcopy(self.sub_ret) + s['comment'] = [sub_comment_1, sub_comment_2] + res = salt.utils.state.merge_subreturn(m, s) + self.assertMultiLineEqual(res['comment'], final_comment) + + # For tests where output is a list, + # also test that final joined output will match + # Joining list and a string + m = copy.deepcopy(self.main_ret) + m['comment'] = [main_comment_1, main_comment_2] + s = copy.deepcopy(self.sub_ret) + s['comment'] = sub_comment_1 + u'\n' + sub_comment_2 + res = salt.utils.state.merge_subreturn(m, s) + self.assertEqual(res['comment'], [ + main_comment_1, + main_comment_2, + sub_comment_1 + u'\n' + sub_comment_2, + ]) + self.assertMultiLineEqual(u'\n'.join(res['comment']), final_comment) + + # Joining two lists + m = copy.deepcopy(self.main_ret) + m['comment'] = [main_comment_1, main_comment_2] + s = copy.deepcopy(self.sub_ret) + s['comment'] = [sub_comment_1, sub_comment_2] + res = salt.utils.state.merge_subreturn(m, s) + self.assertEqual(res['comment'], [ + main_comment_1, + main_comment_2, + sub_comment_1, + sub_comment_2, + ]) + self.assertMultiLineEqual(u'\n'.join(res['comment']), final_comment) + + def test_merge_empty_comments(self): + # Since the primarysalt.utils.state is in progress, + # the main comment may be empty, either '' or []. + # Note that [''] is a degenerate case and should never happen, + # hence the behavior is left unspecified in that case. 
+ # The secondary comment should never be empty, + # because thatsalt.utils.state has already returned, + # so we leave the behavior unspecified in that case. + sub_comment_1 = 'Secondary comment about changes:' + sub_comment_2 = 'A diff that goes with the previous comment' + # No contributions from primary + final_comment = sub_comment_1 + u'\n' + sub_comment_2 + + # Joining empty string and a string + m = copy.deepcopy(self.main_ret) + m['comment'] = '' + s = copy.deepcopy(self.sub_ret) + s['comment'] = sub_comment_1 + u'\n' + sub_comment_2 + res = salt.utils.state.merge_subreturn(m, s) + self.assertEqual(res['comment'], final_comment) + + # Joining empty string and a list + m = copy.deepcopy(self.main_ret) + m['comment'] = '' + s = copy.deepcopy(self.sub_ret) + s['comment'] = [sub_comment_1, sub_comment_2] + res = salt.utils.state.merge_subreturn(m, s) + self.assertEqual(res['comment'], final_comment) + + # For tests where output is a list, + # also test that final joined output will match + # Joining empty list and a string + m = copy.deepcopy(self.main_ret) + m['comment'] = [] + s = copy.deepcopy(self.sub_ret) + s['comment'] = sub_comment_1 + u'\n' + sub_comment_2 + res = salt.utils.state.merge_subreturn(m, s) + self.assertEqual(res['comment'], [final_comment]) + self.assertEqual(u'\n'.join(res['comment']), final_comment) + + # Joining empty list and a list + m = copy.deepcopy(self.main_ret) + m['comment'] = [] + s = copy.deepcopy(self.sub_ret) + s['comment'] = [sub_comment_1, sub_comment_2] + res = salt.utils.state.merge_subreturn(m, s) + self.assertEqual(res['comment'], [sub_comment_1, sub_comment_2]) + self.assertEqual(u'\n'.join(res['comment']), final_comment) From 7c4460164bbe407b539f716f8c9464b6d0ab59f1 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 1 Sep 2017 13:56:39 -0600 Subject: [PATCH 400/639] Fix alternatives for Windows Not sure this is necessary... I don't think alternatives is a thing in Windows. 
Anyway, it uses `__salt__['file.readlink']` instead of `os.readlink` as there is no `os.readlink` in Windows. Modifies the tests to mock `__salt__['file.readlink']` instead of `os.readlink` --- salt/modules/alternatives.py | 2 +- tests/unit/modules/test_alternatives.py | 18 ++++++++---------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/salt/modules/alternatives.py b/salt/modules/alternatives.py index f7a016b150..2f4b60a2b2 100644 --- a/salt/modules/alternatives.py +++ b/salt/modules/alternatives.py @@ -241,4 +241,4 @@ def _read_link(name): Throws an OSError if the link does not exist ''' alt_link_path = '/etc/alternatives/{0}'.format(name) - return os.readlink(alt_link_path) + return __salt__['file.readlink'](alt_link_path) diff --git a/tests/unit/modules/test_alternatives.py b/tests/unit/modules/test_alternatives.py index b018eafa80..b85f25ae12 100644 --- a/tests/unit/modules/test_alternatives.py +++ b/tests/unit/modules/test_alternatives.py @@ -66,30 +66,28 @@ class AlternativesTestCase(TestCase, LoaderModuleMockMixin): ) def test_show_current(self): - with patch('os.readlink') as os_readlink_mock: - os_readlink_mock.return_value = '/etc/alternatives/salt' + mock = MagicMock(return_value='/etc/alternatives/salt') + with patch.dict(alternatives.__salt__, {'file.readlink': mock}): ret = alternatives.show_current('better-world') self.assertEqual('/etc/alternatives/salt', ret) - os_readlink_mock.assert_called_once_with( - '/etc/alternatives/better-world' - ) + mock.assert_called_once_with('/etc/alternatives/better-world') with TestsLoggingHandler() as handler: - os_readlink_mock.side_effect = OSError('Hell was not found!!!') + mock.side_effect = OSError('Hell was not found!!!') self.assertFalse(alternatives.show_current('hell')) - os_readlink_mock.assert_called_with('/etc/alternatives/hell') + mock.assert_called_with('/etc/alternatives/hell') self.assertIn('ERROR:alternative: hell does not exist', handler.messages) def test_check_installed(self): - 
with patch('os.readlink') as os_readlink_mock: - os_readlink_mock.return_value = '/etc/alternatives/salt' + mock = MagicMock(return_value='/etc/alternatives/salt') + with patch.dict(alternatives.__salt__, {'file.readlink': mock}): self.assertTrue( alternatives.check_installed( 'better-world', '/etc/alternatives/salt' ) ) - os_readlink_mock.return_value = False + mock.return_value = False self.assertFalse( alternatives.check_installed( 'help', '/etc/alternatives/salt' From c0d81aa1ce2b4264b2fd015dacf01a3dc66b42aa Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 6 Sep 2017 09:46:03 -0600 Subject: [PATCH 401/639] Use salt.utils.path.readlink instead of __salt__['file.readlink'] --- salt/modules/alternatives.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/modules/alternatives.py b/salt/modules/alternatives.py index 2f4b60a2b2..ba040daf7b 100644 --- a/salt/modules/alternatives.py +++ b/salt/modules/alternatives.py @@ -12,6 +12,7 @@ import logging # Import Salt libs import salt.utils +import salt.utils.path # Import 3rd-party libs import salt.ext.six as six @@ -241,4 +242,4 @@ def _read_link(name): Throws an OSError if the link does not exist ''' alt_link_path = '/etc/alternatives/{0}'.format(name) - return __salt__['file.readlink'](alt_link_path) + return salt.utils.path.readlink(alt_link_path) From 3ef8d714cb5637836182ce31aeba24b2ce564357 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 6 Sep 2017 13:02:50 -0600 Subject: [PATCH 402/639] Fix unit tests to mock salt.utils.path.readlink --- tests/unit/modules/test_alternatives.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/unit/modules/test_alternatives.py b/tests/unit/modules/test_alternatives.py index b85f25ae12..8651bdd4f0 100644 --- a/tests/unit/modules/test_alternatives.py +++ b/tests/unit/modules/test_alternatives.py @@ -18,6 +18,7 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch # Import salt libs import 
salt.modules.alternatives as alternatives +import salt.utils.path @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -67,7 +68,7 @@ class AlternativesTestCase(TestCase, LoaderModuleMockMixin): def test_show_current(self): mock = MagicMock(return_value='/etc/alternatives/salt') - with patch.dict(alternatives.__salt__, {'file.readlink': mock}): + with patch('salt.utils.path.readlink', mock): ret = alternatives.show_current('better-world') self.assertEqual('/etc/alternatives/salt', ret) mock.assert_called_once_with('/etc/alternatives/better-world') @@ -81,7 +82,7 @@ class AlternativesTestCase(TestCase, LoaderModuleMockMixin): def test_check_installed(self): mock = MagicMock(return_value='/etc/alternatives/salt') - with patch.dict(alternatives.__salt__, {'file.readlink': mock}): + with patch('salt.utils.path.readlink', mock): self.assertTrue( alternatives.check_installed( 'better-world', '/etc/alternatives/salt' From a909813fa5e4773da6041bf99187af61fdf2d441 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 6 Sep 2017 15:56:24 -0600 Subject: [PATCH 403/639] Remove unused import (lint) --- tests/unit/modules/test_alternatives.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/unit/modules/test_alternatives.py b/tests/unit/modules/test_alternatives.py index 8651bdd4f0..8dd0cde28d 100644 --- a/tests/unit/modules/test_alternatives.py +++ b/tests/unit/modules/test_alternatives.py @@ -18,7 +18,6 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch # Import salt libs import salt.modules.alternatives as alternatives -import salt.utils.path @skipIf(NO_MOCK, NO_MOCK_REASON) From 51af8f875719e2b00bc560df42df98ddf181ec6e Mon Sep 17 00:00:00 2001 From: Ken Jordan Date: Wed, 6 Sep 2017 16:25:53 -0600 Subject: [PATCH 404/639] Fix mine_interval phrasing in default file --- salt/config/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 9f69900f65..e53a10e5bf 100644 --- 
a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -313,7 +313,7 @@ VALID_OPTS = { # Whether or not scheduled mine updates should be accompanied by a job return for the job cache 'mine_return_job': bool, - # Schedule a mine update every n number of seconds + # The number of minutes between mine updates. 'mine_interval': int, # The ipc strategy. (i.e., sockets versus tcp, etc) From 1e94d0ac3a4d539cbf58ccb1bb4042856fcf646a Mon Sep 17 00:00:00 2001 From: Nicole Thomas Date: Thu, 7 Sep 2017 09:21:42 -0400 Subject: [PATCH 405/639] Lint: Remove trailing whitespace --- salt/config/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index e53a10e5bf..39898a711d 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -313,7 +313,7 @@ VALID_OPTS = { # Whether or not scheduled mine updates should be accompanied by a job return for the job cache 'mine_return_job': bool, - # The number of minutes between mine updates. + # The number of minutes between mine updates. 'mine_interval': int, # The ipc strategy. (i.e., sockets versus tcp, etc) From 3f19b247f3ad3625811977dbfe1eb88feafcbc3e Mon Sep 17 00:00:00 2001 From: Nicole Thomas Date: Thu, 7 Sep 2017 11:51:39 -0400 Subject: [PATCH 406/639] Add handler.messages back in for test comparison --- tests/unit/modules/virtualenv_test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/unit/modules/virtualenv_test.py b/tests/unit/modules/virtualenv_test.py index 3122a7c343..6ef2aff69f 100644 --- a/tests/unit/modules/virtualenv_test.py +++ b/tests/unit/modules/virtualenv_test.py @@ -114,7 +114,8 @@ class VirtualenvTestCase(TestCase): self.assertIn( 'INFO:--never-download was deprecated in 1.10.0, ' 'but reimplemented in 14.0.0. If this feature is needed, ' - 'please install a supported virtualenv version.' 
+ 'please install a supported virtualenv version.', + handler.messages ) def test_issue_6031_multiple_extra_search_dirs(self): From 3c58717c5881ec4ecc50f0c699f8ddd355c35569 Mon Sep 17 00:00:00 2001 From: Sergey Kizunov Date: Fri, 4 Aug 2017 11:51:21 -0500 Subject: [PATCH 407/639] Add config option to ensure jid is unique Having a jid that is occasionally not unique (eg two processes creating jobs at about the same time) causes problems in my usage due to job tracking assuming unique jids. Add a config option called `unique_jid` that will enforce unique jids, but will change the jid format by appending an underscore and the process id. Signed-off-by: Sergey Kizunov --- salt/cli/caller.py | 2 +- salt/client/mixins.py | 4 ++-- salt/config/__init__.py | 7 +++++++ salt/daemons/masterapi.py | 4 ++-- salt/master.py | 2 +- salt/modules/state.py | 2 +- salt/returners/carbon_return.py | 2 +- salt/returners/cassandra_cql_return.py | 2 +- salt/returners/cassandra_return.py | 2 +- salt/returners/couchbase_return.py | 2 +- salt/returners/couchdb_return.py | 2 +- salt/returners/django_return.py | 2 +- salt/returners/elasticsearch_return.py | 2 +- salt/returners/etcd_return.py | 2 +- salt/returners/influxdb_return.py | 2 +- salt/returners/local_cache.py | 2 +- salt/returners/memcache_return.py | 2 +- salt/returners/mongo_future_return.py | 2 +- salt/returners/mongo_return.py | 2 +- salt/returners/mysql.py | 2 +- salt/returners/odbc.py | 2 +- salt/returners/pgjsonb.py | 2 +- salt/returners/postgres.py | 2 +- salt/returners/postgres_local_cache.py | 2 +- salt/returners/redis_return.py | 2 +- salt/returners/sentry_return.py | 2 +- salt/returners/smtp_return.py | 2 +- salt/returners/sqlite3_return.py | 2 +- salt/returners/syslog_return.py | 2 +- salt/states/module.py | 4 ++-- salt/utils/jid.py | 22 ++++++++++++++++------ salt/utils/job.py | 2 +- salt/utils/schedule.py | 2 +- tests/unit/states/test_saltmod.py | 4 ++-- tests/unit/utils/test_utils.py | 8 +++++++- 35 files changed, 66 
insertions(+), 43 deletions(-) diff --git a/salt/cli/caller.py b/salt/cli/caller.py index 75025ea473..5c04bab11c 100644 --- a/salt/cli/caller.py +++ b/salt/cli/caller.py @@ -157,7 +157,7 @@ class BaseCaller(object): ''' ret = {} fun = self.opts['fun'] - ret['jid'] = salt.utils.jid.gen_jid() + ret['jid'] = salt.utils.jid.gen_jid(self.opts) proc_fn = os.path.join( salt.minion.get_proc_dir(self.opts['cachedir']), ret['jid'] diff --git a/salt/client/mixins.py b/salt/client/mixins.py index 4cdf9c92eb..7ddaa4d97b 100644 --- a/salt/client/mixins.py +++ b/salt/client/mixins.py @@ -299,7 +299,7 @@ class SyncClientMixin(object): # this is not to clutter the output with the module loading # if we have a high debug level. self.mminion # pylint: disable=W0104 - jid = low.get(u'__jid__', salt.utils.jid.gen_jid()) + jid = low.get(u'__jid__', salt.utils.jid.gen_jid(self.opts)) tag = low.get(u'__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix)) data = {u'fun': u'{0}.{1}'.format(self.client, fun), @@ -512,7 +512,7 @@ class AsyncClientMixin(object): def _gen_async_pub(self, jid=None): if jid is None: - jid = salt.utils.jid.gen_jid() + jid = salt.utils.jid.gen_jid(self.opts) tag = salt.utils.event.tagify(jid, prefix=self.tag_prefix) return {u'tag': tag, u'jid': jid} diff --git a/salt/config/__init__.py b/salt/config/__init__.py index a51be4b3e4..210d55f2d3 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -418,6 +418,11 @@ VALID_OPTS = { # Tell the client to display the jid when a job is published 'show_jid': bool, + # Ensure that a generated jid is always unique. If this is set, the jid + # format is different due to an underscore and process id being appended + # to the jid. + 'unique_jid': bool, + # Tells the highstate outputter to show successful states. False will omit successes. 
'state_verbose': bool, @@ -1198,6 +1203,7 @@ DEFAULT_MINION_OPTS = { 'gitfs_ref_types': ['branch', 'tag', 'sha'], 'gitfs_refspecs': _DFLT_REFSPECS, 'gitfs_disable_saltenv_mapping': False, + 'unique_jid': False, 'hash_type': 'sha256', 'disable_modules': [], 'disable_returners': [], @@ -1442,6 +1448,7 @@ DEFAULT_MASTER_OPTS = { 'hgfs_saltenv_blacklist': [], 'show_timeout': True, 'show_jid': False, + 'unique_jid': False, 'svnfs_remotes': [], 'svnfs_mountpoint': '', 'svnfs_root': '', diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py index 3b0438f3b8..15247dcc54 100644 --- a/salt/daemons/masterapi.py +++ b/salt/daemons/masterapi.py @@ -718,7 +718,7 @@ class RemoteFuncs(object): Handle the return data sent from the minions ''' # Generate EndTime - endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid()) + endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid(self.opts)) # If the return data is invalid, just ignore it if any(key not in load for key in ('return', 'jid', 'id')): return False @@ -1087,7 +1087,7 @@ class LocalFuncs(object): 'user {1}.').format(auth_type, username))) # Authenticated. Do the job. - jid = salt.utils.jid.gen_jid() + jid = salt.utils.jid.gen_jid(self.opts) fun = load.pop('fun') tag = salt.utils.event.tagify(jid, prefix='wheel') data = {'fun': "wheel.{0}".format(fun), diff --git a/salt/master.py b/salt/master.py index 9869ef37ed..078119ff96 100644 --- a/salt/master.py +++ b/salt/master.py @@ -1776,7 +1776,7 @@ class ClearFuncs(object): # Authorized. Do the job! 
try: - jid = salt.utils.jid.gen_jid() + jid = salt.utils.jid.gen_jid(self.opts) fun = clear_load.pop(u'fun') tag = tagify(jid, prefix=u'wheel') data = {u'fun': u"wheel.{0}".format(fun), diff --git a/salt/modules/state.py b/salt/modules/state.py index 282f27a41c..bd2d90893f 100644 --- a/salt/modules/state.py +++ b/salt/modules/state.py @@ -120,7 +120,7 @@ def _wait(jid): Wait for all previously started state jobs to finish running ''' if jid is None: - jid = salt.utils.jid.gen_jid() + jid = salt.utils.jid.gen_jid(__opts__) states = _prior_running_states(jid) while states: time.sleep(1) diff --git a/salt/returners/carbon_return.py b/salt/returners/carbon_return.py index 10c22b29f4..ba06ad29b6 100644 --- a/salt/returners/carbon_return.py +++ b/salt/returners/carbon_return.py @@ -303,4 +303,4 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) diff --git a/salt/returners/cassandra_cql_return.py b/salt/returners/cassandra_cql_return.py index 2a3b325595..5e7e7c0ff9 100644 --- a/salt/returners/cassandra_cql_return.py +++ b/salt/returners/cassandra_cql_return.py @@ -454,4 +454,4 @@ def prep_jid(nocache, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) diff --git a/salt/returners/cassandra_return.py b/salt/returners/cassandra_return.py index 2cae15bd9f..d2c0e2b06d 100644 --- a/salt/returners/cassandra_return.py +++ b/salt/returners/cassandra_return.py @@ -80,4 +80,4 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary 
to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) diff --git a/salt/returners/couchbase_return.py b/salt/returners/couchbase_return.py index 24c3a9105a..3cfbf44043 100644 --- a/salt/returners/couchbase_return.py +++ b/salt/returners/couchbase_return.py @@ -160,7 +160,7 @@ def prep_jid(nocache=False, passed_jid=None): So do what you have to do to make sure that stays the case ''' if passed_jid is None: - jid = salt.utils.jid.gen_jid() + jid = salt.utils.jid.gen_jid(__opts__) else: jid = passed_jid diff --git a/salt/returners/couchdb_return.py b/salt/returners/couchdb_return.py index d24020db4e..117b2802f4 100644 --- a/salt/returners/couchdb_return.py +++ b/salt/returners/couchdb_return.py @@ -364,7 +364,7 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument diff --git a/salt/returners/django_return.py b/salt/returners/django_return.py index 5d756e6111..8a4517a5ce 100644 --- a/salt/returners/django_return.py +++ b/salt/returners/django_return.py @@ -82,4 +82,4 @@ def prep_jid(nocache=False, passed_jid=None): ''' Do any work necessary to prepare a JID, including sending a custom ID ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) diff --git a/salt/returners/elasticsearch_return.py b/salt/returners/elasticsearch_return.py index 5849c0e0e7..e4ffb20f1e 100644 --- a/salt/returners/elasticsearch_return.py +++ b/salt/returners/elasticsearch_return.py @@ 
-362,7 +362,7 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def save_load(jid, load, minions=None): diff --git a/salt/returners/etcd_return.py b/salt/returners/etcd_return.py index 6582e957e2..5f3d8a2bae 100644 --- a/salt/returners/etcd_return.py +++ b/salt/returners/etcd_return.py @@ -223,4 +223,4 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) diff --git a/salt/returners/influxdb_return.py b/salt/returners/influxdb_return.py index d37958b0e8..e6cafb7cc5 100644 --- a/salt/returners/influxdb_return.py +++ b/salt/returners/influxdb_return.py @@ -328,4 +328,4 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) diff --git a/salt/returners/local_cache.py b/salt/returners/local_cache.py index 85a1c07264..3f36cee80d 100644 --- a/salt/returners/local_cache.py +++ b/salt/returners/local_cache.py @@ -90,7 +90,7 @@ def prep_jid(nocache=False, passed_jid=None, recurse_count=0): log.error(err) raise salt.exceptions.SaltCacheError(err) if passed_jid is None: # this can be a None or an empty string. 
- jid = salt.utils.jid.gen_jid() + jid = salt.utils.jid.gen_jid(__opts__) else: jid = passed_jid diff --git a/salt/returners/memcache_return.py b/salt/returners/memcache_return.py index dd3657da1f..c00dcbdf9b 100644 --- a/salt/returners/memcache_return.py +++ b/salt/returners/memcache_return.py @@ -134,7 +134,7 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def returner(ret): diff --git a/salt/returners/mongo_future_return.py b/salt/returners/mongo_future_return.py index d7e6e1df53..0d9c7328b1 100644 --- a/salt/returners/mongo_future_return.py +++ b/salt/returners/mongo_future_return.py @@ -322,7 +322,7 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def event_return(events): diff --git a/salt/returners/mongo_return.py b/salt/returners/mongo_return.py index a0a0cac8b4..f59c25f9f5 100644 --- a/salt/returners/mongo_return.py +++ b/salt/returners/mongo_return.py @@ -231,7 +231,7 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument diff --git a/salt/returners/mysql.py b/salt/returners/mysql.py index 110c1caf6c..a7bfbed243 100644 --- a/salt/returners/mysql.py +++ b/salt/returners/mysql.py 
@@ -460,7 +460,7 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def _purge_jobs(timestamp): diff --git a/salt/returners/odbc.py b/salt/returners/odbc.py index 7cc9ada0d9..03c114cb10 100644 --- a/salt/returners/odbc.py +++ b/salt/returners/odbc.py @@ -329,4 +329,4 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) diff --git a/salt/returners/pgjsonb.py b/salt/returners/pgjsonb.py index f6af142ac0..dd09d31d78 100644 --- a/salt/returners/pgjsonb.py +++ b/salt/returners/pgjsonb.py @@ -416,4 +416,4 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) diff --git a/salt/returners/postgres.py b/salt/returners/postgres.py index 7fb45fe837..02cbde3ce7 100644 --- a/salt/returners/postgres.py +++ b/salt/returners/postgres.py @@ -381,4 +381,4 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) diff --git a/salt/returners/postgres_local_cache.py b/salt/returners/postgres_local_cache.py index 42957f7dbe..0485fa4fe9 
100644 --- a/salt/returners/postgres_local_cache.py +++ b/salt/returners/postgres_local_cache.py @@ -189,7 +189,7 @@ def _gen_jid(cur): ''' Generate an unique job id ''' - jid = salt.utils.jid.gen_jid() + jid = salt.utils.jid.gen_jid(__opts__) sql = '''SELECT jid FROM jids WHERE jid = %s''' cur.execute(sql, (jid,)) data = cur.fetchall() diff --git a/salt/returners/redis_return.py b/salt/returners/redis_return.py index 140af0d063..5eea5bf782 100644 --- a/salt/returners/redis_return.py +++ b/salt/returners/redis_return.py @@ -312,4 +312,4 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) diff --git a/salt/returners/sentry_return.py b/salt/returners/sentry_return.py index 2eba954764..3b3c5b11eb 100644 --- a/salt/returners/sentry_return.py +++ b/salt/returners/sentry_return.py @@ -170,4 +170,4 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) diff --git a/salt/returners/smtp_return.py b/salt/returners/smtp_return.py index 78dfdc9ef5..f630aa18f6 100644 --- a/salt/returners/smtp_return.py +++ b/salt/returners/smtp_return.py @@ -264,7 +264,7 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def event_return(events): diff --git a/salt/returners/sqlite3_return.py 
b/salt/returners/sqlite3_return.py index 55c6c0b36b..1b2159980d 100644 --- a/salt/returners/sqlite3_return.py +++ b/salt/returners/sqlite3_return.py @@ -303,4 +303,4 @@ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) diff --git a/salt/returners/syslog_return.py b/salt/returners/syslog_return.py index bcafaf614d..f963c751d8 100644 --- a/salt/returners/syslog_return.py +++ b/salt/returners/syslog_return.py @@ -213,4 +213,4 @@ def prep_jid(nocache=False, ''' Do any work necessary to prepare a JID, including sending a custom id ''' - return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid() + return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) diff --git a/salt/states/module.py b/salt/states/module.py index 202999e7d8..a253db9ae9 100644 --- a/salt/states/module.py +++ b/salt/states/module.py @@ -348,7 +348,7 @@ def _call_function(name, returner=None, **kwargs): returners = salt.loader.returners(__opts__, __salt__) if returner in returners: returners[returner]({'id': __opts__['id'], 'ret': mret, - 'fun': name, 'jid': salt.utils.jid.gen_jid()}) + 'fun': name, 'jid': salt.utils.jid.gen_jid(__opts__)}) return mret @@ -495,7 +495,7 @@ def _run(name, **kwargs): 'id': __opts__['id'], 'ret': mret, 'fun': name, - 'jid': salt.utils.jid.gen_jid()} + 'jid': salt.utils.jid.gen_jid(__opts__)} returners = salt.loader.returners(__opts__, __salt__) if kwargs['returner'] in returners: returners[kwargs['returner']](ret_ret) diff --git a/salt/utils/jid.py b/salt/utils/jid.py index 3f4ef296a2..b65293d8d5 100644 --- a/salt/utils/jid.py +++ b/salt/utils/jid.py @@ -9,12 +9,22 @@ import os from salt.ext import six +LAST_JID_DATETIME = None -def gen_jid(): + +def gen_jid(opts): ''' 
Generate a jid ''' - return '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now()) + global LAST_JID_DATETIME # pylint: disable=global-statement + + if not opts.get('unique_jid', False): + return '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now()) + jid_dt = datetime.datetime.now() + if LAST_JID_DATETIME and LAST_JID_DATETIME >= jid_dt: + jid_dt = LAST_JID_DATETIME + datetime.timedelta(microseconds=1) + LAST_JID_DATETIME = jid_dt + return '{0:%Y%m%d%H%M%S%f}_{1}'.format(jid_dt, os.getpid()) def is_jid(jid): @@ -23,10 +33,10 @@ def is_jid(jid): ''' if not isinstance(jid, six.string_types): return False - if len(jid) != 20: + if len(jid) != 20 and (len(jid) <= 21 or jid[20] != '_'): return False try: - int(jid) + int(jid[:20]) return True except ValueError: return False @@ -37,7 +47,7 @@ def jid_to_time(jid): Convert a salt job id into the time when the job was invoked ''' jid = str(jid) - if len(jid) != 20: + if len(jid) != 20 and (len(jid) <= 21 or jid[20] != '_'): return '' year = jid[:4] month = jid[4:6] @@ -45,7 +55,7 @@ def jid_to_time(jid): hour = jid[8:10] minute = jid[10:12] second = jid[12:14] - micro = jid[14:] + micro = jid[14:20] ret = '{0}, {1} {2} {3}:{4}:{5}.{6}'.format(year, months[int(month)], diff --git a/salt/utils/job.py b/salt/utils/job.py index c37e034c32..a10098019a 100644 --- a/salt/utils/job.py +++ b/salt/utils/job.py @@ -18,7 +18,7 @@ def store_job(opts, load, event=None, mminion=None): Store job information using the configured master_job_cache ''' # Generate EndTime - endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid()) + endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid(opts)) # If the return data is invalid, just ignore it if any(key not in load for key in ('return', 'jid', 'id')): return False diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index 61e087e607..672918bb0f 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -764,7 +764,7 @@ class Schedule(object): 'fun': func, 'fun_args': [], 
'schedule': data['name'], - 'jid': salt.utils.jid.gen_jid()} + 'jid': salt.utils.jid.gen_jid(self.opts)} if 'metadata' in data: if isinstance(data['metadata'], dict): diff --git a/tests/unit/states/test_saltmod.py b/tests/unit/states/test_saltmod.py index 66d8966d93..a884e331d0 100644 --- a/tests/unit/states/test_saltmod.py +++ b/tests/unit/states/test_saltmod.py @@ -47,7 +47,7 @@ class SaltmodTestCase(TestCase, LoaderModuleMockMixin): 'transport': 'tcp' }, '__salt__': {'saltutil.cmd': MagicMock()}, - '__orchestration_jid__': salt.utils.jid.gen_jid(), + '__orchestration_jid__': salt.utils.jid.gen_jid({}), '__utils__': utils, } } @@ -298,7 +298,7 @@ class StatemodTests(TestCase, LoaderModuleMockMixin): 'extension_modules': os.path.join(self.tmp_cachedir, 'extmods'), }, '__salt__': {'saltutil.cmd': MagicMock()}, - '__orchestration_jid__': salt.utils.jid.gen_jid() + '__orchestration_jid__': salt.utils.jid.gen_jid({}) } } diff --git a/tests/unit/utils/test_utils.py b/tests/unit/utils/test_utils.py index 7ae386fe63..8fd89fa4aa 100644 --- a/tests/unit/utils/test_utils.py +++ b/tests/unit/utils/test_utils.py @@ -24,6 +24,7 @@ from salt.exceptions import (SaltInvocationError, SaltSystemExit, CommandNotFoun # Import Python libraries import datetime +import os import yaml import zmq from collections import namedtuple @@ -496,8 +497,13 @@ class UtilsTestCase(TestCase): now = datetime.datetime(2002, 12, 25, 12, 00, 00, 00) with patch('datetime.datetime'): datetime.datetime.now.return_value = now - ret = salt.utils.jid.gen_jid() + ret = salt.utils.jid.gen_jid({}) self.assertEqual(ret, '20021225120000000000') + salt.utils.jid.LAST_JID_DATETIME = None + ret = salt.utils.jid.gen_jid({'unique_jid': True}) + self.assertEqual(ret, '20021225120000000000_{0}'.format(os.getpid())) + ret = salt.utils.jid.gen_jid({'unique_jid': True}) + self.assertEqual(ret, '20021225120000000001_{0}'.format(os.getpid())) @skipIf(NO_MOCK, NO_MOCK_REASON) def test_check_or_die(self): From 
1e4691af8a9add70415a93d6f9677bba35b2b72a Mon Sep 17 00:00:00 2001 From: Sergey Kizunov Date: Tue, 6 Jun 2017 09:58:06 -0500 Subject: [PATCH 408/639] Add ability to set minion ID by module Fixes #15389. Add a new configuration option `minion_id_type`. Solution based on configuration option `master_type`. `eval_minion_id_func` based on `salt.minion.eval_master_func`. Signed-off-by: Sergey Kizunov Conflicts: salt/config/__init__.py --- salt/config/__init__.py | 58 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 54 insertions(+), 4 deletions(-) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index a51be4b3e4..4dafde91d9 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -858,6 +858,13 @@ VALID_OPTS = { # Always generate minion id in lowercase. 'minion_id_lowercase': bool, + # The behavior when generating a minion ID. Can specify 'str' or 'func'. + # If 'str' is specified (the default), the 'id' option should be set to + # the minion ID or left unspecified for default minion ID generation + # behavior. If 'func' is specified, the 'id' option should be set to an + # exec module function to run to determine the minion ID. 
+ 'minion_id_type': str, + # If set, the master will sign all publications before they are sent out 'sign_pub_messages': bool, @@ -1299,6 +1306,7 @@ DEFAULT_MINION_OPTS = { 'grains_refresh_every': 0, 'minion_id_caching': True, 'minion_id_lowercase': False, + 'minion_id_type': 'str', 'keysize': 2048, 'transport': 'zeromq', 'auth_timeout': 5, @@ -3336,6 +3344,42 @@ def _cache_id(minion_id, cache_file): log.error('Could not cache minion ID: {0}'.format(exc)) +def eval_minion_id_func(opts): + ''' + Evaluate minion ID function if 'minion_id_type' is 'func' + and return the result + ''' + if '__minion_id_func_evaluated' in opts: + return opts['id'] + + import salt.loader as loader + + # split module and function and try loading the module + mod_fun = opts['id'] + mod, fun = mod_fun.split('.') + if not opts.get('grains'): + # Get grains for use by the module + opts['grains'] = loader.grains(opts) + + try: + minion_id_mod = loader.raw_mod(opts, mod, fun) + if not minion_id_mod: + raise KeyError + # we take whatever the module returns as the minion ID + newid = minion_id_mod[mod_fun]() + if not isinstance(newid, str): + log.error('{0} returned from {1} is not a string'.format( + newid, mod_fun) + ) + sys.exit(salt.defaults.exitcodes.EX_GENERIC) + opts['__minion_id_func_evaluated'] = True + log.info('Evaluated minion ID from module: {0}'.format(mod_fun)) + return newid + except KeyError: + log.error('Failed to load module {0}'.format(mod_fun)) + sys.exit(salt.defaults.exitcodes.EX_GENERIC) + + def get_id(opts, cache_minion_id=False): ''' Guess the id of the minion. @@ -3377,13 +3421,19 @@ def get_id(opts, cache_minion_id=False): log.debug('Guessing ID. 
The id can be explicitly set in {0}' .format(os.path.join(salt.syspaths.CONFIG_DIR, 'minion'))) - newid = salt.utils.network.generate_minion_id() + if opts.get('minion_id_type', 'str') == 'func': + newid = eval_minion_id_func(opts) + else: + newid = salt.utils.network.generate_minion_id() if opts.get('minion_id_lowercase'): newid = newid.lower() log.debug('Changed minion id {0} to lowercase.'.format(newid)) if '__role' in opts and opts.get('__role') == 'minion': - log.debug('Found minion id from generate_minion_id(): {0}'.format(newid)) + if opts.get('minion_id_type', 'str') == 'func': + log.debug('Found minion id from eval_minion_id_func(): {0}'.format(newid)) + else: + log.debug('Found minion id from generate_minion_id(): {0}'.format(newid)) if cache_minion_id and opts.get('minion_id_caching', True): _cache_id(newid, id_cache) is_ipv4 = salt.utils.network.is_ipv4(newid) @@ -3452,7 +3502,7 @@ def apply_minion_config(overrides=None, # No ID provided. Will getfqdn save us? using_ip_for_id = False - if not opts.get('id'): + if not opts.get('id') or opts.get('minion_id_type', 'str') == 'func': if minion_id: opts['id'] = minion_id else: @@ -3626,7 +3676,7 @@ def apply_master_config(overrides=None, defaults=None): opts['ipc_write_buffer'] = 0 using_ip_for_id = False append_master = False - if not opts.get('id'): + if not opts.get('id') or opts.get('minion_id_type', 'str') == 'func': opts['id'], using_ip_for_id = get_id( opts, cache_minion_id=None) From 09d81317fb433c826bec47b8107970e8026d304f Mon Sep 17 00:00:00 2001 From: Sergey Kizunov Date: Thu, 15 Jun 2017 14:05:21 -0500 Subject: [PATCH 409/639] Changes from feedback - Add comment as to why we import inside the function - Have one log.debug that prints out the appropriate function string Signed-off-by: Sergey Kizunov --- salt/config/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 4dafde91d9..45efa96ea8 100644 --- 
a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -3352,6 +3352,7 @@ def eval_minion_id_func(opts): if '__minion_id_func_evaluated' in opts: return opts['id'] + # Import 'salt.loader' here to avoid a circular dependency import salt.loader as loader # split module and function and try loading the module @@ -3430,10 +3431,9 @@ def get_id(opts, cache_minion_id=False): newid = newid.lower() log.debug('Changed minion id {0} to lowercase.'.format(newid)) if '__role' in opts and opts.get('__role') == 'minion': - if opts.get('minion_id_type', 'str') == 'func': - log.debug('Found minion id from eval_minion_id_func(): {0}'.format(newid)) - else: - log.debug('Found minion id from generate_minion_id(): {0}'.format(newid)) + log.debug('Found minion id from {0}(): {1}'.format( + opts.get('minion_id_type') == 'func' and 'eval_minion_id_func' or + 'generate_minion_id', newid)) if cache_minion_id and opts.get('minion_id_caching', True): _cache_id(newid, id_cache) is_ipv4 = salt.utils.network.is_ipv4(newid) From 88ee8ad566e0698cc5fe010b3a52831f51761b14 Mon Sep 17 00:00:00 2001 From: Sergey Kizunov Date: Wed, 5 Jul 2017 16:09:16 -0500 Subject: [PATCH 410/639] Add config option `id_function` Signed-off-by: Sergey Kizunov --- salt/config/__init__.py | 61 ++++++++++++++++++++------------ tests/unit/config/test_config.py | 47 ++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 22 deletions(-) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 45efa96ea8..17ab775fde 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -187,6 +187,16 @@ VALID_OPTS = { # A unique identifier for this daemon 'id': str, + # Use a module function to determine the unique identifier. If this is + # set and 'id' is not set, it will allow invocation of a module function + # to determine the value of 'id'. For simple invocations without function + # arguments, this may be a string that is the function name. 
For + # invocations with function arguments, this may be a dictionary with the + # key being the function name, and the value being an embedded dictionary + # where each key is a function argument name and each value is the + # corresponding argument value. + 'id_function': (dict, str), + # The directory to store all cache files. 'cachedir': str, @@ -858,13 +868,6 @@ VALID_OPTS = { # Always generate minion id in lowercase. 'minion_id_lowercase': bool, - # The behavior when generating a minion ID. Can specify 'str' or 'func'. - # If 'str' is specified (the default), the 'id' option should be set to - # the minion ID or left unspecified for default minion ID generation - # behavior. If 'func' is specified, the 'id' option should be set to an - # exec module function to run to determine the minion ID. - 'minion_id_type': str, - # If set, the master will sign all publications before they are sent out 'sign_pub_messages': bool, @@ -1110,6 +1113,7 @@ DEFAULT_MINION_OPTS = { 'root_dir': salt.syspaths.ROOT_DIR, 'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'minion'), 'id': '', + 'id_function': {}, 'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'minion'), 'append_minionid_config_dirs': [], 'cache_jobs': False, @@ -1306,7 +1310,6 @@ DEFAULT_MINION_OPTS = { 'grains_refresh_every': 0, 'minion_id_caching': True, 'minion_id_lowercase': False, - 'minion_id_type': 'str', 'keysize': 2048, 'transport': 'zeromq', 'auth_timeout': 5, @@ -3344,38 +3347,52 @@ def _cache_id(minion_id, cache_file): log.error('Could not cache minion ID: {0}'.format(exc)) -def eval_minion_id_func(opts): +def eval_id_function(opts): ''' - Evaluate minion ID function if 'minion_id_type' is 'func' - and return the result + Evaluate the function that determines the ID if the 'id_function' + option is set and return the result ''' - if '__minion_id_func_evaluated' in opts: + if opts.get('id'): return opts['id'] # Import 'salt.loader' here to avoid a circular dependency import salt.loader as loader + 
if isinstance(opts['id_function'], str): + mod_fun = opts['id_function'] + fun_kwargs = {} + elif isinstance(opts['id_function'], dict): + mod_fun, fun_kwargs = six.next(six.iteritems(opts['id_function'])) + if fun_kwargs is None: + fun_kwargs = {} + else: + log.error('\'id_function\' option is not a string nor a dictionary') + sys.exit(salt.defaults.exitcodes.EX_GENERIC) + # split module and function and try loading the module - mod_fun = opts['id'] mod, fun = mod_fun.split('.') if not opts.get('grains'): # Get grains for use by the module opts['grains'] = loader.grains(opts) try: - minion_id_mod = loader.raw_mod(opts, mod, fun) - if not minion_id_mod: + id_mod = loader.raw_mod(opts, mod, fun) + if not id_mod: raise KeyError # we take whatever the module returns as the minion ID - newid = minion_id_mod[mod_fun]() + newid = id_mod[mod_fun](**fun_kwargs) if not isinstance(newid, str): log.error('{0} returned from {1} is not a string'.format( newid, mod_fun) ) sys.exit(salt.defaults.exitcodes.EX_GENERIC) - opts['__minion_id_func_evaluated'] = True log.info('Evaluated minion ID from module: {0}'.format(mod_fun)) return newid + except TypeError: + log.error('Function arguments {0} are incorrect for function {1}'.format( + fun_kwargs, mod_fun) + ) + sys.exit(salt.defaults.exitcodes.EX_GENERIC) except KeyError: log.error('Failed to load module {0}'.format(mod_fun)) sys.exit(salt.defaults.exitcodes.EX_GENERIC) @@ -3422,8 +3439,8 @@ def get_id(opts, cache_minion_id=False): log.debug('Guessing ID. 
The id can be explicitly set in {0}' .format(os.path.join(salt.syspaths.CONFIG_DIR, 'minion'))) - if opts.get('minion_id_type', 'str') == 'func': - newid = eval_minion_id_func(opts) + if opts.get('id_function'): + newid = eval_id_function(opts) else: newid = salt.utils.network.generate_minion_id() @@ -3432,7 +3449,7 @@ def get_id(opts, cache_minion_id=False): log.debug('Changed minion id {0} to lowercase.'.format(newid)) if '__role' in opts and opts.get('__role') == 'minion': log.debug('Found minion id from {0}(): {1}'.format( - opts.get('minion_id_type') == 'func' and 'eval_minion_id_func' or + opts.get('id_function') and 'eval_id_function' or 'generate_minion_id', newid)) if cache_minion_id and opts.get('minion_id_caching', True): _cache_id(newid, id_cache) @@ -3502,7 +3519,7 @@ def apply_minion_config(overrides=None, # No ID provided. Will getfqdn save us? using_ip_for_id = False - if not opts.get('id') or opts.get('minion_id_type', 'str') == 'func': + if not opts.get('id'): if minion_id: opts['id'] = minion_id else: @@ -3676,7 +3693,7 @@ def apply_master_config(overrides=None, defaults=None): opts['ipc_write_buffer'] = 0 using_ip_for_id = False append_master = False - if not opts.get('id') or opts.get('minion_id_type', 'str') == 'func': + if not opts.get('id'): opts['id'], using_ip_for_id = get_id( opts, cache_minion_id=None) diff --git a/tests/unit/config/test_config.py b/tests/unit/config/test_config.py index 878d3db4c9..00914ee15d 100644 --- a/tests/unit/config/test_config.py +++ b/tests/unit/config/test_config.py @@ -448,6 +448,30 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin): if os.path.isdir(tempdir): shutil.rmtree(tempdir) + def test_master_id_function(self): + try: + tempdir = tempfile.mkdtemp(dir=TMP) + master_config = os.path.join(tempdir, 'master') + + with salt.utils.fopen(master_config, 'w') as fp_: + fp_.write( + 'id_function:\n' + ' test.echo:\n' + ' text: hello_world\n' + 'root_dir: {0}\n' + 'log_file: 
{1}\n'.format(tempdir, master_config) + ) + + # Let's load the configuration + config = sconfig.master_config(master_config) + + self.assertEqual(config['log_file'], master_config) + # 'master_config' appends '_master' to the ID + self.assertEqual(config['id'], 'hello_world_master') + finally: + if os.path.isdir(tempdir): + shutil.rmtree(tempdir) + def test_minion_file_roots_glob(self): # Config file and stub file_roots. fpath = tempfile.mktemp() @@ -508,6 +532,29 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin): if os.path.isdir(tempdir): shutil.rmtree(tempdir) + def test_minion_id_function(self): + try: + tempdir = tempfile.mkdtemp(dir=TMP) + minion_config = os.path.join(tempdir, 'minion') + + with salt.utils.fopen(minion_config, 'w') as fp_: + fp_.write( + 'id_function:\n' + ' test.echo:\n' + ' text: hello_world\n' + 'root_dir: {0}\n' + 'log_file: {1}\n'.format(tempdir, minion_config) + ) + + # Let's load the configuration + config = sconfig.minion_config(minion_config) + + self.assertEqual(config['log_file'], minion_config) + self.assertEqual(config['id'], 'hello_world') + finally: + if os.path.isdir(tempdir): + shutil.rmtree(tempdir) + def test_syndic_config(self): syndic_conf_path = self.get_config_file_path('syndic') minion_conf_path = self.get_config_file_path('minion') From 9adb3f9c72888197f3f16b6f50dc4c0bd873b772 Mon Sep 17 00:00:00 2001 From: Sergey Kizunov Date: Thu, 6 Jul 2017 06:28:08 -0500 Subject: [PATCH 411/639] Updates due to feedback Signed-off-by: Sergey Kizunov --- salt/config/__init__.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 17ab775fde..8efab6dc1a 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -3347,7 +3347,7 @@ def _cache_id(minion_id, cache_file): log.error('Could not cache minion ID: {0}'.format(exc)) -def eval_id_function(opts): +def call_id_function(opts): ''' Evaluate the function that 
determines the ID if the 'id_function' option is set and return the result @@ -3366,7 +3366,7 @@ def eval_id_function(opts): if fun_kwargs is None: fun_kwargs = {} else: - log.error('\'id_function\' option is not a string nor a dictionary') + log.error('\'id_function\' option is neither a string nor a dictionary') sys.exit(salt.defaults.exitcodes.EX_GENERIC) # split module and function and try loading the module @@ -3381,9 +3381,9 @@ def eval_id_function(opts): raise KeyError # we take whatever the module returns as the minion ID newid = id_mod[mod_fun](**fun_kwargs) - if not isinstance(newid, str): - log.error('{0} returned from {1} is not a string'.format( - newid, mod_fun) + if not isinstance(newid, str) or not newid: + log.error('Function {0} returned value "{1}" of type {2} instead of string'.format( + mod_fun, newid, type(newid)) ) sys.exit(salt.defaults.exitcodes.EX_GENERIC) log.info('Evaluated minion ID from module: {0}'.format(mod_fun)) @@ -3440,7 +3440,7 @@ def get_id(opts, cache_minion_id=False): .format(os.path.join(salt.syspaths.CONFIG_DIR, 'minion'))) if opts.get('id_function'): - newid = eval_id_function(opts) + newid = call_id_function(opts) else: newid = salt.utils.network.generate_minion_id() @@ -3448,9 +3448,12 @@ def get_id(opts, cache_minion_id=False): newid = newid.lower() log.debug('Changed minion id {0} to lowercase.'.format(newid)) if '__role' in opts and opts.get('__role') == 'minion': - log.debug('Found minion id from {0}(): {1}'.format( - opts.get('id_function') and 'eval_id_function' or - 'generate_minion_id', newid)) + if opts.get('id_function'): + log.debug('Found minion id from external function {0}: {1}'.format( + opts['id_function'], newid)) + else: + log.debug('Found minion id from generate_minion_id(): {0}'.format( + newid)) if cache_minion_id and opts.get('minion_id_caching', True): _cache_id(newid, id_cache) is_ipv4 = salt.utils.network.is_ipv4(newid) From d82e406f156bcec7223832d19fe8fca7681032bb Mon Sep 17 00:00:00 2001 From: 
Sergey Kizunov Date: Fri, 28 Jul 2017 17:10:34 -0500 Subject: [PATCH 412/639] Fix loader.py's raw_mod() to look in all module dirs Previously, it wasn't looking in module dirs defined via the `module_dirs` config option. Fix this issue. Signed-off-by: Sergey Kizunov --- salt/loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/loader.py b/salt/loader.py index 1c03e475be..20d9b1339c 100644 --- a/salt/loader.py +++ b/salt/loader.py @@ -270,7 +270,7 @@ def raw_mod(opts, name, functions, mod='modules'): testmod['test.ping']() ''' loader = LazyLoader( - _module_dirs(opts, mod, 'rawmodule'), + _module_dirs(opts, mod, 'module'), opts, tag='rawmodule', virtual_enable=False, From 2d6e8e7a7c7bbfcfbe4aa743e26ff299c77248ef Mon Sep 17 00:00:00 2001 From: spenceation Date: Thu, 7 Sep 2017 15:11:58 -0400 Subject: [PATCH 413/639] Added high availability execution module for PANOS proxies. Fixes XPATH formatting on set commands. Fixes PANOS proxy ping function. --- salt/modules/panos.py | 128 ++++++++++++++++++++++++++++++++++++++---- salt/proxy/panos.py | 2 +- salt/states/panos.py | 24 ++++++++ 3 files changed, 141 insertions(+), 13 deletions(-) diff --git a/salt/modules/panos.py b/salt/modules/panos.py index 6e8ffd556a..5543c18132 100644 --- a/salt/modules/panos.py +++ b/salt/modules/panos.py @@ -499,6 +499,76 @@ def get_ha_config(): return __proxy__['panos.call'](query) +def get_ha_link(): + ''' + Show high-availability link-monitoring state. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_ha_link + + ''' + query = {'type': 'op', + 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_ha_path(): + ''' + Show high-availability path-monitoring state. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_ha_path + + ''' + query = {'type': 'op', + 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_ha_state(): + ''' + Show high-availability state information. + + CLI Example: + + .. 
code-block:: bash + + salt '*' panos.get_ha_state + + ''' + + query = {'type': 'op', + 'cmd': ''} + + return __proxy__['panos.call'](query) + + +def get_ha_transitions(): + ''' + Show high-availability transition statistic information. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_ha_transitions + + ''' + + query = {'type': 'op', + 'cmd': ''} + + return __proxy__['panos.call'](query) + + def get_hostname(): ''' Get the hostname of the device. @@ -856,6 +926,23 @@ def get_platform(): return __proxy__['panos.call'](query) +def get_session_info(): + ''' + Show device session statistics. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_session_info + + ''' + query = {'type': 'op', + 'cmd': ''} + + return __proxy__['panos.call'](query) + + def get_snmp_config(): ''' Get the SNMP configuration from the device. @@ -979,6 +1066,23 @@ def get_system_state(filter=None): return __proxy__['panos.call'](query) +def get_uncommitted_changes(): + ''' + Retrieve a list of all uncommitted changes on the device. + + CLI Example: + + .. code-block:: bash + + salt '*' panos.get_uncommitted_changes + + ''' + query = {'type': 'op', + 'cmd': ''} + + return __proxy__['panos.call'](query) + + def get_users_config(): ''' Get the local administrative user account configuration. 
@@ -1261,7 +1365,7 @@ def set_authentication_profile(profile=None, deploy=False): query = {'type': 'config', 'action': 'set', 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/' - 'authentication-profile/', + 'authentication-profile', 'element': '{0}'.format(profile)} ret.update(__proxy__['panos.call'](query)) @@ -1297,7 +1401,7 @@ def set_hostname(hostname=None, deploy=False): query = {'type': 'config', 'action': 'set', - 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system', 'element': '{0}'.format(hostname)} ret.update(__proxy__['panos.call'](query)) @@ -1337,7 +1441,7 @@ def set_management_icmp(enabled=True, deploy=False): query = {'type': 'config', 'action': 'set', - 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service', 'element': '{0}'.format(value)} ret.update(__proxy__['panos.call'](query)) @@ -1377,7 +1481,7 @@ def set_management_http(enabled=True, deploy=False): query = {'type': 'config', 'action': 'set', - 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service', 'element': '{0}'.format(value)} ret.update(__proxy__['panos.call'](query)) @@ -1417,7 +1521,7 @@ def set_management_https(enabled=True, deploy=False): query = {'type': 'config', 'action': 'set', - 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service', 'element': '{0}'.format(value)} ret.update(__proxy__['panos.call'](query)) @@ -1457,7 +1561,7 @@ def set_management_ocsp(enabled=True, deploy=False): query = {'type': 'config', 'action': 
'set', - 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service', 'element': '{0}'.format(value)} ret.update(__proxy__['panos.call'](query)) @@ -1497,7 +1601,7 @@ def set_management_snmp(enabled=True, deploy=False): query = {'type': 'config', 'action': 'set', - 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service', 'element': '{0}'.format(value)} ret.update(__proxy__['panos.call'](query)) @@ -1537,7 +1641,7 @@ def set_management_ssh(enabled=True, deploy=False): query = {'type': 'config', 'action': 'set', - 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service', 'element': '{0}'.format(value)} ret.update(__proxy__['panos.call'](query)) @@ -1577,7 +1681,7 @@ def set_management_telnet(enabled=True, deploy=False): query = {'type': 'config', 'action': 'set', - 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service/', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service', 'element': '{0}'.format(value)} ret.update(__proxy__['panos.call'](query)) @@ -1770,8 +1874,8 @@ def set_permitted_ip(address=None, deploy=False): query = {'type': 'config', 'action': 'set', - 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/permitted-ip/', - 'element': ''.format(address)} + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/permitted-ip', + 'element': ''.format(address)} ret.update(__proxy__['panos.call'](query)) @@ -1806,7 +1910,7 @@ def set_timezone(tz=None, deploy=False): query = {'type': 'config', 'action': 'set', - 'xpath': 
'/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/timezone/', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/timezone', 'element': '{0}'.format(tz)} ret.update(__proxy__['panos.call'](query)) diff --git a/salt/proxy/panos.py b/salt/proxy/panos.py index 324a2e8ea3..f7fb8f574c 100644 --- a/salt/proxy/panos.py +++ b/salt/proxy/panos.py @@ -402,7 +402,7 @@ def ping(): ''' try: query = {'type': 'op', 'cmd': ''} - if 'result' in call(query)['system']: + if 'system' in call(query): return True else: return False diff --git a/salt/states/panos.py b/salt/states/panos.py index ee30325340..c453cace5c 100644 --- a/salt/states/panos.py +++ b/salt/states/panos.py @@ -54,6 +54,30 @@ commit to the device. panos/commit: panos.commit +Version Specific Configurations +=============================== +Palo Alto devices running different versions will have different supported features and different command structures. In +order to account for this, the proxy module can be leveraged to check if the panos device is at a specific revision +level. + +The proxy['panos.is_required_version'] method will check if a panos device is currently running a version equal or +greater than the passed version. For example, proxy['panos.is_required_version']('7.0.0') would match both 7.1.0 and +8.0.0. + +.. code-block:: yaml + + {% if proxy['panos.is_required_version']('8.0.0') %} + panos/deviceconfig/system/motd-and-banner: + panos.set_config: + - xpath: /config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/motd-and-banner + - value: | + BANNER TEXT + color2 + color18 + yes + - commit: False + {% endif %} + .. 
seealso:: :prox:`Palo Alto Proxy Module ` From 6721afa330477b3f39da6adcce4ddb941e9608d1 Mon Sep 17 00:00:00 2001 From: Heghedus Razvan Date: Thu, 7 Sep 2017 13:13:35 +0300 Subject: [PATCH 414/639] nilrt_ip: Fix DNS Configuring DNS in connman is done by using Nameservers section not Domain section Signed-off-by: Heghedus Razvan --- salt/modules/nilrt_ip.py | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/salt/modules/nilrt_ip.py b/salt/modules/nilrt_ip.py index 18b4470e73..a2450b067d 100644 --- a/salt/modules/nilrt_ip.py +++ b/salt/modules/nilrt_ip.py @@ -183,10 +183,10 @@ def _get_service_info(service): except Exception as exc: log.warning('Unable to get IPv6 {0} for service {1}\n'.format(info, service)) - domains = [] - for x in service_info.get_property('Domains'): - domains.append(str(x)) - data['ipv4']['dns'] = domains + nameservers = [] + for x in service_info.get_property('Nameservers'): + nameservers.append(str(x)) + data['ipv4']['dns'] = nameservers else: data['up'] = False @@ -351,13 +351,13 @@ def set_dhcp_linklocal_all(interface): ipv4['Gateway'] = dbus.String('', variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) - service.set_property('Domains.Configuration', ['']) # reset domains list + service.set_property('Nameservers.Configuration', ['']) # reset nameservers list except Exception as exc: raise salt.exceptions.CommandExecutionError('Couldn\'t set dhcp linklocal for service: {0}\nError: {1}\n'.format(service, exc)) return True -def set_static_all(interface, address, netmask, gateway, domains): +def set_static_all(interface, address, netmask, gateway, nameservers): ''' Configure specified adapter to use ipv4 manual settings @@ -365,7 +365,7 @@ def set_static_all(interface, address, netmask, gateway, domains): :param str address: ipv4 address :param str netmask: ipv4 netmask :param str gateway: ipv4 gateway - :param str domains: list of domains servers separated by spaces + 
:param str nameservers: list of nameservers servers separated by spaces :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool @@ -373,7 +373,7 @@ def set_static_all(interface, address, netmask, gateway, domains): .. code-block:: bash - salt '*' ip.set_static_all interface-label address netmask gateway domains + salt '*' ip.set_static_all interface-label address netmask gateway nameservers ''' service = _interface_to_service(interface) if not service: @@ -381,9 +381,15 @@ def set_static_all(interface, address, netmask, gateway, domains): validate, msg = _validate_ipv4([address, netmask, gateway]) if not validate: raise salt.exceptions.CommandExecutionError(msg) - validate, msg = _space_delimited_list(domains) - if not validate: - raise salt.exceptions.CommandExecutionError(msg) + if nameservers: + validate, msg = _space_delimited_list(nameservers) + if not validate: + raise salt.exceptions.CommandExecutionError(msg) + if not isinstance(nameservers, list): + nameservers = nameservers.split(' ') + service = _interface_to_service(interface) + if not service: + raise salt.exceptions.CommandExecutionError('Invalid interface name: {0}'.format(interface)) service = pyconnman.ConnService(_add_path(service)) ipv4 = service.get_property('IPv4.Configuration') ipv4['Method'] = dbus.String('manual', variant_level=1) @@ -392,10 +398,8 @@ def set_static_all(interface, address, netmask, gateway, domains): ipv4['Gateway'] = dbus.String('{0}'.format(gateway), variant_level=1) try: service.set_property('IPv4.Configuration', ipv4) - if not isinstance(domains, list): - dns = domains.split(' ') - domains = dns - service.set_property('Domains.Configuration', [dbus.String('{0}'.format(d)) for d in domains]) + if nameservers: + service.set_property('Nameservers.Configuration', [dbus.String('{0}'.format(d)) for d in nameservers]) except Exception as exc: raise salt.exceptions.CommandExecutionError('Couldn\'t set manual settings for service: 
{0}\nError: {1}\n'.format(service, exc)) return True From 4a8d7e522ce94961cb08140cdb50b2d30404d276 Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 7 Sep 2017 14:41:56 -0600 Subject: [PATCH 415/639] Fix tests, Use full path to salt.utils.which --- salt/modules/mount.py | 5 ++--- tests/unit/modules/test_mount.py | 25 ++++++++++++++++++------- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/salt/modules/mount.py b/salt/modules/mount.py index 1418bc7673..1749c82ccd 100644 --- a/salt/modules/mount.py +++ b/salt/modules/mount.py @@ -11,7 +11,6 @@ import logging # Import salt libs import salt.utils -from salt.utils import which as _which from salt.exceptions import CommandNotFoundError, CommandExecutionError # Import 3rd-party libs @@ -1114,12 +1113,12 @@ def is_fuse_exec(cmd): salt '*' mount.is_fuse_exec sshfs ''' - cmd_path = _which(cmd) + cmd_path = salt.utils.which(cmd) # No point in running ldd on a command that doesn't exist if not cmd_path: return False - elif not _which('ldd'): + elif not salt.utils.which('ldd'): raise CommandNotFoundError('ldd') out = __salt__['cmd.run']('ldd {0}'.format(cmd_path), python_shell=False) diff --git a/tests/unit/modules/test_mount.py b/tests/unit/modules/test_mount.py index 4f72f28a1a..1e42048d14 100644 --- a/tests/unit/modules/test_mount.py +++ b/tests/unit/modules/test_mount.py @@ -19,7 +19,7 @@ from tests.support.mock import ( # Import Salt Libs import salt.utils -from salt.exceptions import CommandExecutionError +from salt.exceptions import CommandExecutionError, CommandNotFoundError import salt.modules.mount as mount MOCK_SHELL_FILE = 'A B C D F G\n' @@ -242,15 +242,26 @@ class MountTestCase(TestCase, LoaderModuleMockMixin): ''' Returns true if the command passed is a fuse mountable application ''' - with patch.object(salt.utils, 'which', return_value=None): + # Return False if fuse doesn't exist + with patch('salt.utils.which', return_value=None): self.assertFalse(mount.is_fuse_exec('cmd')) - with 
patch.object(salt.utils, 'which', return_value=True): - self.assertFalse(mount.is_fuse_exec('cmd')) + # Return CommandNotFoundError if fuse exists, but ldd doesn't exist + with patch('salt.utils.which', side_effect=[True, False]): + self.assertRaises(CommandNotFoundError, mount.is_fuse_exec, 'cmd') - mock = MagicMock(side_effect=[1, 0]) - with patch.object(salt.utils, 'which', mock): - self.assertFalse(mount.is_fuse_exec('cmd')) + # Return False if fuse exists, ldd exists, but libfuse is not in the + # return + with patch('salt.utils.which', side_effect=[True, True]): + mock = MagicMock(return_value='not correct') + with patch.dict(mount.__salt__, {'cmd.run': mock}): + self.assertFalse(mount.is_fuse_exec('cmd')) + + # Return True if fuse exists, ldd exists, and libfuse is in the return + with patch('salt.utils.which', side_effect=[True, True]): + mock = MagicMock(return_value='contains libfuse') + with patch.dict(mount.__salt__, {'cmd.run': mock}): + self.assertTrue(mount.is_fuse_exec('cmd')) def test_swaps(self): ''' From 6257aa964ad5f2831fffa4799ef2789c7b9fc783 Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 7 Sep 2017 15:18:14 -0600 Subject: [PATCH 416/639] Fix `unit.modules.test_pam` for Windows Mocks os.path.exists --- tests/unit/modules/test_pam.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/unit/modules/test_pam.py b/tests/unit/modules/test_pam.py index 9cb2bc4577..492aabdd90 100644 --- a/tests/unit/modules/test_pam.py +++ b/tests/unit/modules/test_pam.py @@ -34,7 +34,8 @@ class PamTestCase(TestCase): ''' Test if the parsing function works ''' - with patch('salt.utils.fopen', mock_open(read_data=MOCK_FILE)): + with patch('os.path.exists', return_value=True), \ + patch('salt.utils.fopen', mock_open(read_data=MOCK_FILE)): self.assertListEqual(pam.read_file('/etc/pam.d/login'), [{'arguments': [], 'control_flag': 'ok', 'interface': 'ok', 'module': 'ignore'}]) From 8e3e897ee26ab3af3efaf9f8ac9b55db6227aad9 Mon Sep 17 00:00:00 2001 
From: twangboy Date: Thu, 7 Sep 2017 15:27:11 -0600 Subject: [PATCH 417/639] Fix `unit.modules.test_parted` for Windows Mock salt.utils.is_windows to be False so that the __virtual__ tests will run --- tests/unit/modules/test_parted.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/unit/modules/test_parted.py b/tests/unit/modules/test_parted.py index 991c6787a2..abd0b5d91f 100644 --- a/tests/unit/modules/test_parted.py +++ b/tests/unit/modules/test_parted.py @@ -49,21 +49,24 @@ class PartedTestCase(TestCase, LoaderModuleMockMixin): def test_virtual_bails_without_parted(self): '''If parted not in PATH, __virtual__ shouldn't register module''' - with patch('salt.utils.which', lambda exe: not exe == "parted"): + with patch('salt.utils.which', lambda exe: not exe == "parted"),\ + patch('salt.utils.is_windows', return_value=False): ret = parted.__virtual__() err = (False, 'The parted execution module failed to load parted binary is not in the path.') self.assertEqual(err, ret) def test_virtual_bails_without_lsblk(self): '''If lsblk not in PATH, __virtual__ shouldn't register module''' - with patch('salt.utils.which', lambda exe: not exe == "lsblk"): + with patch('salt.utils.which', lambda exe: not exe == "lsblk"),\ + patch('salt.utils.is_windows', return_value=False): ret = parted.__virtual__() err = (False, 'The parted execution module failed to load lsblk binary is not in the path.') self.assertEqual(err, ret) def test_virtual_bails_without_partprobe(self): '''If partprobe not in PATH, __virtual__ shouldn't register module''' - with patch('salt.utils.which', lambda exe: not exe == "partprobe"): + with patch('salt.utils.which', lambda exe: not exe == "partprobe"),\ + patch('salt.utils.is_windows', return_value=False): ret = parted.__virtual__() err = (False, 'The parted execution module failed to load partprobe binary is not in the path.') self.assertEqual(err, ret) From 78e39a1b9dba1cda3a5c829787fdafd4767fbe93 Mon Sep 17 00:00:00 2001 
From: twangboy Date: Thu, 7 Sep 2017 15:34:57 -0600 Subject: [PATCH 418/639] Fix `unit.modules.test_pw_group` for Windows Skip `test_info` and `test_getent` because they require grp which is not available on Windows --- tests/unit/modules/test_pw_group.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/unit/modules/test_pw_group.py b/tests/unit/modules/test_pw_group.py index 3d21bbd43c..07854e0aea 100644 --- a/tests/unit/modules/test_pw_group.py +++ b/tests/unit/modules/test_pw_group.py @@ -18,6 +18,7 @@ from tests.support.mock import ( # Import Salt Libs import salt.modules.pw_group as pw_group +import salt.utils @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -44,6 +45,7 @@ class PwGroupTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(pw_group.__salt__, {'cmd.run_all': mock}): self.assertTrue(pw_group.delete('a')) + @skipIf(salt.utils.is_windows(), 'grp not available on Windows') def test_info(self): ''' Tests to return information about a group @@ -57,6 +59,7 @@ class PwGroupTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(pw_group.grinfo, mock): self.assertDictEqual(pw_group.info('name'), {}) + @skipIf(salt.utils.is_windows(), 'grp not available on Windows') def test_getent(self): ''' Tests for return info on all groups From 531ce8022b1aa05ec31a1420554f3d5812caef58 Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 7 Sep 2017 15:44:35 -0600 Subject: [PATCH 419/639] Fix `unit.modules.test_qemu_nbd` for Windows Use os.sep --- tests/unit/modules/test_qemu_nbd.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/tests/unit/modules/test_qemu_nbd.py b/tests/unit/modules/test_qemu_nbd.py index ec6ec84587..59361c0050 100644 --- a/tests/unit/modules/test_qemu_nbd.py +++ b/tests/unit/modules/test_qemu_nbd.py @@ -80,15 +80,14 @@ class QemuNbdTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(qemu_nbd.__salt__, {'cmd.run': mock}): self.assertEqual(qemu_nbd.init('/srv/image.qcow2'), '') - with 
patch.object(os.path, 'isfile', mock): - with patch.object(glob, 'glob', - MagicMock(return_value=['/dev/nbd0'])): - with patch.dict(qemu_nbd.__salt__, - {'cmd.run': mock, - 'mount.mount': mock, - 'cmd.retcode': MagicMock(side_effect=[1, 0])}): - self.assertDictEqual(qemu_nbd.init('/srv/image.qcow2'), - {'{0}/nbd/nbd0/nbd0'.format(tempfile.gettempdir()): '/dev/nbd0'}) + with patch.object(os.path, 'isfile', mock),\ + patch.object(glob, 'glob', MagicMock(return_value=['/dev/nbd0'])),\ + patch.dict(qemu_nbd.__salt__, + {'cmd.run': mock, + 'mount.mount': mock, + 'cmd.retcode': MagicMock(side_effect=[1, 0])}): + expected = {os.sep.join([tempfile.gettempdir(), 'nbd', 'nbd0', 'nbd0']): '/dev/nbd0'} + self.assertDictEqual(qemu_nbd.init('/srv/image.qcow2'), expected) # 'clear' function tests: 1 From f6da23e1aad54a647b4a4428d505938e1de888bb Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 7 Sep 2017 16:33:30 -0600 Subject: [PATCH 420/639] Properly handle timestamp conversion --- salt/modules/redismod.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/salt/modules/redismod.py b/salt/modules/redismod.py index ee15a45a03..a95e1b9f3f 100644 --- a/salt/modules/redismod.py +++ b/salt/modules/redismod.py @@ -18,6 +18,8 @@ Module to provide redis functionality to Salt # Import Python libs from __future__ import absolute_import from salt.ext.six.moves import zip +from salt.ext import six +from datetime import datetime # Import third party libs try: @@ -513,8 +515,14 @@ def lastsave(host=None, port=None, db=None, password=None): salt '*' redis.lastsave ''' + # Use of %s to get the timestamp is not supported by Python. The reason it + # works is because it's passed to the system strftime which may not support + # it. 
See: https://stackoverflow.com/a/11743262 server = _connect(host, port, db, password) - return int(server.lastsave().strftime("%s")) + if six.PY2: + return int((server.lastsave() - datetime(1970, 1, 1)).total_seconds()) + else: + return int(server.lastsave().timestamp()) def llen(key, host=None, port=None, db=None, password=None): From 6ceb895a843e1188eff4dafe26a3abbd0c3550f3 Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 7 Sep 2017 16:51:10 -0600 Subject: [PATCH 421/639] Use os.path.join for paths --- tests/unit/modules/test_seed.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/tests/unit/modules/test_seed.py b/tests/unit/modules/test_seed.py index 39be4a47cb..ea00a25d90 100644 --- a/tests/unit/modules/test_seed.py +++ b/tests/unit/modules/test_seed.py @@ -47,14 +47,19 @@ class SeedTestCase(TestCase, LoaderModuleMockMixin): ''' Test to update and get the random script to a random place ''' - with patch.dict(seed.__salt__, - {'config.gather_bootstrap_script': MagicMock(return_value='BS_PATH/BS')}): - with patch.object(uuid, 'uuid4', return_value='UUID'): - with patch.object(os.path, 'exists', return_value=True): - with patch.object(os, 'chmod', return_value=None): - with patch.object(shutil, 'copy', return_value=None): - self.assertEqual(seed.prep_bootstrap('MPT'), ('MPT/tmp/UUID/BS', '/tmp/UUID')) - self.assertEqual(seed.prep_bootstrap('/MPT'), ('/MPT/tmp/UUID/BS', '/tmp/UUID')) + with patch.dict(seed.__salt__, {'config.gather_bootstrap_script': MagicMock(return_value=os.path.join('BS_PATH', 'BS'))}),\ + patch.object(uuid, 'uuid4', return_value='UUID'),\ + patch.object(os.path, 'exists', return_value=True),\ + patch.object(os, 'chmod', return_value=None),\ + patch.object(shutil, 'copy', return_value=None): + + expect = (os.path.join('MPT', 'tmp', 'UUID', 'BS'), + os.sep + os.path.join('tmp', 'UUID')) + self.assertEqual(seed.prep_bootstrap('MPT'), expect) + + expect = (os.sep + os.path.join('MPT', 'tmp', 'UUID', 'BS'), 
+ os.sep + os.path.join('tmp', 'UUID')) + self.assertEqual(seed.prep_bootstrap(os.sep + 'MPT'), expect) def test_apply_(self): ''' From 70642e495db3f14d7b4e5db84e5e5ade88b18536 Mon Sep 17 00:00:00 2001 From: Alessandro -oggei- Ogier Date: Thu, 7 Sep 2017 17:04:39 +0200 Subject: [PATCH 422/639] better qemu_static parameter mangle in deboostrap management, tests --- salt/modules/genesis.py | 17 ++++++++++++----- tests/unit/modules/genesis_test.py | 8 +++++--- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/salt/modules/genesis.py b/salt/modules/genesis.py index eceaba5bb5..aab6aebb1e 100644 --- a/salt/modules/genesis.py +++ b/salt/modules/genesis.py @@ -17,10 +17,10 @@ except ImportError: from pipes import quote as _cmd_quote # Import salt libs -import salt.utils import salt.utils.yast import salt.utils.preseed import salt.utils.kickstart +import salt.utils.validate.path import salt.syspaths from salt.exceptions import SaltInvocationError @@ -403,6 +403,11 @@ def _bootstrap_deb( log.error('Required tool debootstrap is not installed.') return False + if static_qemu and not salt.utils.validate.path.is_readable(static_qemu): + log.error('Required tool qemu not ' + 'present/readable at: {0}'.format(static_qemu)) + return False + if isinstance(pkgs, (list, tuple)): pkgs = ','.join(pkgs) if isinstance(exclude_pkgs, (list, tuple)): @@ -427,11 +432,13 @@ def _bootstrap_deb( __salt__['cmd.run'](deb_args, python_shell=False) - __salt__['cmd.run']( - 'cp {qemu} {root}/usr/bin/'.format( - qemu=_cmd_quote(static_qemu), root=_cmd_quote(root) + if static_qemu: + __salt__['cmd.run']( + 'cp {qemu} {root}/usr/bin/'.format( + qemu=_cmd_quote(static_qemu), root=_cmd_quote(root) + ) ) - ) + env = {'DEBIAN_FRONTEND': 'noninteractive', 'DEBCONF_NONINTERACTIVE_SEEN': 'true', 'LC_ALL': 'C', diff --git a/tests/unit/modules/genesis_test.py b/tests/unit/modules/genesis_test.py index 784bb8ad84..303acaefd7 100644 --- a/tests/unit/modules/genesis_test.py +++ 
b/tests/unit/modules/genesis_test.py @@ -97,9 +97,11 @@ class GenesisTestCase(TestCase): 'cmd.run': MagicMock(), 'disk.blkid': MagicMock(return_value={})}): with patch('salt.modules.genesis.salt.utils.which', return_value=True): - param_set['params'].update(common_parms) - self.assertEqual(genesis.bootstrap(**param_set['params']), None) - genesis.__salt__['cmd.run'].assert_any_call(param_set['cmd'], python_shell=False) + with patch('salt.modules.genesis.salt.utils.validate.path.is_readable', + return_value=True): + param_set['params'].update(common_parms) + self.assertEqual(genesis.bootstrap(**param_set['params']), None) + genesis.__salt__['cmd.run'].assert_any_call(param_set['cmd'], python_shell=False) with patch.object(genesis, '_bootstrap_pacman', return_value='A') as pacman_patch: with patch.dict(genesis.__salt__, {'mount.umount': MagicMock(), From 51c7a1ba00441572c69da95cbd9db1118ea5e1f8 Mon Sep 17 00:00:00 2001 From: Alessandro -oggei- Ogier Date: Fri, 8 Sep 2017 10:27:29 +0200 Subject: [PATCH 423/639] only check if static_qemu is_executable() --- salt/modules/genesis.py | 2 +- salt/utils/validate/path.py | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/salt/modules/genesis.py b/salt/modules/genesis.py index aab6aebb1e..24653ddac2 100644 --- a/salt/modules/genesis.py +++ b/salt/modules/genesis.py @@ -403,7 +403,7 @@ def _bootstrap_deb( log.error('Required tool debootstrap is not installed.') return False - if static_qemu and not salt.utils.validate.path.is_readable(static_qemu): + if static_qemu and not salt.utils.validate.path.is_executable(static_qemu): log.error('Required tool qemu not ' 'present/readable at: {0}'.format(static_qemu)) return False diff --git a/salt/utils/validate/path.py b/salt/utils/validate/path.py index 1385b9bbce..87f2d789c7 100644 --- a/salt/utils/validate/path.py +++ b/salt/utils/validate/path.py @@ -64,3 +64,14 @@ def is_readable(path): # The path does not exist return False + + +def 
is_executable(path): + ''' + Check if a given path is executable by the current user. + + :param path: The path to check + :returns: True or False + ''' + + return os.access(path, os.X_OK) From 8d79ee2d49aaaec06134a0271bc15a7a262b0a8f Mon Sep 17 00:00:00 2001 From: Alessandro -oggei- Ogier Date: Wed, 23 Aug 2017 12:30:50 +0200 Subject: [PATCH 424/639] test system group creation --- tests/integration/modules/test_groupadd.py | 76 ++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/tests/integration/modules/test_groupadd.py b/tests/integration/modules/test_groupadd.py index ed75916ba3..9963793ca1 100644 --- a/tests/integration/modules/test_groupadd.py +++ b/tests/integration/modules/test_groupadd.py @@ -11,6 +11,9 @@ from tests.support.helpers import destructiveTest, skip_if_not_root # Import 3rd-party libs from salt.ext.six.moves import range +import os +import grp +from salt import utils @skip_if_not_root @@ -57,6 +60,43 @@ class GroupModuleTest(ModuleCase): for x in range(size) ) + def __get_system_group_gid_range(self): + ''' + Returns (SYS_GID_MIN, SYS_GID_MAX) + ''' + defs_file = '/etc/login.defs' + if os.path.exists(defs_file): + with utils.fopen(defs_file) as defs_fd: + login_defs = dict([x.split() + for x in defs_fd.readlines() + if x.strip() + and not x.strip().startswith('#')]) + else: + login_defs = {'SYS_GID_MIN': 101, + 'SYS_GID_MAX': 999} + + gid_min = login_defs.get('SYS_GID_MIN', 101) + gid_max = login_defs.get('SYS_GID_MAX', + int(login_defs.get('GID_MIN', 1000)) - 1) + + return gid_min, gid_max + + def __get_free_system_gid(self): + ''' + Find a free system gid + ''' + + gid_min, gid_max = self.__get_system_group_gid_range() + + busy_gids = [x.gr_gid + for x in grp.getgrall() + if gid_min <= x.gr_gid <= gid_max] + + # find free system gid + for gid in range(gid_min, gid_max + 1): + if gid not in busy_gids: + return gid + @destructiveTest def test_add(self): ''' @@ -70,6 +110,42 @@ class GroupModuleTest(ModuleCase): #try adding the 
group again self.assertFalse(self.run_function('group.add', [self._group, self._gid])) + @destructiveTest + def test_add_system_group(self): + ''' + Test the add group function with system=True + ''' + + gid_min, gid_max = self.__get_system_group_gid_range() + + # add a new system group + self.assertTrue(self.run_function('group.add', + [self._group, None, True])) + group_info = self.run_function('group.info', [self._group]) + self.assertEqual(group_info['name'], self._group) + self.assertTrue(gid_min <= group_info['gid'] <= gid_max) + #try adding the group again + self.assertFalse(self.run_function('group.add', + [self._group])) + + @destructiveTest + def test_add_system_group_gid(self): + ''' + Test the add group function with system=True and a specific gid + ''' + + gid = self.__get_free_system_gid() + + # add a new system group + self.assertTrue(self.run_function('group.add', + [self._group, gid, True])) + group_info = self.run_function('group.info', [self._group]) + self.assertEqual(group_info['name'], self._group) + self.assertEqual(group_info['gid'], gid) + #try adding the group again + self.assertFalse(self.run_function('group.add', + [self._group, gid])) + @destructiveTest def test_delete(self): ''' From e93a962980e64afa6ba5486e86d1df577d60de3d Mon Sep 17 00:00:00 2001 From: matt Date: Fri, 8 Sep 2017 17:10:07 +0200 Subject: [PATCH 425/639] Fix env_order in state.py Fixes #42165 --- salt/state.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/state.py b/salt/state.py index 44fb73b97d..0f625c8fe2 100644 --- a/salt/state.py +++ b/salt/state.py @@ -3100,7 +3100,7 @@ class BaseHighState(object): Returns: {'saltenv': ['state1', 'state2', ...]} ''' - matches = {} + matches = DefaultOrderedDict(OrderedDict) # pylint: disable=cell-var-from-loop for saltenv, body in six.iteritems(top): if self.opts['environment']: From 54dd4d3ef774e26d92bbd1bdf1f2c835e39d7287 Mon Sep 17 00:00:00 2001 From: Nathan Fish Date: Fri, 8 Sep 2017 11:59:27 -0500 
Subject: [PATCH 426/639] nfs_exports: linting and versionadded --- salt/states/nfs_export.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/states/nfs_export.py b/salt/states/nfs_export.py index 52237c32d6..7f4f488b0a 100644 --- a/salt/states/nfs_export.py +++ b/salt/states/nfs_export.py @@ -3,6 +3,8 @@ Management of NFS exports =============================================== +.. versionadded:: Oxygen + To ensure an NFS export exists: .. code-block:: yaml @@ -55,6 +57,7 @@ To ensure an NFS export is absent: - name: '/srv/nfs' ''' +from __future__ import absolute_import import salt.utils.path @@ -72,6 +75,7 @@ def __virtual__(): 'the exportfs binary is not in the path' ) + def present(name, clients=None, hosts=None, From 496f14a7e7b5fca1e3c84291bf5d94fe20060610 Mon Sep 17 00:00:00 2001 From: Alessandro -oggei- Ogier Date: Fri, 8 Sep 2017 21:37:09 +0200 Subject: [PATCH 427/639] forgot to mock the proper one --- tests/unit/modules/genesis_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/modules/genesis_test.py b/tests/unit/modules/genesis_test.py index 303acaefd7..5fe7c833c7 100644 --- a/tests/unit/modules/genesis_test.py +++ b/tests/unit/modules/genesis_test.py @@ -97,7 +97,7 @@ class GenesisTestCase(TestCase): 'cmd.run': MagicMock(), 'disk.blkid': MagicMock(return_value={})}): with patch('salt.modules.genesis.salt.utils.which', return_value=True): - with patch('salt.modules.genesis.salt.utils.validate.path.is_readable', + with patch('salt.modules.genesis.salt.utils.validate.path.is_executable', return_value=True): param_set['params'].update(common_parms) self.assertEqual(genesis.bootstrap(**param_set['params']), None) From 58378866e53bac9379a66ad1f485d5e6739f3374 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Fri, 8 Sep 2017 14:10:46 -0600 Subject: [PATCH 428/639] make cache dirs when spm starts --- salt/cli/spm.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/cli/spm.py 
b/salt/cli/spm.py index 3d347c80a8..303e5ce65f 100644 --- a/salt/cli/spm.py +++ b/salt/cli/spm.py @@ -14,7 +14,7 @@ from __future__ import absolute_import # Import Salt libs import salt.spm import salt.utils.parsers as parsers -from salt.utils.verify import verify_log +from salt.utils.verify import verify_log, verify_env class SPM(parsers.SPMParser): @@ -29,6 +29,10 @@ class SPM(parsers.SPMParser): ui = salt.spm.SPMCmdlineInterface() self.parse_args() self.setup_logfile_logger() + v_dirs = [ + self.config['cachedir'], + ] + verify_env(v_dirs, self.config['user'],) verify_log(self.config) client = salt.spm.SPMClient(ui, self.config) client.run(self.args) From f85bf8c18f2102e9456f9f52090a4046821be795 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Fri, 8 Sep 2017 15:07:44 -0600 Subject: [PATCH 429/639] Revert "Reduce fileclient.get_file latency by merging _file_find and _file_hash" This reverts commit 94c62388e792884cebc11095db20e3db81fa1348. --- salt/fileclient.py | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/salt/fileclient.py b/salt/fileclient.py index 2b4484211c..fc396fcc56 100644 --- a/salt/fileclient.py +++ b/salt/fileclient.py @@ -1270,10 +1270,10 @@ class RemoteClient(Client): hash_type = self.opts.get('hash_type', 'md5') ret['hsum'] = salt.utils.get_hash(path, form=hash_type) ret['hash_type'] = hash_type - return ret, list(os.stat(path)) + return ret load = {'path': path, 'saltenv': saltenv, - 'cmd': '_file_hash_and_stat'} + 'cmd': '_file_hash'} return self.channel.send(load) def hash_file(self, path, saltenv='base'): @@ -1282,14 +1282,33 @@ class RemoteClient(Client): master file server prepend the path with salt:// otherwise, prepend the file with / for a local file. 
''' - return self.__hash_and_stat_file(path, saltenv)[0] + return self.__hash_and_stat_file(path, saltenv) def hash_and_stat_file(self, path, saltenv='base'): ''' The same as hash_file, but also return the file's mode, or None if no mode data is present. ''' - return self.__hash_and_stat_file(path, saltenv) + hash_result = self.hash_file(path, saltenv) + try: + path = self._check_proto(path) + except MinionError as err: + if not os.path.isfile(path): + return hash_result, None + else: + try: + return hash_result, list(os.stat(path)) + except Exception: + return hash_result, None + load = {'path': path, + 'saltenv': saltenv, + 'cmd': '_file_find'} + fnd = self.channel.send(load) + try: + stat_result = fnd.get('stat') + except AttributeError: + stat_result = None + return hash_result, stat_result def list_env(self, saltenv='base'): ''' From 137962733477d7d71f0ee5d10d6edce2bf57f6b4 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 8 Sep 2017 15:36:52 -0600 Subject: [PATCH 430/639] Fix `unit.cloud.clouds.test_ec2` for Windows Mock instead of create tempfile --- tests/unit/cloud/clouds/test_ec2.py | 41 +++++++++++++---------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/tests/unit/cloud/clouds/test_ec2.py b/tests/unit/cloud/clouds/test_ec2.py index 9ffd74d47b..4f77b14a1b 100644 --- a/tests/unit/cloud/clouds/test_ec2.py +++ b/tests/unit/cloud/clouds/test_ec2.py @@ -2,42 +2,39 @@ # Import Python libs from __future__ import absolute_import -import os -import tempfile # Import Salt Libs from salt.cloud.clouds import ec2 from salt.exceptions import SaltCloudSystemExit # Import Salt Testing Libs -from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf -from tests.support.mock import NO_MOCK, NO_MOCK_REASON +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, PropertyMock @skipIf(NO_MOCK, NO_MOCK_REASON) -class EC2TestCase(TestCase, LoaderModuleMockMixin): +class EC2TestCase(TestCase): ''' Unit 
TestCase for salt.cloud.clouds.ec2 module. ''' - def setup_loader_modules(self): - return {ec2: {}} - def test__validate_key_path_and_mode(self): - with tempfile.NamedTemporaryFile() as f: - key_file = f.name - os.chmod(key_file, 0o644) - self.assertRaises(SaltCloudSystemExit, - ec2._validate_key_path_and_mode, - key_file) - os.chmod(key_file, 0o600) - self.assertTrue(ec2._validate_key_path_and_mode(key_file)) - os.chmod(key_file, 0o400) - self.assertTrue(ec2._validate_key_path_and_mode(key_file)) + # Key file exists + with patch('os.path.exists', return_value=True): + with patch('os.stat') as patched_stat: - # tmp file removed - self.assertRaises(SaltCloudSystemExit, - ec2._validate_key_path_and_mode, - key_file) + type(patched_stat.return_value).st_mode = PropertyMock(return_value=0o644) + self.assertRaises( + SaltCloudSystemExit, ec2._validate_key_path_and_mode, 'key_file') + + type(patched_stat.return_value).st_mode = PropertyMock(return_value=0o600) + self.assertTrue(ec2._validate_key_path_and_mode('key_file')) + + type(patched_stat.return_value).st_mode = PropertyMock(return_value=0o400) + self.assertTrue(ec2._validate_key_path_and_mode('key_file')) + + # Key file does not exist + with patch('os.path.exists', return_value=False): + self.assertRaises( + SaltCloudSystemExit, ec2._validate_key_path_and_mode, 'key_file') From b2cea18d1368cb1df1e53b033f61396209cd6f0f Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 8 Sep 2017 15:53:07 -0600 Subject: [PATCH 431/639] Fix `unit.modules.test_gem` for Windows Mock `salt.utils.is_windows` to return False so the test will run on Windows --- tests/unit/modules/test_gem.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/unit/modules/test_gem.py b/tests/unit/modules/test_gem.py index 14e38da893..b1ff893f7e 100644 --- a/tests/unit/modules/test_gem.py +++ b/tests/unit/modules/test_gem.py @@ -65,7 +65,8 @@ class TestGemModule(TestCase, LoaderModuleMockMixin): with patch.dict(gem.__salt__, 
{'rvm.is_installed': MagicMock(return_value=False), 'rbenv.is_installed': MagicMock(return_value=True), - 'rbenv.do': mock}): + 'rbenv.do': mock}),\ + patch('salt.utils.is_windows', return_value=False): gem._gem(['install', 'rails']) mock.assert_called_once_with( ['gem', 'install', 'rails'], From 90dcf8287cb4c84d23e7609fa58d6964f1811a96 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 8 Sep 2017 17:21:01 -0600 Subject: [PATCH 432/639] Fix `unit.modules.test_hosts` for Windows Fix problem with TmpStringIO Class on Windows. The module uses this class twice in normal operation. In Windows there's an additional run before the real runs where it is opened with mode='w'. This causes the data to be wiped out. So, this sets it to only save the value in the instance to data if it is not empty. Use the windows path to the hosts file in Windows --- tests/unit/modules/test_hosts.py | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/tests/unit/modules/test_hosts.py b/tests/unit/modules/test_hosts.py index 4727e5e585..70456dcdef 100644 --- a/tests/unit/modules/test_hosts.py +++ b/tests/unit/modules/test_hosts.py @@ -16,6 +16,7 @@ from tests.support.mock import ( ) # Import Salt Libs import salt.modules.hosts as hosts +import salt.utils from salt.ext.six.moves import StringIO @@ -92,8 +93,12 @@ class HostsTestCase(TestCase, LoaderModuleMockMixin): ''' Tests true if the alias is set ''' + hosts_file = '/etc/hosts' + if salt.utils.is_windows(): + hosts_file = r'C:\Windows\System32\Drivers\etc\hosts' + with patch('salt.modules.hosts.__get_hosts_filename', - MagicMock(return_value='/etc/hosts')), \ + MagicMock(return_value=hosts_file)), \ patch('os.path.isfile', MagicMock(return_value=False)), \ patch.dict(hosts.__salt__, {'config.option': MagicMock(return_value=None)}): @@ -139,7 +144,16 @@ class HostsTestCase(TestCase, LoaderModuleMockMixin): self.close() def close(self): - data[0] = self.getvalue() + # Don't save unless there's something 
there. In Windows + # the class gets initialized the first time with mode = w + # which sets the initial value to ''. When the class closes + # it clears out data and causes the test to fail. + # I don't know why it get's initialized with a mode of 'w' + # For the purposes of this test data shouldn't be empty + # This is a problem with this class and not with the hosts + # module + if self.getvalue(): + data[0] = self.getvalue() StringIO.close(self) expected = '\n'.join(( @@ -151,6 +165,7 @@ class HostsTestCase(TestCase, LoaderModuleMockMixin): mock_opt = MagicMock(return_value=None) with patch.dict(hosts.__salt__, {'config.option': mock_opt}): self.assertTrue(hosts.set_host('1.1.1.1', ' ')) + self.assertEqual(data[0], expected) # 'rm_host' function tests: 2 @@ -182,9 +197,13 @@ class HostsTestCase(TestCase, LoaderModuleMockMixin): ''' Tests if specified host entry gets added from the hosts file ''' + hosts_file = '/etc/hosts' + if salt.utils.is_windows(): + hosts_file = r'C:\Windows\System32\Drivers\etc\hosts' + with patch('salt.utils.fopen', mock_open()), \ patch('salt.modules.hosts.__get_hosts_filename', - MagicMock(return_value='/etc/hosts')): + MagicMock(return_value=hosts_file)): mock_opt = MagicMock(return_value=None) with patch.dict(hosts.__salt__, {'config.option': mock_opt}): self.assertTrue(hosts.add_host('10.10.10.10', 'Salt1')) From a3b2e191494804eecf0b6722b460b15f4032f0dd Mon Sep 17 00:00:00 2001 From: matt LLVW Date: Mon, 11 Sep 2017 11:58:02 +0200 Subject: [PATCH 433/639] Fix /etc/hosts not being modified when hostname is changed Fixes #42926 --- salt/modules/debian_ip.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/salt/modules/debian_ip.py b/salt/modules/debian_ip.py index 16870cc6ca..28f7e6e2c6 100644 --- a/salt/modules/debian_ip.py +++ b/salt/modules/debian_ip.py @@ -2027,19 +2027,12 @@ def build_network_settings(**settings): # Write settings _write_file_network(network, _DEB_NETWORKING_FILE, True) - # Write hostname 
to /etc/hostname + # Get hostname and domain from opts sline = opts['hostname'].split('.', 1) opts['hostname'] = sline[0] - hostname = '{0}\n' . format(opts['hostname']) current_domainname = current_network_settings['domainname'] current_searchdomain = current_network_settings['searchdomain'] - # Only write the hostname if it has changed - if not opts['hostname'] == current_network_settings['hostname']: - if not ('test' in settings and settings['test']): - # TODO replace wiht a call to network.mod_hostname instead - _write_file_network(hostname, _DEB_HOSTNAME_FILE) - new_domain = False if len(sline) > 1: new_domainname = sline[1] From 1279556873364f0010a0b0cd0299464ecd3ba73d Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 29 Aug 2017 16:51:56 -0400 Subject: [PATCH 434/639] Move salt.utils.istextfile to salt.utils.files.is_text_file Moves the original function to `salt.utils.files.py`, adds a deprecation warning to the original function, and updates all of the istextfile references in salt code. --- salt/modules/file.py | 10 +++--- salt/states/file.py | 12 +++---- salt/utils/__init__.py | 64 ++++++++++++---------------------- salt/utils/files.py | 39 +++++++++++++++++++++ salt/wheel/file_roots.py | 3 +- salt/wheel/pillar_roots.py | 3 +- tests/unit/states/test_file.py | 2 +- 7 files changed, 75 insertions(+), 58 deletions(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index c43a371afe..ce02496976 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -126,8 +126,8 @@ def _binary_replace(old, new): This function should only be run AFTER it has been determined that the files differ. 
''' - old_isbin = not salt.utils.istextfile(old) - new_isbin = not salt.utils.istextfile(new) + old_isbin = not salt.utils.files.is_text_file(old) + new_isbin = not salt.utils.files.is_text_file(new) if any((old_isbin, new_isbin)): if all((old_isbin, new_isbin)): return u'Replace binary file' @@ -1436,7 +1436,7 @@ def comment_line(path, raise SaltInvocationError('File not found: {0}'.format(path)) # Make sure it is a text file - if not salt.utils.istextfile(path): + if not salt.utils.files.is_text_file(path): raise SaltInvocationError( 'Cannot perform string replacements on a binary file: {0}'.format(path)) @@ -2180,7 +2180,7 @@ def replace(path, else: raise SaltInvocationError('File not found: {0}'.format(path)) - if not salt.utils.istextfile(path): + if not salt.utils.files.is_text_file(path): raise SaltInvocationError( 'Cannot perform string replacements on a binary file: {0}' .format(path) @@ -2497,7 +2497,7 @@ def blockreplace(path, 'Only one of append and prepend_if_not_found is permitted' ) - if not salt.utils.istextfile(path): + if not salt.utils.files.is_text_file(path): raise SaltInvocationError( 'Cannot perform string replacements on a binary file: {0}' .format(path) diff --git a/salt/states/file.py b/salt/states/file.py index aacb9e32e5..c304c1a267 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -4366,7 +4366,7 @@ def comment(name, regex, char='#', backup='.bak'): ret['result'] = __salt__['file.search'](name, unanchor_regex, multiline=True) if slines != nlines: - if not salt.utils.istextfile(name): + if not salt.utils.files.is_text_file(name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them @@ -4478,7 +4478,7 @@ def uncomment(name, regex, char='#', backup='.bak'): ) if slines != nlines: - if not salt.utils.istextfile(name): + if not salt.utils.files.is_text_file(name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them @@ -4721,7 +4721,7 @@ def append(name, nlines = 
list(slines) nlines.extend(append_lines) if slines != nlines: - if not salt.utils.istextfile(name): + if not salt.utils.files.is_text_file(name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them @@ -4746,7 +4746,7 @@ def append(name, nlines = nlines.splitlines() if slines != nlines: - if not salt.utils.istextfile(name): + if not salt.utils.files.is_text_file(name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them @@ -4914,7 +4914,7 @@ def prepend(name, if __opts__['test']: nlines = test_lines + slines if slines != nlines: - if not salt.utils.istextfile(name): + if not salt.utils.files.is_text_file(name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them @@ -4957,7 +4957,7 @@ def prepend(name, nlines = nlines.splitlines(True) if slines != nlines: - if not salt.utils.istextfile(name): + if not salt.utils.files.is_text_file(name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 85a70a3396..97d17a5767 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -983,48 +983,6 @@ def arg_lookup(fun, aspec=None): return ret -@jinja_filter('is_text_file') -def istextfile(fp_, blocksize=512): - ''' - Uses heuristics to guess whether the given file is text or binary, - by reading a single block of bytes from the file. - If more than 30% of the chars in the block are non-text, or there - are NUL ('\x00') bytes in the block, assume this is a binary file. - ''' - # Late import to avoid circular import. 
- import salt.utils.files - - int2byte = (lambda x: bytes((x,))) if six.PY3 else chr - text_characters = ( - b''.join(int2byte(i) for i in range(32, 127)) + - b'\n\r\t\f\b') - try: - block = fp_.read(blocksize) - except AttributeError: - # This wasn't an open filehandle, so treat it as a file path and try to - # open the file - try: - with salt.utils.files.fopen(fp_, 'rb') as fp2_: - block = fp2_.read(blocksize) - except IOError: - # Unable to open file, bail out and return false - return False - if b'\x00' in block: - # Files with null bytes are binary - return False - elif not block: - # An empty file is considered a valid text file - return True - try: - block.decode('utf-8') - return True - except UnicodeDecodeError: - pass - - nontext = block.translate(None, text_characters) - return float(len(nontext)) / len(block) <= 0.30 - - @jinja_filter('sorted_ignorecase') def isorted(to_sort): ''' @@ -3046,6 +3004,28 @@ def mkstemp(*args, **kwargs): return salt.utils.files.mkstemp(*args, **kwargs) +@jinja_filter('is_text_file') +def istextfile(fp_, blocksize=512): + ''' + Uses heuristics to guess whether the given file is text or binary, + by reading a single block of bytes from the file. + If more than 30% of the chars in the block are non-text, or there + are NUL ('\x00') bytes in the block, assume this is a binary file. + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.files + + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.istextfile\' detected. This function has been moved ' + 'to \'salt.utils.files.is_text_file\' as of Salt Oxygen. This warning will ' + 'be removed in Salt Neon.' + ) + return salt.utils.files.is_text_file(fp_, blocksize=blocksize) + + def str_version_to_evr(verstring): ''' Split the package version string into epoch, version and release. 
diff --git a/salt/utils/files.py b/salt/utils/files.py index 0d87ab6219..36cde2f7ad 100644 --- a/salt/utils/files.py +++ b/salt/utils/files.py @@ -498,3 +498,42 @@ def safe_filepath(file_path_name): return os.sep.join([drive, path]) else: return path + + +@jinja_filter('is_text_file') +def is_text_file(fp_, blocksize=512): + ''' + Uses heuristics to guess whether the given file is text or binary, + by reading a single block of bytes from the file. + If more than 30% of the chars in the block are non-text, or there + are NUL ('\x00') bytes in the block, assume this is a binary file. + ''' + int2byte = (lambda x: bytes((x,))) if six.PY3 else chr + text_characters = ( + b''.join(int2byte(i) for i in range(32, 127)) + + b'\n\r\t\f\b') + try: + block = fp_.read(blocksize) + except AttributeError: + # This wasn't an open filehandle, so treat it as a file path and try to + # open the file + try: + with fopen(fp_, 'rb') as fp2_: + block = fp2_.read(blocksize) + except IOError: + # Unable to open file, bail out and return false + return False + if b'\x00' in block: + # Files with null bytes are binary + return False + elif not block: + # An empty file is considered a valid text file + return True + try: + block.decode('utf-8') + return True + except UnicodeDecodeError: + pass + + nontext = block.translate(None, text_characters) + return float(len(nontext)) / len(block) <= 0.30 diff --git a/salt/wheel/file_roots.py b/salt/wheel/file_roots.py index 109c8ea79c..8df2eee4b8 100644 --- a/salt/wheel/file_roots.py +++ b/salt/wheel/file_roots.py @@ -8,7 +8,6 @@ from __future__ import absolute_import import os # Import salt libs -import salt.utils import salt.utils.files # Import 3rd-party libs @@ -28,7 +27,7 @@ def find(path, saltenv='base'): if os.path.isfile(full): # Add it to the dict with salt.utils.files.fopen(full, 'rb') as fp_: - if salt.utils.istextfile(fp_): + if salt.utils.files.is_text_file(fp_): ret.append({full: 'txt'}) else: ret.append({full: 'bin'}) diff --git 
a/salt/wheel/pillar_roots.py b/salt/wheel/pillar_roots.py index 9eab69344a..65790e17d9 100644 --- a/salt/wheel/pillar_roots.py +++ b/salt/wheel/pillar_roots.py @@ -9,7 +9,6 @@ from __future__ import absolute_import import os # Import salt libs -import salt.utils import salt.utils.files # Import 3rd-party libs @@ -29,7 +28,7 @@ def find(path, saltenv='base'): if os.path.isfile(full): # Add it to the dict with salt.utils.files.fopen(full, 'rb') as fp_: - if salt.utils.istextfile(fp_): + if salt.utils.files.is_text_file(fp_): ret.append({full: 'txt'}) else: ret.append({full: 'bin'}) diff --git a/tests/unit/states/test_file.py b/tests/unit/states/test_file.py index bac6594652..d27c29120a 100644 --- a/tests/unit/states/test_file.py +++ b/tests/unit/states/test_file.py @@ -1181,7 +1181,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin): ret.update({'name': name}) with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open(read_data=''))): - with patch.object(salt.utils, 'istextfile', mock_f): + with patch.object(salt.utils.files, 'is_text_file', mock_f): with patch.dict(filestate.__opts__, {'test': True}): change = {'diff': 'Replace binary file'} comt = ('File {0} is set to be updated' From e1621f7b2ef08e31668700cafbdd805a1b6810a3 Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 29 Aug 2017 17:41:28 -0400 Subject: [PATCH 435/639] Lint: Add range import from six --- salt/utils/files.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/utils/files.py b/salt/utils/files.py index 36cde2f7ad..1d7068987a 100644 --- a/salt/utils/files.py +++ b/salt/utils/files.py @@ -25,6 +25,7 @@ from salt.utils.decorators.jinja import jinja_filter # Import 3rd-party libs from salt.ext import six +from salt.ext.six.moves import range try: import fcntl HAS_FCNTL = True From eea53a12442864623a4b3c09633904e5154512c7 Mon Sep 17 00:00:00 2001 From: rallytime Date: Fri, 1 Sep 2017 15:19:05 -0400 Subject: [PATCH 436/639] Change salt.utils.files.is_text_files calls to use __utils__ 
--- salt/modules/file.py | 10 +++++----- salt/states/file.py | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index ce02496976..944736f740 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -126,8 +126,8 @@ def _binary_replace(old, new): This function should only be run AFTER it has been determined that the files differ. ''' - old_isbin = not salt.utils.files.is_text_file(old) - new_isbin = not salt.utils.files.is_text_file(new) + old_isbin = not __utils__['files.is_text_file'](old) + new_isbin = not __utils__['files.is_text_file'](new) if any((old_isbin, new_isbin)): if all((old_isbin, new_isbin)): return u'Replace binary file' @@ -1436,7 +1436,7 @@ def comment_line(path, raise SaltInvocationError('File not found: {0}'.format(path)) # Make sure it is a text file - if not salt.utils.files.is_text_file(path): + if not __utils__['files.is_text_file'](path): raise SaltInvocationError( 'Cannot perform string replacements on a binary file: {0}'.format(path)) @@ -2180,7 +2180,7 @@ def replace(path, else: raise SaltInvocationError('File not found: {0}'.format(path)) - if not salt.utils.files.is_text_file(path): + if not __utils__['files.is_text_file'](path): raise SaltInvocationError( 'Cannot perform string replacements on a binary file: {0}' .format(path) @@ -2497,7 +2497,7 @@ def blockreplace(path, 'Only one of append and prepend_if_not_found is permitted' ) - if not salt.utils.files.is_text_file(path): + if not __utils__['files.is_text_file'](path): raise SaltInvocationError( 'Cannot perform string replacements on a binary file: {0}' .format(path) diff --git a/salt/states/file.py b/salt/states/file.py index c304c1a267..c28f1ee926 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -4366,7 +4366,7 @@ def comment(name, regex, char='#', backup='.bak'): ret['result'] = __salt__['file.search'](name, unanchor_regex, multiline=True) if slines != nlines: - if not 
salt.utils.files.is_text_file(name): + if not __utils__['files.is_text_file'](name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them @@ -4478,7 +4478,7 @@ def uncomment(name, regex, char='#', backup='.bak'): ) if slines != nlines: - if not salt.utils.files.is_text_file(name): + if not __utils__['files.is_text_file'](name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them @@ -4721,7 +4721,7 @@ def append(name, nlines = list(slines) nlines.extend(append_lines) if slines != nlines: - if not salt.utils.files.is_text_file(name): + if not __utils__['files.is_text_file'](name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them @@ -4746,7 +4746,7 @@ def append(name, nlines = nlines.splitlines() if slines != nlines: - if not salt.utils.files.is_text_file(name): + if not __utils__['files.is_text_file'](name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them @@ -4914,7 +4914,7 @@ def prepend(name, if __opts__['test']: nlines = test_lines + slines if slines != nlines: - if not salt.utils.files.is_text_file(name): + if not __utils__['files.is_text_file'](name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them @@ -4957,7 +4957,7 @@ def prepend(name, nlines = nlines.splitlines(True) if slines != nlines: - if not salt.utils.files.is_text_file(name): + if not __utils__['files.is_text_file'](name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them From e11aa7e5ef018009243f5b368c9b301b81445425 Mon Sep 17 00:00:00 2001 From: rallytime Date: Tue, 5 Sep 2017 10:37:08 -0400 Subject: [PATCH 437/639] Adjust test mocking to handle __utils__['files.is_text_file'] --- tests/unit/modules/test_file.py | 8 ++++++-- tests/unit/states/test_file.py | 5 +++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/tests/unit/modules/test_file.py b/tests/unit/modules/test_file.py index 
eb440350c1..e4d74f1266 100644 --- a/tests/unit/modules/test_file.py +++ b/tests/unit/modules/test_file.py @@ -14,6 +14,8 @@ from tests.support.unit import TestCase from tests.support.mock import MagicMock, patch # Import Salt libs +import salt.config +import salt.loader import salt.utils.files import salt.modules.file as filemod import salt.modules.config as configmod @@ -45,7 +47,8 @@ class FileReplaceTestCase(TestCase, LoaderModuleMockMixin): 'cachedir': 'tmp', 'grains': {}, }, - '__grains__': {'kernel': 'Linux'} + '__grains__': {'kernel': 'Linux'}, + '__utils__': {'files.is_text_file': MagicMock(return_value=True)}, } } @@ -203,7 +206,8 @@ class FileBlockReplaceTestCase(TestCase, LoaderModuleMockMixin): 'cachedir': 'tmp', 'grains': {}, }, - '__grains__': {'kernel': 'Linux'} + '__grains__': {'kernel': 'Linux'}, + '__utils__': {'files.is_text_file': MagicMock(return_value=True)}, } } diff --git a/tests/unit/states/test_file.py b/tests/unit/states/test_file.py index d27c29120a..7aa2c0d0f7 100644 --- a/tests/unit/states/test_file.py +++ b/tests/unit/states/test_file.py @@ -56,7 +56,8 @@ class TestFileState(TestCase, LoaderModuleMockMixin): }, '__opts__': {'test': False, 'cachedir': ''}, '__instance_id__': '', - '__low__': {} + '__low__': {}, + '__utils__': {}, } } @@ -1181,7 +1182,7 @@ class TestFileState(TestCase, LoaderModuleMockMixin): ret.update({'name': name}) with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open(read_data=''))): - with patch.object(salt.utils.files, 'is_text_file', mock_f): + with patch.dict(filestate.__utils__, {'files.is_text_file': mock_f}): with patch.dict(filestate.__opts__, {'test': True}): change = {'diff': 'Replace binary file'} comt = ('File {0} is set to be updated' From 6a74fec6a62a4f606eb03e40e2d5ae1b0c91c480 Mon Sep 17 00:00:00 2001 From: Daniel Hahler Date: Mon, 11 Sep 2017 15:58:17 +0200 Subject: [PATCH 438/639] boto_elb.register_instances: do not skip instances being unregistered `boto_elb.register_instances` 
currently skips nodes that are in the process of being unregistered, which causes an instance to getting registered again, if registration happens too fast after deregistration. This patch skips instances, where describe-instance-health returns: { "InstanceId": "i-XXX", "State": "InService", "ReasonCode": "N/A", "Description": "Instance deregistration currently in progress." }, The normal state is: { "InstanceId": "i-XXX", "State": "InService", "ReasonCode": "N/A", "Description": "N/A" }, btw: for an instance being in the process of being registered it looks like this: { "InstanceId": "i-XXX", "State": "OutOfService", "ReasonCode": "ELB", "Description": "Instance registration is still in progress." }, Ref: http://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_InstanceState.html --- salt/states/boto_elb.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/states/boto_elb.py b/salt/states/boto_elb.py index 6163c09dfd..5beb01fd37 100644 --- a/salt/states/boto_elb.py +++ b/salt/states/boto_elb.py @@ -517,7 +517,8 @@ def register_instances(name, instances, region=None, key=None, keyid=None, health = __salt__['boto_elb.get_instance_health']( name, region, key, keyid, profile) - nodes = [value['instance_id'] for value in health] + nodes = [value['instance_id'] for value in health + if value['description'] != 'Instance deregistration currently in progress.'] new = [value for value in instances if value not in nodes] if not len(new): msg = 'Instance/s {0} already exist.'.format(str(instances).strip('[]')) From 4a1b889b766559c59ca87e29d606c6a00703b268 Mon Sep 17 00:00:00 2001 From: rajvidhimar Date: Thu, 7 Sep 2017 16:30:18 +0530 Subject: [PATCH 439/639] Fix junos rpc to work with scheduler jobs --- salt/modules/junos.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/modules/junos.py b/salt/modules/junos.py index c6e3e64c6d..f6ae3e4dc4 100644 --- a/salt/modules/junos.py +++ b/salt/modules/junos.py @@ -27,6 +27,7 @@ 
except ImportError: # Import Salt libs import salt.utils.files +from salt.ext import six # Juniper interface libraries # https://github.com/Juniper/py-junos-eznc @@ -176,6 +177,10 @@ def rpc(cmd=None, dest=None, format='xml', **kwargs): if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) + elif '__pub_schedule' in kwargs: + for key, value in six.iteritems(kwargs): + if not key.startswith('__pub_'): + op[key] = value else: op.update(kwargs) op['dev_timeout'] = str(op.pop('timeout', conn.timeout)) From eb91a073d846633cb579d3fbe2440ef3417702d8 Mon Sep 17 00:00:00 2001 From: rallytime Date: Mon, 11 Sep 2017 12:12:33 -0400 Subject: [PATCH 440/639] Reduce the number of days an issue is stale by 30 --- .github/stale.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/stale.yml b/.github/stale.yml index e67b536243..3d5da2f3df 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -1,8 +1,8 @@ # Probot Stale configuration file # Number of days of inactivity before an issue becomes stale -# 1060 is approximately 2 years and 11 months -daysUntilStale: 1060 +# 1030 is approximately 2 years and 10 months +daysUntilStale: 1030 # Number of days of inactivity before a stale issue is closed daysUntilClose: 7 From b5360cf8fdad5b39c2078027888aa226820e9d62 Mon Sep 17 00:00:00 2001 From: Sergey Kizunov Date: Mon, 11 Sep 2017 11:19:59 -0500 Subject: [PATCH 441/639] Add warning to unique_jid option Signed-off-by: Sergey Kizunov --- salt/config/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 210d55f2d3..a6825d1737 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -420,7 +420,8 @@ VALID_OPTS = { # Ensure that a generated jid is always unique. If this is set, the jid # format is different due to an underscore and process id being appended - # to the jid. + # to the jid. 
WARNING: A change to the jid format may break external + applications that depend on the original format. 'unique_jid': bool, # Tells the highstate outputter to show successful states. False will omit successes. From 3fd59ed3690a53b994f4e51a2f836c0917d576fd Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Mon, 11 Sep 2017 11:22:26 -0700 Subject: [PATCH 442/639] Adding a small check to ensure we do not continue to populate kwargs with __pub_ items from the kwargs item. --- salt/utils/schedule.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index 8613d6a36f..a0597a004c 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -842,7 +842,8 @@ class Schedule(object): if argspec.keywords: # this function accepts **kwargs, pack in the publish data for key, val in six.iteritems(ret): - kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val) + if key is not 'kwargs': + kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val) ret['return'] = self.functions[func](*args, **kwargs) From 7c03bc2ee79c10e12fee0895e8ee604bad28dd12 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Mon, 11 Sep 2017 11:23:14 -0700 Subject: [PATCH 443/639] Adding a small check to ensure we do not continue to populate kwargs with __pub_ items from the kwargs item. 
--- salt/utils/schedule.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index 61e087e607..4a0c5e84e3 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -848,7 +848,8 @@ class Schedule(object): if argspec.keywords: # this function accepts **kwargs, pack in the publish data for key, val in six.iteritems(ret): - kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val) + if key is not 'kwargs': + kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val) ret['return'] = self.functions[func](*args, **kwargs) From 059ae3e46c92f2aa0067d09075d6b358030c250d Mon Sep 17 00:00:00 2001 From: root Date: Fri, 1 Sep 2017 12:58:05 -0700 Subject: [PATCH 444/639] Add module to control Pure Storage FlashArray Provides ability to manage volumes, hosts, hostgroup, protection groups and volume snapshots. --- salt/modules/purefa.py | 1241 ++++++++++++++++++++++++++++++++++++++++ salt/utils/__init__.py | 35 ++ 2 files changed, 1276 insertions(+) create mode 100644 salt/modules/purefa.py diff --git a/salt/modules/purefa.py b/salt/modules/purefa.py new file mode 100644 index 0000000000..279cdcd836 --- /dev/null +++ b/salt/modules/purefa.py @@ -0,0 +1,1241 @@ +# -*- coding: utf-8 -*- + +## +# Copyright 2017 Pure Storage Inc +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+''' + +Management of Pure Storage FlashArray + +Installation Prerequisites +-------------------------- +- You will need the ``purestorage`` python package in your python installation + path that is running salt. + + .. code-block:: bash + + pip install purestorage + +:maintainer: Simon Dodsley (simon@purestorage.com) +:maturity: new +:requires: purestorage +:platform: all +''' +from __future__ import absolute_import + +import os +import platform +from datetime import datetime + +# 3rd party modules +# pylint: disable=import-error,no-name-in-module,redefined-builtin +from salt.exceptions import CommandExecutionError +from salt.utils import xor, human_to_bytes +# pylint: enable=import-error,no-name-in-module + + +try: + import purestorage + HAS_PURESTORAGE = True +except ImportError: + HAS_PURESTORAGE = False + +__docformat__ = 'restructuredtext en' + +VERSION = '1.0.0' +USER_AGENT_BASE = 'Salt' + +__virtualname__ = 'purefa' + +# Default symbols to use for passwords. Avoids visually confusing characters. 
+# ~6 bits per symbol +DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1 + 'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O + 'abcdefghijkmnopqrstuvwxyz') # Removed: l + + +def __virtual__(): + ''' + Determine whether or not to load this module + ''' + if HAS_PURESTORAGE: + return __virtualname__ + return (False, 'purefa execution module not loaded: purestorage python library not available.') + + +def _get_system(): + ''' + Get Pure Storage FlashArray configuration + + 1) From the minion config + pure_tags: + fa: + san_ip: management vip or hostname for the FlashArray + api_token: A valid api token for the FlashArray being managed + 2) From environment (PUREFA_IP and PUREFA_API) + 3) From the pillar (PUREFA_IP and PUREFA_API) + + ''' + agent = {'base': USER_AGENT_BASE, + 'class': __name__, + 'version': VERSION, + 'platform': platform.platform() + } + + user_agent = '{base} {class}/{version} ({platform})'.format(**agent) + + try: + array = __opts__['pure_tags']['fa'].get('san_ip') + api = __opts__['pure_tags']['fa'].get('api_token') + if array and api: + system = purestorage.FlashArray(array, api_token=api, user_agent=user_agent) + except (KeyError, NameError, TypeError): + try: + san_ip = os.environ.get('PUREFA_IP') + api_token = os.environ.get('PUREFA_API') + system = purestorage.FlashArray(san_ip, + api_token=api_token, + user_agent=user_agent) + except (ValueError, KeyError, NameError): + try: + system = purestorage.FlashArray(__pillar__['PUREFA_IP'], + api_token=__pillar__['PUREFA_API'], + user_agent=user_agent) + except (KeyError, NameError): + raise CommandExecutionError('No Pure Storage FlashArray credentials found.') + + try: + system.get() + except Exception: + raise CommandExecutionError('Pure Storage FlashArray authentication failed.') + return system + + +def _get_volume(name, array): + '''Private function to check volume''' + try: + return array.get_volume(name) + except purestorage.PureError: + return None + + +def _get_snapshot(name, suffix, array): 
+ '''Private function to check snapshot''' + snapshot = name + '.' + suffix + try: + for snap in array.get_volume(name, snap=True): + if snap['name'] == snapshot: + return snapshot + except purestorage.PureError: + return None + + +def _get_deleted_volume(name, array): + '''Private function to check deleted volume''' + try: + return array.get_volume(name, pending='true') + except purestorage.PureError: + return None + + +def _get_pgroup(name, array): + '''Private function to check protection group''' + pgroup = None + for temp in array.list_pgroups(): + if temp['name'] == name: + pgroup = temp + break + return pgroup + + +def _get_deleted_pgroup(name, array): + '''Private function to check deleted protection group''' + try: + return array.get_pgroup(name, pending='true') + except purestorage.PureError: + return None + + +def _get_hgroup(name, array): + '''Private function to check hostgroup''' + hostgroup = None + for temp in array.list_hgroups(): + if temp['name'] == name: + hostgroup = temp + break + return hostgroup + + +def _get_host(name, array): + '''Private function to check host''' + host = None + for temp in array.list_hosts(): + if temp['name'] == name: + host = temp + break + return host + + +def snap_create(name, suffix=None): + ''' + + Create a volume snapshot on a Pure Storage FlashArray. + + Will return False is volume selected to snap does not exist. + + .. versionadded:: 2017.7.3 + + name : string + name of volume to snapshot + suffix : string + if specificed forces snapshot name suffix. If not specified defaults to timestamp. + + CLI Example: + + .. 
code-block:: bash + + salt '*' pure.snap_create foo + salt '*' pure.snap_create foo suffix=bar + + ''' + array = _get_system() + if suffix is None: + suffix = 'snap-' + str((datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds()) + suffix = suffix.replace('.', '') + if _get_volume(name, array) is not None: + try: + array.create_snapshot(name, suffix=suffix) + return True + except purestorage.PureError: + return False + else: + return False + + +def snap_delete(name, suffix=None, eradicate=False): + ''' + + Delete a volume snapshot on a Pure Storage FlashArray. + + Will return False if selected snapshot does not exist. + + .. versionadded:: 2017.7.3 + + name : string + name of volume + suffix : string + name of snapshot + eradicate : boolean + Eradicate snapshot after deletion if True. Default is False + + CLI Example: + + .. code-block:: bash + + salt '*' pure.snap_delete foo suffix=snap eradicate=True + + ''' + array = _get_system() + if _get_snapshot(name, suffix, array) is not None: + try: + snapname = name + '.' + suffix + array.destroy_volume(snapname) + except purestorage.PureError: + return False + if eradicate is True: + try: + array.eradicate_volume(snapname) + return True + except purestorage.PureError: + return False + else: + return True + else: + return False + + +def snap_eradicate(name, suffix=None): + ''' + + Eradicate a deleted volume snapshot on a Pure Storage FlashArray. + + Will retunr False is snapshot is not in a deleted state. + + .. versionadded:: 2017.7.3 + + name : string + name of volume + suffix : string + name of snapshot + + CLI Example: + + .. code-block:: bash + + salt '*' pure.snap_delete foo suffix=snap eradicate=True + + ''' + array = _get_system() + if _get_snapshot(name, suffix, array) is not None: + snapname = name + '.' 
+ suffix + try: + array.eradicate_volume(snapname) + return True + except purestorage.PureError: + return False + else: + return False + + +def volume_create(name, size=None): + ''' + + Create a volume on a Pure Storage FlashArray. + + Will return False if volume already exists. + + .. versionadded:: 2017.7.3 + + name : string + name of volume (truncated to 63 characters) + size : string + if specificed capacity of volume. If not specified default to 1G. + Refer to Pure Storage documentation for formatting rules. + + CLI Example: + + .. code-block:: bash + + salt '*' pure.volume_create foo + salt '*' pure.volume_create foo size=10T + + ''' + if len(name) > 63: + name = name[0:63] + array = _get_system() + if _get_volume(name, array) is None: + if size is None: + size = '1G' + try: + array.create_volume(name, size) + return True + except purestorage.PureError: + return False + else: + return False + + +def volume_delete(name, eradicate=False): + ''' + + Delete a volume on a Pure Storage FlashArray. + + Will return False if volume doesn't exist is already in a deleted state. + + .. versionadded:: 2017.7.3 + + name : string + name of volume + eradicate : boolean + Eradicate volume after deletion if True. Default is False + + CLI Example: + + .. code-block:: bash + + salt '*' pure.volume_delete foo eradicate=True + + ''' + array = _get_system() + if _get_volume(name, array) is not None: + try: + array.destroy_volume(name) + except purestorage.PureError: + return False + if eradicate is True: + try: + array.eradicate_volume(name) + return True + except purestorage.PureError: + return False + else: + return True + else: + return False + + +def volume_eradicate(name): + ''' + + Eradicate a deleted volume on a Pure Storage FlashArray. + + Will return False is volume is not in a deleted state. + + .. versionadded:: 2017.7.3 + + name : string + name of volume + + CLI Example: + + .. 
code-block:: bash + + salt '*' pure.volume_eradicate foo + + ''' + array = _get_system() + if _get_deleted_volume(name, array) is not None: + try: + array.eradicate_volume(name) + return True + except purestorage.PureError: + return False + else: + return False + + +def volume_extend(name, size): + ''' + + Extend an existing volume on a Pure Storage FlashArray. + + Will return False if new size is less than or equal to existing size. + + .. versionadded:: 2017.7.3 + + name : string + name of volume + size : string + New capacity of volume. + Refer to Pure Storage documentation for formatting rules. + + CLI Example: + + .. code-block:: bash + + salt '*' pure.volume_extend foo 10T + + ''' + array = _get_system() + vol = _get_volume(name, array) + if vol is not None: + if human_to_bytes(size) > vol['size']: + try: + array.extend_volume(name, size) + return True + except purestorage.PureError: + return False + else: + return False + else: + return False + + +def snap_volume_create(name, target, overwrite=False): + ''' + + Create R/W volume from snapshot on a Pure Storage FlashArray. + + Will return False if target volume already exists and + overwrite is not specified, or selected snapshot doesn't exist. + + .. versionadded:: 2017.7.3 + + name : string + name of volume snapshot + target : string + name of clone volume + overwrite : boolean + overwrite clone if already exists (default: False) + + CLI Example: + + .. 
code-block:: bash + + salt '*' pure.snap_volume_create foo.bar clone overwrite=True + + ''' + array = _get_system() + source, suffix = name.split('.') + if _get_snapshot(source, suffix, array) is not None: + if _get_volume(target, array) is None: + try: + array.copy_volume(name, target) + return True + except purestorage.PureError: + return False + else: + if overwrite: + try: + array.copy_volume(name, target, overwrite=overwrite) + return True + except purestorage.PureError: + return False + else: + return False + else: + return False + + +def volume_clone(name, target, overwrite=False): + ''' + + Clone an existing volume on a Pure Storage FlashArray. + + Will return False if source volume doesn't exist, or + target volume already exists and overwrite not specified. + + .. versionadded:: 2017.7.3 + + name : string + name of volume + target : string + name of clone volume + overwrite : boolean + overwrite clone if already exists (default: False) + + CLI Example: + + .. code-block:: bash + + salt '*' pure.volume_clone foo bar overwrite=True + + ''' + array = _get_system() + if _get_volume(name, array) is not None: + if _get_volume(target, array) is None: + try: + array.copy_volume(name, target) + return True + except purestorage.PureError: + return False + else: + if overwrite: + try: + array.copy_volume(name, target, overwrite=overwrite) + return True + except purestorage.PureError: + return False + else: + return False + else: + return False + + +def volume_attach(name, host): + ''' + + Attach a volume to a host on a Pure Storage FlashArray. + + Host and volume must exist or else will return False. + + .. versionadded:: 2017.7.3 + + name : string + name of volume + host : string + name of host + + CLI Example: + + .. 
code-block:: bash + + salt '*' pure.volume_attach foo bar + + ''' + array = _get_system() + if _get_volume(name, array) is not None and _get_host(host, array) is not None: + try: + array.connect_host(host, name) + return True + except purestorage.PureError: + return False + else: + return False + + +def volume_detach(name, host): + ''' + + Detach a volume from a host on a Pure Storage FlashArray. + + Will return False if either host or volume do not exist, or + if selected volume isn't already connected to the host. + + .. versionadded:: 2017.7.3 + + name : string + name of volume + host : string + name of host + + CLI Example: + + .. code-block:: bash + + salt '*' pure.volume_detach foo bar + + ''' + array = _get_system() + if _get_volume(name, array) is None or _get_host(host, array) is None: + return False + elif _get_volume(name, array) is not None and _get_host(host, array) is not None: + try: + array.disconnect_host(host, name) + return True + except purestorage.PureError: + return False + + +def host_create(name, iqn=None, wwn=None): + ''' + + Add a host on a Pure Storage FlashArray. + + Will return False if host already exists, or the iSCSI or + Fibre Channel parameters are not in a valid format. + See Pure Storage FlashArray documentation. + + .. versionadded:: 2017.7.3 + + name : string + name of host (truncated to 63 characters) + iqn : string + iSCSI IQN of host + wwn : string + Fibre Channel WWN of host + + CLI Example: + + .. 
code-block:: bash + + salt '*' pure.host_create foo iqn='' wwn='' + + ''' + array = _get_system() + if len(name) > 63: + name = name[0:63] + if _get_host(name, array) is None: + try: + array.create_host(name) + except purestorage.PureError: + return False + if iqn is not None: + try: + array.set_host(name, addiqnlist=[iqn]) + except purestorage.PureError: + array.delete_host(name) + return False + if wwn is not None: + try: + array.set_host(name, addwwnlist=[wwn]) + except purestorage.PureError: + array.delete_host(name) + return False + else: + return False + + return True + + +def host_update(name, iqn=None, wwn=None): + ''' + + Update a hosts port definitions on a Pure Storage FlashArray. + + Will return False if new port definitions are already in use + by another host, or are not in a valid format. + See Pure Storage FlashArray documentation. + + .. versionadded:: 2017.7.3 + + name : string + name of host + iqn : string + Additional iSCSI IQN of host + wwn : string + Additional Fibre Channel WWN of host + + CLI Example: + + .. code-block:: bash + + salt '*' pure.host_update foo iqn='' wwn='' + + ''' + array = _get_system() + if _get_host(name, array) is not None: + if iqn is not None: + try: + array.set_host(name, addiqnlist=[iqn]) + except purestorage.PureError: + return False + if wwn is not None: + try: + array.set_host(name, addwwnlist=[wwn]) + except purestorage.PureError: + return False + return True + else: + return False + + +def host_delete(name): + ''' + + Delete a host on a Pure Storage FlashArray (detaches all volumes). + + Will return False if the host doesn't exist. + + .. versionadded:: 2017.7.3 + + name : string + name of host + + CLI Example: + + .. 
code-block:: bash + + salt '*' pure.host_delete foo + + ''' + array = _get_system() + if _get_host(name, array) is not None: + for vol in array.list_host_connections(name): + try: + array.disconnect_host(name, vol['vol']) + except purestorage.PureError: + return False + try: + array.delete_host(name) + return True + except purestorage.PureError: + return False + else: + return False + + +def hg_create(name, host=None, volume=None): + ''' + + Create a hostgroup on a Pure Storage FlashArray. + + Will return False if hostgroup already exists, or if + named host or volume do not exist. + + .. versionadded:: 2017.7.3 + + name : string + name of hostgroup (truncated to 63 characters) + host : string + name of host to add to hostgroup + volume : string + name of volume to add to hostgroup + + CLI Example: + + .. code-block:: bash + + salt '*' pure.hg_create foo host=bar volume=vol + + ''' + array = _get_system() + if len(name) > 63: + name = name[0:63] + if _get_hgroup(name, array) is None: + try: + array.create_hgroup(name) + except purestorage.PureError: + return False + if host is not None: + if _get_host(host, array): + try: + array.set_hgroup(name, addhostlist=[host]) + except purestorage.PureError: + return False + else: + hg_delete(name) + return False + if volume is not None: + if _get_volume(volume, array): + try: + array.connect_hgroup(name, volume) + except purestorage.PureError: + hg_delete(name) + return False + else: + hg_delete(name) + return False + return True + else: + return False + + +def hg_update(name, host=None, volume=None): + ''' + + Adds entries to a hostgroup on a Pure Storage FlashArray. + + Will return False is hostgroup doesn't exist, or host + or volume do not exist. + + .. versionadded:: 2017.7.3 + + name : string + name of hostgroup + host : string + name of host to add to hostgroup + volume : string + name of volume to add to hostgroup + + CLI Example: + + .. 
code-block:: bash + + salt '*' pure.hg_update foo host=bar volume=vol + + ''' + array = _get_system() + if _get_hgroup(name, array) is not None: + if host is not None: + if _get_host(host, array): + try: + array.set_hgroup(name, addhostlist=[host]) + except purestorage.PureError: + return False + else: + return False + if volume is not None: + if _get_volume(volume, array): + try: + array.connect_hgroup(name, volume) + except purestorage.PureError: + return False + else: + return False + return True + else: + return False + + +def hg_delete(name): + ''' + + Delete a hostgroup on a Pure Storage FlashArray (removes all volumes and hosts). + + Will return False is hostgroup is already in a deleted state. + + .. versionadded:: 2017.7.3 + + name : string + name of hostgroup + + CLI Example: + + .. code-block:: bash + + salt '*' pure.hg_delete foo + + ''' + array = _get_system() + if _get_hgroup(name, array) is not None: + for vol in array.list_hgroup_connections(name): + try: + array.disconnect_hgroup(name, vol['vol']) + except purestorage.PureError: + return False + host = array.get_hgroup(name) + try: + array.set_hgroup(name, remhostlist=host['hosts']) + array.delete_hgroup(name) + return True + except purestorage.PureError: + return False + else: + return False + + +def hg_remove(name, volume=None, host=None): + ''' + + Remove a host and/or volume from a hostgroup on a Pure Storage FlashArray. + + Will return False is hostgroup does not exist, or named host or volume are + not in the hostgroup. + + .. versionadded:: 2017.7.3 + + name : string + name of hostgroup + volume : string + name of volume to remove from hostgroup + host : string + name of host to remove from hostgroup + + CLI Example: + + .. 
code-block:: bash + + salt '*' pure.hg_remove foo volume=test host=bar + + ''' + array = _get_system() + if _get_hgroup(name, array) is not None: + if volume is not None: + if _get_volume(volume, array): + for temp in array.list_hgroup_connections(name): + if temp['vol'] == volume: + try: + array.disconnect_hgroup(name, volume) + return True + except purestorage.PureError: + return False + return False + else: + return False + if host is not None: + if _get_host(host, array): + temp = _get_host(host, array) + if temp['hgroup'] == name: + try: + array.set_hgroup(name, remhostlist=[host]) + return True + except purestorage.PureError: + return False + else: + return False + else: + return False + if host is None and volume is None: + return False + else: + return False + + +def pg_create(name, hostgroup=None, host=None, volume=None, enabled=True): + ''' + + Create a protection group on a Pure Storage FlashArray. + + Will return False is the following cases: + * Protection Grop already exists + * Protection Group in a deleted state + * More than one type is specified - protection groups are for only + hostgroups, hosts or volumes + * Named type for protection group does not exist + + .. versionadded:: 2017.7.3 + + name : string + name of protection group + hostgroup : string + name of hostgroup to add to protection group + host : string + name of host to add to protection group + volume : string + name of volume to add to protection group + + CLI Example: + + .. 
code-block:: bash + + salt '*' pure.pg_create foo [hostgroup=foo | host=bar | volume=vol] enabled=[true | false] + + ''' + array = _get_system() + if hostgroup is None and host is None and volume is None: + if _get_pgroup(name, array) is None: + try: + array.create_pgroup(name) + except purestorage.PureError: + return False + try: + array.set_pgroup(name, snap_enabled=enabled) + return True + except purestorage.PureError: + pg_delete(name) + return False + else: + return False + elif xor(hostgroup, host, volume): + if _get_pgroup(name, array) is None: + try: + array.create_pgroup(name) + except purestorage.PureError: + return False + try: + array.set_pgroup(name, snap_enabled=enabled) + except purestorage.PureError: + pg_delete(name) + return False + if hostgroup is not None: + if _get_hgroup(hostgroup, array) is not None: + try: + array.set_pgroup(name, addhgrouplist=[hostgroup]) + return True + except purestorage.PureError: + pg_delete(name) + return False + else: + pg_delete(name) + return False + elif host is not None: + if _get_host(host, array) is not None: + try: + array.set_pgroup(name, addhostlist=[host]) + return True + except purestorage.PureError: + pg_delete(name) + return False + else: + pg_delete(name) + return False + elif volume is not None: + if _get_volume(volume, array) is not None: + try: + array.set_pgroup(name, addvollist=[volume]) + return True + except purestorage.PureError: + pg_delete(name) + return False + else: + pg_delete(name) + return False + else: + return False + else: + return False + + +def pg_update(name, hostgroup=None, host=None, volume=None): + ''' + + Update a protection group on a Pure Storage FlashArray. + + Will return False in the following cases: + * Protection group does not exist + * Incorrect type selected for current protection group type + * Specified type does not exist + + .. 
versionadded:: 2017.7.3 + + name : string + name of protection group + hostgroup : string + name of hostgroup to add to protection group + host : string + name of host to add to protection group + volume : string + name of volume to add to protection group + + CLI Example: + + .. code-block:: bash + + salt '*' pure.pg_update foo [hostgroup=foo | host=bar | volume=vol] + + ''' + array = _get_system() + pgroup = _get_pgroup(name, array) + if pgroup is not None: + if hostgroup is not None and pgroup['hgroups'] is not None: + if _get_hgroup(hostgroup, array) is not None: + try: + array.add_hgroup(hostgroup, name) + return True + except purestorage.PureError: + return False + else: + return False + elif host is not None and pgroup['hosts'] is not None: + if _get_host(host, array) is not None: + try: + array.add_host(host, name) + return True + except purestorage.PureError: + return False + else: + return False + elif volume is not None and pgroup['volumes'] is not None: + if _get_volume(volume, array) is not None: + try: + array.add_volume(volume, name) + return True + except purestorage.PureError: + return False + else: + return False + else: + if pgroup['hgroups'] is None and pgroup['hosts'] is None and pgroup['volumes'] is None: + if hostgroup is not None: + if _get_hgroup(hostgroup, array) is not None: + try: + array.set_pgroup(name, addhgrouplist=[hostgroup]) + return True + except purestorage.PureError: + return False + else: + return False + elif host is not None: + if _get_host(host, array) is not None: + try: + array.set_pgroup(name, addhostlist=[host]) + return True + except purestorage.PureError: + return False + else: + return False + elif volume is not None: + if _get_volume(volume, array) is not None: + try: + array.set_pgroup(name, addvollist=[volume]) + return True + except purestorage.PureError: + return False + else: + return False + else: + return False + else: + return False + + +def pg_delete(name, eradicate=False): + ''' + + Delete a protecton 
group on a Pure Storage FlashArray. + + Will return False if protection group is already in a deleted state. + + .. versionadded:: 2017.7.3 + + name : string + name of protection group + + CLI Example: + + .. code-block:: bash + + salt '*' pure.pg_delete foo + + ''' + array = _get_system() + if _get_pgroup(name, array) is not None: + try: + array.destroy_pgroup(name) + except purestorage.PureError: + return False + if eradicate is True: + try: + array.eradicate_pgroup(name) + return True + except purestorage.PureError: + return False + else: + return True + else: + return False + + +def pg_eradicate(name): + ''' + + Eradicate a deleted protecton group on a Pure Storage FlashArray. + + Will return False if protection group is not in a deleted state. + + .. versionadded:: 2017.7.3 + + name : string + name of protection group + + CLI Example: + + .. code-block:: bash + + salt '*' pure.pg_eradicate foo + + ''' + array = _get_system() + if _get_deleted_pgroup(name, array) is not None: + try: + array.eradicate_pgroup(name) + return True + except purestorage.PureError: + return False + else: + return False + + +def pg_remove(name, hostgroup=None, host=None, volume=None): + ''' + + Remove a hostgroup, host or volume from a protection group on a Pure Storage FlashArray. + + Will return False in the following cases: + * Protection group does not exist + * Specified type is not currently associated with the protection group + + .. versionadded:: 2017.7.3 + + name : string + name of hostgroup + hostgroup : string + name of hostgroup to remove from protection group + host : string + name of host to remove from hostgroup + volume : string + name of volume to remove from hostgroup + + CLI Example: + + .. 
code-block:: bash + + salt '*' pure.pg_remove foo [hostgroup=bar | host=test | volume=bar] + + ''' + array = _get_system() + pgroup = _get_pgroup(name, array) + if pgroup is not None: + if hostgroup is not None and pgroup['hgroups'] is not None: + if _get_hgroup(hostgroup, array) is not None: + try: + array.remove_hgroup(hostgroup, name) + return True + except purestorage.PureError: + return False + else: + return False + elif host is not None and pgroup['hosts'] is not None: + if _get_host(host, array) is not None: + try: + array.remove_host(host, name) + return True + except purestorage.PureError: + return False + else: + return False + elif volume is not None and pgroup['volumes'] is not None: + if _get_volume(volume, array) is not None: + try: + array.remove_volume(volume, name) + return True + except purestorage.PureError: + return False + else: + return False + else: + return False + else: + return False diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 903e396561..89149feaf2 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -3341,3 +3341,38 @@ def check_state_result(running, recurse=False, highstate=None): return salt.utils.state.check_result( running, recurse=recurse, highstate=highstate ) + + +def xor(*vars): + ''' + XOR definition for multiple variables + ''' + sum = bool(False) + for value in vars: + sum = sum ^ bool(value) + return sum + + +def human_to_bytes(size): + ''' + Given a human-readable byte string (e.g. 2G, 30M), + return the number of bytes. Will return 0 if the argument has + unexpected form. 
+ ''' + sbytes = size[:-1] + unit = size[-1] + if sbytes.isdigit(): + sbytes = int(sbytes) + if unit == 'P': + sbytes *= 1125899906842624 + elif unit == 'T': + sbytes *= 1099511627776 + elif unit == 'G': + sbytes *= 1073741824 + elif unit == 'M': + sbytes *= 1048576 + else: + sbytes = 0 + else: + sbytes = 0 + return sbytes From 5413a5337e4533a06efa9618862d9f07344dc06a Mon Sep 17 00:00:00 2001 From: rallytime Date: Mon, 11 Sep 2017 14:34:20 -0400 Subject: [PATCH 445/639] Lint: remove unused import --- tests/unit/modules/test_mount.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/modules/test_mount.py b/tests/unit/modules/test_mount.py index 1850faa212..8d8dcb6067 100644 --- a/tests/unit/modules/test_mount.py +++ b/tests/unit/modules/test_mount.py @@ -22,7 +22,7 @@ from tests.support.mock import ( import salt.utils.files import salt.utils.path import salt.modules.mount as mount -from salt.exceptions import CommandExecutionError, CommandNotFoundError +from salt.exceptions import CommandExecutionError MOCK_SHELL_FILE = 'A B C D F G\n' From 5c5df57efb523de33a344b9fddd496bc551057ff Mon Sep 17 00:00:00 2001 From: Mike Place Date: Mon, 11 Sep 2017 13:12:45 -0600 Subject: [PATCH 446/639] Remove extra space for linter --- salt/engines/slack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/engines/slack.py b/salt/engines/slack.py index 00452f6e3a..9f0e1638bc 100644 --- a/salt/engines/slack.py +++ b/salt/engines/slack.py @@ -241,7 +241,7 @@ class SlackClient(object): # pillar_groups = __salt__['pillar.get'](pillar_name, {}) log.debug('Got pillar groups %s from pillar %s', pillar_groups, pillar_name) log.debug('pillar groups is %s', pillar_groups) - log.debug('pillar groups type is %s', type(pillar_groups)) + log.debug('pillar groups type is %s', type(pillar_groups)) if pillar_groups: return pillar_groups else: From 980d3f4a5308d3ee5837e193485de5e6bede1a31 Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Fri, 8 Sep 2017 17:04:16 -0700 Subject: [PATCH 447/639] Adding the ability to isolate the scheduler to run outside of Salt minion or Salt master. --- salt/utils/schedule.py | 179 +++++++++++++++++++++-------------------- 1 file changed, 93 insertions(+), 86 deletions(-) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index efbf128bf4..b3fd6785e9 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -385,7 +385,7 @@ class Schedule(object): ''' instance = None - def __new__(cls, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None): + def __new__(cls, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, isolate=None): ''' Only create one instance of Schedule ''' @@ -395,33 +395,36 @@ class Schedule(object): # it in a WeakValueDictionary-- which will remove the item if no one # references it-- this forces a reference while we return to the caller cls.instance = object.__new__(cls) - cls.instance.__singleton_init__(opts, functions, returners, intervals, cleanup, proxy) + cls.instance.__singleton_init__(opts, functions, returners, intervals, cleanup, proxy, isolate) else: log.debug('Re-using Schedule') return cls.instance # has to remain empty for singletons, since __init__ will *always* be called - def __init__(self, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None): + def __init__(self, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, isolate=None): pass # an init for the singleton instance to call - def __singleton_init__(self, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None): + def __singleton_init__(self, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, isolate=None): self.opts = opts self.proxy = proxy self.functions = functions + self.isolate = isolate if isinstance(intervals, dict): self.intervals = intervals else: self.intervals = {} - if hasattr(returners, '__getitem__'): - 
self.returners = returners - else: - self.returners = returners.loader.gen_functions() + if not self.isolate: + if hasattr(returners, '__getitem__'): + self.returners = returners + else: + self.returners = returners.loader.gen_functions() self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')() self.schedule_returner = self.option('schedule_returner') # Keep track of the lowest loop interval needed in this variable self.loop_interval = six.MAXSIZE - clean_proc_dir(opts) + if not self.isolate: + clean_proc_dir(opts) if cleanup: for prefix in cleanup: self.delete_job_prefix(prefix) @@ -778,36 +781,37 @@ class Schedule(object): salt.utils.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid'])) - proc_fn = os.path.join( - salt.minion.get_proc_dir(self.opts['cachedir']), - ret['jid'] - ) + if not self.isolate: + proc_fn = os.path.join( + salt.minion.get_proc_dir(self.opts['cachedir']), + ret['jid'] + ) - # Check to see if there are other jobs with this - # signature running. If there are more than maxrunning - # jobs present then don't start another. - # If jid_include is False for this job we can ignore all this - # NOTE--jid_include defaults to True, thus if it is missing from the data - # dict we treat it like it was there and is True - if 'jid_include' not in data or data['jid_include']: - jobcount = 0 - for job in salt.utils.minion.running(self.opts): - if 'schedule' in job: - log.debug('schedule.handle_func: Checking job against ' - 'fun {0}: {1}'.format(ret['fun'], job)) - if ret['schedule'] == job['schedule'] \ - and salt.utils.process.os_is_running(job['pid']): - jobcount += 1 - log.debug( - 'schedule.handle_func: Incrementing jobcount, now ' - '{0}, maxrunning is {1}'.format( - jobcount, data['maxrunning'])) - if jobcount >= data['maxrunning']: + # Check to see if there are other jobs with this + # signature running. If there are more than maxrunning + # jobs present then don't start another. 
+ # If jid_include is False for this job we can ignore all this + # NOTE--jid_include defaults to True, thus if it is missing from the data + # dict we treat it like it was there and is True + if 'jid_include' not in data or data['jid_include']: + jobcount = 0 + for job in salt.utils.minion.running(self.opts): + if 'schedule' in job: + log.debug('schedule.handle_func: Checking job against ' + 'fun {0}: {1}'.format(ret['fun'], job)) + if ret['schedule'] == job['schedule'] \ + and salt.utils.process.os_is_running(job['pid']): + jobcount += 1 log.debug( - 'schedule.handle_func: The scheduled job {0} ' - 'was not started, {1} already running'.format( - ret['schedule'], data['maxrunning'])) - return False + 'schedule.handle_func: Incrementing jobcount, now ' + '{0}, maxrunning is {1}'.format( + jobcount, data['maxrunning'])) + if jobcount >= data['maxrunning']: + log.debug( + 'schedule.handle_func: The scheduled job {0} ' + 'was not started, {1} already running'.format( + ret['schedule'], data['maxrunning'])) + return False if multiprocessing_enabled and not salt.utils.platform.is_windows(): # Reconfigure multiprocessing logging after daemonizing @@ -820,12 +824,13 @@ class Schedule(object): try: ret['pid'] = os.getpid() - if 'jid_include' not in data or data['jid_include']: - log.debug('schedule.handle_func: adding this job to the jobcache ' - 'with data {0}'.format(ret)) - # write this to /var/cache/salt/minion/proc - with salt.utils.files.fopen(proc_fn, 'w+b') as fp_: - fp_.write(salt.payload.Serial(self.opts).dumps(ret)) + if not self.isolate: + if 'jid_include' not in data or data['jid_include']: + log.debug('schedule.handle_func: adding this job to the jobcache ' + 'with data {0}'.format(ret)) + # write this to /var/cache/salt/minion/proc + with salt.utils.files.fopen(proc_fn, 'w+b') as fp_: + fp_.write(salt.payload.Serial(self.opts).dumps(ret)) args = tuple() if 'args' in data: @@ -853,35 +858,36 @@ class Schedule(object): ret['return'] = 
self.functions[func](*args, **kwargs) - # runners do not provide retcode - if 'retcode' in self.functions.pack['__context__']: - ret['retcode'] = self.functions.pack['__context__']['retcode'] + if not self.isolate: + # runners do not provide retcode + if 'retcode' in self.functions.pack['__context__']: + ret['retcode'] = self.functions.pack['__context__']['retcode'] - ret['success'] = True + ret['success'] = True - data_returner = data.get('returner', None) - if data_returner or self.schedule_returner: - if 'return_config' in data: - ret['ret_config'] = data['return_config'] - if 'return_kwargs' in data: - ret['ret_kwargs'] = data['return_kwargs'] - rets = [] - for returner in [data_returner, self.schedule_returner]: - if isinstance(returner, six.string_types): - rets.append(returner) - elif isinstance(returner, list): - rets.extend(returner) - # simple de-duplication with order retained - for returner in OrderedDict.fromkeys(rets): - ret_str = '{0}.returner'.format(returner) - if ret_str in self.returners: - self.returners[ret_str](ret) - else: - log.info( - 'Job {0} using invalid returner: {1}. Ignoring.'.format( - func, returner + data_returner = data.get('returner', None) + if data_returner or self.schedule_returner: + if 'return_config' in data: + ret['ret_config'] = data['return_config'] + if 'return_kwargs' in data: + ret['ret_kwargs'] = data['return_kwargs'] + rets = [] + for returner in [data_returner, self.schedule_returner]: + if isinstance(returner, six.string_types): + rets.append(returner) + elif isinstance(returner, list): + rets.extend(returner) + # simple de-duplication with order retained + for returner in OrderedDict.fromkeys(rets): + ret_str = '{0}.returner'.format(returner) + if ret_str in self.returners: + self.returners[ret_str](ret) + else: + log.info( + 'Job {0} using invalid returner: {1}. 
Ignoring.'.format( + func, returner + ) ) - ) except Exception: log.exception("Unhandled exception running {0}".format(ret['fun'])) @@ -923,24 +929,25 @@ class Schedule(object): except Exception as exc: log.exception("Unhandled exception firing event: {0}".format(exc)) - log.debug('schedule.handle_func: Removing {0}'.format(proc_fn)) + if not self.isolate: + log.debug('schedule.handle_func: Removing {0}'.format(proc_fn)) - try: - os.unlink(proc_fn) - except OSError as exc: - if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT: - # EEXIST and ENOENT are OK because the file is gone and that's what - # we wanted - pass - else: - log.error("Failed to delete '{0}': {1}".format(proc_fn, exc.errno)) - # Otherwise, failing to delete this file is not something - # we can cleanly handle. - raise - finally: - if multiprocessing_enabled: - # Let's make sure we exit the process! - sys.exit(salt.defaults.exitcodes.EX_GENERIC) + try: + os.unlink(proc_fn) + except OSError as exc: + if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT: + # EEXIST and ENOENT are OK because the file is gone and that's what + # we wanted + pass + else: + log.error("Failed to delete '{0}': {1}".format(proc_fn, exc.errno)) + # Otherwise, failing to delete this file is not something + # we can cleanly handle. + raise + finally: + if multiprocessing_enabled: + # Let's make sure we exit the process! + sys.exit(salt.defaults.exitcodes.EX_GENERIC) def eval(self): ''' From 54cb69edec45a418ce50af7b94a1cb51c27d2c13 Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Mon, 11 Sep 2017 11:43:31 -0700 Subject: [PATCH 448/639] Renaming isolate to standalone --- salt/utils/schedule.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index b3fd6785e9..d24759ec95 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -385,7 +385,7 @@ class Schedule(object): ''' instance = None - def __new__(cls, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, isolate=None): + def __new__(cls, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, standalone=None): ''' Only create one instance of Schedule ''' @@ -395,26 +395,26 @@ class Schedule(object): # it in a WeakValueDictionary-- which will remove the item if no one # references it-- this forces a reference while we return to the caller cls.instance = object.__new__(cls) - cls.instance.__singleton_init__(opts, functions, returners, intervals, cleanup, proxy, isolate) + cls.instance.__singleton_init__(opts, functions, returners, intervals, cleanup, proxy, standalone) else: log.debug('Re-using Schedule') return cls.instance # has to remain empty for singletons, since __init__ will *always* be called - def __init__(self, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, isolate=None): + def __init__(self, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, standalone=None): pass # an init for the singleton instance to call - def __singleton_init__(self, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, isolate=None): + def __singleton_init__(self, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, standalone=None): self.opts = opts self.proxy = proxy self.functions = functions - self.isolate = isolate + self.standalone = standalone if isinstance(intervals, dict): self.intervals = intervals else: self.intervals = {} - if not self.isolate: + 
if not self.standalone: if hasattr(returners, '__getitem__'): self.returners = returners else: @@ -423,7 +423,7 @@ class Schedule(object): self.schedule_returner = self.option('schedule_returner') # Keep track of the lowest loop interval needed in this variable self.loop_interval = six.MAXSIZE - if not self.isolate: + if not self.standalone: clean_proc_dir(opts) if cleanup: for prefix in cleanup: @@ -781,7 +781,7 @@ class Schedule(object): salt.utils.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid'])) - if not self.isolate: + if not self.standalone: proc_fn = os.path.join( salt.minion.get_proc_dir(self.opts['cachedir']), ret['jid'] @@ -824,7 +824,7 @@ class Schedule(object): try: ret['pid'] = os.getpid() - if not self.isolate: + if not self.standalone: if 'jid_include' not in data or data['jid_include']: log.debug('schedule.handle_func: adding this job to the jobcache ' 'with data {0}'.format(ret)) @@ -858,7 +858,7 @@ class Schedule(object): ret['return'] = self.functions[func](*args, **kwargs) - if not self.isolate: + if not self.standalone: # runners do not provide retcode if 'retcode' in self.functions.pack['__context__']: ret['retcode'] = self.functions.pack['__context__']['retcode'] @@ -929,7 +929,7 @@ class Schedule(object): except Exception as exc: log.exception("Unhandled exception firing event: {0}".format(exc)) - if not self.isolate: + if not self.standalone: log.debug('schedule.handle_func: Removing {0}'.format(proc_fn)) try: From 8140cab71dbc8fc5c75e011260252810400bf770 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Mon, 11 Sep 2017 14:23:08 -0700 Subject: [PATCH 449/639] Swapping the default value for the standalone from None to False to be more clear. 
--- salt/utils/schedule.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index d24759ec95..d29a3bf314 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -385,7 +385,7 @@ class Schedule(object): ''' instance = None - def __new__(cls, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, standalone=None): + def __new__(cls, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, standalone=False): ''' Only create one instance of Schedule ''' @@ -401,11 +401,11 @@ class Schedule(object): return cls.instance # has to remain empty for singletons, since __init__ will *always* be called - def __init__(self, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, standalone=None): + def __init__(self, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, standalone=False): pass # an init for the singleton instance to call - def __singleton_init__(self, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, standalone=None): + def __singleton_init__(self, opts, functions, returners=None, intervals=None, cleanup=None, proxy=None, standalone=False): self.opts = opts self.proxy = proxy self.functions = functions From 2e9f1999f61318fc4fe0ac2a1290268a5f8d9416 Mon Sep 17 00:00:00 2001 From: Aneesh Agrawal Date: Fri, 1 Sep 2017 00:38:03 +0000 Subject: [PATCH 450/639] Upstream boto_cloudfront execution and state modules AWS recently added support for tagging CloudFront distributions, which allows us to start managing them via Salt as we can insert a Salt-controlled identifier as a `Name` tag. (CloudFront distributions get unique IDs generated by AWS, which we can't predict and thus use to manage them idempotently.) 
--- doc/ref/modules/all/index.rst | 1 + .../all/salt.modules.boto_cloudfront.rst | 6 + doc/ref/states/all/index.rst | 1 + .../all/salt.states.boto_cloudfront.rst | 6 + salt/modules/boto_cloudfront.py | 462 ++++++++++++++++++ salt/states/boto_cloudfront.py | 229 +++++++++ tests/unit/states/test_boto_cloudfront.py | 223 +++++++++ 7 files changed, 928 insertions(+) create mode 100644 doc/ref/modules/all/salt.modules.boto_cloudfront.rst create mode 100644 doc/ref/states/all/salt.states.boto_cloudfront.rst create mode 100644 salt/modules/boto_cloudfront.py create mode 100644 salt/states/boto_cloudfront.py create mode 100644 tests/unit/states/test_boto_cloudfront.py diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst index 1365f38453..e5318e60d1 100644 --- a/doc/ref/modules/all/index.rst +++ b/doc/ref/modules/all/index.rst @@ -44,6 +44,7 @@ execution modules boto_apigateway boto_asg boto_cfn + boto_cloudfront boto_cloudtrail boto_cloudwatch boto_cloudwatch_event diff --git a/doc/ref/modules/all/salt.modules.boto_cloudfront.rst b/doc/ref/modules/all/salt.modules.boto_cloudfront.rst new file mode 100644 index 0000000000..a76ea991fc --- /dev/null +++ b/doc/ref/modules/all/salt.modules.boto_cloudfront.rst @@ -0,0 +1,6 @@ +============================ +salt.modules.boto_cloudfront +============================ + +.. 
automodule:: salt.modules.boto_cloudfront + :members: diff --git a/doc/ref/states/all/index.rst b/doc/ref/states/all/index.rst index 4803648006..3d393f9a5c 100644 --- a/doc/ref/states/all/index.rst +++ b/doc/ref/states/all/index.rst @@ -31,6 +31,7 @@ state modules boto_apigateway boto_asg boto_cfn + boto_cloudfront boto_cloudtrail boto_cloudwatch_alarm boto_cloudwatch_event diff --git a/doc/ref/states/all/salt.states.boto_cloudfront.rst b/doc/ref/states/all/salt.states.boto_cloudfront.rst new file mode 100644 index 0000000000..671965b2dc --- /dev/null +++ b/doc/ref/states/all/salt.states.boto_cloudfront.rst @@ -0,0 +1,6 @@ +=========================== +salt.states.boto_cloudfront +=========================== + +.. automodule:: salt.states.boto_cloudfront + :members: diff --git a/salt/modules/boto_cloudfront.py b/salt/modules/boto_cloudfront.py new file mode 100644 index 0000000000..aa932884bf --- /dev/null +++ b/salt/modules/boto_cloudfront.py @@ -0,0 +1,462 @@ +# -*- coding: utf-8 -*- +''' +Connection module for Amazon CloudFront + +.. versionadded:: Oxygen + +:depends: boto3 + +:configuration: This module accepts explicit AWS credentials but can also + utilize IAM roles assigned to the instance through Instance Profiles or + it can read them from the ~/.aws/credentials file or from these + environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY. + Dynamic credentials are then automatically obtained from AWS API and no + further configuration is necessary. More information available at: + + .. code-block:: text + + http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ + iam-roles-for-amazon-ec2.html + + http://boto3.readthedocs.io/en/latest/guide/ + configuration.html#guide-configuration + + If IAM roles are not used you need to specify them either in a pillar or + in the minion's config file: + + .. 
code-block:: yaml + + cloudfront.keyid: GKTADJGHEIQSXMKKRBJ08H + cloudfront.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs + + A region may also be specified in the configuration: + + .. code-block:: yaml + + cloudfront.region: us-east-1 + + If a region is not specified, the default is us-east-1. + + It's also possible to specify key, keyid and region via a profile, either + as a passed in dict, or as a string to pull from pillars or minion config: + + .. code-block:: yaml + + myprofile: + keyid: GKTADJGHEIQSXMKKRBJ08H + key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs + region: us-east-1 +''' +# keep lint from choking on _get_conn and _cache_id +# pylint: disable=E0602 + +# Import Python libs +from __future__ import absolute_import +import logging + +# Import Salt libs +import salt.ext.six as six +from salt.utils.odict import OrderedDict + +import yaml + +# Import third party libs +try: + # pylint: disable=unused-import + import boto3 + import botocore + # pylint: enable=unused-import + logging.getLogger('boto3').setLevel(logging.CRITICAL) + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +log = logging.getLogger(__name__) + + +def __virtual__(): + ''' + Only load if boto3 libraries exist. + ''' + if not HAS_BOTO: + msg = 'The boto_cloudfront module could not be loaded: {}.' + return (False, msg.format('boto3 libraries not found')) + __utils__['boto3.assign_funcs'](__name__, 'cloudfront') + return True + + +def _list_distributions( + conn, + name=None, + region=None, + key=None, + keyid=None, + profile=None, +): + ''' + Private function that returns an iterator over all CloudFront distributions. + The caller is responsible for all boto-related error handling. 
+ + name + (Optional) Only yield the distribution with the given name + ''' + for dl_ in conn.get_paginator('list_distributions').paginate(): + distribution_list = dl_['DistributionList'] + if 'Items' not in distribution_list: + # If there are no items, AWS omits the `Items` key for some reason + continue + for partial_dist in distribution_list['Items']: + tags = conn.list_tags_for_resource(Resource=partial_dist['ARN']) + tags = dict( + (kv['Key'], kv['Value']) for kv in tags['Tags']['Items'] + ) + + id_ = partial_dist['Id'] + if 'Name' not in tags: + log.warning( + 'CloudFront distribution {0} has no Name tag.'.format(id_), + ) + continue + distribution_name = tags.pop('Name', None) + if name is not None and distribution_name != name: + continue + + # NOTE: list_distributions() returns a DistributionList, + # which nominally contains a list of Distribution objects. + # However, they are mangled in that they are missing values + # (`Logging`, `ActiveTrustedSigners`, and `ETag` keys) + # and moreover flatten the normally nested DistributionConfig + # attributes to the top level. + # Hence, we must call get_distribution() to get the full object, + # and we cache these objects to help lessen API calls. + distribution = _cache_id( + 'cloudfront', + sub_resource=distribution_name, + region=region, + key=key, + keyid=keyid, + profile=profile, + ) + if distribution: + yield (distribution_name, distribution) + continue + + dist_with_etag = conn.get_distribution(Id=id_) + distribution = { + 'distribution': dist_with_etag['Distribution'], + 'etag': dist_with_etag['ETag'], + 'tags': tags, + } + _cache_id( + 'cloudfront', + sub_resource=distribution_name, + resource_id=distribution, + region=region, + key=key, + keyid=keyid, + profile=profile, + ) + yield (distribution_name, distribution) + + +def get_distribution(name, region=None, key=None, keyid=None, profile=None): + ''' + Get information about a CloudFront distribution (configuration, tags) with a given name. 
+ + name + Name of the CloudFront distribution + + region + Region to connect to + + key + Secret key to use + + keyid + Access key to use + + profile + A dict with region, key, and keyid, + or a pillar key (string) that contains such a dict. + + CLI Example: + + .. code-block:: bash + + salt myminion boto_cloudfront.get_distribution name=mydistribution profile=awsprofile + + ''' + distribution = _cache_id( + 'cloudfront', + sub_resource=name, + region=region, + key=key, + keyid=keyid, + profile=profile, + ) + if distribution: + return {'result': distribution} + + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + try: + for _, dist in _list_distributions( + conn, + name=name, + region=region, + key=key, + keyid=keyid, + profile=profile, + ): + # _list_distributions should only return the one distribution + # that we want (with the given name). + # In case of multiple distributions with the same name tag, + # our use of caching means list_distributions will just + # return the first one over and over again, + # so only the first result is useful. + if distribution is not None: + msg = 'More than one distribution found with name {0}' + return {'error': msg.format(name)} + distribution = dist + except botocore.exceptions.ClientError as err: + return {'error': __utils__['boto3.get_error'](err)} + if not distribution: + return {'result': None} + + _cache_id( + 'cloudfront', + sub_resource=name, + resource_id=distribution, + region=region, + key=key, + keyid=keyid, + profile=profile, + ) + return {'result': distribution} + + +def export_distributions(region=None, key=None, keyid=None, profile=None): + ''' + Get details of all CloudFront distributions. + Produces results that can be used to create an SLS file. + + CLI Example: + + .. 
code-block:: bash + + salt-call boto_cloudfront.export_distributions --out=txt |\ + sed "s/local: //" > cloudfront_distributions.sls + + ''' + results = OrderedDict() + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + try: + for name, distribution in _list_distributions( + conn, + region=region, + key=key, + keyid=keyid, + profile=profile, + ): + config = distribution['distribution']['DistributionConfig'] + tags = distribution['tags'] + + distribution_sls_data = [ + {'name': name}, + {'config': config}, + {'tags': tags}, + ] + results['Manage CloudFront distribution {0}'.format(name)] = { + 'boto_cloudfront.present': distribution_sls_data, + } + except botocore.exceptions.ClientError as err: + # Raise an exception, as this is meant to be user-invoked at the CLI + # as opposed to being called from execution or state modules + raise err + + dumper = __utils__['yamldumper.get_dumper']('IndentedSafeOrderedDumper') + return yaml.dump( + results, + default_flow_style=False, + Dumper=dumper, + ) + + +def create_distribution( + name, + config, + tags=None, + region=None, + key=None, + keyid=None, + profile=None, +): + ''' + Create a CloudFront distribution with the given name, config, and (optionally) tags. + + name + Name for the CloudFront distribution + + config + Configuration for the distribution + + tags + Tags to associate with the distribution + + region + Region to connect to + + key + Secret key to use + + keyid + Access key to use + + profile + A dict with region, key, and keyid, + or a pillar key (string) that contains such a dict. + + CLI Example: + + .. 
code-block:: bash + + salt myminion boto_cloudfront.create_distribution name=mydistribution profile=awsprofile \ + config='{"Comment":"partial configuration","Enabled":true}' + ''' + if tags is None: + tags = {} + if 'Name' in tags: + # Be lenient and silently accept if names match, else error + if tags['Name'] != name: + return {'error': 'Must not pass `Name` in `tags` but as `name`'} + tags['Name'] = name + tags = { + 'Items': [{'Key': k, 'Value': v} for k, v in six.iteritems(tags)] + } + + conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + try: + conn.create_distribution_with_tags( + DistributionConfigWithTags={ + 'DistributionConfig': config, + 'Tags': tags, + }, + ) + _cache_id( + 'cloudfront', + sub_resource=name, + invalidate=True, + region=region, + key=key, + keyid=keyid, + profile=profile, + ) + except botocore.exceptions.ClientError as err: + return {'error': __utils__['boto3.get_error'](err)} + + return {'result': True} + + +def update_distribution( + name, + config, + tags=None, + region=None, + key=None, + keyid=None, + profile=None, +): + ''' + Update the config (and optionally tags) for the CloudFront distribution with the given name. + + name + Name of the CloudFront distribution + + config + Configuration for the distribution + + tags + Tags to associate with the distribution + + region + Region to connect to + + key + Secret key to use + + keyid + Access key to use + + profile + A dict with region, key, and keyid, + or a pillar key (string) that contains such a dict. + + CLI Example: + + .. 
code-block:: bash
+
+        salt myminion boto_cloudfront.update_distribution name=mydistribution profile=awsprofile \
+            config='{"Comment":"partial configuration","Enabled":true}'
+    '''
+    distribution_result = get_distribution(
+        name,
+        region=region,
+        key=key,
+        keyid=keyid,
+        profile=profile
+    )
+    if 'error' in distribution_result:
+        return distribution_result
+    dist_with_tags = distribution_result['result']
+
+    current_distribution = dist_with_tags['distribution']
+    current_config = current_distribution['DistributionConfig']
+    current_tags = dist_with_tags['tags']
+    etag = dist_with_tags['etag']
+
+    config_diff = __utils__['dictdiffer.deep_diff'](current_config, config)
+    if tags:
+        tags_diff = __utils__['dictdiffer.deep_diff'](current_tags, tags)
+
+    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
+    try:
+        if 'old' in config_diff or 'new' in config_diff:
+            conn.update_distribution(
+                DistributionConfig=config,
+                Id=current_distribution['Id'],
+                IfMatch=etag,
+            )
+        if tags:
+            arn = current_distribution['ARN']
+            if 'new' in tags_diff:
+                tags_to_add = {
+                    'Items': [
+                        {'Key': k, 'Value': v}
+                        for k, v in six.iteritems(tags_diff['new'])
+                    ],
+                }
+                conn.tag_resource(
+                    Resource=arn,
+                    Tags=tags_to_add,
+                )
+            if 'old' in tags_diff:
+                tags_to_remove = {
+                    'Items': list(tags_diff['old'].keys()),
+                }
+                conn.untag_resource(
+                    Resource=arn,
+                    TagKeys=tags_to_remove,
+                )
+    except botocore.exceptions.ClientError as err:
+        return {'error': __utils__['boto3.get_error'](err)}
+    finally:
+        _cache_id(
+            'cloudfront',
+            sub_resource=name,
+            invalidate=True,
+            region=region,
+            key=key,
+            keyid=keyid,
+            profile=profile,
+        )
+
+    return {'result': True}
diff --git a/salt/states/boto_cloudfront.py b/salt/states/boto_cloudfront.py
new file mode 100644
index 0000000000..eb4d2ab940
--- /dev/null
+++ b/salt/states/boto_cloudfront.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+'''
+Manage CloudFront distributions
+
+.. 
versionadded:: Oxygen + +Create, update and destroy CloudFront distributions. + +This module accepts explicit AWS credentials but can also utilize +IAM roles assigned to the instance through Instance Profiles. +Dynamic credentials are then automatically obtained from AWS API +and no further configuration is necessary. +More information available `here +`_. + +If IAM roles are not used you need to specify them, +either in a pillar file or in the minion's config file: + +.. code-block:: yaml + + cloudfront.keyid: GKTADJGHEIQSXMKKRBJ08H + cloudfront.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs + +It's also possible to specify ``key``, ``keyid``, and ``region`` via a profile, +either passed in as a dict, or a string to pull from pillars or minion config: + +.. code-block:: yaml + + myprofile: + keyid: GKTADJGHEIQSXMKKRBJ08H + key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs + region: us-east-1 + +.. code-block:: yaml + + aws: + region: + us-east-1: + profile: + keyid: GKTADJGHEIQSXMKKRBJ08H + key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs + region: us-east-1 + +:depends: boto3 +''' + +# Import Python Libs +from __future__ import absolute_import +import difflib +import logging + +import yaml + +log = logging.getLogger(__name__) + + +def __virtual__(): + ''' + Only load if boto is available. + ''' + if 'boto_cloudfront.get_distribution' not in __salt__: + msg = 'The boto_cloudfront state module could not be loaded: {}.' + return (False, msg.format('boto_cloudfront exec module unavailable.')) + return 'boto_cloudfront' + + +def present( + name, + config, + tags, + region=None, + key=None, + keyid=None, + profile=None, +): + ''' + Ensure the CloudFront distribution is present. 
+ + name (string) + Name of the CloudFront distribution + + config (dict) + Configuration for the distribution + + tags (dict) + Tags to associate with the distribution + + region (string) + Region to connect to + + key (string) + Secret key to use + + keyid (string) + Access key to use + + profile (dict or string) + A dict with region, key, and keyid, + or a pillar key (string) that contains such a dict. + + Example: + + .. code-block:: yaml + + Manage my_distribution CloudFront distribution: + boto_cloudfront.present: + - name: my_distribution + - config: + Comment: 'partial config shown, most parameters elided' + Enabled: True + - tags: + testing_key: testing_value + ''' + ret = { + 'name': name, + 'comment': '', + 'changes': {}, + } + + res = __salt__['boto_cloudfront.get_distribution']( + name, + region=region, + key=key, + keyid=keyid, + profile=profile, + ) + if 'error' in res: + ret['result'] = False + ret['comment'] = 'Error checking distribution {0}: {1}'.format( + name, + res['error'], + ) + return ret + + old = res['result'] + if old is None: + if __opts__['test']: + ret['result'] = None + ret['comment'] = 'Distribution {0} set for creation.'.format(name) + ret['pchanges'] = {'old': None, 'new': name} + return ret + + res = __salt__['boto_cloudfront.create_distribution']( + name, + config, + tags, + region=region, + key=key, + keyid=keyid, + profile=profile, + ) + if 'error' in res: + ret['result'] = False + ret['comment'] = 'Error creating distribution {0}: {1}'.format( + name, + res['error'], + ) + return ret + + ret['result'] = True + ret['comment'] = 'Created distribution {0}.'.format(name) + ret['changes'] = {'old': None, 'new': name} + return ret + else: + full_config_old = { + 'config': old['distribution']['DistributionConfig'], + 'tags': old['tags'], + } + full_config_new = { + 'config': config, + 'tags': tags, + } + diffed_config = __utils__['dictdiffer.deep_diff']( + full_config_old, + full_config_new, + ) + + def _yaml_safe_dump(attrs): + 
'''Safely dump YAML using a readable flow style''' + dumper_name = 'IndentedSafeOrderedDumper' + dumper = __utils__['yamldumper.get_dumper'](dumper_name) + return yaml.dump( + attrs, + default_flow_style=False, + Dumper=dumper, + ) + changes_diff = ''.join(difflib.unified_diff( + _yaml_safe_dump(full_config_old).splitlines(True), + _yaml_safe_dump(full_config_new).splitlines(True), + )) + + any_changes = bool('old' in diffed_config or 'new' in diffed_config) + if not any_changes: + ret['result'] = True + ret['comment'] = 'Distribution {0} has correct config.'.format( + name, + ) + return ret + + if __opts__['test']: + ret['result'] = None + ret['comment'] = '\n'.join([ + 'Distribution {0} set for new config:'.format(name), + changes_diff, + ]) + ret['pchanges'] = {'diff': changes_diff} + return ret + + res = __salt__['boto_cloudfront.update_distribution']( + name, + config, + tags, + region=region, + key=key, + keyid=keyid, + profile=profile, + ) + if 'error' in res: + ret['result'] = False + ret['comment'] = 'Error updating distribution {0}: {1}'.format( + name, + res['error'], + ) + return ret + + ret['result'] = True + ret['comment'] = 'Updated distribution {0}.'.format(name) + ret['changes'] = {'diff': changes_diff} + return ret diff --git a/tests/unit/states/test_boto_cloudfront.py b/tests/unit/states/test_boto_cloudfront.py new file mode 100644 index 0000000000..dddb78d384 --- /dev/null +++ b/tests/unit/states/test_boto_cloudfront.py @@ -0,0 +1,223 @@ +# -*- coding: utf-8 -*- +''' +Unit tests for the boto_cloudfront state module. 
+''' +# Import Python libs +from __future__ import absolute_import +import copy +import textwrap + +# Import Salt Testing Libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import skipIf, TestCase +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch + +# Import Salt Libs +import salt.config +import salt.loader +import salt.states.boto_cloudfront as boto_cloudfront + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class BotoCloudfrontTestCase(TestCase, LoaderModuleMockMixin): + ''' + Test cases for salt.states.boto_cloudfront + ''' + def setup_loader_modules(self): + utils = salt.loader.utils( + self.opts, + whitelist=['boto3', 'dictdiffer', 'yamldumper'], + context={}, + ) + return { + boto_cloudfront: { + '__utils__': utils, + } + } + + @classmethod + def setUpClass(cls): + cls.opts = salt.config.DEFAULT_MINION_OPTS + + cls.name = 'my_distribution' + cls.base_ret = {'name': cls.name, 'changes': {}} + + # Most attributes elided since there are so many required ones + cls.config = {'Enabled': True, 'HttpVersion': 'http2'} + cls.tags = {'test_tag1': 'value1'} + + @classmethod + def tearDownClass(cls): + del cls.opts + + del cls.name + del cls.base_ret + + del cls.config + del cls.tags + + def base_ret_with(self, extra_ret): + new_ret = copy.deepcopy(self.base_ret) + new_ret.update(extra_ret) + return new_ret + + def test_present_distribution_retrieval_error(self): + ''' + Test for boto_cloudfront.present when we cannot get the distribution. 
+ ''' + mock_get = MagicMock(return_value={'error': 'get_distribution error'}) + with patch.multiple(boto_cloudfront, + __salt__={'boto_cloudfront.get_distribution': mock_get}, + __opts__={'test': False}, + ): + comment = 'Error checking distribution {0}: get_distribution error' + self.assertDictEqual( + boto_cloudfront.present(self.name, self.config, self.tags), + self.base_ret_with({ + 'result': False, + 'comment': comment.format(self.name), + }), + ) + + def test_present_from_scratch(self): + mock_get = MagicMock(return_value={'result': None}) + + with patch.multiple(boto_cloudfront, + __salt__={'boto_cloudfront.get_distribution': mock_get}, + __opts__={'test': True}, + ): + comment = 'Distribution {0} set for creation.'.format(self.name) + self.assertDictEqual( + boto_cloudfront.present(self.name, self.config, self.tags), + self.base_ret_with({ + 'result': None, + 'comment': comment, + 'pchanges': {'old': None, 'new': self.name}, + }), + ) + + mock_create_failure = MagicMock(return_value={'error': 'create error'}) + with patch.multiple(boto_cloudfront, + __salt__={ + 'boto_cloudfront.get_distribution': mock_get, + 'boto_cloudfront.create_distribution': mock_create_failure, + }, + __opts__={'test': False}, + ): + comment = 'Error creating distribution {0}: create error' + self.assertDictEqual( + boto_cloudfront.present(self.name, self.config, self.tags), + self.base_ret_with({ + 'result': False, + 'comment': comment.format(self.name), + }), + ) + + mock_create_success = MagicMock(return_value={'result': True}) + with patch.multiple(boto_cloudfront, + __salt__={ + 'boto_cloudfront.get_distribution': mock_get, + 'boto_cloudfront.create_distribution': mock_create_success, + }, + __opts__={'test': False}, + ): + comment = 'Created distribution {0}.' 
+ self.assertDictEqual( + boto_cloudfront.present(self.name, self.config, self.tags), + self.base_ret_with({ + 'result': True, + 'comment': comment.format(self.name), + 'changes': {'old': None, 'new': self.name}, + }), + ) + + def test_present_correct_state(self): + mock_get = MagicMock(return_value={'result': { + 'distribution': {'DistributionConfig': self.config}, + 'tags': self.tags, + 'etag': 'test etag', + }}) + with patch.multiple(boto_cloudfront, + __salt__={'boto_cloudfront.get_distribution': mock_get}, + __opts__={'test': False}, + ): + comment = 'Distribution {0} has correct config.' + self.assertDictEqual( + boto_cloudfront.present(self.name, self.config, self.tags), + self.base_ret_with({ + 'result': True, + 'comment': comment.format(self.name), + }), + ) + + def test_present_update_config_and_tags(self): + mock_get = MagicMock(return_value={'result': { + 'distribution': {'DistributionConfig': { + 'Enabled': False, + 'Comment': 'to be removed', + }}, + 'tags': {'bad existing tag': 'also to be removed'}, + 'etag': 'test etag', + }}) + + diff = textwrap.dedent('''\ + --- + +++ + @@ -1,5 +1,5 @@ + config: + - Comment: to be removed + - Enabled: false + + Enabled: true + + HttpVersion: http2 + tags: + - bad existing tag: also to be removed + + test_tag1: value1 + ''') + + with patch.multiple(boto_cloudfront, + __salt__={'boto_cloudfront.get_distribution': mock_get}, + __opts__={'test': True}, + ): + header = 'Distribution {0} set for new config:'.format(self.name) + self.assertDictEqual( + boto_cloudfront.present(self.name, self.config, self.tags), + self.base_ret_with({ + 'result': None, + 'comment': '\n'.join([header, diff]), + 'pchanges': {'diff': diff}, + }), + ) + + mock_update_failure = MagicMock(return_value={'error': 'update error'}) + with patch.multiple(boto_cloudfront, + __salt__={ + 'boto_cloudfront.get_distribution': mock_get, + 'boto_cloudfront.update_distribution': mock_update_failure, + }, + __opts__={'test': False}, + ): + comment = 'Error 
updating distribution {0}: update error' + self.assertDictEqual( + boto_cloudfront.present(self.name, self.config, self.tags), + self.base_ret_with({ + 'result': False, + 'comment': comment.format(self.name), + }), + ) + + mock_update_success = MagicMock(return_value={'result': True}) + with patch.multiple(boto_cloudfront, + __salt__={ + 'boto_cloudfront.get_distribution': mock_get, + 'boto_cloudfront.update_distribution': mock_update_success, + }, + __opts__={'test': False}, + ): + self.assertDictEqual( + boto_cloudfront.present(self.name, self.config, self.tags), + self.base_ret_with({ + 'result': True, + 'comment': 'Updated distribution {0}.'.format(self.name), + 'changes': {'diff': diff}, + }), + ) From f452860316afa89e800913597cb704415d04157a Mon Sep 17 00:00:00 2001 From: Rico Gloeckner Date: Tue, 12 Sep 2017 08:24:56 +0200 Subject: [PATCH 451/639] typofix 'tempalte' => 'template' --- salt/states/netconfig.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/netconfig.py b/salt/states/netconfig.py index 24c15a7924..7311baaf1f 100644 --- a/salt/states/netconfig.py +++ b/salt/states/netconfig.py @@ -347,7 +347,7 @@ def managed(name, template_user=template_user, template_group=template_group, template_mode=template_mode, - tempalte_attrs=template_attrs, + template_attrs=template_attrs, saltenv=saltenv, template_engine=template_engine, skip_verify=skip_verify, From a29a9855a624e189a24b642740481b8fe98f1176 Mon Sep 17 00:00:00 2001 From: garethgreenaway Date: Tue, 12 Sep 2017 08:47:55 -0700 Subject: [PATCH 452/639] Fixing typo. Fixing typo. 
--- salt/utils/schedule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index a0597a004c..5b182d1c80 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -842,7 +842,7 @@ class Schedule(object): if argspec.keywords: # this function accepts **kwargs, pack in the publish data for key, val in six.iteritems(ret): - is key is not 'kwargs': + if key is not 'kwargs': kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val) ret['return'] = self.functions[func](*args, **kwargs) From dfc44b8d149698f8497aba6d0fbf30415b227d3c Mon Sep 17 00:00:00 2001 From: spenceation Date: Tue, 12 Sep 2017 11:37:25 -0400 Subject: [PATCH 453/639] Added Cisco UCS Chassis associated modules. --- salt/grains/cimc.py | 38 +++ salt/modules/cimc.py | 710 +++++++++++++++++++++++++++++++++++++++++++ salt/proxy/cimc.py | 290 ++++++++++++++++++ salt/states/cimc.py | 211 +++++++++++++ 4 files changed, 1249 insertions(+) create mode 100644 salt/grains/cimc.py create mode 100644 salt/modules/cimc.py create mode 100644 salt/proxy/cimc.py create mode 100644 salt/states/cimc.py diff --git a/salt/grains/cimc.py b/salt/grains/cimc.py new file mode 100644 index 0000000000..e1fba64947 --- /dev/null +++ b/salt/grains/cimc.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +''' +Generate baseline proxy minion grains for cimc hosts. 
+ +''' + +# Import Python Libs +from __future__ import absolute_import +import logging + +# Import Salt Libs +import salt.utils.platform +import salt.proxy.cimc + +__proxyenabled__ = ['cimc'] +__virtualname__ = 'cimc' + +log = logging.getLogger(__file__) + +GRAINS_CACHE = {'os_family': 'Cisco UCS'} + + +def __virtual__(): + try: + if salt.utils.platform.is_proxy() and __opts__['proxy']['proxytype'] == 'cimc': + return __virtualname__ + except KeyError: + pass + + return False + + +def cimc(proxy=None): + if not proxy: + return {} + if proxy['cimc.initialized']() is False: + return {} + return {'cimc': proxy['cimc.grains']()} diff --git a/salt/modules/cimc.py b/salt/modules/cimc.py new file mode 100644 index 0000000000..ddadaec6a4 --- /dev/null +++ b/salt/modules/cimc.py @@ -0,0 +1,710 @@ +# -*- coding: utf-8 -*- +''' +Module to provide Cisco UCS compatibility to Salt. + +:codeauthor: :email:`Spencer Ervin ` +:maturity: new +:depends: none +:platform: unix + + +Configuration +============= +This module accepts connection configuration details either as +parameters, or as configuration settings in pillar as a Salt proxy. +Options passed into opts will be ignored if options are passed into pillar. + +.. seealso:: + :prox:`Cisco UCS Proxy Module ` + +About +===== +This execution module was designed to handle connections to a Cisco UCS server. This module adds support to send +connections directly to the device through the rest API. + +''' + +# Import Python Libs +from __future__ import absolute_import +import logging + +# Import Salt Libs +import salt.utils.platform +import salt.proxy.cimc + +log = logging.getLogger(__name__) + +__virtualname__ = 'cimc' + + +def __virtual__(): + ''' + Will load for the cimc proxy minions. + ''' + try: + if salt.utils.platform.is_proxy() and \ + __opts__['proxy']['proxytype'] == 'cimc': + return __virtualname__ + except KeyError: + pass + + return False, 'The cimc execution module can only be loaded for cimc proxy minions.' 
+ + +def activate_backup_image(reset=False): + ''' + Activates the firmware backup image. + + CLI Example: + + Args: + reset(bool): Reset the CIMC device on activate. + + .. code-block:: bash + + salt '*' cimc.activate_backup_image + salt '*' cimc.activate_backup_image reset=True + + ''' + + dn = "sys/rack-unit-1/mgmt/fw-boot-def/bootunit-combined" + + r = "no" + + if reset is True: + r = "yes" + + inconfig = """""".format(r) + + ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False) + + return ret + + +def create_user(uid=None, username=None, password=None, priv=None): + ''' + Create a CIMC user with username and password. + + Args: + uid(int): The user ID slot to create the user account in. + + username(str): The name of the user. + + password(str): The clear text password of the user. + + priv(str): The privilege level of the user. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.create_user 11 username=admin password=foobar priv=admin + + ''' + + if not uid: + raise salt.exceptions.CommandExecutionError("The user ID must be specified.") + + if not username: + raise salt.exceptions.CommandExecutionError("The username must be specified.") + + if not password: + raise salt.exceptions.CommandExecutionError("The password must be specified.") + + if not priv: + raise salt.exceptions.CommandExecutionError("The privilege level must be specified.") + + dn = "sys/user-ext/user-{0}".format(uid) + + inconfig = """""".format(uid, + username, + priv, + password) + + ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False) + + return ret + + +def get_bios_defaults(): + ''' + Get the default values of BIOS tokens. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.get_bios_defaults + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('biosPlatformDefaults', True) + + return ret + + +def get_bios_settings(): + ''' + Get the C240 server BIOS token values. + + CLI Example: + + .. 
code-block:: bash + + salt '*' cimc.get_bios_settings + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('biosSettings', True) + + return ret + + +def get_boot_order(): + ''' + Retrieves the configured boot order table. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.get_boot_order + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('lsbootDef', True) + + return ret + + +def get_cpu_details(): + ''' + Get the CPU product ID details. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.get_cpu_details + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('pidCatalogCpu', True) + + return ret + + +def get_disks(): + ''' + Get the HDD product ID details. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.get_disks + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('pidCatalogHdd', True) + + return ret + + +def get_ethernet_interfaces(): + ''' + Get the adapter Ethernet interface details. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.get_ethernet_interfaces + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('adaptorHostEthIf', True) + + return ret + + +def get_fibre_channel_interfaces(): + ''' + Get the adapter fibre channel interface details. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.get_fibre_channel_interfaces + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('adaptorHostFcIf', True) + + return ret + + +def get_firmware(): + ''' + Retrieves the current running firmware versions of server components. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.get_firmware + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('firmwareRunning', False) + + return ret + + +def get_ldap(): + ''' + Retrieves LDAP server details. + + CLI Example: + + .. 
code-block:: bash
+
+        salt '*' cimc.get_ldap
+
+    '''
+    ret = __proxy__['cimc.get_config_resolver_class']('aaaLdap', True)
+
+    return ret
+
+
+def get_management_interface():
+    '''
+    Retrieve the management interface details.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt '*' cimc.get_management_interface
+
+    '''
+    ret = __proxy__['cimc.get_config_resolver_class']('mgmtIf', False)
+
+    return ret
+
+
+def get_memory_token():
+    '''
+    Get the memory RAS BIOS token.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt '*' cimc.get_memory_token
+
+    '''
+    ret = __proxy__['cimc.get_config_resolver_class']('biosVfSelectMemoryRASConfiguration', False)
+
+    return ret
+
+
+def get_memory_unit():
+    '''
+    Get the IMM/Memory unit product ID details.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt '*' cimc.get_memory_unit
+
+    '''
+    ret = __proxy__['cimc.get_config_resolver_class']('pidCatalogDimm', True)
+
+    return ret
+
+
+def get_network_adapters():
+    '''
+    Get the list of network adapters and configuration details.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt '*' cimc.get_network_adapters
+
+    '''
+    ret = __proxy__['cimc.get_config_resolver_class']('networkAdapterEthIf', True)
+
+    return ret
+
+
+def get_ntp():
+    '''
+    Retrieves the current running NTP configuration.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt '*' cimc.get_ntp
+
+    '''
+    ret = __proxy__['cimc.get_config_resolver_class']('commNtpProvider', False)
+
+    return ret
+
+
+def get_pci_adapters():
+    '''
+    Get the PCI adapter product ID details.
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt '*' cimc.get_pci_adapters
+
+    '''
+    ret = __proxy__['cimc.get_config_resolver_class']('pidCatalogPCIAdapter', True)
+
+    return ret
+
+
+def get_power_supplies():
+    '''
+    Retrieves the power supply unit details.
+
+    CLI Example:
+
+    .. 
code-block:: bash + + salt '*' cimc.get_power_supplies + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('equipmentPsu', False) + + return ret + + +def get_snmp_config(): + ''' + Get the snmp configuration details. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.get_snmp_config + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('commSnmp', False) + + return ret + + +def get_syslog(): + ''' + Get the Syslog client-server details. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.get_syslog + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('commSyslogClient', False) + + return ret + + +def get_system_info(): + ''' + Get the system information. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.get_system_info + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('computeRackUnit', False) + + return ret + + +def get_users(): + ''' + Get the CIMC users. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.get_users + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('aaaUser', False) + + return ret + + +def get_vic_adapters(): + ''' + Get the VIC adapter general profile details. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.get_vic_adapters + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('adaptorGenProfile', True) + + return ret + + +def get_vic_uplinks(): + ''' + Get the VIC adapter uplink port details. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.get_vic_uplinks + + ''' + ret = __proxy__['cimc.get_config_resolver_class']('adaptorExtEthIf', True) + + return ret + + +def mount_share(name=None, + remote_share=None, + remote_file=None, + mount_type="nfs", + username=None, + password=None): + ''' + Mounts a remote file through a remote share. Currently, this feature is supported in version 1.5 or greater. + The remote share can be either NFS, CIFS, or WWW. 
+ + Some of the advantages of CIMC Mounted vMedia include: + Communication between mounted media and target stays local (inside datacenter) + Media mounts can be scripted/automated + No vKVM requirements for media connection + Multiple share types supported + Connections supported through all CIMC interfaces + + Note: CIMC Mounted vMedia is enabled through BIOS configuration. + + Args: + name(str): The name of the volume on the CIMC device. + + remote_share(str): The file share link that will be used to mount the share. This can be NFS, CIFS, or WWW. This + must be the directory path and not the full path to the remote file. + + remote_file(str): The name of the remote file to mount. It must reside within remote_share. + + mount_type(str): The type of share to mount. Valid options are nfs, cifs, and www. + + username(str): An optional requirement to pass credentials to the remote share. If not provided, an + unauthenticated connection attempt will be made. + + password(str): An optional requirement to pass a password to the remote share. If not provided, an + unauthenticated connection attempt will be made. + + CLI Example: + + .. 
code-block:: bash + + salt '*' cimc.mount_share name=WIN7 remote_share=10.xxx.27.xxx:/nfs remote_file=sl1huu.iso + + salt '*' cimc.mount_share name=WIN7 remote_share=10.xxx.27.xxx:/nfs remote_file=sl1huu.iso username=bob password=badpassword + + ''' + + if not name: + raise salt.exceptions.CommandExecutionError("The share name must be specified.") + + if not remote_share: + raise salt.exceptions.CommandExecutionError("The remote share path must be specified.") + + if not remote_file: + raise salt.exceptions.CommandExecutionError("The remote file name must be specified.") + + if username and password: + mount_options = " mountOptions='username={0},password={1}'".format(username, password) + else: + mount_options = "" + + dn = 'sys/svc-ext/vmedia-svc/vmmap-{0}'.format(name) + inconfig = """""".format(name, mount_type, mount_options, remote_file, remote_share) + + ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False) + + return ret + + +def reboot(): + ''' + Power cycling the server. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.reboot + + ''' + + dn = "sys/rack-unit-1" + + inconfig = """""" + + ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False) + + return ret + + +def set_ntp_server(server1='', server2='', server3='', server4=''): + ''' + Sets the NTP servers configuration. This will also enable the client NTP service. + + Args: + server1(str): The first IP address or FQDN of the NTP servers. + + server2(str): The second IP address or FQDN of the NTP servers. + + server3(str): The third IP address or FQDN of the NTP servers. + + server4(str): The fourth IP address or FQDN of the NTP servers. + + CLI Example: + + .. 
code-block:: bash + + salt '*' cimc.set_ntp_server 10.10.10.1 + + salt '*' cimc.set_ntp_server 10.10.10.1 foo.bar.com + + ''' + + dn = "sys/svc-ext/ntp-svc" + inconfig = """""".format(server1, server2, server3, server4) + + ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False) + + return ret + + +def set_syslog_server(server=None, type="primary"): + ''' + Set the SYSLOG server on the host. + + Args: + server(str): The hostname or IP address of the SYSLOG server. + + type(str): Specifies the type of SYSLOG server. This can either be primary (default) or secondary. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.set_syslog_server foo.bar.com + + salt '*' cimc.set_syslog_server foo.bar.com primary + + salt '*' cimc.set_syslog_server foo.bar.com secondary + + ''' + + if not server: + raise salt.exceptions.CommandExecutionError("The SYSLOG server must be specified.") + + if type == "primary": + dn = "sys/svc-ext/syslog/client-primary" + inconfig = """ """.format(server) + elif type == "secondary": + dn = "sys/svc-ext/syslog/client-secondary" + inconfig = """ """.format(server) + else: + raise salt.exceptions.CommandExecutionError("The SYSLOG type must be either primary or secondary.") + + ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False) + + return ret + + +def tftp_update_bios(server=None, path=None): + ''' + Update the BIOS firmware through TFTP. + + Args: + server(str): The IP address or hostname of the TFTP server. + + path(str): The TFTP path and filename for the BIOS image. + + CLI Example: + + .. 
code-block:: bash + + salt '*' cimc.tftp_update_bios foo.bar.com HP-SL2.cap + + ''' + + if not server: + raise salt.exceptions.CommandExecutionError("The server name must be specified.") + + if not path: + raise salt.exceptions.CommandExecutionError("The TFTP path must be specified.") + + dn = "sys/rack-unit-1/bios/fw-updatable" + + inconfig = """""".format(server, path) + + ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False) + + return ret + + +def tftp_update_cimc(server=None, path=None): + ''' + Update the CIMC firmware through TFTP. + + Args: + server(str): The IP address or hostname of the TFTP server. + + path(str): The TFTP path and filename for the CIMC image. + + CLI Example: + + .. code-block:: bash + + salt '*' cimc.tftp_update_cimc foo.bar.com HP-SL2.bin + + ''' + + if not server: + raise salt.exceptions.CommandExecutionError("The server name must be specified.") + + if not path: + raise salt.exceptions.CommandExecutionError("The TFTP path must be specified.") + + dn = "sys/rack-unit-1/mgmt/fw-updatable" + + inconfig = """""".format(server, path) + + ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False) + + return ret diff --git a/salt/proxy/cimc.py b/salt/proxy/cimc.py new file mode 100644 index 0000000000..4692a8ef31 --- /dev/null +++ b/salt/proxy/cimc.py @@ -0,0 +1,290 @@ +# -*- coding: utf-8 -*- +''' + +Proxy Minion interface module for managing Cisco Integrated Management Controller devices. + +:codeauthor: :email:`Spencer Ervin ` +:maturity: new +:depends: none +:platform: unix + +This proxy minion enables Cisco Integrated Management Controller devices (hereafter referred to +as simply 'cimc' devices to be treated individually like a Salt Minion. + +The cimc proxy leverages the XML API functionality on the Cisco Integrated Management Controller. +The Salt proxy must have access to the cimc on HTTPS (tcp/443). 
+ +More in-depth conceptual reading on Proxy Minions can be found in the +:ref:`Proxy Minion ` section of Salt's +documentation. + + +Configuration +============= +To use this integration proxy module, please configure the following: + +Pillar +------ + +Proxy minions get their configuration from Salt's Pillar. Every proxy must +have a stanza in Pillar and a reference in the Pillar top-file that matches +the ID. + +.. code-block:: yaml + + proxy: + proxytype: cimc + host: + username: + password: + +proxytype +^^^^^^^^^ +The ``proxytype`` key and value pair is critical, as it tells Salt which +interface to load from the ``proxy`` directory in Salt's install hierarchy, +or from ``/srv/salt/_proxy`` on the Salt Master (if you have created your +own proxy module, for example). To use this cimc Proxy Module, set this to +``cimc``. + +host +^^^^ +The location, or ip/dns, of the cimc host. Required. + +username +^^^^^^^^ +The username used to login to the cimc host. Required. + +password +^^^^^^^^ +The password used to login to the cimc host. Required. + +''' + +from __future__ import absolute_import + +# Import Python Libs +import logging +import re + +# Import Salt Libs +import salt.exceptions +from salt._compat import ElementTree as ET + +# This must be present or the Salt loader won't load this module. +__proxyenabled__ = ['cimc'] + +# Variables are scoped to this module so we can have persistent data. +GRAINS_CACHE = {'vendor': 'Cisco'} +DETAILS = {} + +# Set up logging +log = logging.getLogger(__file__) + +# Define the module's virtual name +__virtualname__ = 'cimc' + + +def __virtual__(): + ''' + Only return if all the modules are available. + ''' + return __virtualname__ + + +def init(opts): + ''' + This function gets called when the proxy starts up. 
+ ''' + if 'host' not in opts['proxy']: + log.critical('No \'host\' key found in pillar for this proxy.') + return False + if 'username' not in opts['proxy']: + log.critical('No \'username\' key found in pillar for this proxy.') + return False + if 'password' not in opts['proxy']: + log.critical('No \'passwords\' key found in pillar for this proxy.') + return False + + DETAILS['url'] = 'https://{0}/nuova'.format(opts['proxy']['host']) + DETAILS['headers'] = {'Content-Type': 'application/x-www-form-urlencoded', + 'Content-Length': 62, + 'USER-Agent': 'lwp-request/2.06'} + + # Set configuration details + DETAILS['host'] = opts['proxy']['host'] + DETAILS['username'] = opts['proxy'].get('username') + DETAILS['password'] = opts['proxy'].get('password') + + # Ensure connectivity to the device + log.debug("Attempting to connect to cimc proxy host.") + get_config_resolver_class("computeRackUnit") + log.debug("Successfully connected to cimc proxy host.") + + DETAILS['initialized'] = True + + +def set_config_modify(dn=None, inconfig=None, hierarchical=False): + ''' + The configConfMo method configures the specified managed object in a single subtree (for example, DN). + ''' + ret = {} + cookie = logon() + + # Declare if the search contains hierarchical results. + h = "false" + if hierarchical is True: + h = "true" + + payload = '' \ + '{3}'.format(cookie, h, dn, inconfig) + r = __utils__['http.query'](DETAILS['url'], + data=payload, + method='POST', + decode_type='plain', + decode=True, + verify_ssl=False, + raise_error=True, + headers=DETAILS['headers']) + answer = re.findall(r'(<[\s\S.]*>)', r['text'])[0] + items = ET.fromstring(answer) + logout(cookie) + for item in items: + ret[item.tag] = prepare_return(item) + return ret + + +def get_config_resolver_class(cid=None, hierarchical=False): + ''' + The configResolveClass method returns requested managed object in a given class. + ''' + ret = {} + cookie = logon() + + # Declare if the search contains hierarchical results. 
+ h = "false" + if hierarchical is True: + h = "true" + + payload = ''.format(cookie, h, cid) + r = __utils__['http.query'](DETAILS['url'], + data=payload, + method='POST', + decode_type='plain', + decode=True, + verify_ssl=False, + raise_error=True, + headers=DETAILS['headers']) + + answer = re.findall(r'(<[\s\S.]*>)', r['text'])[0] + items = ET.fromstring(answer) + logout(cookie) + for item in items: + ret[item.tag] = prepare_return(item) + return ret + + +def logon(): + ''' + Logs into the cimc device and returns the session cookie. + ''' + content = {} + payload = "".format(DETAILS['username'], DETAILS['password']) + r = __utils__['http.query'](DETAILS['url'], + data=payload, + method='POST', + decode_type='plain', + decode=True, + verify_ssl=False, + raise_error=False, + headers=DETAILS['headers']) + answer = re.findall(r'(<[\s\S.]*>)', r['text'])[0] + items = ET.fromstring(answer) + for item in items.attrib: + content[item] = items.attrib[item] + + if 'outCookie' not in content: + raise salt.exceptions.CommandExecutionError("Unable to log into proxy device.") + + return content['outCookie'] + + +def logout(cookie=None): + ''' + Closes the session with the device. 
+ ''' + payload = ''.format(cookie) + __utils__['http.query'](DETAILS['url'], + data=payload, + method='POST', + decode_type='plain', + decode=True, + verify_ssl=False, + raise_error=True, + headers=DETAILS['headers']) + return + + +def prepare_return(x): + ''' + Converts the etree to dict + ''' + ret = {} + for a in list(x): + if a.tag not in ret: + ret[a.tag] = [] + ret[a.tag].append(prepare_return(a)) + for a in x.attrib: + ret[a] = x.attrib[a] + return ret + + +def initialized(): + ''' + Since grains are loaded in many different places and some of those + places occur before the proxy can be initialized, return whether + our init() function has been called + ''' + return DETAILS.get('initialized', False) + + +def grains(): + ''' + Get the grains from the proxied device + ''' + if not DETAILS.get('grains_cache', {}): + DETAILS['grains_cache'] = GRAINS_CACHE + try: + compute_rack = get_config_resolver_class('computeRackUnit', False) + DETAILS['grains_cache'] = compute_rack['outConfigs']['computeRackUnit'] + except Exception as err: + log.error(err) + return DETAILS['grains_cache'] + + +def grains_refresh(): + ''' + Refresh the grains from the proxied device + ''' + DETAILS['grains_cache'] = None + return grains() + + +def ping(): + ''' + Returns true if the device is reachable, else false. + ''' + try: + cookie = logon() + logout(cookie) + except Exception as err: + log.debug(err) + return False + return True + + +def shutdown(): + ''' + Shutdown the connection to the proxy device. For this proxy, + shutdown is a no-op. + ''' + log.debug('CIMC proxy shutdown() called.') diff --git a/salt/states/cimc.py b/salt/states/cimc.py new file mode 100644 index 0000000000..11b84839e7 --- /dev/null +++ b/salt/states/cimc.py @@ -0,0 +1,211 @@ +# -*- coding: utf-8 -*- +''' +A state module to manage Cisco UCS chassis devices. 
+ +:codeauthor: :email:`Spencer Ervin ` +:maturity: new +:depends: none +:platform: unix + + +About +===== +This state module was designed to handle connections to a Cisco Unified Computing System (UCS) chassis. This module +relies on the CIMC proxy module to interface with the device. + +.. seealso:: + :prox:`CIMC Proxy Module ` + +''' + +# Import Python Libs +from __future__ import absolute_import +import logging + +log = logging.getLogger(__name__) + + +def __virtual__(): + return 'cimc.get_system_info' in __salt__ + + +def _default_ret(name): + ''' + Set the default response values. + + ''' + ret = { + 'name': name, + 'changes': {}, + 'result': False, + 'comment': '' + } + return ret + + +def ntp(name, servers): + ''' + Ensures that the NTP servers are configured. Servers are provided as an individual string or list format. Only four + NTP servers will be reviewed. Any entries past four will be ignored. + + name: The name of the module function to execute. + + servers(str, list): The IP address or FQDN of the NTP servers. + + SLS Example: + + .. 
code-block:: yaml + + ntp_configuration_list: + cimc.ntp: + - servers: + - foo.bar.com + - 10.10.10.10 + + ntp_configuration_str: + cimc.ntp: + - servers: foo.bar.com + + ''' + ret = _default_ret(name) + + ntp_servers = ['', '', '', ''] + + # Parse our server arguments + if isinstance(servers, list): + i = 0 + for x in servers: + ntp_servers[i] = x + i += 1 + else: + ntp_servers[0] = servers + + conf = __salt__['cimc.get_ntp']() + + # Check if our NTP configuration is already set + req_change = False + try: + if conf['outConfigs']['commNtpProvider'][0]['ntpEnable'] != 'yes' \ + or ntp_servers[0] != conf['outConfigs']['commNtpProvider'][0]['ntpServer1'] \ + or ntp_servers[1] != conf['outConfigs']['commNtpProvider'][0]['ntpServer2'] \ + or ntp_servers[2] != conf['outConfigs']['commNtpProvider'][0]['ntpServer3'] \ + or ntp_servers[3] != conf['outConfigs']['commNtpProvider'][0]['ntpServer4']: + req_change = True + except KeyError as err: + ret['result'] = False + ret['comment'] = "Unable to confirm current NTP settings." + log.error(err) + return ret + + if req_change: + + try: + update = __salt__['cimc.set_ntp_server'](ntp_servers[0], + ntp_servers[1], + ntp_servers[2], + ntp_servers[3]) + if update['outConfig']['commNtpProvider'][0]['status'] != 'modified': + ret['result'] = False + ret['comment'] = "Error setting NTP configuration." + return ret + except Exception as err: + ret['result'] = False + ret['comment'] = "Error setting NTP configuration." + log.error(err) + return ret + + ret['changes']['before'] = conf + ret['changes']['after'] = __salt__['cimc.get_ntp']() + ret['comment'] = "NTP settings modified." + else: + ret['comment'] = "NTP already configured. No changes required." + + ret['result'] = True + + return ret + + +def syslog(name, primary=None, secondary=None): + ''' + Ensures that the syslog servers are set to the specified values. A value of None will be ignored. + + name: The name of the module function to execute. 
+ + primary(str): The IP address or FQDN of the primary syslog server. + + secondary(str): The IP address or FQDN of the secondary syslog server. + + SLS Example: + + .. code-block:: yaml + + syslog_configuration: + cimc.syslog: + - primary: 10.10.10.10 + - secondary: foo.bar.com + + ''' + ret = _default_ret(name) + + conf = __salt__['cimc.get_syslog']() + + req_change = False + + if primary: + prim_change = True + if 'outConfigs' in conf and 'commSyslogClient' in conf['outConfigs']: + for entry in conf['outConfigs']['commSyslogClient']: + if entry['name'] != 'primary': + continue + if entry['adminState'] == 'enabled' and entry['hostname'] == primary: + prim_change = False + + if prim_change: + try: + update = __salt__['cimc.set_syslog_server'](primary, "primary") + if update['outConfig']['commSyslogClient'][0]['status'] == 'modified': + req_change = True + else: + ret['result'] = False + ret['comment'] = "Error setting primary SYSLOG server." + return ret + except Exception as err: + ret['result'] = False + ret['comment'] = "Error setting primary SYSLOG server." + log.error(err) + return ret + + if secondary: + sec_change = True + if 'outConfig' in conf and 'commSyslogClient' in conf['outConfig']: + for entry in conf['outConfig']['commSyslogClient']: + if entry['name'] != 'secondary': + continue + if entry['adminState'] == 'enabled' and entry['hostname'] == secondary: + sec_change = False + + if sec_change: + try: + update = __salt__['cimc.set_syslog_server'](secondary, "secondary") + if update['outConfig']['commSyslogClient'][0]['status'] == 'modified': + req_change = True + else: + ret['result'] = False + ret['comment'] = "Error setting secondary SYSLOG server." + return ret + except Exception as err: + ret['result'] = False + ret['comment'] = "Error setting secondary SYSLOG server." 
+ log.error(err) + return ret + + if req_change: + ret['changes']['before'] = conf + ret['changes']['after'] = __salt__['cimc.get_syslog']() + ret['comment'] = "SYSLOG settings modified." + else: + ret['comment'] = "SYSLOG already configured. No changes required." + + ret['result'] = True + + return ret From 507c1b871e0fdd525941dbd08e70063f474ff634 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 6 Sep 2017 15:27:47 -0500 Subject: [PATCH 454/639] Add a GitConfigParser class for manipulating git config files This class is designed to be used for (among other things) gitfs and git_pillar, to enforce config settings which reside in a git config file without resorting to using the git CLI. --- salt/utils/configparser.py | 259 +++++++++++++++++++++++++ tests/unit/utils/test_configparser.py | 269 ++++++++++++++++++++++++++ 2 files changed, 528 insertions(+) create mode 100644 salt/utils/configparser.py create mode 100644 tests/unit/utils/test_configparser.py diff --git a/salt/utils/configparser.py b/salt/utils/configparser.py new file mode 100644 index 0000000000..32246d5ede --- /dev/null +++ b/salt/utils/configparser.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- +# Import Python libs +from __future__ import absolute_import +import re + +# Import Salt libs +import salt.utils.stringutils + +# Import 3rd-party libs +from salt.ext import six +from salt.ext.six.moves import configparser # pylint: disable=redefined-builtin + + +# pylint: disable=string-substitution-usage-error +class GitConfigParser(configparser.RawConfigParser, object): + ''' + Custom ConfigParser which reads and writes git config files. 
+ + READ A GIT CONFIG FILE INTO THE PARSER OBJECT + + >>> from salt.utils.configparser import GitConfigParser + >>> conf = GitConfigParser() + >>> conf.read('/home/user/.git/config') + + MAKE SOME CHANGES + + >>> # Change user.email + >>> conf.set('user', 'email', 'myaddress@mydomain.tld') + >>> # Add another refspec to the "origin" remote's "fetch" multivar + >>> conf.set_multivar('remote "origin"', 'fetch', '+refs/tags/*:refs/tags/*') + + WRITE THE CONFIG TO A FILEHANDLE + + >>> import salt.utils.files + >>> with salt.utils.files.fopen('/home/user/.git/config', 'w') as fh: + ... conf.write(fh) + >>> + ''' + DEFAULTSECT = u'DEFAULT' + SPACEINDENT = u' ' * 8 + + def __init__(self, defaults=None, dict_type=configparser._default_dict, + allow_no_value=True): + ''' + Changes default value for allow_no_value from False to True + ''' + super(GitConfigParser, self).__init__( + defaults, dict_type, allow_no_value) + + def _read(self, fp, fpname): + ''' + Makes the following changes from the RawConfigParser: + + 1. Strip leading tabs from non-section-header lines. + 2. Treat 8 spaces at the beginning of a line as a tab. + 3. Treat lines beginning with a tab as options. + 4. Drops support for continuation lines. + 5. Multiple values for a given option are stored as a list. + 6. Keys and values are decoded to the system encoding. + ''' + cursect = None # None, or a dictionary + optname = None + lineno = 0 + e = None # None, or an exception + while True: + line = fp.readline() + if six.PY2: + line = line.decode(__salt_system_encoding__) + if not line: + break + lineno = lineno + 1 + # comment or blank line? + if line.strip() == u'' or line[0] in u'#;': + continue + if line.split(None, 1)[0].lower() == u'rem' and line[0] in u'rR': + # no leading whitespace + continue + # Replace space indentation with a tab. Allows parser to work + # properly in cases where someone has edited the git config by hand + # and indented using spaces instead of tabs. 
+ if line.startswith(self.SPACEINDENT): + line = u'\t' + line[len(self.SPACEINDENT):] + # is it a section header? + mo = self.SECTCRE.match(line) + if mo: + sectname = mo.group(u'header') + if sectname in self._sections: + cursect = self._sections[sectname] + elif sectname == self.DEFAULTSECT: + cursect = self._defaults + else: + cursect = self._dict() + self._sections[sectname] = cursect + # So sections can't start with a continuation line + optname = None + # no section header in the file? + elif cursect is None: + raise configparser.MissingSectionHeaderError(fpname, lineno, line) + # an option line? + else: + mo = self._optcre.match(line.lstrip()) + if mo: + optname, vi, optval = mo.group(u'option', u'vi', u'value') + optname = self.optionxform(optname.rstrip()) + if optval is None: + optval = u'' + if optval: + if vi in (u'=', u':') and u';' in optval: + # ';' is a comment delimiter only if it follows + # a spacing character + pos = optval.find(u';') + if pos != -1 and optval[pos-1].isspace(): + optval = optval[:pos] + optval = optval.strip() + # Empty strings should be considered as blank strings + if optval in (u'""', u"''"): + optval = u'' + self._add_option(cursect, optname, optval) + else: + # a non-fatal parsing error occurred. set up the + # exception but keep going. the exception will be + # raised at the end of the file and will contain a + # list of all bogus lines + if not e: + e = configparser.ParsingError(fpname) + e.append(lineno, repr(line)) + # if any parsing errors occurred, raise an exception + if e: + raise e # pylint: disable=raising-bad-type + + def _string_check(self, value, allow_list=False): + ''' + Based on the string-checking code from the SafeConfigParser's set() + function, this enforces string values for config options. 
+ ''' + if self._optcre is self.OPTCRE or value: + is_list = isinstance(value, list) + if is_list and not allow_list: + raise TypeError(u'option value cannot be a list unless ' + u'allow_list is True') + elif not is_list: + value = [value] + if not all(isinstance(x, six.string_types) for x in value): + raise TypeError(u'option values must be strings') + + def get(self, section, option, as_list=False): + ''' + Adds an optional "as_list" argument to ensure a list is returned. This + is helpful when iterating over an option which may or may not be a + multivar. + ''' + ret = super(GitConfigParser, self).get(section, option) + if as_list and not isinstance(ret, list): + ret = [ret] + return ret + + def set(self, section, option, value=u''): + ''' + This is overridden from the RawConfigParser merely to change the + default value for the 'value' argument. + ''' + self._string_check(value) + super(GitConfigParser, self).set(section, option, value) + + def _add_option(self, sectdict, key, value): + if isinstance(value, list): + sectdict[key] = value + elif isinstance(value, six.string_types): + try: + sectdict[key].append(value) + except KeyError: + # Key not present, set it + sectdict[key] = value + except AttributeError: + # Key is present but the value is not a list. Make it into a list + # and then append to it. + sectdict[key] = [sectdict[key]] + sectdict[key].append(value) + else: + raise TypeError(u'Expected str or list for option value, got %s' % type(value).__name__) + + def set_multivar(self, section, option, value=u''): + ''' + This function is unique to the GitConfigParser. It will add another + value for the option if it already exists, converting the option's + value to a list if applicable. + + If "value" is a list, then any existing values for the specified + section and option will be replaced with the list being passed. 
+ ''' + self._string_check(value, allow_list=True) + if not section or section == self.DEFAULTSECT: + sectdict = self._defaults + else: + try: + sectdict = self._sections[section] + except KeyError: + raise configparser.NoSectionError(section) + key = self.optionxform(option) + self._add_option(sectdict, key, value) + + def remove_option_regexp(self, section, option, expr): + ''' + Remove an option with a value matching the expression. Works on single + values and multivars. + ''' + if not section or section == self.DEFAULTSECT: + sectdict = self._defaults + else: + try: + sectdict = self._sections[section] + except KeyError: + raise configparser.NoSectionError(section) + option = self.optionxform(option) + if option not in sectdict: + return False + regexp = re.compile(expr) + if isinstance(sectdict[option], list): + new_list = [x for x in sectdict[option] if not regexp.search(x)] + # Revert back to a list if we removed all but one item + if len(new_list) == 1: + new_list = new_list[0] + existed = new_list != sectdict[option] + if existed: + del sectdict[option] + sectdict[option] = new_list + del new_list + else: + existed = bool(regexp.search(sectdict[option])) + if existed: + del sectdict[option] + return existed + + def write(self, fp_): + ''' + Makes the following changes from the RawConfigParser: + + 1. Prepends options with a tab character. + 2. Does not write a blank line between sections. + 3. When an option's value is a list, a line for each option is written. + This allows us to support multivars like a remote's "fetch" option. + 4. Drops support for continuation lines. 
+ ''' + convert = salt.utils.stringutils.to_bytes \ + if u'b' in fp_.mode \ + else salt.utils.stringutils.to_str + if self._defaults: + fp_.write(convert(u'[%s]\n' % self.DEFAULTSECT)) + for (key, value) in six.iteritems(self._defaults): + value = salt.utils.stringutils.to_unicode(value).replace(u'\n', u'\n\t') + fp_.write(convert(u'%s = %s\n' % (key, value))) + for section in self._sections: + fp_.write(convert(u'[%s]\n' % section)) + for (key, value) in six.iteritems(self._sections[section]): + if (value is not None) or (self._optcre == self.OPTCRE): + if not isinstance(value, list): + value = [value] + for item in value: + fp_.write(convert(u'\t%s\n' % u' = '.join((key, item)).rstrip())) diff --git a/tests/unit/utils/test_configparser.py b/tests/unit/utils/test_configparser.py new file mode 100644 index 0000000000..3b6384b0a3 --- /dev/null +++ b/tests/unit/utils/test_configparser.py @@ -0,0 +1,269 @@ +# -*- coding: utf-8 -*- +''' +tests.unit.utils.test_configparser +================================== + +Test the funcs in the custom parsers in salt.utils.configparser +''' +# Import Python Libs +from __future__ import absolute_import +import copy +import errno +import logging +import os + +log = logging.getLogger(__name__) + +# Import Salt Testing Libs +from tests.support.unit import TestCase +from tests.support.paths import TMP + +# Import salt libs +import salt.utils.files +import salt.utils.stringutils +from salt.utils.configparser import GitConfigParser + +# Import 3rd-party libs +from salt.ext.six.moves import configparser + +# The user.name param here is intentionally indented with spaces instead of a +# tab to test that we properly load a file with mixed indentation. 
+ORIG_CONFIG = u'''[user] + name = Артём Анисимов +\temail = foo@bar.com +[remote "origin"] +\turl = https://github.com/terminalmage/salt.git +\tfetch = +refs/heads/*:refs/remotes/origin/* +\tpushurl = git@github.com:terminalmage/salt.git +[color "diff"] +\told = 196 +\tnew = 39 +[core] +\tpager = less -R +\trepositoryformatversion = 0 +\tfilemode = true +\tbare = false +\tlogallrefupdates = true +[alias] +\tmodified = ! git status --porcelain | awk 'match($1, "M"){print $2}' +\tgraph = log --all --decorate --oneline --graph +\thist = log --pretty=format:\\"%h %ad | %s%d [%an]\\" --graph --date=short +[http] +\tsslverify = false'''.split(u'\n') # future lint: disable=non-unicode-string + + +class TestGitConfigParser(TestCase): + ''' + Tests for salt.utils.configparser.GitConfigParser + ''' + maxDiff = None + orig_config = os.path.join(TMP, u'test_gitconfig.orig') + new_config = os.path.join(TMP, u'test_gitconfig.new') + remote = u'remote "origin"' + + def tearDown(self): + del self.conf + try: + os.remove(self.new_config) + except OSError as exc: + if exc.errno != errno.ENOENT: + raise + + def setUp(self): + if not os.path.exists(self.orig_config): + with salt.utils.files.fopen(self.orig_config, u'wb') as fp_: + fp_.write( + salt.utils.stringutils.to_bytes( + u'\n'.join(ORIG_CONFIG) + ) + ) + self.conf = GitConfigParser() + self.conf.read(self.orig_config) + + @classmethod + def tearDownClass(cls): + try: + os.remove(cls.orig_config) + except OSError as exc: + if exc.errno != errno.ENOENT: + raise + + @staticmethod + def fix_indent(lines): + ''' + Fixes the space-indented 'user' line, because when we write the config + object to a file space indentation will be replaced by tab indentation. 
+ ''' + ret = copy.copy(lines) + for i, _ in enumerate(ret): + if ret[i].startswith(GitConfigParser.SPACEINDENT): + ret[i] = ret[i].replace(GitConfigParser.SPACEINDENT, u'\t') + return ret + + @staticmethod + def get_lines(path): + with salt.utils.files.fopen(path, u'r') as fp_: + return salt.utils.stringutils.to_unicode(fp_.read()).splitlines() + + def _test_write(self, mode): + with salt.utils.files.fopen(self.new_config, mode) as fp_: + self.conf.write(fp_) + self.assertEqual( + self.get_lines(self.new_config), + self.fix_indent(ORIG_CONFIG) + ) + + def test_get(self): + ''' + Test getting an option's value + ''' + # Numeric values should be loaded as strings + self.assertEqual(self.conf.get(u'color "diff"', u'old'), u'196') + # Complex strings should be loaded with their literal quotes and + # slashes intact + self.assertEqual( + self.conf.get(u'alias', u'modified'), + u"""! git status --porcelain | awk 'match($1, "M"){print $2}'""" + ) + self.assertEqual( + self.conf.get(u'alias', u'hist'), + salt.utils.stringutils.to_unicode( + r"""log --pretty=format:\"%h %ad | %s%d [%an]\" --graph --date=short""" + ) + ) + + def test_read_space_indent(self): + ''' + Test that user.name was successfully loaded despite being indented + using spaces instead of a tab. Additionally, this tests that the value + was loaded as a unicode type on PY2. 
+ ''' + self.assertEqual(self.conf.get(u'user', u'name'), u'Артём Анисимов') + + def test_set_new_option(self): + ''' + Test setting a new option in an existing section + ''' + self.conf.set(u'http', u'useragent', u'myawesomeagent') + self.assertEqual(self.conf.get(u'http', u'useragent'), u'myawesomeagent') + + def test_add_section(self): + ''' + Test adding a section and adding an item to that section + ''' + self.conf.add_section(u'foo') + self.conf.set(u'foo', u'bar', u'baz') + self.assertEqual(self.conf.get(u'foo', u'bar'), u'baz') + + def test_replace_option(self): + ''' + Test replacing an existing option + ''' + # We're also testing the normalization of key names, here. Setting + # "sslVerify" should actually set an "sslverify" option. + self.conf.set(u'http', u'sslVerify', u'true') + self.assertEqual(self.conf.get(u'http', u'sslverify'), u'true') + + def test_set_multivar(self): + ''' + Test setting a multivar and then writing the resulting file + ''' + orig_refspec = u'+refs/heads/*:refs/remotes/origin/*' + new_refspec = u'+refs/tags/*:refs/tags/*' + # Make sure that the original value is a string + self.assertEqual( + self.conf.get(self.remote, u'fetch'), + orig_refspec + ) + # Add another refspec + self.conf.set_multivar(self.remote, u'fetch', new_refspec) + # The value should now be a list + self.assertEqual( + self.conf.get(self.remote, u'fetch'), + [orig_refspec, new_refspec] + ) + # Write the config object to a file + with salt.utils.files.fopen(self.new_config, 'w') as fp_: + self.conf.write(fp_) + # Confirm that the new file was written correctly + expected = self.fix_indent(ORIG_CONFIG) + expected.insert(6, u'\tfetch = %s' % new_refspec) # pylint: disable=string-substitution-usage-error + self.assertEqual(self.get_lines(self.new_config), expected) + + def test_remove_option(self): + ''' + test removing an option, including all items from a multivar + ''' + for item in (u'fetch', u'pushurl'): + self.conf.remove_option(self.remote, item) + # To 
confirm that the option is now gone, a get should raise an + # NoOptionError exception. + self.assertRaises( + configparser.NoOptionError, + self.conf.get, + self.remote, + item) + + def test_remove_option_regexp(self): + ''' + test removing an option, including all items from a multivar + ''' + orig_refspec = u'+refs/heads/*:refs/remotes/origin/*' + new_refspec_1 = u'+refs/tags/*:refs/tags/*' + new_refspec_2 = u'+refs/foo/*:refs/foo/*' + # First, add both refspecs + self.conf.set_multivar(self.remote, u'fetch', new_refspec_1) + self.conf.set_multivar(self.remote, u'fetch', new_refspec_2) + # Make sure that all three values are there + self.assertEqual( + self.conf.get(self.remote, u'fetch'), + [orig_refspec, new_refspec_1, new_refspec_2] + ) + # If the regex doesn't match, no items should be removed + self.assertFalse( + self.conf.remove_option_regexp( + self.remote, + u'fetch', + salt.utils.stringutils.to_unicode(r'\d{7,10}') # future lint: disable=non-unicode-string + ) + ) + # Make sure that all three values are still there (since none should + # have been removed) + self.assertEqual( + self.conf.get(self.remote, u'fetch'), + [orig_refspec, new_refspec_1, new_refspec_2] + ) + # Remove one of the values + self.assertTrue( + self.conf.remove_option_regexp(self.remote, u'fetch', u'tags')) + # Confirm that the value is gone + self.assertEqual( + self.conf.get(self.remote, u'fetch'), + [orig_refspec, new_refspec_2] + ) + # Remove the other one we added earlier + self.assertTrue( + self.conf.remove_option_regexp(self.remote, u'fetch', u'foo')) + # Since the option now only has one value, it should be a string + self.assertEqual(self.conf.get(self.remote, u'fetch'), orig_refspec) + # Remove the last remaining option + self.assertTrue( + self.conf.remove_option_regexp(self.remote, u'fetch', u'heads')) + # Trying to do a get now should raise an exception + self.assertRaises( + configparser.NoOptionError, + self.conf.get, + self.remote, + u'fetch') + + def 
test_write(self): + ''' + Test writing using non-binary filehandle + ''' + self._test_write(mode='w') + + def test_write_binary(self): + ''' + Test writing using binary filehandle + ''' + self._test_write(mode='wb') From 626129f5a31d859fa9861d03851a0c4525d72213 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 7 Sep 2017 02:38:45 -0500 Subject: [PATCH 455/639] Import all of configparser into module for easier access to error classes --- salt/utils/configparser.py | 24 +++++++++++++++--------- tests/unit/utils/test_configparser.py | 15 ++++++--------- 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/salt/utils/configparser.py b/salt/utils/configparser.py index 32246d5ede..5a103ee997 100644 --- a/salt/utils/configparser.py +++ b/salt/utils/configparser.py @@ -8,18 +8,24 @@ import salt.utils.stringutils # Import 3rd-party libs from salt.ext import six -from salt.ext.six.moves import configparser # pylint: disable=redefined-builtin +from salt.ext.six.moves.configparser import * # pylint: disable=no-name-in-module,wildcard-import + +try: + from collections import OrderedDict as _default_dict +except ImportError: + # fallback for setup.py which hasn't yet built _collections + _default_dict = dict # pylint: disable=string-substitution-usage-error -class GitConfigParser(configparser.RawConfigParser, object): +class GitConfigParser(RawConfigParser, object): # pylint: disable=undefined-variable ''' Custom ConfigParser which reads and writes git config files. 
READ A GIT CONFIG FILE INTO THE PARSER OBJECT - >>> from salt.utils.configparser import GitConfigParser - >>> conf = GitConfigParser() + >>> import salt.utils.configparser + >>> conf = salt.utils.configparser.GitConfigParser() >>> conf.read('/home/user/.git/config') MAKE SOME CHANGES @@ -39,7 +45,7 @@ class GitConfigParser(configparser.RawConfigParser, object): DEFAULTSECT = u'DEFAULT' SPACEINDENT = u' ' * 8 - def __init__(self, defaults=None, dict_type=configparser._default_dict, + def __init__(self, defaults=None, dict_type=_default_dict, allow_no_value=True): ''' Changes default value for allow_no_value from False to True @@ -95,7 +101,7 @@ class GitConfigParser(configparser.RawConfigParser, object): optname = None # no section header in the file? elif cursect is None: - raise configparser.MissingSectionHeaderError(fpname, lineno, line) + raise MissingSectionHeaderError(fpname, lineno, line) # pylint: disable=undefined-variable # an option line? else: mo = self._optcre.match(line.lstrip()) @@ -122,7 +128,7 @@ class GitConfigParser(configparser.RawConfigParser, object): # raised at the end of the file and will contain a # list of all bogus lines if not e: - e = configparser.ParsingError(fpname) + e = ParsingError(fpname) # pylint: disable=undefined-variable e.append(lineno, repr(line)) # if any parsing errors occurred, raise an exception if e: @@ -195,7 +201,7 @@ class GitConfigParser(configparser.RawConfigParser, object): try: sectdict = self._sections[section] except KeyError: - raise configparser.NoSectionError(section) + raise NoSectionError(section) # pylint: disable=undefined-variable key = self.optionxform(option) self._add_option(sectdict, key, value) @@ -210,7 +216,7 @@ class GitConfigParser(configparser.RawConfigParser, object): try: sectdict = self._sections[section] except KeyError: - raise configparser.NoSectionError(section) + raise NoSectionError(section) # pylint: disable=undefined-variable option = self.optionxform(option) if option not in 
sectdict: return False diff --git a/tests/unit/utils/test_configparser.py b/tests/unit/utils/test_configparser.py index 3b6384b0a3..06794c11ca 100644 --- a/tests/unit/utils/test_configparser.py +++ b/tests/unit/utils/test_configparser.py @@ -21,10 +21,7 @@ from tests.support.paths import TMP # Import salt libs import salt.utils.files import salt.utils.stringutils -from salt.utils.configparser import GitConfigParser - -# Import 3rd-party libs -from salt.ext.six.moves import configparser +import salt.utils.configparser # The user.name param here is intentionally indented with spaces instead of a # tab to test that we properly load a file with mixed indentation. @@ -77,7 +74,7 @@ class TestGitConfigParser(TestCase): u'\n'.join(ORIG_CONFIG) ) ) - self.conf = GitConfigParser() + self.conf = salt.utils.configparser.GitConfigParser() self.conf.read(self.orig_config) @classmethod @@ -96,8 +93,8 @@ class TestGitConfigParser(TestCase): ''' ret = copy.copy(lines) for i, _ in enumerate(ret): - if ret[i].startswith(GitConfigParser.SPACEINDENT): - ret[i] = ret[i].replace(GitConfigParser.SPACEINDENT, u'\t') + if ret[i].startswith(salt.utils.configparser.GitConfigParser.SPACEINDENT): + ret[i] = ret[i].replace(salt.utils.configparser.GitConfigParser.SPACEINDENT, u'\t') return ret @staticmethod @@ -199,7 +196,7 @@ class TestGitConfigParser(TestCase): # To confirm that the option is now gone, a get should raise an # NoOptionError exception. 
self.assertRaises( - configparser.NoOptionError, + salt.utils.configparser.NoOptionError, self.conf.get, self.remote, item) @@ -251,7 +248,7 @@ class TestGitConfigParser(TestCase): self.conf.remove_option_regexp(self.remote, u'fetch', u'heads')) # Trying to do a get now should raise an exception self.assertRaises( - configparser.NoOptionError, + salt.utils.configparser.NoOptionError, self.conf.get, self.remote, u'fetch') From 27315c31c8cf002b20b655385f18aa7f0a0c7b08 Mon Sep 17 00:00:00 2001 From: Mike Place Date: Tue, 12 Sep 2017 10:41:06 -0600 Subject: [PATCH 456/639] Add release note about ssh rosters --- doc/topics/releases/oxygen.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index d3cd440d45..7bcc72bb4e 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -51,6 +51,12 @@ New NaCl Renderer A new renderer has been added for encrypted data. +New salt-ssh roster +------------------- + +A new roster has been added that allows users to pull in a list of hosts +for salt-ssh targeting from a ~/.ssh configuration. 
+
 New GitFS Features
 ------------------
 
From 6c3005eead0190e33d00168d640935aedba0ccbc Mon Sep 17 00:00:00 2001
From: Erik Johnson
Date: Thu, 7 Sep 2017 03:41:39 -0500
Subject: [PATCH 457/639] Manage git config using GitConfigParser instead of
 git CLI

---
 salt/utils/gitfs.py | 251 ++++++++++++++++----------------------------
 1 file changed, 91 insertions(+), 160 deletions(-)

diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py
index 810ac35f1e..53482c4ccb 100644
--- a/salt/utils/gitfs.py
+++ b/salt/utils/gitfs.py
@@ -21,6 +21,7 @@ from datetime import datetime
 
 # Import salt libs
 import salt.utils
+import salt.utils.configparser
 import salt.utils.files
 import salt.utils.itertools
 import salt.utils.path
@@ -29,13 +30,12 @@ import salt.utils.stringutils
 import salt.utils.url
 import salt.utils.versions
 import salt.fileserver
-from salt.config import DEFAULT_MASTER_OPTS as __DEFAULT_MASTER_OPTS
+from salt.config import DEFAULT_MASTER_OPTS as _DEFAULT_MASTER_OPTS
 from salt.utils.odict import OrderedDict
 from salt.utils.process import os_is_running as pid_exists
 from salt.exceptions import (
     FileserverConfigError,
     GitLockError,
-    GitRemoteError,
     get_error_message
 )
 from salt.utils.event import tagify
@@ -44,7 +44,7 @@ from salt.utils.versions import LooseVersion as _LooseVersion
 # Import third party libs
 from salt.ext import six
 
-VALID_REF_TYPES = __DEFAULT_MASTER_OPTS['gitfs_ref_types']
+VALID_REF_TYPES = _DEFAULT_MASTER_OPTS['gitfs_ref_types']
 
 # Optional per-remote params that can only be used on a per-remote basis, and
 # thus do not have defaults in salt/config.py.
@@ -327,6 +327,28 @@ class GitProvider(object):
                 setattr(self, '_' + key, self.conf[key])
                 self.add_conf_overlay(key)
 
+        if not hasattr(self, 'refspecs'):
+            # This was not specified as a per-remote overrideable parameter
+            # when instantiating an instance of a GitBase subclass. Make sure
+            # that we set this attribute so we at least have a sane default and
+            # are able to fetch.
+ key = '{0}_refspecs'.format(self.role) + try: + default_refspecs = _DEFAULT_MASTER_OPTS[key] + except KeyError: + log.critical( + 'The \'%s\' option has no default value in ' + 'salt/config/__init__.py.', key + ) + failhard(self.role) + + setattr(self, 'refspecs', default_refspecs) + log.debug( + 'The \'refspecs\' option was not explicitly defined as a ' + 'configurable parameter. Falling back to %s for %s remote ' + '\'%s\'.', default_refspecs, self.role, self.id + ) + for item in ('env_whitelist', 'env_blacklist'): val = getattr(self, item, None) if val: @@ -493,12 +515,6 @@ class GitProvider(object): return strip_sep(getattr(self, '_' + name)) setattr(cls, name, _getconf) - def add_refspecs(self, *refspecs): - ''' - This function must be overridden in a sub-class - ''' - raise NotImplementedError() - def check_root(self): ''' Check if the relative root path exists in the checked-out copy of the @@ -591,55 +607,74 @@ class GitProvider(object): success.append(msg) return success, failed - def configure_refspecs(self): + def enforce_git_config(self): ''' - Ensure that the configured refspecs are set + For the config options which need to be maintained in the git config, + ensure that the git config file is configured as desired. ''' - try: - refspecs = set(self.get_refspecs()) - except (git.exc.GitCommandError, GitRemoteError) as exc: - log.error( - 'Failed to get refspecs for %s remote \'%s\': %s', - self.role, - self.id, - exc - ) - return - - desired_refspecs = set(self.refspecs) - to_delete = refspecs - desired_refspecs if refspecs else set() - if to_delete: - # There is no native unset support in Pygit2, and GitPython just - # wraps the CLI anyway. So we'll just use the git CLI to - # --unset-all the config value. Then, we will add back all - # configured refspecs. 
This is more foolproof than trying to remove - # specific refspecs, as removing specific ones necessitates - # formulating a regex to match, and the fact that slashes and - # asterisks are in refspecs complicates this. - cmd_str = 'git config --unset-all remote.origin.fetch' - cmd = subprocess.Popen( - shlex.split(cmd_str), - close_fds=not salt.utils.platform.is_windows(), - cwd=os.path.dirname(self.gitdir), - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - output = cmd.communicate()[0] - if cmd.returncode != 0: - log.error( - 'Failed to unset git config value for %s remote \'%s\'. ' - 'Output from \'%s\' follows:\n%s', - self.role, self.id, cmd_str, output - ) - return - # Since we had to remove all refspecs, we now need to add all - # desired refspecs to achieve the desired configuration. - to_add = desired_refspecs + git_config = os.path.join(self.gitdir, 'config') + conf = salt.utils.configparser.GitConfigParser() + if not conf.read(git_config): + log.error('Failed to read from git config file %s', git_config) else: - # We didn't need to delete any refspecs, so we'll only need to add - # the desired refspecs that aren't currently configured. - to_add = desired_refspecs - refspecs + # We are currently enforcing the following git config items: + # 1. refspecs used in fetch + # 2. http.sslVerify + conf_changed = False - self.add_refspecs(*to_add) + # 1. 
refspecs + try: + refspecs = sorted( + conf.get('remote "origin"', 'fetch', as_list=True)) + except salt.utils.configparser.NoSectionError: + # First time we've init'ed this repo, we need to add the + # section for the remote to the git config + conf.add_section('remote "origin"') + conf.set('remote "origin"', 'url', self.url) + conf_changed = True + refspecs = [] + desired_refspecs = sorted(self.refspecs) + log.debug( + 'Current refspecs for %s remote \'%s\': %s (desired: %s)', + self.role, self.id, refspecs, desired_refspecs + ) + if refspecs != desired_refspecs: + conf.set_multivar('remote "origin"', 'fetch', self.refspecs) + log.debug( + 'Refspecs for %s remote \'%s\' set to %s', + self.role, self.id, desired_refspecs + ) + conf_changed = True + + # 2. http.sslVerify + try: + ssl_verify = conf.get('http', 'sslVerify') + except salt.utils.configparser.NoSectionError: + conf.add_section('http') + ssl_verify = None + except salt.utils.configparser.NoOptionError: + ssl_verify = None + desired_ssl_verify = six.text_type(self.ssl_verify).lower() + log.debug( + 'Current http.sslVerify for %s remote \'%s\': %s (desired: %s)', + self.role, self.id, ssl_verify, desired_ssl_verify + ) + if ssl_verify != desired_ssl_verify: + conf.set('http', 'sslVerify', desired_ssl_verify) + log.debug( + 'http.sslVerify for %s remote \'%s\' set to %s', + self.role, self.id, desired_ssl_verify + ) + conf_changed = True + + # Write changes, if necessary + if conf_changed: + with salt.utils.files.fopen(git_config, 'w') as fp_: + conf.write(fp_) + log.debug( + 'Config updates for %s remote \'%s\' written to %s', + self.role, self.id, git_config + ) def fetch(self): ''' @@ -853,12 +888,6 @@ class GitProvider(object): else target return self.branch - def get_refspecs(self): - ''' - This function must be overridden in a sub-class - ''' - raise NotImplementedError() - def get_tree(self, tgt_env): ''' Return a tree object for the specified environment @@ -935,23 +964,6 @@ class 
GitPython(GitProvider): override_params, cache_root, role ) - def add_refspecs(self, *refspecs): - ''' - Add the specified refspecs to the "origin" remote - ''' - for refspec in refspecs: - try: - self.repo.git.config('--add', 'remote.origin.fetch', refspec) - log.debug( - 'Added refspec \'%s\' to %s remote \'%s\'', - refspec, self.role, self.id - ) - except git.exc.GitCommandError as exc: - log.error( - 'Failed to add refspec \'%s\' to %s remote \'%s\': %s', - refspec, self.role, self.id, exc - ) - def checkout(self): ''' Checkout the configured branch/tag. We catch an "Exception" class here @@ -1039,29 +1051,7 @@ class GitPython(GitProvider): return new self.gitdir = salt.utils.path.join(self.repo.working_dir, '.git') - - if not self.repo.remotes: - try: - self.repo.create_remote('origin', self.url) - except os.error: - # This exception occurs when two processes are trying to write - # to the git config at once, go ahead and pass over it since - # this is the only write. This should place a lock down. 
- pass - else: - new = True - - try: - ssl_verify = self.repo.git.config('--get', 'http.sslVerify') - except git.exc.GitCommandError: - ssl_verify = '' - desired_ssl_verify = str(self.ssl_verify).lower() - if ssl_verify != desired_ssl_verify: - self.repo.git.config('http.sslVerify', desired_ssl_verify) - - # Ensure that refspecs for the "origin" remote are set up as configured - if hasattr(self, 'refspecs'): - self.configure_refspecs() + self.enforce_git_config() return new @@ -1213,13 +1203,6 @@ class GitPython(GitProvider): return blob, blob.hexsha, blob.mode return None, None, None - def get_refspecs(self): - ''' - Return the configured refspecs - ''' - refspecs = self.repo.git.config('--get-all', 'remote.origin.fetch') - return [x.strip() for x in refspecs.splitlines()] - def get_tree_from_branch(self, ref): ''' Return a git.Tree object matching a head ref fetched into @@ -1272,27 +1255,6 @@ class Pygit2(GitProvider): override_params, cache_root, role ) - def add_refspecs(self, *refspecs): - ''' - Add the specified refspecs to the "origin" remote - ''' - for refspec in refspecs: - try: - self.repo.config.set_multivar( - 'remote.origin.fetch', - 'FOO', - refspec - ) - log.debug( - 'Added refspec \'%s\' to %s remote \'%s\'', - refspec, self.role, self.id - ) - except Exception as exc: - log.error( - 'Failed to add refspec \'%s\' to %s remote \'%s\': %s', - refspec, self.role, self.id, exc - ) - def checkout(self): ''' Checkout the configured branch/tag @@ -1519,30 +1481,7 @@ class Pygit2(GitProvider): return new self.gitdir = salt.utils.path.join(self.repo.workdir, '.git') - - if not self.repo.remotes: - try: - self.repo.create_remote('origin', self.url) - except os.error: - # This exception occurs when two processes are trying to write - # to the git config at once, go ahead and pass over it since - # this is the only write. This should place a lock down. 
- pass - else: - new = True - - try: - ssl_verify = self.repo.config.get_bool('http.sslVerify') - except KeyError: - ssl_verify = None - if ssl_verify != self.ssl_verify: - self.repo.config.set_multivar('http.sslVerify', - '', - str(self.ssl_verify).lower()) - - # Ensure that refspecs for the "origin" remote are set up as configured - if hasattr(self, 'refspecs'): - self.configure_refspecs() + self.enforce_git_config() return new @@ -1760,14 +1699,6 @@ class Pygit2(GitProvider): return blob, blob.hex, mode return None, None, None - def get_refspecs(self): - ''' - Return the configured refspecs - ''' - if not [x for x in self.repo.config if x.startswith('remote.origin.')]: - raise GitRemoteError('\'origin\' remote not not present') - return list(self.repo.config.get_multivar('remote.origin.fetch')) - def get_tree_from_branch(self, ref): ''' Return a pygit2.Tree object matching a head ref fetched into From 8d1be29068eb337235f8ab02b257e952cd508a6c Mon Sep 17 00:00:00 2001 From: "Hilberding, Rob" Date: Tue, 12 Sep 2017 13:16:03 -0500 Subject: [PATCH 458/639] Added save argument and removed merge --- salt/beacons/__init__.py | 4 ++-- salt/states/beacon.py | 23 ++++++++++++++++++++--- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/salt/beacons/__init__.py b/salt/beacons/__init__.py index fac375dd78..655251bcb7 100644 --- a/salt/beacons/__init__.py +++ b/salt/beacons/__init__.py @@ -194,8 +194,8 @@ class Beacon(object): ''' # Fire the complete event back along with the list of beacons evt = salt.utils.event.get_event('minion', opts=self.opts) - b_conf = self.functions['config.merge']('beacons') - self.opts['beacons'].update(b_conf) + #b_conf = self.functions['config.merge']('beacons') + #self.opts['beacons'].update(b_conf) evt.fire_event({'complete': True, 'beacons': self.opts['beacons']}, tag='/salt/minion/minion_beacons_list_complete') diff --git a/salt/states/beacon.py b/salt/states/beacon.py index 5b9730c2b9..37fa6ce1d7 100644 --- 
a/salt/states/beacon.py +++ b/salt/states/beacon.py @@ -9,6 +9,7 @@ Management of the Salt beacons ps: beacon.present: + - save: True - enable: False - salt-master: running - apache2: stopped @@ -35,12 +36,15 @@ log = logging.getLogger(__name__) def present(name, + save=False, **kwargs): ''' Ensure beacon is configured with the included beacon data. name The name of the beacon ensure is configured. + save + True/False, if True the beacons.conf file be updated too. Default is False. ''' @@ -58,7 +62,7 @@ def present(name, ret['comment'].append('Job {0} in correct state'.format(name)) else: if 'test' in __opts__ and __opts__['test']: - kwargs['test'] = True + test = True result = __salt__['beacons.modify'](name, beacon_data, **kwargs) ret['comment'].append(result['comment']) ret['changes'] = result['changes'] @@ -74,9 +78,10 @@ def present(name, ret['changes'] = result['changes'] else: ret['comment'].append(result['comment']) + else: if 'test' in __opts__ and __opts__['test']: - kwargs['test'] = True + test = True result = __salt__['beacons.add'](name, beacon_data, **kwargs) ret['comment'].append(result['comment']) else: @@ -88,16 +93,24 @@ def present(name, else: ret['comment'].append('Adding {0} to beacons'.format(name)) + if save == True: + result = __salt__['beacons.save']() + ret['comment'].append('Beacons saved'.format(name)) + ret['comment'] = '\n'.join(ret['comment']) return ret -def absent(name, **kwargs): +def absent(name, + save=False, + **kwargs): ''' Ensure beacon is absent. name The name of the beacon ensured absent. + save + True/False, if True the beacons.conf file be updated too. Default is False. 
''' ### NOTE: The keyword arguments in **kwargs are ignored in this state, but @@ -126,6 +139,10 @@ def absent(name, **kwargs): else: ret['comment'].append('{0} not configured in beacons'.format(name)) + if save == True: + result = __salt__['beacons.save']() + ret['comment'].append('Beacons saved'.format(name)) + ret['comment'] = '\n'.join(ret['comment']) return ret From 6984b8fd60588a749d0a2a3d76c72f3e80927eb4 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 12 Sep 2017 12:49:20 -0600 Subject: [PATCH 459/639] Add /norestart to vcredist installer --- pkg/windows/installer/Salt-Minion-Setup.nsi | 420 +++++++++++--------- 1 file changed, 222 insertions(+), 198 deletions(-) diff --git a/pkg/windows/installer/Salt-Minion-Setup.nsi b/pkg/windows/installer/Salt-Minion-Setup.nsi index 46fb821fb8..a8efca2101 100644 --- a/pkg/windows/installer/Salt-Minion-Setup.nsi +++ b/pkg/windows/installer/Salt-Minion-Setup.nsi @@ -44,7 +44,7 @@ ${StrStrAdv} !define CPUARCH "x86" !endif -; Part of the Trim function for Strings +# Part of the Trim function for Strings !define Trim "!insertmacro Trim" !macro Trim ResultVar String Push "${String}" @@ -61,27 +61,27 @@ ${StrStrAdv} !define MUI_UNICON "salt.ico" !define MUI_WELCOMEFINISHPAGE_BITMAP "panel.bmp" -; Welcome page +# Welcome page !insertmacro MUI_PAGE_WELCOME -; License page +# License page !insertmacro MUI_PAGE_LICENSE "LICENSE.txt" -; Configure Minion page +# Configure Minion page Page custom pageMinionConfig pageMinionConfig_Leave -; Instfiles page +# Instfiles page !insertmacro MUI_PAGE_INSTFILES -; Finish page (Customized) +# Finish page (Customized) !define MUI_PAGE_CUSTOMFUNCTION_SHOW pageFinish_Show !define MUI_PAGE_CUSTOMFUNCTION_LEAVE pageFinish_Leave !insertmacro MUI_PAGE_FINISH -; Uninstaller pages +# Uninstaller pages !insertmacro MUI_UNPAGE_INSTFILES -; Language files +# Language files !insertmacro MUI_LANGUAGE "English" @@ -201,8 +201,8 @@ ShowInstDetails show ShowUnInstDetails show -; Check and install Visual C++ 
redist packages -; See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info +# Check and install Visual C++ redist packages +# See http://blogs.msdn.com/b/astebner/archive/2009/01/29/9384143.aspx for more info Section -Prerequisites Var /GLOBAL VcRedistName @@ -211,12 +211,12 @@ Section -Prerequisites Var /Global CheckVcRedist StrCpy $CheckVcRedist "False" - ; Visual C++ 2015 redist packages + # Visual C++ 2015 redist packages !define PY3_VC_REDIST_NAME "VC_Redist_2015" !define PY3_VC_REDIST_X64_GUID "{50A2BC33-C9CD-3BF1-A8FF-53C10A0B183C}" !define PY3_VC_REDIST_X86_GUID "{BBF2AC74-720C-3CB3-8291-5E34039232FA}" - ; Visual C++ 2008 SP1 MFC Security Update redist packages + # Visual C++ 2008 SP1 MFC Security Update redist packages !define PY2_VC_REDIST_NAME "VC_Redist_2008_SP1_MFC" !define PY2_VC_REDIST_X64_GUID "{5FCE6D76-F5DC-37AB-B2B8-22AB8CEDB1D4}" !define PY2_VC_REDIST_X86_GUID "{9BE518E6-ECC6-35A9-88E4-87755C07200F}" @@ -239,7 +239,7 @@ Section -Prerequisites StrCpy $VcRedistGuid ${PY2_VC_REDIST_X86_GUID} ${EndIf} - ; VCRedist 2008 only needed on Windows Server 2008R2/Windows 7 and below + # VCRedist 2008 only needed on Windows Server 2008R2/Windows 7 and below ${If} ${AtMostWin2008R2} StrCpy $CheckVcRedist "True" ${EndIf} @@ -255,20 +255,41 @@ Section -Prerequisites "$VcRedistName is currently not installed. Would you like to install?" \ /SD IDYES IDNO endVcRedist - ClearErrors - ; The Correct version of VCRedist is copied over by "build_pkg.bat" + # The Correct version of VCRedist is copied over by "build_pkg.bat" SetOutPath "$INSTDIR\" File "..\prereqs\vcredist.exe" - ; /passive used by 2015 installer - ; /qb! used by 2008 installer - ; It just ignores the unrecognized switches... - ExecWait "$INSTDIR\vcredist.exe /qb! 
/passive" - IfErrors 0 endVcRedist + # If an output variable is specified ($0 in the case below), + # ExecWait sets the variable with the exit code (and only sets the + # error flag if an error occurs; if an error occurs, the contents + # of the user variable are undefined). + # http://nsis.sourceforge.net/Reference/ExecWait + # /passive used by 2015 installer + # /qb! used by 2008 installer + # It just ignores the unrecognized switches... + ClearErrors + ExecWait '"$INSTDIR\vcredist.exe" /qb! /passive /norestart' $0 + IfErrors 0 CheckVcRedistErrorCode MessageBox MB_OK \ "$VcRedistName failed to install. Try installing the package manually." \ /SD IDOK + Goto endVcRedist + + CheckVcRedistErrorCode: + # Check for Reboot Error Code (3010) + ${If} $0 == 3010 + MessageBox MB_OK \ + "$VcRedistName installed but requires a restart to complete." \ + /SD IDOK + + # Check for any other errors + ${ElseIfNot} $0 == 0 + MessageBox MB_OK \ + "$VcRedistName failed with ErrorCode: $0. Try installing the package manually." \ + /SD IDOK + ${EndIf} endVcRedist: + ${EndIf} ${EndIf} @@ -294,12 +315,12 @@ Function .onInit Call parseCommandLineSwitches - ; Check for existing installation + # Check for existing installation ReadRegStr $R0 HKLM \ "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME}" \ "UninstallString" StrCmp $R0 "" checkOther - ; Found existing installation, prompt to uninstall + # Found existing installation, prompt to uninstall MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION \ "${PRODUCT_NAME} is already installed.$\n$\n\ Click `OK` to remove the existing installation." 
\ @@ -307,12 +328,12 @@ Function .onInit Abort checkOther: - ; Check for existing installation of full salt + # Check for existing installation of full salt ReadRegStr $R0 HKLM \ "Software\Microsoft\Windows\CurrentVersion\Uninstall\${PRODUCT_NAME_OTHER}" \ "UninstallString" StrCmp $R0 "" skipUninstall - ; Found existing installation, prompt to uninstall + # Found existing installation, prompt to uninstall MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION \ "${PRODUCT_NAME_OTHER} is already installed.$\n$\n\ Click `OK` to remove the existing installation." \ @@ -321,22 +342,22 @@ Function .onInit uninst: - ; Get current Silent status + # Get current Silent status StrCpy $R0 0 ${If} ${Silent} StrCpy $R0 1 ${EndIf} - ; Turn on Silent mode + # Turn on Silent mode SetSilent silent - ; Don't remove all directories + # Don't remove all directories StrCpy $DeleteInstallDir 0 - ; Uninstall silently + # Uninstall silently Call uninstallSalt - ; Set it back to Normal mode, if that's what it was before + # Set it back to Normal mode, if that's what it was before ${If} $R0 == 0 SetSilent normal ${EndIf} @@ -350,7 +371,7 @@ Section -Post WriteUninstaller "$INSTDIR\uninst.exe" - ; Uninstall Registry Entries + # Uninstall Registry Entries WriteRegStr ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \ "DisplayName" "$(^Name)" WriteRegStr ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \ @@ -366,19 +387,19 @@ Section -Post WriteRegStr HKLM "SYSTEM\CurrentControlSet\services\salt-minion" \ "DependOnService" "nsi" - ; Set the estimated size + # Set the estimated size ${GetSize} "$INSTDIR\bin" "/S=OK" $0 $1 $2 IntFmt $0 "0x%08X" $0 WriteRegDWORD ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" \ "EstimatedSize" "$0" - ; Commandline Registry Entries + # Commandline Registry Entries WriteRegStr HKLM "${PRODUCT_CALL_REGKEY}" "" "$INSTDIR\salt-call.bat" WriteRegStr HKLM "${PRODUCT_CALL_REGKEY}" "Path" "$INSTDIR\bin\" WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "" "$INSTDIR\salt-minion.bat" 
WriteRegStr HKLM "${PRODUCT_MINION_REGKEY}" "Path" "$INSTDIR\bin\" - ; Register the Salt-Minion Service + # Register the Salt-Minion Service nsExec::Exec "nssm.exe install salt-minion $INSTDIR\bin\python.exe -E -s $INSTDIR\bin\Scripts\salt-minion -c $INSTDIR\conf -l quiet" nsExec::Exec "nssm.exe set salt-minion Description Salt Minion from saltstack.com" nsExec::Exec "nssm.exe set salt-minion Start SERVICE_AUTO_START" @@ -398,12 +419,12 @@ SectionEnd Function .onInstSuccess - ; If StartMinionDelayed is 1, then set the service to start delayed + # If StartMinionDelayed is 1, then set the service to start delayed ${If} $StartMinionDelayed == 1 nsExec::Exec "nssm.exe set salt-minion Start SERVICE_DELAYED_AUTO_START" ${EndIf} - ; If start-minion is 1, then start the service + # If start-minion is 1, then start the service ${If} $StartMinion == 1 nsExec::Exec 'net start salt-minion' ${EndIf} @@ -413,10 +434,11 @@ FunctionEnd Function un.onInit - ; Load the parameters + # Load the parameters ${GetParameters} $R0 # Uninstaller: Remove Installation Directory + ClearErrors ${GetOptions} $R0 "/delete-install-dir" $R1 IfErrors delete_install_dir_not_found StrCpy $DeleteInstallDir 1 @@ -434,7 +456,7 @@ Section Uninstall Call un.uninstallSalt - ; Remove C:\salt from the Path + # Remove C:\salt from the Path Push "C:\salt" Call un.RemoveFromPath @@ -444,27 +466,27 @@ SectionEnd !macro uninstallSalt un Function ${un}uninstallSalt - ; Make sure we're in the right directory + # Make sure we're in the right directory ${If} $INSTDIR == "c:\salt\bin\Scripts" StrCpy $INSTDIR "C:\salt" ${EndIf} - ; Stop and Remove salt-minion service + # Stop and Remove salt-minion service nsExec::Exec 'net stop salt-minion' nsExec::Exec 'sc delete salt-minion' - ; Stop and remove the salt-master service + # Stop and remove the salt-master service nsExec::Exec 'net stop salt-master' nsExec::Exec 'sc delete salt-master' - ; Remove files + # Remove files Delete "$INSTDIR\uninst.exe" Delete 
"$INSTDIR\nssm.exe" Delete "$INSTDIR\salt*" Delete "$INSTDIR\vcredist.exe" RMDir /r "$INSTDIR\bin" - ; Remove Registry entries + # Remove Registry entries DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY}" DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_UNINST_KEY_OTHER}" DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_CALL_REGKEY}" @@ -474,17 +496,17 @@ Function ${un}uninstallSalt DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_MINION_REGKEY}" DeleteRegKey ${PRODUCT_UNINST_ROOT_KEY} "${PRODUCT_RUN_REGKEY}" - ; Automatically close when finished + # Automatically close when finished SetAutoClose true - ; Prompt to remove the Installation directory + # Prompt to remove the Installation directory ${IfNot} $DeleteInstallDir == 1 MessageBox MB_ICONQUESTION|MB_YESNO|MB_DEFBUTTON2 \ "Would you like to completely remove $INSTDIR and all of its contents?" \ /SD IDNO IDNO finished ${EndIf} - ; Make sure you're not removing Program Files + # Make sure you're not removing Program Files ${If} $INSTDIR != 'Program Files' ${AndIf} $INSTDIR != 'Program Files (x86)' RMDir /r "$INSTDIR" @@ -526,7 +548,7 @@ FunctionEnd Function Trim - Exch $R1 ; Original string + Exch $R1 # Original string Push $R2 Loop: @@ -558,36 +580,36 @@ Function Trim FunctionEnd -;------------------------------------------------------------------------------ -; StrStr Function -; - find substring in a string -; -; Usage: -; Push "this is some string" -; Push "some" -; Call StrStr -; Pop $0 ; "some string" -;------------------------------------------------------------------------------ +#------------------------------------------------------------------------------ +# StrStr Function +# - find substring in a string +# +# Usage: +# Push "this is some string" +# Push "some" +# Call StrStr +# Pop $0 ; "some string" +#------------------------------------------------------------------------------ !macro StrStr un Function ${un}StrStr - Exch $R1 ; $R1=substring, stack=[old$R1,string,...] 
- Exch ; stack=[string,old$R1,...] - Exch $R2 ; $R2=string, stack=[old$R2,old$R1,...] - Push $R3 ; $R3=strlen(substring) - Push $R4 ; $R4=count - Push $R5 ; $R5=tmp - StrLen $R3 $R1 ; Get the length of the Search String - StrCpy $R4 0 ; Set the counter to 0 + Exch $R1 # $R1=substring, stack=[old$R1,string,...] + Exch # stack=[string,old$R1,...] + Exch $R2 # $R2=string, stack=[old$R2,old$R1,...] + Push $R3 # $R3=strlen(substring) + Push $R4 # $R4=count + Push $R5 # $R5=tmp + StrLen $R3 $R1 # Get the length of the Search String + StrCpy $R4 0 # Set the counter to 0 loop: - StrCpy $R5 $R2 $R3 $R4 ; Create a moving window of the string that is - ; the size of the length of the search string - StrCmp $R5 $R1 done ; Is the contents of the window the same as - ; search string, then done - StrCmp $R5 "" done ; Is the window empty, then done - IntOp $R4 $R4 + 1 ; Shift the windows one character - Goto loop ; Repeat + StrCpy $R5 $R2 $R3 $R4 # Create a moving window of the string that is + # the size of the length of the search string + StrCmp $R5 $R1 done # Is the contents of the window the same as + # search string, then done + StrCmp $R5 "" done # Is the window empty, then done + IntOp $R4 $R4 + 1 # Shift the windows one character + Goto loop # Repeat done: StrCpy $R1 $R2 "" $R4 @@ -595,7 +617,7 @@ Function ${un}StrStr Pop $R4 Pop $R3 Pop $R2 - Exch $R1 ; $R1=old$R1, stack=[result,...] + Exch $R1 # $R1=old$R1, stack=[result,...] FunctionEnd !macroend @@ -603,74 +625,74 @@ FunctionEnd !insertmacro StrStr "un." 
-;------------------------------------------------------------------------------ -; AddToPath Function -; - Adds item to Path for All Users -; - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native -; Windows Commands -; -; Usage: -; Push "C:\path\to\add" -; Call AddToPath -;------------------------------------------------------------------------------ +#------------------------------------------------------------------------------ +# AddToPath Function +# - Adds item to Path for All Users +# - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native +# Windows Commands +# +# Usage: +# Push "C:\path\to\add" +# Call AddToPath +#------------------------------------------------------------------------------ !define Environ 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"' Function AddToPath - Exch $0 ; Path to add - Push $1 ; Current Path - Push $2 ; Results of StrStr / Length of Path + Path to Add - Push $3 ; Handle to Reg / Length of Path - Push $4 ; Result of Registry Call + Exch $0 # Path to add + Push $1 # Current Path + Push $2 # Results of StrStr / Length of Path + Path to Add + Push $3 # Handle to Reg / Length of Path + Push $4 # Result of Registry Call - ; Open a handle to the key in the registry, handle in $3, Error in $4 + # Open a handle to the key in the registry, handle in $3, Error in $4 System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4" - ; Make sure registry handle opened successfully (returned 0) + # Make sure registry handle opened successfully (returned 0) IntCmp $4 0 0 done done - ; Load the contents of path into $1, Error Code into $4, Path length into $2 + # Load the contents of path into $1, Error Code into $4, Path length into $2 System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4" - ; Close the handle to the registry ($3) + # Close the handle to the registry ($3) 
System::Call "advapi32::RegCloseKey(i $3)" - ; Check for Error Code 234, Path too long for the variable - IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA + # Check for Error Code 234, Path too long for the variable + IntCmp $4 234 0 +4 +4 # $4 == ERROR_MORE_DATA DetailPrint "AddToPath Failed: original length $2 > ${NSIS_MAX_STRLEN}" MessageBox MB_OK \ "You may add C:\salt to the %PATH% for convenience when issuing local salt commands from the command line." \ /SD IDOK Goto done - ; If no error, continue - IntCmp $4 0 +5 ; $4 != NO_ERROR - ; Error 2 means the Key was not found - IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND + # If no error, continue + IntCmp $4 0 +5 # $4 != NO_ERROR + # Error 2 means the Key was not found + IntCmp $4 2 +3 # $4 != ERROR_FILE_NOT_FOUND DetailPrint "AddToPath: unexpected error code $4" Goto done StrCpy $1 "" - ; Check if already in PATH - Push "$1;" ; The string to search - Push "$0;" ; The string to find + # Check if already in PATH + Push "$1;" # The string to search + Push "$0;" # The string to find Call StrStr - Pop $2 ; The result of the search - StrCmp $2 "" 0 done ; String not found, try again with ';' at the end - ; Otherwise, it's already in the path - Push "$1;" ; The string to search - Push "$0\;" ; The string to find + Pop $2 # The result of the search + StrCmp $2 "" 0 done # String not found, try again with ';' at the end + # Otherwise, it's already in the path + Push "$1;" # The string to search + Push "$0\;" # The string to find Call StrStr - Pop $2 ; The result - StrCmp $2 "" 0 done ; String not found, continue (add) - ; Otherwise, it's already in the path + Pop $2 # The result + StrCmp $2 "" 0 done # String not found, continue (add) + # Otherwise, it's already in the path - ; Prevent NSIS string overflow - StrLen $2 $0 ; Length of path to add ($2) - StrLen $3 $1 ; Length of current path ($3) - IntOp $2 $2 + $3 ; Length of current path + path to add ($2) - IntOp $2 $2 + 2 ; Account for the additional ';' - ; $2 = 
strlen(dir) + strlen(PATH) + sizeof(";") + # Prevent NSIS string overflow + StrLen $2 $0 # Length of path to add ($2) + StrLen $3 $1 # Length of current path ($3) + IntOp $2 $2 + $3 # Length of current path + path to add ($2) + IntOp $2 $2 + 2 # Account for the additional ';' + # $2 = strlen(dir) + strlen(PATH) + sizeof(";") - ; Make sure the new length isn't over the NSIS_MAX_STRLEN + # Make sure the new length isn't over the NSIS_MAX_STRLEN IntCmp $2 ${NSIS_MAX_STRLEN} +4 +4 0 DetailPrint "AddToPath: new length $2 > ${NSIS_MAX_STRLEN}" MessageBox MB_OK \ @@ -678,18 +700,18 @@ Function AddToPath /SD IDOK Goto done - ; Append dir to PATH + # Append dir to PATH DetailPrint "Add to PATH: $0" - StrCpy $2 $1 1 -1 ; Copy the last character of the existing path - StrCmp $2 ";" 0 +2 ; Check for trailing ';' - StrCpy $1 $1 -1 ; remove trailing ';' - StrCmp $1 "" +2 ; Make sure Path is not empty - StrCpy $0 "$1;$0" ; Append new path at the end ($0) + StrCpy $2 $1 1 -1 # Copy the last character of the existing path + StrCmp $2 ";" 0 +2 # Check for trailing ';' + StrCpy $1 $1 -1 # remove trailing ';' + StrCmp $1 "" +2 # Make sure Path is not empty + StrCpy $0 "$1;$0" # Append new path at the end ($0) - ; We can use the NSIS command here. Only 'ReadRegStr' is affected + # We can use the NSIS command here. 
Only 'ReadRegStr' is affected WriteRegExpandStr ${Environ} "PATH" $0 - ; Broadcast registry change to open programs + # Broadcast registry change to open programs SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000 done: @@ -702,16 +724,16 @@ Function AddToPath FunctionEnd -;------------------------------------------------------------------------------ -; RemoveFromPath Function -; - Removes item from Path for All Users -; - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native -; Windows Commands -; -; Usage: -; Push "C:\path\to\add" -; Call un.RemoveFromPath -;------------------------------------------------------------------------------ +#------------------------------------------------------------------------------ +# RemoveFromPath Function +# - Removes item from Path for All Users +# - Overcomes NSIS ReadRegStr limitation of 1024 characters by using Native +# Windows Commands +# +# Usage: +# Push "C:\path\to\add" +# Call un.RemoveFromPath +#------------------------------------------------------------------------------ Function un.RemoveFromPath Exch $0 @@ -722,59 +744,59 @@ Function un.RemoveFromPath Push $5 Push $6 - ; Open a handle to the key in the registry, handle in $3, Error in $4 + # Open a handle to the key in the registry, handle in $3, Error in $4 System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4" - ; Make sure registry handle opened successfully (returned 0) + # Make sure registry handle opened successfully (returned 0) IntCmp $4 0 0 done done - ; Load the contents of path into $1, Error Code into $4, Path length into $2 + # Load the contents of path into $1, Error Code into $4, Path length into $2 System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4" - ; Close the handle to the registry ($3) + # Close the handle to the registry ($3) System::Call "advapi32::RegCloseKey(i $3)" - 
; Check for Error Code 234, Path too long for the variable - IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA + # Check for Error Code 234, Path too long for the variable + IntCmp $4 234 0 +4 +4 # $4 == ERROR_MORE_DATA DetailPrint "AddToPath: original length $2 > ${NSIS_MAX_STRLEN}" Goto done - ; If no error, continue - IntCmp $4 0 +5 ; $4 != NO_ERROR - ; Error 2 means the Key was not found - IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND + # If no error, continue + IntCmp $4 0 +5 # $4 != NO_ERROR + # Error 2 means the Key was not found + IntCmp $4 2 +3 # $4 != ERROR_FILE_NOT_FOUND DetailPrint "AddToPath: unexpected error code $4" Goto done StrCpy $1 "" - ; Ensure there's a trailing ';' - StrCpy $5 $1 1 -1 ; Copy the last character of the path - StrCmp $5 ";" +2 ; Check for trailing ';', if found continue - StrCpy $1 "$1;" ; ensure trailing ';' + # Ensure there's a trailing ';' + StrCpy $5 $1 1 -1 # Copy the last character of the path + StrCmp $5 ";" +2 # Check for trailing ';', if found continue + StrCpy $1 "$1;" # ensure trailing ';' - ; Check for our directory inside the path - Push $1 ; String to Search - Push "$0;" ; Dir to Find + # Check for our directory inside the path + Push $1 # String to Search + Push "$0;" # Dir to Find Call un.StrStr - Pop $2 ; The results of the search - StrCmp $2 "" done ; If results are empty, we're done, otherwise continue + Pop $2 # The results of the search + StrCmp $2 "" done # If results are empty, we're done, otherwise continue - ; Remove our Directory from the Path + # Remove our Directory from the Path DetailPrint "Remove from PATH: $0" - StrLen $3 "$0;" ; Get the length of our dir ($3) - StrLen $4 $2 ; Get the length of the return from StrStr ($4) - StrCpy $5 $1 -$4 ; $5 is now the part before the path to remove - StrCpy $6 $2 "" $3 ; $6 is now the part after the path to remove - StrCpy $3 "$5$6" ; Combine $5 and $6 + StrLen $3 "$0;" # Get the length of our dir ($3) + StrLen $4 $2 # Get the length of the return from StrStr ($4) + 
StrCpy $5 $1 -$4 # $5 is now the part before the path to remove + StrCpy $6 $2 "" $3 # $6 is now the part after the path to remove + StrCpy $3 "$5$6" # Combine $5 and $6 - ; Check for Trailing ';' - StrCpy $5 $3 1 -1 ; Load the last character of the string - StrCmp $5 ";" 0 +2 ; Check for ';' - StrCpy $3 $3 -1 ; remove trailing ';' + # Check for Trailing ';' + StrCpy $5 $3 1 -1 # Load the last character of the string + StrCmp $5 ";" 0 +2 # Check for ';' + StrCpy $3 $3 -1 # remove trailing ';' - ; Write the new path to the registry + # Write the new path to the registry WriteRegExpandStr ${Environ} "PATH" $3 - ; Broadcast the change to all open applications + # Broadcast the change to all open applications SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000 done: @@ -808,6 +830,7 @@ Function getMinionConfig confFound: FileOpen $0 "$INSTDIR\conf\minion" r + ClearErrors confLoop: FileRead $0 $1 IfErrors EndOfFile @@ -838,68 +861,69 @@ FunctionEnd Function updateMinionConfig ClearErrors - FileOpen $0 "$INSTDIR\conf\minion" "r" ; open target file for reading - GetTempFileName $R0 ; get new temp file name - FileOpen $1 $R0 "w" ; open temp file for writing + FileOpen $0 "$INSTDIR\conf\minion" "r" # open target file for reading + GetTempFileName $R0 # get new temp file name + FileOpen $1 $R0 "w" # open temp file for writing - loop: ; loop through each line - FileRead $0 $2 ; read line from target file - IfErrors done ; end if errors are encountered (end of line) + loop: # loop through each line + FileRead $0 $2 # read line from target file + IfErrors done # end if errors are encountered (end of line) - ${If} $MasterHost_State != "" ; if master is empty - ${AndIf} $MasterHost_State != "salt" ; and if master is not 'salt' - ${StrLoc} $3 $2 "master:" ">" ; where is 'master:' in this line - ${If} $3 == 0 ; is it in the first... 
- ${OrIf} $3 == 1 ; or second position (account for comments) - StrCpy $2 "master: $MasterHost_State$\r$\n" ; write the master - ${EndIf} ; close if statement - ${EndIf} ; close if statement + ${If} $MasterHost_State != "" # if master is empty + ${AndIf} $MasterHost_State != "salt" # and if master is not 'salt' + ${StrLoc} $3 $2 "master:" ">" # where is 'master:' in this line + ${If} $3 == 0 # is it in the first... + ${OrIf} $3 == 1 # or second position (account for comments) + StrCpy $2 "master: $MasterHost_State$\r$\n" # write the master + ${EndIf} # close if statement + ${EndIf} # close if statement - ${If} $MinionName_State != "" ; if minion is empty - ${AndIf} $MinionName_State != "hostname" ; and if minion is not 'hostname' - ${StrLoc} $3 $2 "id:" ">" ; where is 'id:' in this line - ${If} $3 == 0 ; is it in the first... - ${OrIf} $3 == 1 ; or the second position (account for comments) - StrCpy $2 "id: $MinionName_State$\r$\n" ; change line - ${EndIf} ; close if statement - ${EndIf} ; close if statement + ${If} $MinionName_State != "" # if minion is empty + ${AndIf} $MinionName_State != "hostname" # and if minion is not 'hostname' + ${StrLoc} $3 $2 "id:" ">" # where is 'id:' in this line + ${If} $3 == 0 # is it in the first... 
+ ${OrIf} $3 == 1 # or the second position (account for comments) + StrCpy $2 "id: $MinionName_State$\r$\n" # change line + ${EndIf} # close if statement + ${EndIf} # close if statement - FileWrite $1 $2 ; write changed or unchanged line to temp file + FileWrite $1 $2 # write changed or unchanged line to temp file Goto loop done: - FileClose $0 ; close target file - FileClose $1 ; close temp file - Delete "$INSTDIR\conf\minion" ; delete target file - CopyFiles /SILENT $R0 "$INSTDIR\conf\minion" ; copy temp file to target file - Delete $R0 ; delete temp file + FileClose $0 # close target file + FileClose $1 # close temp file + Delete "$INSTDIR\conf\minion" # delete target file + CopyFiles /SILENT $R0 "$INSTDIR\conf\minion" # copy temp file to target file + Delete $R0 # delete temp file FunctionEnd Function parseCommandLineSwitches - ; Load the parameters + # Load the parameters ${GetParameters} $R0 - ; Check for start-minion switches - ; /start-service is to be deprecated, so we must check for both + # Check for start-minion switches + # /start-service is to be deprecated, so we must check for both ${GetOptions} $R0 "/start-service=" $R1 ${GetOptions} $R0 "/start-minion=" $R2 # Service: Start Salt Minion ${IfNot} $R2 == "" - ; If start-minion was passed something, then set it + # If start-minion was passed something, then set it StrCpy $StartMinion $R2 ${ElseIfNot} $R1 == "" - ; If start-service was passed something, then set StartMinion to that + # If start-service was passed something, then set StartMinion to that StrCpy $StartMinion $R1 ${Else} - ; Otherwise default to 1 + # Otherwise default to 1 StrCpy $StartMinion 1 ${EndIf} # Service: Minion Startup Type Delayed + ClearErrors ${GetOptions} $R0 "/start-minion-delayed" $R1 IfErrors start_minion_delayed_not_found StrCpy $StartMinionDelayed 1 From dc1768239dbbdf7aa118c112307458e207364d7b Mon Sep 17 00:00:00 2001 From: Peter Sagerson Date: Tue, 12 Sep 2017 13:17:20 -0700 Subject: [PATCH 460/639] acme.cert: avoid 
IOError on failure. --- salt/states/acme.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/states/acme.py b/salt/states/acme.py index 1ab6b57dfb..43649a6426 100644 --- a/salt/states/acme.py +++ b/salt/states/acme.py @@ -116,9 +116,14 @@ def cert(name, if res['result'] is None: ret['changes'] = {} else: + if not __salt__['acme.has'](name): + new = None + else: + new = __salt__['acme.info'](name) + ret['changes'] = { 'old': old, - 'new': __salt__['acme.info'](name) + 'new': new } return ret From eba40686e81e4e3170e16c90bf73178ef1389ab2 Mon Sep 17 00:00:00 2001 From: Mike Place Date: Tue, 12 Sep 2017 14:39:00 -0600 Subject: [PATCH 461/639] Add information about cimc to release notes --- doc/topics/releases/oxygen.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index d3cd440d45..597b4e9252 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -51,6 +51,12 @@ New NaCl Renderer A new renderer has been added for encrypted data. +New support for Cisco UCS Chassis +--------------------------------- + +The salt proxy minion now allows for control of Cisco USC chassis. See +the `cimc` modules for details. 
+ New GitFS Features ------------------ From 6885c6b45867a608b85e96cc78906c5c6e4d06dd Mon Sep 17 00:00:00 2001 From: "Hilberding, Rob" Date: Tue, 12 Sep 2017 16:00:24 -0500 Subject: [PATCH 462/639] removed commented code --- salt/beacons/__init__.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/salt/beacons/__init__.py b/salt/beacons/__init__.py index ff56a6ffa5..54bea7aa96 100644 --- a/salt/beacons/__init__.py +++ b/salt/beacons/__init__.py @@ -205,10 +205,6 @@ class Beacon(object): ''' # Fire the complete event back along with the list of beacons evt = salt.utils.event.get_event('minion', opts=self.opts) - #b_conf = self.functions['config.merge']('beacons') - #if not isinstance(self.opts['beacons'], dict): - # self.opts['beacons'] = {} - #self.opts['beacons'].update(b_conf) evt.fire_event({'complete': True, 'beacons': self.opts['beacons']}, tag='/salt/minion/minion_beacons_list_complete') From 575ce9eb737fa323825885e53115ac1bc37d7222 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Tue, 12 Sep 2017 16:37:15 -0500 Subject: [PATCH 463/639] Fix unicode handling on PR 43378 The strings passed to exceptions should not be unicode types, as this can cause problems elsewhere when exception class instances are referenced. --- salt/utils/configparser.py | 18 +++++++++++------- tests/unit/utils/test_configparser.py | 8 +++++--- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/salt/utils/configparser.py b/salt/utils/configparser.py index 5a103ee997..d060f9ddd2 100644 --- a/salt/utils/configparser.py +++ b/salt/utils/configparser.py @@ -101,7 +101,10 @@ class GitConfigParser(RawConfigParser, object): # pylint: disable=undefined-var optname = None # no section header in the file? 
elif cursect is None: - raise MissingSectionHeaderError(fpname, lineno, line) # pylint: disable=undefined-variable + raise MissingSectionHeaderError( # pylint: disable=undefined-variable + salt.utils.stringutils.to_str(fpname), + lineno, + salt.utils.stringutils.to_str(line)) # an option line? else: mo = self._optcre.match(line.lstrip()) @@ -142,12 +145,11 @@ class GitConfigParser(RawConfigParser, object): # pylint: disable=undefined-var if self._optcre is self.OPTCRE or value: is_list = isinstance(value, list) if is_list and not allow_list: - raise TypeError(u'option value cannot be a list unless ' - u'allow_list is True') + raise TypeError('option value cannot be a list unless allow_list is True') # future lint: disable=non-unicode-string elif not is_list: value = [value] if not all(isinstance(x, six.string_types) for x in value): - raise TypeError(u'option values must be strings') + raise TypeError('option values must be strings') # future lint: disable=non-unicode-string def get(self, section, option, as_list=False): ''' @@ -183,7 +185,7 @@ class GitConfigParser(RawConfigParser, object): # pylint: disable=undefined-var sectdict[key] = [sectdict[key]] sectdict[key].append(value) else: - raise TypeError(u'Expected str or list for option value, got %s' % type(value).__name__) + raise TypeError('Expected str or list for option value, got %s' % type(value).__name__) # future lint: disable=non-unicode-string def set_multivar(self, section, option, value=u''): ''' @@ -201,7 +203,8 @@ class GitConfigParser(RawConfigParser, object): # pylint: disable=undefined-var try: sectdict = self._sections[section] except KeyError: - raise NoSectionError(section) # pylint: disable=undefined-variable + raise NoSectionError( # pylint: disable=undefined-variable + salt.utils.stringutils.to_str(section)) key = self.optionxform(option) self._add_option(sectdict, key, value) @@ -216,7 +219,8 @@ class GitConfigParser(RawConfigParser, object): # pylint: disable=undefined-var try: sectdict 
= self._sections[section] except KeyError: - raise NoSectionError(section) # pylint: disable=undefined-variable + raise NoSectionError( # pylint: disable=undefined-variable + salt.utils.stringutils.to_str(section)) option = self.optionxform(option) if option not in sectdict: return False diff --git a/tests/unit/utils/test_configparser.py b/tests/unit/utils/test_configparser.py index 06794c11ca..8fe98aee11 100644 --- a/tests/unit/utils/test_configparser.py +++ b/tests/unit/utils/test_configparser.py @@ -122,12 +122,14 @@ class TestGitConfigParser(TestCase): self.conf.get(u'alias', u'modified'), u"""! git status --porcelain | awk 'match($1, "M"){print $2}'""" ) + # future lint: disable=non-unicode-string self.assertEqual( self.conf.get(u'alias', u'hist'), salt.utils.stringutils.to_unicode( r"""log --pretty=format:\"%h %ad | %s%d [%an]\" --graph --date=short""" ) ) + # future lint: enable=non-unicode-string def test_read_space_indent(self): ''' @@ -180,7 +182,7 @@ class TestGitConfigParser(TestCase): [orig_refspec, new_refspec] ) # Write the config object to a file - with salt.utils.files.fopen(self.new_config, 'w') as fp_: + with salt.utils.files.fopen(self.new_config, u'w') as fp_: self.conf.write(fp_) # Confirm that the new file was written correctly expected = self.fix_indent(ORIG_CONFIG) @@ -257,10 +259,10 @@ class TestGitConfigParser(TestCase): ''' Test writing using non-binary filehandle ''' - self._test_write(mode='w') + self._test_write(mode=u'w') def test_write_binary(self): ''' Test writing using binary filehandle ''' - self._test_write(mode='wb') + self._test_write(mode=u'wb') From 15ffecd4a752a0146671ad8eb363d7bc9d5d4d31 Mon Sep 17 00:00:00 2001 From: Mike Place Date: Tue, 12 Sep 2017 15:44:26 -0600 Subject: [PATCH 464/639] Give the name of the roster --- doc/topics/releases/oxygen.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index 7bcc72bb4e..bb5e42a9a7 100644 --- 
a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -55,7 +55,8 @@ New salt-ssh roster ------------------- A new roster has been added that allows users to pull in a list of hosts -for salt-ssh targeting from a ~/.ssh configuration. +for salt-ssh targeting from a ~/.ssh configuration. For full details, +please see the `sshconfig` roster. New GitFS Features ------------------ From 4cadfdcb567f3a7fb2ddcb109503c999cb9d946c Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 11 Sep 2017 08:37:38 -0400 Subject: [PATCH 465/639] Updated kubernetes tests to be skipped if library is not loaded --- tests/unit/modules/test_kubernetes.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/unit/modules/test_kubernetes.py b/tests/unit/modules/test_kubernetes.py index 1de939f6b0..f53ce7845a 100644 --- a/tests/unit/modules/test_kubernetes.py +++ b/tests/unit/modules/test_kubernetes.py @@ -20,6 +20,8 @@ try: from salt.modules import kubernetes except ImportError: kubernetes = False +if not kubernetes.HAS_LIBS: + kubernetes = False @skipIf(NO_MOCK, NO_MOCK_REASON) From 5c3dd00692192ac7e413c8fa5c117e018dd03e2c Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 11 Sep 2017 08:43:09 -0400 Subject: [PATCH 466/639] Updated config.test_api tests to take into account a ROOT_DIR override --- tests/unit/config/test_api.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/tests/unit/config/test_api.py b/tests/unit/config/test_api.py index a7c55b18f5..89a8f03c35 100644 --- a/tests/unit/config/test_api.py +++ b/tests/unit/config/test_api.py @@ -19,17 +19,20 @@ from tests.support.mock import ( # Import Salt libs import salt.config import salt.utils.platform +import salt.syspaths MOCK_MASTER_DEFAULT_OPTS = { - 'log_file': '/var/log/salt/master', - 'pidfile': '/var/run/salt-master.pid', - 'root_dir': '/' + 'log_file': '{0}/var/log/salt/master'.format(salt.syspaths.ROOT_DIR), + 'pidfile': 
'{0}/var/run/salt-master.pid'.format(salt.syspaths.ROOT_DIR), + 'root_dir': format(salt.syspaths.ROOT_DIR) } if salt.utils.platform.is_windows(): MOCK_MASTER_DEFAULT_OPTS = { - 'log_file': 'c:\\salt\\var\\log\\salt\\master', - 'pidfile': 'c:\\salt\\var\\run\\salt-master.pid', - 'root_dir': 'c:\\salt' + 'log_file': '{0}\\var\\log\\salt\\master'.format( + salt.syspaths.ROOT_DIR), + 'pidfile': '{0}\\var\\run\\salt-master.pid'.format( + salt.syspaths.ROOT_DIR), + 'root_dir': format(salt.syspaths.ROOT_DIR) } @@ -54,9 +57,10 @@ class APIConfigTestCase(TestCase): ''' with patch('salt.config.client_config', MagicMock(return_value=MOCK_MASTER_DEFAULT_OPTS)): - expected = '/var/log/salt/api' + expected = '{0}/var/log/salt/api'.format(salt.syspaths.ROOT_DIR) if salt.utils.platform.is_windows(): - expected = 'c:\\salt\\var\\log\\salt\\api' + expected = '{0}\\var\\log\\salt\\api'.format( + salt.syspaths.ROOT_DIR) ret = salt.config.api_config('/some/fake/path') self.assertEqual(ret['log_file'], expected) @@ -69,9 +73,10 @@ class APIConfigTestCase(TestCase): ''' with patch('salt.config.client_config', MagicMock(return_value=MOCK_MASTER_DEFAULT_OPTS)): - expected = '/var/run/salt-api.pid' + expected = '{0}/var/run/salt-api.pid'.format(salt.syspaths.ROOT_DIR) if salt.utils.platform.is_windows(): - expected = 'c:\\salt\\var\\run\\salt-api.pid' + expected = '{0}\\var\\run\\salt-api.pid'.format( + salt.syspaths.ROOT_DIR) ret = salt.config.api_config('/some/fake/path') self.assertEqual(ret['pidfile'], expected) From f11a618274f2d72e5d5df6a264369965bd973504 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 11 Sep 2017 10:19:42 -0400 Subject: [PATCH 467/639] Fixed Linux sysctl tests to be run in a local environment --- tests/unit/modules/test_saltcheck.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py index d2ca89c685..9834eb84a1 100644 --- a/tests/unit/modules/test_saltcheck.py +++ 
b/tests/unit/modules/test_saltcheck.py @@ -3,9 +3,12 @@ # Import Python libs from __future__ import absolute_import +import os.path try: import salt.modules.saltcheck as saltcheck + import salt.config + import salt.syspaths as syspaths except: raise @@ -30,6 +33,14 @@ class LinuxSysctlTestCase(TestCase, LoaderModuleMockMixin): ''' def setup_loader_modules(self): + # Setting the environment to be local + local_opts = salt.config.minion_config( + os.path.join(syspaths.CONFIG_DIR, u'minion')) + local_opts['file_client']= 'local' + patcher = patch('salt.config.minion_config', + MagicMock(return_value=local_opts)) + patcher.start() + self.addCleanup(patcher.stop) return {saltcheck: {}} def test_call_salt_command(self): From 7f08983288cbad5e605368d3ba1cc1f761551864 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 10 Sep 2017 14:55:24 -0400 Subject: [PATCH 468/639] Implemented being able to have proxy data both in the esxdatacenter proxy config and in its pillar --- salt/proxy/esxdatacenter.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/salt/proxy/esxdatacenter.py b/salt/proxy/esxdatacenter.py index 186b880c2c..5460863d84 100644 --- a/salt/proxy/esxdatacenter.py +++ b/salt/proxy/esxdatacenter.py @@ -153,6 +153,7 @@ import os # Import Salt Libs import salt.exceptions from salt.config.schemas.esxdatacenter import EsxdatacenterProxySchema +from salt.utils.dictupdate import merge # This must be present or the Salt loader won't load this module. 
__proxyenabled__ = ['esxdatacenter'] @@ -195,42 +196,44 @@ def init(opts): log.trace('Validating esxdatacenter proxy input') schema = EsxdatacenterProxySchema.serialize() log.trace('schema = {}'.format(schema)) + proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {})) + log.trace('proxy_conf = {0}'.format(proxy_conf)) try: - jsonschema.validate(opts['proxy'], schema) + jsonschema.validate(proxy_conf, schema) except jsonschema.exceptions.ValidationError as exc: raise salt.exceptions.InvalidConfigError(exc) # Save mandatory fields in cache for key in ('vcenter', 'datacenter', 'mechanism'): - DETAILS[key] = opts['proxy'][key] + DETAILS[key] = proxy_conf[key] # Additional validation if DETAILS['mechanism'] == 'userpass': - if 'username' not in opts['proxy']: + if 'username' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'userpass\', but no ' '\'username\' key found in proxy config.') - if 'passwords' not in opts['proxy']: + if 'passwords' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'userpass\', but no ' '\'passwords\' key found in proxy config.') for key in ('username', 'passwords'): - DETAILS[key] = opts['proxy'][key] + DETAILS[key] = proxy_conf[key] else: - if 'domain' not in opts['proxy']: + if 'domain' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'sspi\', but no ' '\'domain\' key found in proxy config.') - if 'principal' not in opts['proxy']: + if 'principal' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'sspi\', but no ' '\'principal\' key found in proxy config.') for key in ('domain', 'principal'): - DETAILS[key] = opts['proxy'][key] + DETAILS[key] = proxy_conf[key] # Save optional - DETAILS['protocol'] = opts['proxy'].get('protocol') - DETAILS['port'] = opts['proxy'].get('port') + DETAILS['protocol'] = proxy_conf.get('protocol') + DETAILS['port'] = proxy_conf.get('port') # Test connection if 
DETAILS['mechanism'] == 'userpass': From 0e7b8e0c92f34a783d7e2a0c8f77c956a7a13438 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 10 Sep 2017 16:37:09 -0400 Subject: [PATCH 469/639] Adjusted tests for esxdatacenter proxy --- tests/unit/proxy/test_esxdatacenter.py | 66 ++++++++++++++++++-------- 1 file changed, 47 insertions(+), 19 deletions(-) diff --git a/tests/unit/proxy/test_esxdatacenter.py b/tests/unit/proxy/test_esxdatacenter.py index fb44851a9f..ea1658d5ac 100644 --- a/tests/unit/proxy/test_esxdatacenter.py +++ b/tests/unit/proxy/test_esxdatacenter.py @@ -38,7 +38,7 @@ class InitTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): return {esxdatacenter: {'__virtual__': MagicMock(return_value='esxdatacenter'), - 'DETAILS': {}}} + 'DETAILS': {}, '__pillar__': {}}} def setUp(self): self.opts_userpass = {'proxy': {'proxytype': 'esxdatacenter', @@ -57,6 +57,22 @@ class InitTestCase(TestCase, LoaderModuleMockMixin): 'principal': 'fake_principal', 'protocol': 'fake_protocol', 'port': 100}} + patches = (('salt.proxy.esxdatacenter.merge', + MagicMock(return_value=self.opts_sspi['proxy'])),) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def test_merge(self): + mock_pillar_proxy = MagicMock() + mock_opts_proxy = MagicMock() + mock_merge = MagicMock(return_value=self.opts_sspi['proxy']) + with patch.dict(esxdatacenter.__pillar__, + {'proxy': mock_pillar_proxy}): + with patch('salt.proxy.esxdatacenter.merge', mock_merge): + esxdatacenter.init(opts={'proxy': mock_opts_proxy}) + mock_merge.assert_called_once_with(mock_opts_proxy, mock_pillar_proxy) def test_esxdatacenter_schema(self): mock_json_validate = MagicMock() @@ -80,9 +96,11 @@ class InitTestCase(TestCase, LoaderModuleMockMixin): def test_no_username(self): opts = self.opts_userpass.copy() del opts['proxy']['username'] - with self.assertRaises(salt.exceptions.InvalidConfigError) as \ - excinfo: - 
esxdatacenter.init(opts) + with patch('salt.proxy.esxdatacenter.merge', + MagicMock(return_value=opts['proxy'])): + with self.assertRaises(salt.exceptions.InvalidConfigError) as \ + excinfo: + esxdatacenter.init(opts) self.assertEqual(excinfo.exception.strerror, 'Mechanism is set to \'userpass\', but no ' '\'username\' key found in proxy config.') @@ -90,9 +108,11 @@ class InitTestCase(TestCase, LoaderModuleMockMixin): def test_no_passwords(self): opts = self.opts_userpass.copy() del opts['proxy']['passwords'] - with self.assertRaises(salt.exceptions.InvalidConfigError) as \ - excinfo: - esxdatacenter.init(opts) + with patch('salt.proxy.esxdatacenter.merge', + MagicMock(return_value=opts['proxy'])): + with self.assertRaises(salt.exceptions.InvalidConfigError) as \ + excinfo: + esxdatacenter.init(opts) self.assertEqual(excinfo.exception.strerror, 'Mechanism is set to \'userpass\', but no ' '\'passwords\' key found in proxy config.') @@ -100,9 +120,11 @@ class InitTestCase(TestCase, LoaderModuleMockMixin): def test_no_domain(self): opts = self.opts_sspi.copy() del opts['proxy']['domain'] - with self.assertRaises(salt.exceptions.InvalidConfigError) as \ - excinfo: - esxdatacenter.init(opts) + with patch('salt.proxy.esxdatacenter.merge', + MagicMock(return_value=opts['proxy'])): + with self.assertRaises(salt.exceptions.InvalidConfigError) as \ + excinfo: + esxdatacenter.init(opts) self.assertEqual(excinfo.exception.strerror, 'Mechanism is set to \'sspi\', but no ' '\'domain\' key found in proxy config.') @@ -110,9 +132,11 @@ class InitTestCase(TestCase, LoaderModuleMockMixin): def test_no_principal(self): opts = self.opts_sspi.copy() del opts['proxy']['principal'] - with self.assertRaises(salt.exceptions.InvalidConfigError) as \ - excinfo: - esxdatacenter.init(opts) + with patch('salt.proxy.esxdatacenter.merge', + MagicMock(return_value=opts['proxy'])): + with self.assertRaises(salt.exceptions.InvalidConfigError) as \ + excinfo: + esxdatacenter.init(opts) 
self.assertEqual(excinfo.exception.strerror, 'Mechanism is set to \'sspi\', but no ' '\'principal\' key found in proxy config.') @@ -120,17 +144,21 @@ class InitTestCase(TestCase, LoaderModuleMockMixin): def test_find_credentials(self): mock_find_credentials = MagicMock(return_value=('fake_username', 'fake_password')) - with patch('salt.proxy.esxdatacenter.find_credentials', - mock_find_credentials): - esxdatacenter.init(self.opts_userpass) + with patch('salt.proxy.esxdatacenter.merge', + MagicMock(return_value=self.opts_userpass['proxy'])): + with patch('salt.proxy.esxdatacenter.find_credentials', + mock_find_credentials): + esxdatacenter.init(self.opts_userpass) mock_find_credentials.assert_called_once_with() def test_details_userpass(self): mock_find_credentials = MagicMock(return_value=('fake_username', 'fake_password')) - with patch('salt.proxy.esxdatacenter.find_credentials', - mock_find_credentials): - esxdatacenter.init(self.opts_userpass) + with patch('salt.proxy.esxdatacenter.merge', + MagicMock(return_value=self.opts_userpass['proxy'])): + with patch('salt.proxy.esxdatacenter.find_credentials', + mock_find_credentials): + esxdatacenter.init(self.opts_userpass) self.assertDictEqual(esxdatacenter.DETAILS, {'vcenter': 'fake_vcenter', 'datacenter': 'fake_dc', From ba515df63152416c2920ef046d5c8fab931f7aae Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 10 Sep 2017 17:35:25 -0400 Subject: [PATCH 470/639] Added esxcluster proxy input JSON schema --- salt/config/schemas/esxcluster.py | 44 +++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 salt/config/schemas/esxcluster.py diff --git a/salt/config/schemas/esxcluster.py b/salt/config/schemas/esxcluster.py new file mode 100644 index 0000000000..732bd6e778 --- /dev/null +++ b/salt/config/schemas/esxcluster.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)` + + + salt.config.schemas.esxcluster + 
~~~~~~~~~~~~~~~~~~~~~~~ + + ESX Cluster configuration schemas +''' + +# Import Python libs +from __future__ import absolute_import + +# Import Salt libs +from salt.utils.schema import (Schema, + ArrayItem, + IntegerItem, + StringItem) + + +class EsxclusterProxySchema(Schema): + ''' + Schema of the esxcluster proxy input + ''' + + title = 'Esxcluster Proxy Schema' + description = 'Esxcluster proxy schema' + additional_properties = False + proxytype = StringItem(required=True, + enum=['esxcluster']) + vcenter = StringItem(required=True, pattern='[^\s]+') + datacenter = StringItem(required=True) + cluster = StringItem(required=True) + mechanism = StringItem(required=True, enum=['userpass', 'sspi']) + username = StringItem() + passwords = ArrayItem(min_items=1, + items=StringItem(), + unique_items=True) + # TODO Should be changed when anyOf is supported for schemas + domain = StringItem() + principal = StringItem() + protocol = StringItem() + port = IntegerItem(minimum=1) From 0302948fff9e491bbe37e54758002e1978f02c0f Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 10 Sep 2017 17:21:05 -0400 Subject: [PATCH 471/639] Added esxcluster proxy used to manage ESX clusters --- salt/proxy/esxcluster.py | 310 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 310 insertions(+) create mode 100644 salt/proxy/esxcluster.py diff --git a/salt/proxy/esxcluster.py b/salt/proxy/esxcluster.py new file mode 100644 index 0000000000..5270db97fe --- /dev/null +++ b/salt/proxy/esxcluster.py @@ -0,0 +1,310 @@ +# -*- coding: utf-8 -*- +''' +Proxy Minion interface module for managing VMWare ESXi clusters. + +Dependencies +============ + +- pyVmomi +- jsonschema + +Configuration +============= +To use this integration proxy module, please configure the following: + +Pillar +------ + +Proxy minions get their configuration from Salt's Pillar. This can now happen +from the proxy's configuration file. + +Example pillars: + +``userpass`` mechanism: + +.. 
code-block:: yaml + + proxy: + proxytype: esxcluster + cluster: + datacenter: + vcenter: + mechanism: userpass + username: + passwords: (required if userpass is used) + - first_password + - second_password + - third_password + +``sspi`` mechanism: + +.. code-block:: yaml + + proxy: + proxytype: esxcluster + cluster: + datacenter: + vcenter: + mechanism: sspi + domain: + principal: + +proxytype +^^^^^^^^^ +To use this Proxy Module, set this to ``esxdatacenter``. + +cluster +^^^^^^^ +Name of the managed cluster. Required. + +datacenter +^^^^^^^^^^ +Name of the datacenter the managed cluster is in. Required. + +vcenter +^^^^^^^ +The location of the VMware vCenter server (host of ip) where the datacenter +should be managed. Required. + +mechanism +^^^^^^^^ +The mechanism used to connect to the vCenter server. Supported values are +``userpass`` and ``sspi``. Required. + +Note: + Connections are attempted using all (``username``, ``password``) + combinations on proxy startup. + +username +^^^^^^^^ +The username used to login to the host, such as ``root``. Required if mechanism +is ``userpass``. + +passwords +^^^^^^^^^ +A list of passwords to be used to try and login to the vCenter server. At least +one password in this list is required if mechanism is ``userpass``. When the +proxy comes up, it will try the passwords listed in order. + +domain +^^^^^^ +User domain. Required if mechanism is ``sspi``. + +principal +^^^^^^^^ +Kerberos principal. Rquired if mechanism is ``sspi``. + +protocol +^^^^^^^^ +If the ESXi host is not using the default protocol, set this value to an +alternate protocol. Default is ``https``. + +port +^^^^ +If the ESXi host is not using the default port, set this value to an +alternate port. Default is ``443``. + +Salt Proxy +---------- + +After your pillar is in place, you can test the proxy. The proxy can run on +any machine that has network connectivity to your Salt Master and to the +vCenter server in the pillar. 
SaltStack recommends that the machine running the +salt-proxy process also run a regular minion, though it is not strictly +necessary. + +To start a proxy minion one needs to establish its identity : + +.. code-block:: bash + + salt-proxy --proxyid + +On the machine that will run the proxy, make sure there is a configuration file +present. By default this is ``/etc/salt/proxy``. If in a different location, the +```` has to be specified when running the proxy: +file with at least the following in it: + +.. code-block:: bash + + salt-proxy --proxyid -c + +Commands +-------- + +Once the proxy is running it will connect back to the specified master and +individual commands can be runs against it: + +.. code-block:: bash + + # Master - minion communication + salt test.ping + + # Test vcenter connection + salt vsphere.test_vcenter_connection + +States +------ + +Associated states are documented in +:mod:`salt.states.esxcluster `. +Look there to find an example structure for Pillar as well as an example +``.sls`` file for configuring an ESX cluster from scratch. +''' + + +# Import Python Libs +from __future__ import absolute_import +import logging +import os + +# Import Salt Libs +import salt.exceptions +from salt.config.schemas.esxcluster import EsxclusterProxySchema +from salt.utils.dictupdate import merge + +# This must be present or the Salt loader won't load this module. +__proxyenabled__ = ['esxcluster'] + +# External libraries +try: + import jsonschema + HAS_JSONSCHEMA = True +except ImportError: + HAS_JSONSCHEMA = False + +# Variables are scoped to this module so we can have persistent data +# across calls to fns in here. +GRAINS_CACHE = {} +DETAILS = {} + + +# Set up logging +log = logging.getLogger(__name__) +# Define the module's virtual name +__virtualname__ = 'esxcluster' + + +def __virtual__(): + ''' + Only load if the vsphere execution module is available. 
+ ''' + if HAS_JSONSCHEMA: + return __virtualname__ + + return False, 'The esxcluster proxy module did not load.' + + +def init(opts): + ''' + This function gets called when the proxy starts up. For + login + the protocol and port are cached. + ''' + log.debug('Initting esxcluster proxy module in process ' + '{}'.format(os.getpid())) + log.debug('Validating esxcluster proxy input') + schema = EsxclusterProxySchema.serialize() + log.trace('schema = {}'.format(schema)) + proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {})) + log.trace('proxy_conf = {0}'.format(proxy_conf)) + try: + jsonschema.validate(proxy_conf, schema) + except jsonschema.exceptions.ValidationError as exc: + raise salt.exceptions.InvalidConfigError(exc) + + # Save mandatory fields in cache + for key in ('vcenter', 'datacenter', 'cluster', 'mechanism'): + DETAILS[key] = proxy_conf[key] + + # Additional validation + if DETAILS['mechanism'] == 'userpass': + if 'username' not in proxy_conf: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'userpass\', but no ' + '\'username\' key found in proxy config.') + if not 'passwords' in proxy_conf: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'userpass\', but no ' + '\'passwords\' key found in proxy config.') + for key in ('username', 'passwords'): + DETAILS[key] = proxy_conf[key] + else: + if not 'domain' in proxy_conf: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'sspi\', but no ' + '\'domain\' key found in proxy config.') + if not 'principal' in proxy_conf: + raise salt.exceptions.InvalidConfigError( + 'Mechanism is set to \'sspi\', but no ' + '\'principal\' key found in proxy config.') + for key in ('domain', 'principal'): + DETAILS[key] = proxy_conf[key] + + # Save optional + DETAILS['protocol'] = proxy_conf.get('protocol') + DETAILS['port'] = proxy_conf.get('port') + + # Test connection + if DETAILS['mechanism'] == 'userpass': + # Get the correct login details + 
log.debug('Retrieving credentials and testing vCenter connection for ' + 'mehchanism \'userpass\'') + try: + username, password = find_credentials() + DETAILS['password'] = password + except salt.exceptions.SaltSystemExit as err: + log.critical('Error: {0}'.format(err)) + return False + return True + + +def ping(): + ''' + Returns True. + + CLI Example: + + .. code-block:: bash + + salt esx-cluster test.ping + ''' + return True + + +def shutdown(): + ''' + Shutdown the connection to the proxy device. For this proxy, + shutdown is a no-op. + ''' + log.debug('esxcluster proxy shutdown() called...') + + +def find_credentials(): + ''' + Cycle through all the possible credentials and return the first one that + works. + ''' + + # if the username and password were already found don't fo though the + # connection process again + if 'username' in DETAILS and 'password' in DETAILS: + return DETAILS['username'], DETAILS['password'] + + passwords = DETAILS['passwords'] + for password in passwords: + DETAILS['password'] = password + if not __salt__['vsphere.test_vcenter_connection'](): + # We are unable to authenticate + continue + # If we have data returned from above, we've successfully authenticated. + return DETAILS['username'], password + # We've reached the end of the list without successfully authenticating. 
+ raise salt.exceptions.VMwareConnectionError('Cannot complete login due to ' + 'incorrect credentials.') + + +def get_details(): + ''' + Function that returns the cached details + ''' + return DETAILS From 4113aa3ca797d7f5d0c3e6745631a6841aedd42f Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 10 Sep 2017 17:48:43 -0400 Subject: [PATCH 472/639] Added tests for esxcluster proxy --- tests/unit/proxy/test_esxcluster.py | 185 ++++++++++++++++++++++++++++ 1 file changed, 185 insertions(+) create mode 100644 tests/unit/proxy/test_esxcluster.py diff --git a/tests/unit/proxy/test_esxcluster.py b/tests/unit/proxy/test_esxcluster.py new file mode 100644 index 0000000000..e13faab69e --- /dev/null +++ b/tests/unit/proxy/test_esxcluster.py @@ -0,0 +1,185 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Alexandru Bleotu ` + + Tests for esxcluster proxy +''' + +# Import Python Libs +from __future__ import absolute_import + +# Import external libs +try: + import jsonschema + HAS_JSONSCHEMA = True +except ImportError: + HAS_JSONSCHEMA = False + +# Import Salt Libs +import salt.proxy.esxcluster as esxcluster +import salt.exceptions +from salt.config.schemas.esxcluster import EsxclusterProxySchema + +# Import Salt Testing Libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import TestCase, skipIf +from tests.support.mock import ( + MagicMock, + patch, + NO_MOCK, + NO_MOCK_REASON +) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_JSONSCHEMA, 'jsonschema is required') +class InitTestCase(TestCase, LoaderModuleMockMixin): + '''Tests for salt.proxy.esxcluster.init''' + def setup_loader_modules(self): + return {esxcluster: {'__virtual__': + MagicMock(return_value='esxcluster'), + 'DETAILS': {}, '__pillar__': {}}} + + def setUp(self): + self.opts_userpass = {'proxy': {'proxytype': 'esxcluster', + 'vcenter': 'fake_vcenter', + 'datacenter': 'fake_dc', + 'cluster': 'fake_cluster', + 'mechanism': 'userpass', + 'username': 
'fake_username', + 'passwords': ['fake_password'], + 'protocol': 'fake_protocol', + 'port': 100}} + self.opts_sspi = {'proxy': {'proxytype': 'esxcluster', + 'vcenter': 'fake_vcenter', + 'datacenter': 'fake_dc', + 'cluster': 'fake_cluster', + 'mechanism': 'sspi', + 'domain': 'fake_domain', + 'principal': 'fake_principal', + 'protocol': 'fake_protocol', + 'port': 100}} + patches = (('salt.proxy.esxcluster.merge', + MagicMock(return_value=self.opts_sspi['proxy'])),) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def test_merge(self): + mock_pillar_proxy = MagicMock() + mock_opts_proxy = MagicMock() + mock_merge = MagicMock(return_value=self.opts_sspi['proxy']) + with patch.dict(esxcluster.__pillar__, + {'proxy': mock_pillar_proxy}): + with patch('salt.proxy.esxcluster.merge', mock_merge): + esxcluster.init(opts={'proxy': mock_opts_proxy}) + mock_merge.assert_called_once_with(mock_opts_proxy, mock_pillar_proxy) + + def test_esxcluster_schema(self): + mock_json_validate = MagicMock() + serialized_schema = EsxclusterProxySchema().serialize() + with patch('salt.proxy.esxcluster.jsonschema.validate', + mock_json_validate): + esxcluster.init(self.opts_sspi) + mock_json_validate.assert_called_once_with( + self.opts_sspi['proxy'], serialized_schema) + + def test_invalid_proxy_input_error(self): + with patch('salt.proxy.esxcluster.jsonschema.validate', + MagicMock(side_effect=jsonschema.exceptions.ValidationError( + 'Validation Error'))): + with self.assertRaises(salt.exceptions.InvalidConfigError) as \ + excinfo: + esxcluster.init(self.opts_userpass) + self.assertEqual(excinfo.exception.strerror.message, + 'Validation Error') + + def test_no_username(self): + opts = self.opts_userpass.copy() + del opts['proxy']['username'] + with patch('salt.proxy.esxcluster.merge', + MagicMock(return_value=opts['proxy'])): + with self.assertRaises(salt.exceptions.InvalidConfigError) as \ + excinfo: + esxcluster.init(opts) + 
self.assertEqual(excinfo.exception.strerror, + 'Mechanism is set to \'userpass\', but no ' + '\'username\' key found in proxy config.') + + def test_no_passwords(self): + opts = self.opts_userpass.copy() + del opts['proxy']['passwords'] + with patch('salt.proxy.esxcluster.merge', + MagicMock(return_value=opts['proxy'])): + with self.assertRaises(salt.exceptions.InvalidConfigError) as \ + excinfo: + esxcluster.init(opts) + self.assertEqual(excinfo.exception.strerror, + 'Mechanism is set to \'userpass\', but no ' + '\'passwords\' key found in proxy config.') + + def test_no_domain(self): + opts = self.opts_sspi.copy() + del opts['proxy']['domain'] + with patch('salt.proxy.esxcluster.merge', + MagicMock(return_value=opts['proxy'])): + with self.assertRaises(salt.exceptions.InvalidConfigError) as \ + excinfo: + esxcluster.init(opts) + self.assertEqual(excinfo.exception.strerror, + 'Mechanism is set to \'sspi\', but no ' + '\'domain\' key found in proxy config.') + + def test_no_principal(self): + opts = self.opts_sspi.copy() + del opts['proxy']['principal'] + with patch('salt.proxy.esxcluster.merge', + MagicMock(return_value=opts['proxy'])): + with self.assertRaises(salt.exceptions.InvalidConfigError) as \ + excinfo: + esxcluster.init(opts) + self.assertEqual(excinfo.exception.strerror, + 'Mechanism is set to \'sspi\', but no ' + '\'principal\' key found in proxy config.') + + def test_find_credentials(self): + mock_find_credentials = MagicMock(return_value=('fake_username', + 'fake_password')) + with patch('salt.proxy.esxcluster.merge', + MagicMock(return_value=self.opts_userpass['proxy'])): + with patch('salt.proxy.esxcluster.find_credentials', + mock_find_credentials): + esxcluster.init(self.opts_userpass) + mock_find_credentials.assert_called_once_with() + + def test_details_userpass(self): + mock_find_credentials = MagicMock(return_value=('fake_username', + 'fake_password')) + with patch('salt.proxy.esxcluster.merge', + 
MagicMock(return_value=self.opts_userpass['proxy'])): + with patch('salt.proxy.esxcluster.find_credentials', + mock_find_credentials): + esxcluster.init(self.opts_userpass) + self.assertDictEqual(esxcluster.DETAILS, + {'vcenter': 'fake_vcenter', + 'datacenter': 'fake_dc', + 'cluster': 'fake_cluster', + 'mechanism': 'userpass', + 'username': 'fake_username', + 'password': 'fake_password', + 'passwords': ['fake_password'], + 'protocol': 'fake_protocol', + 'port': 100}) + + def test_details_sspi(self): + esxcluster.init(self.opts_sspi) + self.assertDictEqual(esxcluster.DETAILS, + {'vcenter': 'fake_vcenter', + 'datacenter': 'fake_dc', + 'cluster': 'fake_cluster', + 'mechanism': 'sspi', + 'domain': 'fake_domain', + 'principal': 'fake_principal', + 'protocol': 'fake_protocol', + 'port': 100}) From 679e3886323ab80b2b82535b9b6ae88ad2f0917c Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 10 Sep 2017 18:08:25 -0400 Subject: [PATCH 473/639] Added esxcluster shim execution module --- salt/modules/esxcluster.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 salt/modules/esxcluster.py diff --git a/salt/modules/esxcluster.py b/salt/modules/esxcluster.py new file mode 100644 index 0000000000..fca68d775f --- /dev/null +++ b/salt/modules/esxcluster.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +''' +Module used to access the esxcluster proxy connection methods +''' +from __future__ import absolute_import + +# Import python libs +import logging +import salt.utils.platform + + +log = logging.getLogger(__name__) + +__proxyenabled__ = ['esxcluster'] +# Define the module's virtual name +__virtualname__ = 'esxcluster' + + +def __virtual__(): + ''' + Only work on proxy + ''' + if salt.utils.platform.is_proxy(): + return __virtualname__ + return (False, 'Must be run on a proxy minion') + + +def get_details(): + return __proxy__['esxcluster.get_details']() From 26710fa8fc9ef5f585464ecb023aacfc486e3dc8 Mon Sep 17 00:00:00 2001 From: Alexandru 
Bleotu Date: Sun, 10 Sep 2017 18:13:03 -0400 Subject: [PATCH 474/639] Added tests for esxcluster execution module --- tests/unit/modules/test_esxcluster.py | 38 +++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 tests/unit/modules/test_esxcluster.py diff --git a/tests/unit/modules/test_esxcluster.py b/tests/unit/modules/test_esxcluster.py new file mode 100644 index 0000000000..5a32980d16 --- /dev/null +++ b/tests/unit/modules/test_esxcluster.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Alexandru Bleotu ` + + Tests for functions in salt.modules.esxcluster +''' + +# Import Python Libs +from __future__ import absolute_import + +# Import Salt Libs +import salt.modules.esxcluster as esxcluster + +# Import Salt Testing Libs +from tests.support.mixins import LoaderModuleMockMixin +from tests.support.unit import TestCase, skipIf +from tests.support.mock import ( + MagicMock, + patch, + NO_MOCK, + NO_MOCK_REASON +) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class GetDetailsTestCase(TestCase, LoaderModuleMockMixin): + '''Tests for salt.modules.esxcluster.get_details''' + def setup_loader_modules(self): + return {esxcluster: {'__virtual__': + MagicMock(return_value='esxcluster'), + '__proxy__': {}}} + + def test_get_details(self): + mock_get_details = MagicMock() + with patch.dict(esxcluster.__proxy__, + {'esxcluster.get_details': mock_get_details}): + esxcluster.get_details() + mock_get_details.assert_called_once_with() From 91c22163b6ab5c00693f3a56b1c684ea53588b9b Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 10 Sep 2017 18:46:05 -0400 Subject: [PATCH 475/639] Added vsphere._get_esxcluster_proxy_details that retrieves a esxcluster proxy details --- salt/modules/vsphere.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 326294c7e6..76e9434866 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4294,3 +4294,14 @@ def 
_get_esxdatacenter_proxy_details(): return det.get('vcenter'), det.get('username'), det.get('password'), \ det.get('protocol'), det.get('port'), det.get('mechanism'), \ det.get('principal'), det.get('domain'), det.get('datacenter') + + +def _get_esxcluster_proxy_details(): + ''' + Returns the running esxcluster's proxy details + ''' + det = __salt__['esxcluster.get_details']() + return det.get('vcenter'), det.get('username'), det.get('password'), \ + det.get('protocol'), det.get('port'), det.get('mechanism'), \ + det.get('principal'), det.get('domain'), det.get('datacenter'), \ + det.get('cluster') From b88095646864f4e388a0c82ec149959de3d28d66 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 10 Sep 2017 18:46:43 -0400 Subject: [PATCH 476/639] Added esxcluster to supported proxies --- salt/modules/vsphere.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 76e9434866..a24bc0ee51 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -195,7 +195,7 @@ else: log = logging.getLogger(__name__) __virtualname__ = 'vsphere' -__proxyenabled__ = ['esxi', 'esxdatacenter'] +__proxyenabled__ = ['esxi', 'esxcluster', 'esxdatacenter'] def __virtual__(): @@ -227,6 +227,8 @@ def _get_proxy_connection_details(): proxytype = get_proxy_type() if proxytype == 'esxi': details = __salt__['esxi.get_details']() + elif proxytype == 'esxcluster': + details = __salt__['esxcluster.get_details']() elif proxytype == 'esxdatacenter': details = __salt__['esxdatacenter.get_details']() else: @@ -267,7 +269,7 @@ def gets_service_instance_via_proxy(fn): proxy details and passes the connection (vim.ServiceInstance) to the decorated function. - Supported proxies: esxi, esxdatacenter. + Supported proxies: esxi, esxcluster, esxdatacenter. Notes: 1. 
The decorated function must have a ``service_instance`` parameter @@ -354,7 +356,7 @@ def gets_service_instance_via_proxy(fn): @depends(HAS_PYVMOMI) -@supports_proxies('esxi', 'esxdatacenter') +@supports_proxies('esxi', 'esxcluster', 'esxdatacenter') def get_service_instance_via_proxy(service_instance=None): ''' Returns a service instance to the proxied endpoint (vCenter/ESXi host). @@ -374,7 +376,7 @@ def get_service_instance_via_proxy(service_instance=None): @depends(HAS_PYVMOMI) -@supports_proxies('esxi', 'esxdatacenter') +@supports_proxies('esxi', 'esxcluster', 'esxdatacenter') def disconnect(service_instance): ''' Disconnects from a vCenter or ESXi host @@ -1909,7 +1911,7 @@ def get_vsan_eligible_disks(host, username, password, protocol=None, port=None, @depends(HAS_PYVMOMI) -@supports_proxies('esxi', 'esxdatacenter') +@supports_proxies('esxi', 'esxcluster', 'esxdatacenter') @gets_service_instance_via_proxy def test_vcenter_connection(service_instance=None): ''' @@ -3598,7 +3600,7 @@ def vsan_enable(host, username, password, protocol=None, port=None, host_names=N @depends(HAS_PYVMOMI) -@supports_proxies('esxdatacenter') +@supports_proxies('esxdatacenter', 'esxcluster') @gets_service_instance_via_proxy def list_datacenters_via_proxy(datacenter_names=None, service_instance=None): ''' From afa4921d378e2a1fa68e4e32b59cff6a658bafcc Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 10 Sep 2017 19:04:27 -0400 Subject: [PATCH 477/639] Fixed vsphere tests to support the esxcluster proxy --- tests/unit/modules/test_vsphere.py | 48 +++++++++++++++++++++++------- 1 file changed, 38 insertions(+), 10 deletions(-) diff --git a/tests/unit/modules/test_vsphere.py b/tests/unit/modules/test_vsphere.py index e7ac51dfa0..93196e4233 100644 --- a/tests/unit/modules/test_vsphere.py +++ b/tests/unit/modules/test_vsphere.py @@ -620,6 +620,7 @@ class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin): 'principal': 'fake_principal', 'domain': 'fake_domain'} 
self.esxdatacenter_details = {'vcenter': 'fake_vcenter', + 'datacenter': 'fake_dc', 'username': 'fake_username', 'password': 'fake_password', 'protocol': 'fake_protocol', @@ -627,9 +628,22 @@ class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin): 'mechanism': 'fake_mechanism', 'principal': 'fake_principal', 'domain': 'fake_domain'} + self.esxcluster_details = {'vcenter': 'fake_vcenter', + 'datacenter': 'fake_dc', + 'cluster', 'fake_cluster', + 'username': 'fake_username', + 'password': 'fake_password', + 'protocol': 'fake_protocol', + 'port': 'fake_port', + 'mechanism': 'fake_mechanism', + 'principal': 'fake_principal', + 'domain': 'fake_domain'} + + def tearDown(self): - for attrname in ('esxi_host_details', 'esxi_vcenter_details'): + for attrname in ('esxi_host_details', 'esxi_vcenter_details', + 'esxdatacenter_details', 'esxcluster_details'): try: delattr(self, attrname) except AttributeError: @@ -651,8 +665,22 @@ class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin): MagicMock(return_value='esxdatacenter')): with patch.dict(vsphere.__salt__, {'esxdatacenter.get_details': MagicMock( - return_value=self.esxdatacenter_details)}): + return_value=self.esxdatacenter_details)}): ret = vsphere._get_proxy_connection_details() + self.assertEqual(('fake_vcenter', 'fake_username', 'fake_password', + 'fake_protocol', 'fake_port', 'fake_mechanism', + 'fake_principal', 'fake_domain'), ret) + + def test_esxcluster_proxy_details(self): + with patch('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value='esxcluster')): + with patch.dict(vsphere.__salt__, + {'esxcluster.get_details': MagicMock( + return_value=self.esxcluster_details)}): + ret = vsphere._get_proxy_connection_details() + self.assertEqual(('fake_vcenter', 'fake_username', 'fake_password', + 'fake_protocol', 'fake_port', 'fake_mechanism', + 'fake_principal', 'fake_domain'), ret) def test_esxi_proxy_vcenter_details(self): with 
patch('salt.modules.vsphere.get_proxy_type', @@ -862,8 +890,8 @@ class GetServiceInstanceViaProxyTestCase(TestCase, LoaderModuleMockMixin): } } - def test_supported_proxes(self): - supported_proxies = ['esxi', 'esxdatacenter'] + def test_supported_proxies(self): + supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -905,8 +933,8 @@ class DisconnectTestCase(TestCase, LoaderModuleMockMixin): } } - def test_supported_proxes(self): - supported_proxies = ['esxi', 'esxdatacenter'] + def test_supported_proxies(self): + supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -946,8 +974,8 @@ class TestVcenterConnectionTestCase(TestCase, LoaderModuleMockMixin): } } - def test_supported_proxes(self): - supported_proxies = ['esxi', 'esxdatacenter'] + def test_supported_proxies(self): + supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -1022,7 +1050,7 @@ class ListDatacentersViaProxyTestCase(TestCase, LoaderModuleMockMixin): } def test_supported_proxies(self): - supported_proxies = ['esxdatacenter'] + supported_proxies = ['esxcluster', 'esxdatacenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', MagicMock(return_value=proxy_type)): @@ -1099,7 +1127,7 @@ class CreateDatacenterTestCase(TestCase, LoaderModuleMockMixin): } } - def test_supported_proxes(self): + def test_supported_proxies(self): supported_proxies = ['esxdatacenter'] for proxy_type in supported_proxies: with patch('salt.modules.vsphere.get_proxy_type', From 0222a5a056c2af6f840a509f3a0b98764f960033 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Mon, 11 Sep 2017 05:57:42 
-0400 Subject: [PATCH 478/639] Fix to vsphere tests --- tests/unit/modules/test_vsphere.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/unit/modules/test_vsphere.py b/tests/unit/modules/test_vsphere.py index 93196e4233..af5f241cdc 100644 --- a/tests/unit/modules/test_vsphere.py +++ b/tests/unit/modules/test_vsphere.py @@ -630,7 +630,7 @@ class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin): 'domain': 'fake_domain'} self.esxcluster_details = {'vcenter': 'fake_vcenter', 'datacenter': 'fake_dc', - 'cluster', 'fake_cluster', + 'cluster': 'fake_cluster', 'username': 'fake_username', 'password': 'fake_password', 'protocol': 'fake_protocol', @@ -639,8 +639,6 @@ class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin): 'principal': 'fake_principal', 'domain': 'fake_domain'} - - def tearDown(self): for attrname in ('esxi_host_details', 'esxi_vcenter_details', 'esxdatacenter_details', 'esxcluster_details'): From 1b877a6c6011955c10f82c85820419fcf83a50b8 Mon Sep 17 00:00:00 2001 From: V3XATI0N Date: Tue, 12 Sep 2017 19:46:02 -0700 Subject: [PATCH 479/639] Make aptpkg work with Deepin Linux Simple change, but it bugs me having to add this every time I update. Also I never contribute so proposing this change here is probably the wrong place to do it. Sue me if you want I'm super broke. 
--- salt/modules/aptpkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py index 6892dc4625..d056c12a58 100644 --- a/salt/modules/aptpkg.py +++ b/salt/modules/aptpkg.py @@ -99,7 +99,7 @@ def __virtual__(): ''' Confirm this module is on a Debian based system ''' - if __grains__.get('os_family') in ('Kali', 'Debian', 'neon'): + if __grains__.get('os_family') in ('Kali', 'Debian', 'neon', 'Deepin'): return __virtualname__ elif __grains__.get('os_family', False) == 'Cumulus': return __virtualname__ From b861d5e85ea925b360500edb25afb0a6a75d2f71 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 13 Sep 2017 05:06:58 -0400 Subject: [PATCH 480/639] pylint --- tests/unit/proxy/test_esxdatacenter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/proxy/test_esxdatacenter.py b/tests/unit/proxy/test_esxdatacenter.py index ea1658d5ac..bda93182af 100644 --- a/tests/unit/proxy/test_esxdatacenter.py +++ b/tests/unit/proxy/test_esxdatacenter.py @@ -71,7 +71,7 @@ class InitTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(esxdatacenter.__pillar__, {'proxy': mock_pillar_proxy}): with patch('salt.proxy.esxdatacenter.merge', mock_merge): - esxdatacenter.init(opts={'proxy': mock_opts_proxy}) + esxdatacenter.init(opts={'proxy': mock_opts_proxy}) mock_merge.assert_called_once_with(mock_opts_proxy, mock_pillar_proxy) def test_esxdatacenter_schema(self): From 449dccdaa10fcc65989651b316b1f846923448e4 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 13 Sep 2017 05:09:44 -0400 Subject: [PATCH 481/639] pylint --- tests/unit/modules/test_saltcheck.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/modules/test_saltcheck.py b/tests/unit/modules/test_saltcheck.py index 9834eb84a1..5907af17ef 100644 --- a/tests/unit/modules/test_saltcheck.py +++ b/tests/unit/modules/test_saltcheck.py @@ -36,7 +36,7 @@ class LinuxSysctlTestCase(TestCase, 
LoaderModuleMockMixin): # Setting the environment to be local local_opts = salt.config.minion_config( os.path.join(syspaths.CONFIG_DIR, u'minion')) - local_opts['file_client']= 'local' + local_opts['file_client'] = 'local' patcher = patch('salt.config.minion_config', MagicMock(return_value=local_opts)) patcher.start() From 59cb07269c7005e11db08cc5ec761616d1f65941 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 13 Sep 2017 05:17:06 -0400 Subject: [PATCH 482/639] pylint --- salt/config/schemas/esxcluster.py | 2 +- salt/proxy/esxcluster.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/config/schemas/esxcluster.py b/salt/config/schemas/esxcluster.py index 732bd6e778..f9eb70fb67 100644 --- a/salt/config/schemas/esxcluster.py +++ b/salt/config/schemas/esxcluster.py @@ -29,7 +29,7 @@ class EsxclusterProxySchema(Schema): additional_properties = False proxytype = StringItem(required=True, enum=['esxcluster']) - vcenter = StringItem(required=True, pattern='[^\s]+') + vcenter = StringItem(required=True, pattern=r'[^\s]+') datacenter = StringItem(required=True) cluster = StringItem(required=True) mechanism = StringItem(required=True, enum=['userpass', 'sspi']) diff --git a/salt/proxy/esxcluster.py b/salt/proxy/esxcluster.py index 5270db97fe..af3740d8d5 100644 --- a/salt/proxy/esxcluster.py +++ b/salt/proxy/esxcluster.py @@ -159,7 +159,7 @@ import os # Import Salt Libs import salt.exceptions -from salt.config.schemas.esxcluster import EsxclusterProxySchema +from salt.config.schemas.esxcluster import EsxclusterProxySchema from salt.utils.dictupdate import merge # This must be present or the Salt loader won't load this module. 
@@ -222,18 +222,18 @@ def init(opts): raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'userpass\', but no ' '\'username\' key found in proxy config.') - if not 'passwords' in proxy_conf: + if 'passwords' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'userpass\', but no ' '\'passwords\' key found in proxy config.') for key in ('username', 'passwords'): DETAILS[key] = proxy_conf[key] else: - if not 'domain' in proxy_conf: + if 'domain' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'sspi\', but no ' '\'domain\' key found in proxy config.') - if not 'principal' in proxy_conf: + if 'principal' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'sspi\', but no ' '\'principal\' key found in proxy config.') From ade3f9ad97fca2997b45506191ffc35a87f10e5a Mon Sep 17 00:00:00 2001 From: Joaquin Veira Date: Wed, 13 Sep 2017 14:01:38 +0200 Subject: [PATCH 483/639] Update zabbix_return.py Applied suggested changes --- salt/returners/zabbix_return.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/returners/zabbix_return.py b/salt/returners/zabbix_return.py index 2415f37514..a78f784c56 100644 --- a/salt/returners/zabbix_return.py +++ b/salt/returners/zabbix_return.py @@ -26,6 +26,7 @@ import os # Import Salt libs from salt.ext import six +import salt.utils.files # Get logging started log = logging.getLogger(__name__) @@ -55,7 +56,7 @@ def zbx(): def zabbix_send(key, host, output): - f = open('/etc/zabbix/zabbix_agentd.conf','r') + f = open(zbx()['zabbix_config'],'r') for line in f: if "ServerActive" in line: flag = "true" From c2c65b4ac25be8e706cdd1ed48978ba7b53e567b Mon Sep 17 00:00:00 2001 From: 3add3287 <3add3287@users.noreply.github.com> Date: Wed, 13 Sep 2017 17:10:42 +0200 Subject: [PATCH 484/639] Fix checking for newline on end of file by properly checking the last byte of the file if the file is non empty. 
--- salt/modules/ssh.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py index 7f1d042c01..0c06d38e78 100644 --- a/salt/modules/ssh.py +++ b/salt/modules/ssh.py @@ -769,10 +769,13 @@ def set_auth_key( with salt.utils.files.fopen(fconfig, 'ab+') as _fh: if new_file is False: # Let's make sure we have a new line at the end of the file - _fh.seek(1024, 2) - if not _fh.read(1024).rstrip(six.b(' ')).endswith(six.b('\n')): - _fh.seek(0, 2) - _fh.write(six.b('\n')) + _fh.seek(0,2) + if _fh.tell() > 0: + # File isn't empty, check if last byte is a newline + # If not, add one + _fh.seek(-1,2) + if _fh.read(1) != six.b('\n') + _fh.write(six.b('\n')) if six.PY3: auth_line = auth_line.encode(__salt_system_encoding__) _fh.write(auth_line) From 7c0f4b889ca54b89b407f83bb7017272a8c230c9 Mon Sep 17 00:00:00 2001 From: 3add3287 <3add3287@users.noreply.github.com> Date: Wed, 13 Sep 2017 17:35:39 +0200 Subject: [PATCH 485/639] Copy paste typo --- salt/modules/ssh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py index 0c06d38e78..5101d9973c 100644 --- a/salt/modules/ssh.py +++ b/salt/modules/ssh.py @@ -774,7 +774,7 @@ def set_auth_key( # File isn't empty, check if last byte is a newline # If not, add one _fh.seek(-1,2) - if _fh.read(1) != six.b('\n') + if _fh.read(1) != six.b('\n'): _fh.write(six.b('\n')) if six.PY3: auth_line = auth_line.encode(__salt_system_encoding__) From ed8113dfe8cde96eb40f02d230a8edc613e7bb7b Mon Sep 17 00:00:00 2001 From: rallytime Date: Wed, 13 Sep 2017 12:06:47 -0400 Subject: [PATCH 486/639] Move "salt.utils" calls to __utils__ in linode driver --- salt/cloud/clouds/linode.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/salt/cloud/clouds/linode.py b/salt/cloud/clouds/linode.py index 6b761cb985..b64b69685c 100644 --- a/salt/cloud/clouds/linode.py +++ b/salt/cloud/clouds/linode.py @@ -44,9 +44,6 @@ from 
salt.exceptions import ( SaltCloudSystemExit ) -# Import Salt-Cloud Libs -import salt.utils.cloud - # Get logging started log = logging.getLogger(__name__) @@ -1193,7 +1190,7 @@ def list_nodes_select(call=None): ''' Return a list of the VMs that are on the provider, with select fields. ''' - return salt.utils.cloud.list_nodes_select( + return __utils__['cloud.list_nodes_select']( list_nodes_full(), __opts__['query.selection'], call, ) @@ -1503,7 +1500,7 @@ def _query(action=None, if LASTCALL >= now: time.sleep(ratelimit_sleep) - result = salt.utils.http.query( + result = __utils__['http.query']( url, method, params=args, From bf3785739e8a867f1ac8d8b15c80b956c9e38af9 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 13 Sep 2017 13:01:15 -0400 Subject: [PATCH 487/639] Fixed failing config.test_api tests for when the ROOT_DIR is / --- tests/unit/config/test_api.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/unit/config/test_api.py b/tests/unit/config/test_api.py index 89a8f03c35..b7e2d5d7a7 100644 --- a/tests/unit/config/test_api.py +++ b/tests/unit/config/test_api.py @@ -57,7 +57,8 @@ class APIConfigTestCase(TestCase): ''' with patch('salt.config.client_config', MagicMock(return_value=MOCK_MASTER_DEFAULT_OPTS)): - expected = '{0}/var/log/salt/api'.format(salt.syspaths.ROOT_DIR) + expected = '{0}/var/log/salt/api'.format( + salt.syspaths.ROOT_DIR if salt.syspaths.ROOT_DIR != '/' else '') if salt.utils.platform.is_windows(): expected = '{0}\\var\\log\\salt\\api'.format( salt.syspaths.ROOT_DIR) @@ -73,7 +74,8 @@ class APIConfigTestCase(TestCase): ''' with patch('salt.config.client_config', MagicMock(return_value=MOCK_MASTER_DEFAULT_OPTS)): - expected = '{0}/var/run/salt-api.pid'.format(salt.syspaths.ROOT_DIR) + expected = '{0}/var/run/salt-api.pid'.format( + salt.syspaths.ROOT_DIR if salt.syspaths.ROOT_DIR != '/' else '') if salt.utils.platform.is_windows(): expected = '{0}\\var\\run\\salt-api.pid'.format( 
salt.syspaths.ROOT_DIR) From 132b56d785d33f8b25168cda008f672d24ee60a7 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Sun, 27 Aug 2017 20:50:48 -0700 Subject: [PATCH 488/639] Updating mount to allow opts in the state file to be removed and have the volume remounted --- salt/modules/mount.py | 56 +++++++++++++++++++++++++++++++++ salt/states/mount.py | 55 ++++++++++++++++++++++++++++++++ tests/unit/states/test_mount.py | 4 +++ 3 files changed, 115 insertions(+) diff --git a/salt/modules/mount.py b/salt/modules/mount.py index ee263c4d77..7272f57917 100644 --- a/salt/modules/mount.py +++ b/salt/modules/mount.py @@ -14,6 +14,7 @@ import salt.utils # Can be removed once test_mode is moved import salt.utils.files import salt.utils.path import salt.utils.platform +import salt.utils.mount from salt.exceptions import CommandNotFoundError, CommandExecutionError # Import 3rd-party libs @@ -1262,3 +1263,58 @@ def is_mounted(name): return True else: return False + + +def read_mount_cache(name): + ''' + .. versionadded:: Oxygen + + Provide information if the path is mounted + + CLI Example: + + .. code-block:: bash + + salt '*' mount.read_mount_cache /mnt/share + ''' + cache = salt.utils.mount.read_cache(__opts__) + if cache: + if 'mounts' in cache and cache['mounts']: + if name in cache['mounts']: + return cache['mounts'][name] + return {} + + +def write_mount_cache(real_name, + device, + mkmnt, + fstype, + opts): + ''' + .. versionadded:: Oxygen + + Provide information if the path is mounted + + CLI Example: + + .. 
code-block:: bash + + salt '*' mount.write_mount_cache /mnt/share + ''' + cache = salt.utils.mount.read_cache(__opts__) + + if 'mounts' in cache: + cache['mounts'][real_name] = {'device': device, + 'fstype': fstype, + 'mkmnt': mkmnt, + 'opts': opts} + else: + cache['mounts'] = {} + cache['mounts'][real_name] = {'device': device, + 'fstype': fstype, + 'mkmnt': mkmnt, + 'opts': opts} + + log.debug('=== cache {} ==='.format(cache)) + cache = salt.utils.mount.write_cache(cache, __opts__) + return True diff --git a/salt/states/mount.py b/salt/states/mount.py index fef674ddbc..cab9a719e3 100644 --- a/salt/states/mount.py +++ b/salt/states/mount.py @@ -197,6 +197,8 @@ def mounted(name, 'result': True, 'comment': ''} + update_mount_cache = False + if device_name_regex is None: device_name_regex = [] @@ -439,6 +441,50 @@ def mounted(name, # don't write remount into fstab if 'remount' in opts: opts.remove('remount') + + # Update the cache + update_mount_cache = True + + mount_cache = __salt__['mount.read_mount_cache'](real_name) + if 'opts' in mount_cache: + _missing = [opt for opt in mount_cache['opts'] + if opt not in opts] + + if _missing: + if __opts__['test']: + ret['result'] = None + ret['comment'] = ('Remount would be forced because' + ' options ({0})' + 'changed'.format(','.join(_missing))) + return ret + else: + # Some file systems require umounting and mounting if options change + # add others to list that require similiar functionality + if fstype in ['nfs', 'cvfs'] or fstype.startswith('fuse'): + ret['changes']['umount'] = "Forced unmount and mount because " \ + + "options ({0}) changed".format(opt) + unmount_result = __salt__['mount.umount'](real_name) + if unmount_result is True: + mount_result = __salt__['mount.mount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts) + ret['result'] = mount_result + else: + ret['result'] = False + ret['comment'] = 'Unable to unmount {0}: {1}.'.format(real_name, unmount_result) + return ret + else: + 
ret['changes']['umount'] = "Forced remount because " \ + + "options ({0}) changed".format(opt) + remount_result = __salt__['mount.remount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts) + ret['result'] = remount_result + # Cleanup after the remount, so we + # don't write remount into fstab + if 'remount' in opts: + opts.remove('remount') + + update_mount_cache = True + else: + update_mount_cache = True + if real_device not in device_list: # name matches but device doesn't - need to umount _device_mismatch_is_ignored = None @@ -469,6 +515,7 @@ def mounted(name, ret['comment'] = "Unable to unmount" ret['result'] = None return ret + update_mount_cache = True else: ret['comment'] = 'Target was already mounted' # using a duplicate check so I can catch the results of a umount @@ -492,6 +539,7 @@ def mounted(name, out = __salt__['mount.mount'](name, device, mkmnt, fstype, opts, user=user) active = __salt__['mount.active'](extended=True) + update_mount_cache = True if isinstance(out, string_types): # Failed to (re)mount, the state has failed! ret['comment'] = out @@ -591,6 +639,13 @@ def mounted(name, config, match_on=match_on) + if update_mount_cache: + cache_result = __salt__['mount.write_mount_cache'](real_name, + device, + mkmnt=mkmnt, + fstype=fstype, + opts=opts) + if out == 'present': ret['comment'] += '. Entry already exists in the fstab.' return ret diff --git a/tests/unit/states/test_mount.py b/tests/unit/states/test_mount.py index 65173ea698..1e1886001f 100644 --- a/tests/unit/states/test_mount.py +++ b/tests/unit/states/test_mount.py @@ -62,6 +62,8 @@ class MountTestCase(TestCase, LoaderModuleMockMixin): mock_str = MagicMock(return_value='salt') mock_user = MagicMock(return_value={'uid': 510}) mock_group = MagicMock(return_value={'gid': 100}) + mock_read_cache = MagicMock(return_value={}) + mock_write_cache = MagicMock(return_value=True) umount1 = ("Forced unmount because devices don't match. 
" "Wanted: /dev/sdb6, current: /dev/sdb5, /dev/sdb5") with patch.dict(mount.__grains__, {'os': 'Darwin'}): @@ -163,6 +165,8 @@ class MountTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(mount.__salt__, {'mount.active': mock_mnt, 'mount.mount': mock_str, 'mount.umount': mock_f, + 'mount.read_mount_cache': mock_read_cache, + 'mount.write_mount_cache': mock_write_cache, 'mount.set_fstab': mock, 'user.info': mock_user, 'group.info': mock_group}): From df571b90aa7583db66d1f95b8dfd4af2ca3c61dc Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Mon, 28 Aug 2017 08:59:08 -0700 Subject: [PATCH 489/639] adding utils/mount.py --- salt/utils/mount.py | 73 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 salt/utils/mount.py diff --git a/salt/utils/mount.py b/salt/utils/mount.py new file mode 100644 index 0000000000..86e695bd3c --- /dev/null +++ b/salt/utils/mount.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +''' +Common functions for managing mounts +''' + +# Import python libs +from __future__ import absolute_import +import logging +import os +import yaml + +# Import Salt libs +import salt.utils # Can be removed once is_true is moved +import salt.utils.files +import salt.utils.versions + +from salt.utils.yamldumper import SafeOrderedDumper + +log = logging.getLogger(__name__) + + +def __virtual__(): + ''' + Confine this module to Debian based distros + ''' + return True + + +def _read_file(path): + ''' + Reads and returns the contents of a text file + ''' + try: + with salt.utils.files.fopen(path, 'rb') as contents: + return yaml.safe_load(contents.read()) + except (OSError, IOError): + return {} + + +def get_cache(opts): + ''' + Return the mount cache file location. + ''' + return os.path.join(opts['cachedir'], 'mounts') + + +def read_cache(opts): + ''' + Write the mount cache file. 
+ ''' + cache_file = get_cache(opts) + return _read_file(cache_file) + + +def write_cache(cache, opts): + ''' + Write the mount cache file. + ''' + cache_file = get_cache(opts) + + try: + _cache = salt.utils.stringutils.to_bytes( + yaml.dump( + cache, + Dumper=SafeOrderedDumper + ) + ) + log.debug('=== cache {} ==='.format(_cache)) + with salt.utils.files.fopen(cache_file, 'wb+') as fp_: + fp_.write(_cache) + except (IOError, OSError): + log.error('Failed to cache mounts', + exc_info_on_loglevel=logging.DEBUG) From 594c324beb2b538d56552edb144a4aeca349db1e Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Mon, 28 Aug 2017 12:11:11 -0700 Subject: [PATCH 490/639] Updating unmounted state to ensure the cache is updated and mount is cleared out on unmount. --- salt/modules/mount.py | 22 +++++++++++++++++++++- salt/states/mount.py | 8 ++++++++ salt/utils/mount.py | 1 - 3 files changed, 29 insertions(+), 2 deletions(-) diff --git a/salt/modules/mount.py b/salt/modules/mount.py index 7272f57917..1ab49c941d 100644 --- a/salt/modules/mount.py +++ b/salt/modules/mount.py @@ -1315,6 +1315,26 @@ def write_mount_cache(real_name, 'mkmnt': mkmnt, 'opts': opts} - log.debug('=== cache {} ==='.format(cache)) cache = salt.utils.mount.write_cache(cache, __opts__) return True + + +def delete_mount_cache(real_name): + ''' + .. versionadded:: Oxygen + + Provide information if the path is mounted + + CLI Example: + + .. 
code-block:: bash + + salt '*' mount.delete_mount_cache /mnt/share + ''' + cache = salt.utils.mount.read_cache(__opts__) + + if 'mounts' in cache: + if real_name in cache['mounts']: + del cache['mounts'][real_name] + cache = salt.utils.mount.write_cache(cache, __opts__) + return True diff --git a/salt/states/mount.py b/salt/states/mount.py index cab9a719e3..a37e98ee8c 100644 --- a/salt/states/mount.py +++ b/salt/states/mount.py @@ -640,6 +640,7 @@ def mounted(name, match_on=match_on) if update_mount_cache: + log.debug('=== opts {} ==='.format(opts)) cache_result = __salt__['mount.write_mount_cache'](real_name, device, mkmnt=mkmnt, @@ -785,6 +786,8 @@ def unmounted(name, 'result': True, 'comment': ''} + update_mount_cache = False + # Get the active data active = __salt__['mount.active'](extended=True) if name not in active: @@ -799,8 +802,10 @@ def unmounted(name, return ret if device: out = __salt__['mount.umount'](name, device, user=user) + update_mount_cache = True else: out = __salt__['mount.umount'](name, user=user) + update_mount_cache = True if isinstance(out, string_types): # Failed to umount, the state has failed! ret['comment'] = out @@ -813,6 +818,9 @@ def unmounted(name, ret['comment'] = 'Execute set to False, Target was not unmounted' ret['result'] = True + if update_mount_cache: + cache_result = __salt__['mount.delete_mount_cache'](name) + if persist: # Override default for Mac OS if __grains__['os'] in ['MacOS', 'Darwin'] and config == '/etc/fstab': diff --git a/salt/utils/mount.py b/salt/utils/mount.py index 86e695bd3c..a08b1caff1 100644 --- a/salt/utils/mount.py +++ b/salt/utils/mount.py @@ -65,7 +65,6 @@ def write_cache(cache, opts): Dumper=SafeOrderedDumper ) ) - log.debug('=== cache {} ==='.format(_cache)) with salt.utils.files.fopen(cache_file, 'wb+') as fp_: fp_.write(_cache) except (IOError, OSError): From b2475b1c35448be70f27e0785582cee693caa17f Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Mon, 4 Sep 2017 18:06:11 -0700 Subject: [PATCH 491/639] Adding a check in case the cache file is empty. --- salt/modules/mount.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/salt/modules/mount.py b/salt/modules/mount.py index 1ab49c941d..fc8c7a5836 100644 --- a/salt/modules/mount.py +++ b/salt/modules/mount.py @@ -1303,12 +1303,20 @@ def write_mount_cache(real_name, ''' cache = salt.utils.mount.read_cache(__opts__) - if 'mounts' in cache: - cache['mounts'][real_name] = {'device': device, - 'fstype': fstype, - 'mkmnt': mkmnt, - 'opts': opts} + if cache: + if 'mounts' in cache: + cache['mounts'][real_name] = {'device': device, + 'fstype': fstype, + 'mkmnt': mkmnt, + 'opts': opts} + else: + cache['mounts'] = {} + cache['mounts'][real_name] = {'device': device, + 'fstype': fstype, + 'mkmnt': mkmnt, + 'opts': opts} else: + cache = {} cache['mounts'] = {} cache['mounts'][real_name] = {'device': device, 'fstype': fstype, @@ -1333,8 +1341,9 @@ def delete_mount_cache(real_name): ''' cache = salt.utils.mount.read_cache(__opts__) - if 'mounts' in cache: - if real_name in cache['mounts']: - del cache['mounts'][real_name] - cache = salt.utils.mount.write_cache(cache, __opts__) + if cache: + if 'mounts' in cache: + if real_name in cache['mounts']: + del cache['mounts'][real_name] + cache = salt.utils.mount.write_cache(cache, __opts__) return True From b71e7e673f5371ed20157fa97626985f5f5fe2eb Mon Sep 17 00:00:00 2001 From: "Hilberding, Rob" Date: Wed, 13 Sep 2017 13:19:12 -0500 Subject: [PATCH 492/639] Made the requested changes --- salt/states/beacon.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/states/beacon.py b/salt/states/beacon.py index e26a8b4dc9..feb1931192 100644 --- a/salt/states/beacon.py +++ b/salt/states/beacon.py @@ -65,7 +65,7 @@ def present(name, else: if 'test' in __opts__ and __opts__['test']: kwargs['test'] = True - result = __salt__['beacons.modify'](name, 
beacon_data, **kwargs) + result = __salt__['beacons.modify'](name, beacon_data) ret['comment'].append(result['comment']) ret['changes'] = result['changes'] else: @@ -97,7 +97,7 @@ def present(name, if save == True: result = __salt__['beacons.save']() - ret['comment'].append('Beacons saved'.format(name)) + ret['comment'].append('Beacon {0} saved'.format(name)) ret['comment'] = '\n'.join(ret['comment']) return ret @@ -143,7 +143,7 @@ def absent(name, if save == True: result = __salt__['beacons.save']() - ret['comment'].append('Beacons saved'.format(name)) + ret['comment'].append('Beacon {0} saved'.format(name)) ret['comment'] = '\n'.join(ret['comment']) return ret From 999deacb0aa05a62ca5e45b64e0ac355debfa571 Mon Sep 17 00:00:00 2001 From: 3add3287 <3add3287@users.noreply.github.com> Date: Wed, 13 Sep 2017 20:38:39 +0200 Subject: [PATCH 493/639] Fix indentation from tabs to spaces --- salt/modules/ssh.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py index 5101d9973c..86689013a8 100644 --- a/salt/modules/ssh.py +++ b/salt/modules/ssh.py @@ -768,14 +768,14 @@ def set_auth_key( try: with salt.utils.files.fopen(fconfig, 'ab+') as _fh: if new_file is False: - # Let's make sure we have a new line at the end of the file - _fh.seek(0,2) - if _fh.tell() > 0: - # File isn't empty, check if last byte is a newline - # If not, add one - _fh.seek(-1,2) - if _fh.read(1) != six.b('\n'): - _fh.write(six.b('\n')) + # Let's make sure we have a new line at the end of the file + _fh.seek(0,2) + if _fh.tell() > 0: + # File isn't empty, check if last byte is a newline + # If not, add one + _fh.seek(-1,2) + if _fh.read(1) != six.b('\n'): + _fh.write(six.b('\n')) if six.PY3: auth_line = auth_line.encode(__salt_system_encoding__) _fh.write(auth_line) From 810899d413943e237d46f3f6cbcfc4fe79be5505 Mon Sep 17 00:00:00 2001 From: "Hilberding, Rob" Date: Wed, 13 Sep 2017 13:44:05 -0500 Subject: [PATCH 494/639] 
Simplified if statement --- salt/states/beacon.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/states/beacon.py b/salt/states/beacon.py index feb1931192..64d1905dc2 100644 --- a/salt/states/beacon.py +++ b/salt/states/beacon.py @@ -95,7 +95,7 @@ def present(name, else: ret['comment'].append('Adding {0} to beacons'.format(name)) - if save == True: + if save: result = __salt__['beacons.save']() ret['comment'].append('Beacon {0} saved'.format(name)) @@ -141,7 +141,7 @@ def absent(name, else: ret['comment'].append('{0} not configured in beacons'.format(name)) - if save == True: + if save: result = __salt__['beacons.save']() ret['comment'].append('Beacon {0} saved'.format(name)) From 6e7149cd711977bd3da1cdb148f385582895a1d4 Mon Sep 17 00:00:00 2001 From: rallytime Date: Wed, 13 Sep 2017 16:29:31 -0400 Subject: [PATCH 495/639] Move salt.utils.human_to_bytes to salt.utils.stringutils --- salt/modules/purefa.py | 13 ++++++------- salt/utils/__init__.py | 25 ------------------------- salt/utils/stringutils.py | 27 +++++++++++++++++++++++++++ 3 files changed, 33 insertions(+), 32 deletions(-) diff --git a/salt/modules/purefa.py b/salt/modules/purefa.py index 279cdcd836..d39e6df09a 100644 --- a/salt/modules/purefa.py +++ b/salt/modules/purefa.py @@ -32,19 +32,18 @@ Installation Prerequisites :requires: purestorage :platform: all ''' -from __future__ import absolute_import +# Import Python libs +from __future__ import absolute_import import os import platform from datetime import datetime -# 3rd party modules -# pylint: disable=import-error,no-name-in-module,redefined-builtin +# Import Salt libs from salt.exceptions import CommandExecutionError -from salt.utils import xor, human_to_bytes -# pylint: enable=import-error,no-name-in-module - +from salt.utils import xor +# Import 3rd party modules try: import purestorage HAS_PURESTORAGE = True @@ -430,7 +429,7 @@ def volume_extend(name, size): array = _get_system() vol = _get_volume(name, array) if vol 
is not None: - if human_to_bytes(size) > vol['size']: + if __utils__['stringutils.human_to_bytes'](size) > vol['size']: try: array.extend_volume(name, size) return True diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 4a902651ec..c1cbd3dd09 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -3331,28 +3331,3 @@ def xor(*vars): for value in vars: sum = sum ^ bool(value) return sum - - -def human_to_bytes(size): - ''' - Given a human-readable byte string (e.g. 2G, 30M), - return the number of bytes. Will return 0 if the argument has - unexpected form. - ''' - sbytes = size[:-1] - unit = size[-1] - if sbytes.isdigit(): - sbytes = int(sbytes) - if unit == 'P': - sbytes *= 1125899906842624 - elif unit == 'T': - sbytes *= 1099511627776 - elif unit == 'G': - sbytes *= 1073741824 - elif unit == 'M': - sbytes *= 1048576 - else: - sbytes = 0 - else: - sbytes = 0 - return sbytes diff --git a/salt/utils/stringutils.py b/salt/utils/stringutils.py index fc194930b9..89e91b0be7 100644 --- a/salt/utils/stringutils.py +++ b/salt/utils/stringutils.py @@ -170,3 +170,30 @@ def contains_whitespace(text): Returns True if there are any whitespace characters in the string ''' return any(x.isspace() for x in text) + + +def human_to_bytes(size): + ''' + Given a human-readable byte string (e.g. 2G, 30M), + return the number of bytes. Will return 0 if the argument has + unexpected form. + + .. 
versionadded:: Oxygen + ''' + sbytes = size[:-1] + unit = size[-1] + if sbytes.isdigit(): + sbytes = int(sbytes) + if unit == 'P': + sbytes *= 1125899906842624 + elif unit == 'T': + sbytes *= 1099511627776 + elif unit == 'G': + sbytes *= 1073741824 + elif unit == 'M': + sbytes *= 1048576 + else: + sbytes = 0 + else: + sbytes = 0 + return sbytes From 82feaea913a151de037cb0020fae2b5e42907048 Mon Sep 17 00:00:00 2001 From: rallytime Date: Wed, 13 Sep 2017 16:49:16 -0400 Subject: [PATCH 496/639] Move salt.utils.xor to salt.utils.value --- salt/modules/purefa.py | 3 +-- salt/utils/__init__.py | 10 ---------- salt/utils/value.py | 19 +++++++++++++++++++ 3 files changed, 20 insertions(+), 12 deletions(-) create mode 100644 salt/utils/value.py diff --git a/salt/modules/purefa.py b/salt/modules/purefa.py index d39e6df09a..02f4ea049b 100644 --- a/salt/modules/purefa.py +++ b/salt/modules/purefa.py @@ -41,7 +41,6 @@ from datetime import datetime # Import Salt libs from salt.exceptions import CommandExecutionError -from salt.utils import xor # Import 3rd party modules try: @@ -967,7 +966,7 @@ def pg_create(name, hostgroup=None, host=None, volume=None, enabled=True): return False else: return False - elif xor(hostgroup, host, volume): + elif __utils__['value.xor'](hostgroup, host, volume): if _get_pgroup(name, array) is None: try: array.create_pgroup(name) diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index c1cbd3dd09..02da64405b 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -3321,13 +3321,3 @@ def check_state_result(running, recurse=False, highstate=None): return salt.utils.state.check_result( running, recurse=recurse, highstate=highstate ) - - -def xor(*vars): - ''' - XOR definition for multiple variables - ''' - sum = bool(False) - for value in vars: - sum = sum ^ bool(value) - return sum diff --git a/salt/utils/value.py b/salt/utils/value.py new file mode 100644 index 0000000000..222dd10813 --- /dev/null +++ b/salt/utils/value.py @@ -0,0 
+1,19 @@ +# -*- coding: utf-8 -*- +''' +Utility functions used for values. + +.. versionadded:: Oxygen +''' + +# Import Python libs +from __future__ import absolute_import + + +def xor(*variables): + ''' + XOR definition for multiple variables + ''' + sum_ = False + for value in variables: + sum_ = sum_ ^ bool(value) + return sum_ From 9df868930a1b2d31576e15a062af6b51eb363246 Mon Sep 17 00:00:00 2001 From: rallytime Date: Wed, 13 Sep 2017 17:37:49 -0400 Subject: [PATCH 497/639] Move salt.utils.arg_lookup to salt.utils.args.py --- salt/auth/__init__.py | 5 +++-- salt/modules/mine.py | 2 +- salt/utils/__init__.py | 36 +++++++++++++++++-------------- salt/utils/args.py | 14 ++++++++++++ tests/unit/test_auth.py | 3 ++- tests/unit/utils/test_args.py | 39 +++++++++++++++++++++++++++++++++- tests/unit/utils/test_utils.py | 34 ++--------------------------- 7 files changed, 80 insertions(+), 53 deletions(-) diff --git a/salt/auth/__init__.py b/salt/auth/__init__.py index 2b91e3d9e1..085e3bfcc4 100644 --- a/salt/auth/__init__.py +++ b/salt/auth/__init__.py @@ -29,6 +29,7 @@ import salt.config import salt.loader import salt.transport.client import salt.utils +import salt.utils.args import salt.utils.files import salt.utils.minions import salt.utils.versions @@ -69,7 +70,7 @@ class LoadAuth(object): if fstr not in self.auth: return '' try: - pname_arg = salt.utils.arg_lookup(self.auth[fstr])['args'][0] + pname_arg = salt.utils.args.arg_lookup(self.auth[fstr])['args'][0] return load[pname_arg] except IndexError: return '' @@ -642,7 +643,7 @@ class Resolver(object): 'not available').format(eauth)) return ret - args = salt.utils.arg_lookup(self.auth[fstr]) + args = salt.utils.args.arg_lookup(self.auth[fstr]) for arg in args['args']: if arg in self.opts: ret[arg] = self.opts[arg] diff --git a/salt/modules/mine.py b/salt/modules/mine.py index 063f577a44..b9ba6271cf 100644 --- a/salt/modules/mine.py +++ b/salt/modules/mine.py @@ -204,7 +204,7 @@ def send(func, *args, **kwargs): if 
mine_func not in __salt__: return False data = {} - arg_data = salt.utils.arg_lookup(__salt__[mine_func]) + arg_data = salt.utils.args.arg_lookup(__salt__[mine_func]) func_data = copy.deepcopy(kwargs) for ind, _ in enumerate(arg_data.get('args', [])): try: diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 4a902651ec..719ff4c3fe 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -860,7 +860,7 @@ def format_call(fun, aspec = salt.utils.args.get_function_argspec(fun, is_class_method=is_class_method) - arg_data = arg_lookup(fun, aspec) + arg_data = salt.utils.args.arg_lookup(fun, aspec) args = arg_data['args'] kwargs = arg_data['kwargs'] @@ -968,21 +968,6 @@ def format_call(fun, return ret -def arg_lookup(fun, aspec=None): - ''' - Return a dict containing the arguments and default arguments to the - function. - ''' - import salt.utils.args - ret = {'kwargs': {}} - if aspec is None: - aspec = salt.utils.args.get_function_argspec(fun) - if aspec.defaults: - ret['kwargs'] = dict(zip(aspec.args[::-1], aspec.defaults[::-1])) - ret['args'] = [arg for arg in aspec.args if arg not in ret['kwargs']] - return ret - - @jinja_filter('sorted_ignorecase') def isorted(to_sort): ''' @@ -2549,6 +2534,25 @@ def shlex_split(s, **kwargs): return salt.utils.args.shlex_split(s, **kwargs) +def arg_lookup(fun, aspec=None): + ''' + Return a dict containing the arguments and default arguments to the + function. + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.args + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.arg_lookup\' detected. This function has been ' + 'moved to \'salt.utils.args.arg_lookup\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' 
+ ) + return salt.utils.args.arg_lookup(fun, aspec=aspec) + + def which(exe=None): ''' Python clone of /usr/bin/which diff --git a/salt/utils/args.py b/salt/utils/args.py index 8c55f845c6..397eefc51a 100644 --- a/salt/utils/args.py +++ b/salt/utils/args.py @@ -268,3 +268,17 @@ def shlex_split(s, **kwargs): return shlex.split(s, **kwargs) else: return s + + +def arg_lookup(fun, aspec=None): + ''' + Return a dict containing the arguments and default arguments to the + function. + ''' + ret = {'kwargs': {}} + if aspec is None: + aspec = get_function_argspec(fun) + if aspec.defaults: + ret['kwargs'] = dict(zip(aspec.args[::-1], aspec.defaults[::-1])) + ret['args'] = [arg for arg in aspec.args if arg not in ret['kwargs']] + return ret diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py index e25ab79c17..d08f9e71b1 100644 --- a/tests/unit/test_auth.py +++ b/tests/unit/test_auth.py @@ -49,7 +49,8 @@ class LoadAuthTestCase(TestCase): self.assertEqual(ret, '', "Did not bail when the auth loader didn't have the auth type.") # Test a case with valid params - with patch('salt.utils.arg_lookup', MagicMock(return_value={'args': ['username', 'password']})) as format_call_mock: + with patch('salt.utils.args.arg_lookup', + MagicMock(return_value={'args': ['username', 'password']})) as format_call_mock: expected_ret = call('fake_func_str') ret = self.lauth.load_name(valid_eauth_load) format_call_mock.assert_has_calls((expected_ret,), any_order=True) diff --git a/tests/unit/utils/test_args.py b/tests/unit/utils/test_args.py index a92851a025..8daffc7dee 100644 --- a/tests/unit/utils/test_args.py +++ b/tests/unit/utils/test_args.py @@ -5,10 +5,17 @@ from __future__ import absolute_import from collections import namedtuple # Import Salt Libs +from salt.exceptions import SaltInvocationError import salt.utils.args # Import Salt Testing Libs -from tests.support.unit import TestCase +from tests.support.unit import TestCase, skipIf +from tests.support.mock import ( + DEFAULT, + 
NO_MOCK, + NO_MOCK_REASON, + patch +) class ArgsTestCase(TestCase): @@ -45,3 +52,33 @@ class ArgsTestCase(TestCase): ret = salt.utils.args.parse_kwarg('foobar') self.assertEqual(ret, (None, None)) + + def test_arg_lookup(self): + def dummy_func(first, second, third, fourth='fifth'): + pass + + expected_dict = {'args': ['first', 'second', 'third'], 'kwargs': {'fourth': 'fifth'}} + ret = salt.utils.args.arg_lookup(dummy_func) + self.assertEqual(expected_dict, ret) + + @skipIf(NO_MOCK, NO_MOCK_REASON) + def test_format_call(self): + with patch('salt.utils.args.arg_lookup') as arg_lookup: + def dummy_func(first=None, second=None, third=None): + pass + + arg_lookup.return_value = {'args': ['first', 'second', 'third'], 'kwargs': {}} + get_function_argspec = DEFAULT + get_function_argspec.return_value = namedtuple('ArgSpec', 'args varargs keywords defaults')( + args=['first', 'second', 'third', 'fourth'], varargs=None, keywords=None, defaults=('fifth',)) + + # Make sure we raise an error if we don't pass in the requisite number of arguments + self.assertRaises(SaltInvocationError, salt.utils.format_call, dummy_func, {'1': 2}) + + # Make sure we warn on invalid kwargs + ret = salt.utils.format_call(dummy_func, {'first': 2, 'second': 2, 'third': 3}) + self.assertGreaterEqual(len(ret['warnings']), 1) + + ret = salt.utils.format_call(dummy_func, {'first': 2, 'second': 2, 'third': 3}, + expected_extra_kws=('first', 'second', 'third')) + self.assertDictEqual(ret, {'args': [], 'kwargs': {}}) diff --git a/tests/unit/utils/test_utils.py b/tests/unit/utils/test_utils.py index 8fd89fa4aa..4879a80d95 100644 --- a/tests/unit/utils/test_utils.py +++ b/tests/unit/utils/test_utils.py @@ -9,7 +9,7 @@ from __future__ import absolute_import # Import Salt Testing libs from tests.support.unit import TestCase, skipIf from tests.support.mock import ( - patch, DEFAULT, + patch, create_autospec, NO_MOCK, NO_MOCK_REASON @@ -20,14 +20,13 @@ import salt.utils import salt.utils.jid import 
salt.utils.yamlencoding import salt.utils.zeromq -from salt.exceptions import (SaltInvocationError, SaltSystemExit, CommandNotFoundError) +from salt.exceptions import SaltSystemExit, CommandNotFoundError # Import Python libraries import datetime import os import yaml import zmq -from collections import namedtuple # Import 3rd-party libs try: @@ -100,35 +99,6 @@ class UtilsTestCase(TestCase): ret = salt.utils.build_whitespace_split_regex(' '.join(LOREM_IPSUM.split()[:5])) self.assertEqual(ret, expected_regex) - def test_arg_lookup(self): - def dummy_func(first, second, third, fourth='fifth'): - pass - - expected_dict = {'args': ['first', 'second', 'third'], 'kwargs': {'fourth': 'fifth'}} - ret = salt.utils.arg_lookup(dummy_func) - self.assertEqual(expected_dict, ret) - - @skipIf(NO_MOCK, NO_MOCK_REASON) - def test_format_call(self): - with patch('salt.utils.arg_lookup') as arg_lookup: - def dummy_func(first=None, second=None, third=None): - pass - arg_lookup.return_value = {'args': ['first', 'second', 'third'], 'kwargs': {}} - get_function_argspec = DEFAULT - get_function_argspec.return_value = namedtuple('ArgSpec', 'args varargs keywords defaults')( - args=['first', 'second', 'third', 'fourth'], varargs=None, keywords=None, defaults=('fifth',)) - - # Make sure we raise an error if we don't pass in the requisite number of arguments - self.assertRaises(SaltInvocationError, salt.utils.format_call, dummy_func, {'1': 2}) - - # Make sure we warn on invalid kwargs - ret = salt.utils.format_call(dummy_func, {'first': 2, 'second': 2, 'third': 3}) - self.assertGreaterEqual(len(ret['warnings']), 1) - - ret = salt.utils.format_call(dummy_func, {'first': 2, 'second': 2, 'third': 3}, - expected_extra_kws=('first', 'second', 'third')) - self.assertDictEqual(ret, {'args': [], 'kwargs': {}}) - def test_isorted(self): test_list = ['foo', 'Foo', 'bar', 'Bar'] expected_list = ['bar', 'Bar', 'foo', 'Foo'] From acb98bbce28eaa06c651034aa94b0cfd8c5cdab4 Mon Sep 17 00:00:00 2001 From: 
rallytime Date: Wed, 13 Sep 2017 18:12:52 -0400 Subject: [PATCH 498/639] Move salt.utils.argspec_report to salt.utils.args.py --- salt/client/api.py | 11 +++--- salt/modules/sysmod.py | 34 +++++++++--------- salt/utils/__init__.py | 66 ++++++++++------------------------ salt/utils/args.py | 50 ++++++++++++++++++++++++-- tests/unit/utils/test_args.py | 12 +++++++ tests/unit/utils/test_utils.py | 12 ------- 6 files changed, 102 insertions(+), 83 deletions(-) diff --git a/salt/client/api.py b/salt/client/api.py index 55a6e32728..27ad6a46f3 100644 --- a/salt/client/api.py +++ b/salt/client/api.py @@ -14,8 +14,9 @@ client applications. http://docs.saltstack.com/ref/clients/index.html ''' -from __future__ import absolute_import + # Import Python libs +from __future__ import absolute_import import os # Import Salt libs @@ -24,9 +25,9 @@ import salt.auth import salt.client import salt.runner import salt.wheel -import salt.utils +import salt.utils.args +import salt.utils.event import salt.syspaths as syspaths -from salt.utils.event import tagify from salt.exceptions import EauthAuthenticationError @@ -229,7 +230,7 @@ class APIClient(object): functions = self.wheelClient.functions elif client == u'runner': functions = self.runnerClient.functions - result = {u'master': salt.utils.argspec_report(functions, module)} + result = {u'master': salt.utils.args.argspec_report(functions, module)} return result def create_token(self, creds): @@ -322,4 +323,4 @@ class APIClient(object): Need to convert this to a master call with appropriate authentication ''' - return self.event.fire_event(data, tagify(tag, u'wui')) + return self.event.fire_event(data, salt.utils.event.tagify(tag, u'wui')) diff --git a/salt/modules/sysmod.py b/salt/modules/sysmod.py index 93a0c99321..31de9fa67b 100644 --- a/salt/modules/sysmod.py +++ b/salt/modules/sysmod.py @@ -12,8 +12,8 @@ import logging import salt.loader import salt.runner import salt.state -import salt.utils -import salt.utils.schema as S +import 
salt.utils.args +import salt.utils.schema from salt.utils.doc import strip_rst as _strip_rst from salt.ext.six.moves import zip @@ -450,7 +450,7 @@ def argspec(module=''): salt '*' sys.argspec 'pkg.*' ''' - return salt.utils.argspec_report(__salt__, module) + return salt.utils.args.argspec_report(__salt__, module) def state_argspec(module=''): @@ -476,7 +476,7 @@ def state_argspec(module=''): ''' st_ = salt.state.State(__opts__) - return salt.utils.argspec_report(st_.states, module) + return salt.utils.args.argspec_report(st_.states, module) def returner_argspec(module=''): @@ -502,7 +502,7 @@ def returner_argspec(module=''): ''' returners_ = salt.loader.returners(__opts__, []) - return salt.utils.argspec_report(returners_, module) + return salt.utils.args.argspec_report(returners_, module) def runner_argspec(module=''): @@ -527,7 +527,7 @@ def runner_argspec(module=''): salt '*' sys.runner_argspec 'winrepo.*' ''' run_ = salt.runner.Runner(__opts__) - return salt.utils.argspec_report(run_.functions, module) + return salt.utils.args.argspec_report(run_.functions, module) def list_state_functions(*args, **kwargs): # pylint: disable=unused-argument @@ -844,28 +844,28 @@ def _argspec_to_schema(mod, spec): } for i in args_req: - types[i] = S.OneOfItem(items=( - S.BooleanItem(title=i, description=i, required=True), - S.IntegerItem(title=i, description=i, required=True), - S.NumberItem(title=i, description=i, required=True), - S.StringItem(title=i, description=i, required=True), + types[i] = salt.utils.schema.OneOfItem(items=( + salt.utils.schema.BooleanItem(title=i, description=i, required=True), + salt.utils.schema.IntegerItem(title=i, description=i, required=True), + salt.utils.schema.NumberItem(title=i, description=i, required=True), + salt.utils.schema.StringItem(title=i, description=i, required=True), # S.ArrayItem(title=i, description=i, required=True), # S.DictItem(title=i, description=i, required=True), )) for i, j in args_defaults: - types[i] = 
S.OneOfItem(items=( - S.BooleanItem(title=i, description=i, default=j), - S.IntegerItem(title=i, description=i, default=j), - S.NumberItem(title=i, description=i, default=j), - S.StringItem(title=i, description=i, default=j), + types[i] = salt.utils.schema.OneOfItem(items=( + salt.utils.schema.BooleanItem(title=i, description=i, default=j), + salt.utils.schema.IntegerItem(title=i, description=i, default=j), + salt.utils.schema.NumberItem(title=i, description=i, default=j), + salt.utils.schema.StringItem(title=i, description=i, default=j), # S.ArrayItem(title=i, description=i, default=j), # S.DictItem(title=i, description=i, default=j), )) - return type(mod, (S.Schema,), types).serialize() + return type(mod, (salt.utils.schema.Schema,), types).serialize() def state_schema(module=''): diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index 719ff4c3fe..596d380ce7 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -38,7 +38,6 @@ from salt.ext import six # pylint: disable=import-error # pylint: disable=redefined-builtin from salt.ext.six.moves import range -from salt.ext.six.moves import zip # pylint: enable=import-error,redefined-builtin if six.PY3: @@ -1693,52 +1692,6 @@ def compare_lists(old=None, new=None): return ret -def argspec_report(functions, module=''): - ''' - Pass in a functions dict as it is returned from the loader and return the - argspec function signatures - ''' - import salt.utils.args - ret = {} - if '*' in module or '.' 
in module: - for fun in fnmatch.filter(functions, module): - try: - aspec = salt.utils.args.get_function_argspec(functions[fun]) - except TypeError: - # this happens if not callable - continue - - args, varargs, kwargs, defaults = aspec - - ret[fun] = {} - ret[fun]['args'] = args if args else None - ret[fun]['defaults'] = defaults if defaults else None - ret[fun]['varargs'] = True if varargs else None - ret[fun]['kwargs'] = True if kwargs else None - - else: - # "sys" should just match sys without also matching sysctl - moduledot = module + '.' - - for fun in functions: - if fun.startswith(moduledot): - try: - aspec = salt.utils.args.get_function_argspec(functions[fun]) - except TypeError: - # this happens if not callable - continue - - args, varargs, kwargs, defaults = aspec - - ret[fun] = {} - ret[fun]['args'] = args if args else None - ret[fun]['defaults'] = defaults if defaults else None - ret[fun]['varargs'] = True if varargs else None - ret[fun]['kwargs'] = True if kwargs else None - - return ret - - @jinja_filter('json_decode_list') def decode_list(data): ''' @@ -2553,6 +2506,25 @@ def arg_lookup(fun, aspec=None): return salt.utils.args.arg_lookup(fun, aspec=aspec) +def argspec_report(functions, module=''): + ''' + Pass in a functions dict as it is returned from the loader and return the + argspec function signatures + + .. deprecated:: Oxygen + ''' + # Late import to avoid circular import. + import salt.utils.versions + import salt.utils.args + salt.utils.versions.warn_until( + 'Neon', + 'Use of \'salt.utils.argspec_report\' detected. This function has been ' + 'moved to \'salt.utils.args.argspec_report\' as of Salt Oxygen. This ' + 'warning will be removed in Salt Neon.' 
+ ) + return salt.utils.args.argspec_report(functions, module=module) + + def which(exe=None): ''' Python clone of /usr/bin/which diff --git a/salt/utils/args.py b/salt/utils/args.py index 397eefc51a..a91763fd6e 100644 --- a/salt/utils/args.py +++ b/salt/utils/args.py @@ -2,12 +2,13 @@ ''' Functions used for CLI argument handling ''' -from __future__ import absolute_import # Import python libs +from __future__ import absolute_import +import fnmatch +import inspect import re import shlex -import inspect # Import salt libs import salt.utils.jid @@ -282,3 +283,48 @@ def arg_lookup(fun, aspec=None): ret['kwargs'] = dict(zip(aspec.args[::-1], aspec.defaults[::-1])) ret['args'] = [arg for arg in aspec.args if arg not in ret['kwargs']] return ret + + +def argspec_report(functions, module=''): + ''' + Pass in a functions dict as it is returned from the loader and return the + argspec function signatures + ''' + ret = {} + if '*' in module or '.' in module: + for fun in fnmatch.filter(functions, module): + try: + aspec = get_function_argspec(functions[fun]) + except TypeError: + # this happens if not callable + continue + + args, varargs, kwargs, defaults = aspec + + ret[fun] = {} + ret[fun]['args'] = args if args else None + ret[fun]['defaults'] = defaults if defaults else None + ret[fun]['varargs'] = True if varargs else None + ret[fun]['kwargs'] = True if kwargs else None + + else: + # "sys" should just match sys without also matching sysctl + module_dot = module + '.' 
+ + for fun in functions: + if fun.startswith(module_dot): + try: + aspec = get_function_argspec(functions[fun]) + except TypeError: + # this happens if not callable + continue + + args, varargs, kwargs, defaults = aspec + + ret[fun] = {} + ret[fun]['args'] = args if args else None + ret[fun]['defaults'] = defaults if defaults else None + ret[fun]['varargs'] = True if varargs else None + ret[fun]['kwargs'] = True if kwargs else None + + return ret diff --git a/tests/unit/utils/test_args.py b/tests/unit/utils/test_args.py index 8daffc7dee..7389978265 100644 --- a/tests/unit/utils/test_args.py +++ b/tests/unit/utils/test_args.py @@ -11,6 +11,7 @@ import salt.utils.args # Import Salt Testing Libs from tests.support.unit import TestCase, skipIf from tests.support.mock import ( + create_autospec, DEFAULT, NO_MOCK, NO_MOCK_REASON, @@ -82,3 +83,14 @@ class ArgsTestCase(TestCase): ret = salt.utils.format_call(dummy_func, {'first': 2, 'second': 2, 'third': 3}, expected_extra_kws=('first', 'second', 'third')) self.assertDictEqual(ret, {'args': [], 'kwargs': {}}) + + @skipIf(NO_MOCK, NO_MOCK_REASON) + def test_argspec_report(self): + def _test_spec(arg1, arg2, kwarg1=None): + pass + + sys_mock = create_autospec(_test_spec) + test_functions = {'test_module.test_spec': sys_mock} + ret = salt.utils.args.argspec_report(test_functions, 'test_module.test_spec') + self.assertDictEqual(ret, {'test_module.test_spec': + {'kwargs': True, 'args': None, 'defaults': None, 'varargs': True}}) diff --git a/tests/unit/utils/test_utils.py b/tests/unit/utils/test_utils.py index 4879a80d95..f13e9cdf73 100644 --- a/tests/unit/utils/test_utils.py +++ b/tests/unit/utils/test_utils.py @@ -10,7 +10,6 @@ from __future__ import absolute_import from tests.support.unit import TestCase, skipIf from tests.support.mock import ( patch, - create_autospec, NO_MOCK, NO_MOCK_REASON ) @@ -353,17 +352,6 @@ class UtilsTestCase(TestCase): expected_ret = {'foo': {'new': 'woz', 'old': 'bar'}} self.assertDictEqual(ret, 
expected_ret) - @skipIf(NO_MOCK, NO_MOCK_REASON) - def test_argspec_report(self): - def _test_spec(arg1, arg2, kwarg1=None): - pass - - sys_mock = create_autospec(_test_spec) - test_functions = {'test_module.test_spec': sys_mock} - ret = salt.utils.argspec_report(test_functions, 'test_module.test_spec') - self.assertDictEqual(ret, {'test_module.test_spec': - {'kwargs': True, 'args': None, 'defaults': None, 'varargs': True}}) - def test_decode_list(self): test_data = [u'unicode_str', [u'unicode_item_in_list', 'second_item_in_list'], {'dict_key': u'dict_val'}] expected_ret = ['unicode_str', ['unicode_item_in_list', 'second_item_in_list'], {'dict_key': 'dict_val'}] From 7addeb4b9ccae2951489416282564b530b851fab Mon Sep 17 00:00:00 2001 From: Nicole Thomas Date: Wed, 13 Sep 2017 18:35:49 -0400 Subject: [PATCH 499/639] Lint: fix spacing --- salt/modules/ssh.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/salt/modules/ssh.py b/salt/modules/ssh.py index 86689013a8..cba2035534 100644 --- a/salt/modules/ssh.py +++ b/salt/modules/ssh.py @@ -768,14 +768,14 @@ def set_auth_key( try: with salt.utils.files.fopen(fconfig, 'ab+') as _fh: if new_file is False: - # Let's make sure we have a new line at the end of the file - _fh.seek(0,2) - if _fh.tell() > 0: - # File isn't empty, check if last byte is a newline - # If not, add one - _fh.seek(-1,2) - if _fh.read(1) != six.b('\n'): - _fh.write(six.b('\n')) + # Let's make sure we have a new line at the end of the file + _fh.seek(0, 2) + if _fh.tell() > 0: + # File isn't empty, check if last byte is a newline + # If not, add one + _fh.seek(-1, 2) + if _fh.read(1) != six.b('\n'): + _fh.write(six.b('\n')) if six.PY3: auth_line = auth_line.encode(__salt_system_encoding__) _fh.write(auth_line) From 1b5b91b077fe147d6802bc413a862c7576a91380 Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Wed, 13 Sep 2017 18:49:22 -0700 Subject: [PATCH 500/639] Ensure write_cache function in utils/mount.py returns something useful. --- salt/modules/mount.py | 11 ++++++++--- salt/utils/mount.py | 2 ++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/salt/modules/mount.py b/salt/modules/mount.py index fc8c7a5836..c5b85dae5f 100644 --- a/salt/modules/mount.py +++ b/salt/modules/mount.py @@ -1323,8 +1323,11 @@ def write_mount_cache(real_name, 'mkmnt': mkmnt, 'opts': opts} - cache = salt.utils.mount.write_cache(cache, __opts__) - return True + cache_write = salt.utils.mount.write_cache(cache, __opts__) + if cache_write: + return True + else: + raise CommandExecutionError('Unable to write mount cache.') def delete_mount_cache(real_name): @@ -1345,5 +1348,7 @@ def delete_mount_cache(real_name): if 'mounts' in cache: if real_name in cache['mounts']: del cache['mounts'][real_name] - cache = salt.utils.mount.write_cache(cache, __opts__) + cache_write = salt.utils.mount.write_cache(cache, __opts__) + if not cache_write: + raise CommandExecutionError('Unable to write mount cache.') return True diff --git a/salt/utils/mount.py b/salt/utils/mount.py index a08b1caff1..ba782c63d5 100644 --- a/salt/utils/mount.py +++ b/salt/utils/mount.py @@ -67,6 +67,8 @@ def write_cache(cache, opts): ) with salt.utils.files.fopen(cache_file, 'wb+') as fp_: fp_.write(_cache) + return True except (IOError, OSError): log.error('Failed to cache mounts', exc_info_on_loglevel=logging.DEBUG) + return False From 05fe5bed7c4a6af1c9a171e874e72be7df2ae49d Mon Sep 17 00:00:00 2001 From: vernoncole Date: Wed, 13 Sep 2017 20:10:49 -0600 Subject: [PATCH 501/639] preflight_cmds should use passed opts --- salt/utils/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py index d14698a6a4..b013c48f30 100644 --- a/salt/utils/cloud.py +++ b/salt/utils/cloud.py @@ -459,7 +459,7 @@ def bootstrap(vm_, opts=None): 
'wait_for_passwd_maxtries', vm_, opts, default=15 ), 'preflight_cmds': salt.config.get_cloud_config_value( - 'preflight_cmds', vm_, __opts__, default=[] + 'preflight_cmds', vm_, opts, default=[] ), 'cloud_grains': {'driver': vm_['driver'], 'provider': vm_['provider'], From 888c853fc40a265d316b579a3fe3d9befe3223e5 Mon Sep 17 00:00:00 2001 From: Ronald van Zantvoort Date: Thu, 14 Sep 2017 13:40:39 +0200 Subject: [PATCH 502/639] highstate output minor cleanup and code duplication removal --- salt/output/highstate.py | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/salt/output/highstate.py b/salt/output/highstate.py index 60adeb961e..f58fc86f84 100644 --- a/salt/output/highstate.py +++ b/salt/output/highstate.py @@ -242,8 +242,15 @@ def _format_host(host, data): if ret['result'] is None: hcolor = colors['LIGHT_YELLOW'] tcolor = colors['LIGHT_YELLOW'] + + state_output = __opts__.get('state_output', 'full').lower() comps = [sdecode(comp) for comp in tname.split('_|-')] - if __opts__.get('state_output', 'full').lower() == 'filter': + + if state_output == 'mixed_id': + # Swap in the ID for the name. Refs #35137 + comps[2] = comps[1] + + if state_output.startswith('filter'): # By default, full data is shown for all types. However, return # data may be excluded by setting state_output_exclude to a # comma-separated list of True, False or None, or including the @@ -276,28 +283,17 @@ def _format_host(host, data): continue if str(ret['result']) in exclude: continue - elif __opts__.get('state_output', 'full').lower() == 'terse': - # Print this chunk in a terse way and continue in the - # loop - msg = _format_terse(tcolor, comps, ret, colors, tabular) - hstrs.append(msg) - continue - elif __opts__.get('state_output', 'full').lower().startswith('mixed'): - if __opts__['state_output'] == 'mixed_id': - # Swap in the ID for the name. 
Refs #35137 - comps[2] = comps[1] - # Print terse unless it failed - if ret['result'] is not False: - msg = _format_terse(tcolor, comps, ret, colors, tabular) - hstrs.append(msg) - continue - elif __opts__.get('state_output', 'full').lower() == 'changes': - # Print terse if no error and no changes, otherwise, be - # verbose - if ret['result'] and not schanged: + + elif any(( + state_output.startswith('terse'), + state_output.startswith('mixed') and ret['result'] is not False, # only non-error'd + state_output.startswith('changes') and ret['result'] and not schanged # non-error'd non-changed + )): + # Print this chunk in a terse way and continue in the loop msg = _format_terse(tcolor, comps, ret, colors, tabular) hstrs.append(msg) continue + state_lines = [ u'{tcolor}----------{colors[ENDC]}', u' {tcolor} ID: {comps[1]}{colors[ENDC]}', From e1428dec5f68e7546ee8c69a779b1d7bc7247c5f Mon Sep 17 00:00:00 2001 From: Robin Lutz Date: Thu, 14 Sep 2017 14:21:31 +0200 Subject: [PATCH 503/639] corrected some PEP8 problems reported by jenkins --- tests/unit/modules/test_portage_config.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/tests/unit/modules/test_portage_config.py b/tests/unit/modules/test_portage_config.py index bdff520f45..3ed708b0ac 100644 --- a/tests/unit/modules/test_portage_config.py +++ b/tests/unit/modules/test_portage_config.py @@ -23,8 +23,6 @@ import salt.modules.portage_config as portage_config @skipIf(NO_MOCK, NO_MOCK_REASON) class PortageConfigTestCase(TestCase, LoaderModuleMockMixin): class DummyAtom(object): - - def __init__(self): self.cp = None self.repo = None @@ -39,11 +37,11 @@ class PortageConfigTestCase(TestCase, LoaderModuleMockMixin): atom, self.repo = atom.split('::') if '::' in atom else (atom, None) # remove '>, >=, <=, =, ~' etc. 
- atom = re.sub('[<>~+=]', '', atom) + atom = re.sub(r'[<>~+=]', '', atom) # remove slots - atom = re.sub(':[0-9][^:]*', '', atom) + atom = re.sub(r':[0-9][^:]*', '', atom) # remove version - atom = re.sub('-[0-9][\.0-9]*', '', atom) + atom = re.sub(r'-[0-9][\.0-9]*', '', atom) self.cp = atom return self @@ -52,7 +50,7 @@ class PortageConfigTestCase(TestCase, LoaderModuleMockMixin): try: import portage return {} - except: + except ImportError: dummy_atom = self.DummyAtom() self.portage = MagicMock() self.portage.dep.Atom = MagicMock(side_effect=dummy_atom) @@ -121,5 +119,4 @@ class PortageConfigTestCase(TestCase, LoaderModuleMockMixin): for line in fh: self.assertTrue(atom in line, msg="'{}' not in '{}'".format(addition, line)) for addition in additions: - self.assertTrue(addition in line, msg="'{}' not in '{}'".format(addition, line)) - + self.assertTrue(addition in line, msg="'{}' not in '{}'".format(addition, line)) \ No newline at end of file From efb23cd71790759e280a7db29ad90d239a571d80 Mon Sep 17 00:00:00 2001 From: Ronald van Zantvoort Date: Thu, 14 Sep 2017 15:32:32 +0200 Subject: [PATCH 504/639] highstate output pylint fix --- salt/output/highstate.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/output/highstate.py b/salt/output/highstate.py index f58fc86f84..c003cfc32c 100644 --- a/salt/output/highstate.py +++ b/salt/output/highstate.py @@ -288,11 +288,11 @@ def _format_host(host, data): state_output.startswith('terse'), state_output.startswith('mixed') and ret['result'] is not False, # only non-error'd state_output.startswith('changes') and ret['result'] and not schanged # non-error'd non-changed - )): - # Print this chunk in a terse way and continue in the loop - msg = _format_terse(tcolor, comps, ret, colors, tabular) - hstrs.append(msg) - continue + )): + # Print this chunk in a terse way and continue in the loop + msg = _format_terse(tcolor, comps, ret, colors, tabular) + hstrs.append(msg) + continue state_lines = 
[ u'{tcolor}----------{colors[ENDC]}', From aee52a20e0ca58f51047aa90b5870ad12d27b6bb Mon Sep 17 00:00:00 2001 From: Nicole Thomas Date: Thu, 14 Sep 2017 09:48:09 -0400 Subject: [PATCH 505/639] Lint: Add empty line at end of file --- tests/unit/modules/test_portage_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/modules/test_portage_config.py b/tests/unit/modules/test_portage_config.py index 3ed708b0ac..02a76b63d2 100644 --- a/tests/unit/modules/test_portage_config.py +++ b/tests/unit/modules/test_portage_config.py @@ -119,4 +119,4 @@ class PortageConfigTestCase(TestCase, LoaderModuleMockMixin): for line in fh: self.assertTrue(atom in line, msg="'{}' not in '{}'".format(addition, line)) for addition in additions: - self.assertTrue(addition in line, msg="'{}' not in '{}'".format(addition, line)) \ No newline at end of file + self.assertTrue(addition in line, msg="'{}' not in '{}'".format(addition, line)) From dcd3dcc734fd16c9a6069545aa4f6c7ddbc57b19 Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 14 Sep 2017 10:15:20 -0400 Subject: [PATCH 506/639] Lint: Add missing import to utils.args.py --- salt/utils/args.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/salt/utils/args.py b/salt/utils/args.py index a91763fd6e..7d47c52862 100644 --- a/salt/utils/args.py +++ b/salt/utils/args.py @@ -11,11 +11,10 @@ import re import shlex # Import salt libs -import salt.utils.jid from salt.exceptions import SaltInvocationError - -# Import 3rd-party libs from salt.ext import six +from salt.ext.six.moves import zip # pylint: disable=import-error,redefined-builtin +import salt.utils.jid if six.PY3: From e24513f0f367b6f8e84e592ae6390ed715fb7185 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 04:45:44 -0400 Subject: [PATCH 507/639] Added salt.vsphere._get_proxy_target that retrieved the vCenter target from the proxy details --- salt/modules/vsphere.py | 33 +++++++++++++++++++++++++++++++++ 1 file 
changed, 33 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index a24bc0ee51..fe3e2f0750 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4288,6 +4288,39 @@ def add_host_to_dvs(host, username, password, vmknic_name, vmnic_name, return ret +@depends(HAS_PYVMOMI) +@supports_proxies('esxcluster', 'esxdatacenter') +def _get_proxy_target(service_instance): + ''' + Returns the target object of a proxy. + + If the object doesn't exist a VMwareObjectRetrievalError is raised + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + ''' + proxy_type = get_proxy_type() + if not salt.utils.vmware.is_connection_to_a_vcenter(service_instance): + raise CommandExecutionError('\'_get_proxy_target\' not supported ' + 'when connected via the ESXi host') + reference = None + if proxy_type == 'esxcluster': + host, username, password, protocol, port, mechanism, principal, \ + domain, datacenter, cluster = _get_esxcluster_proxy_details() + + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + reference = salt.utils.vmware.get_cluster(dc_ref, cluster) + elif proxy_type == 'esxdatacenter': + # esxdatacenter proxy + host, username, password, protocol, port, mechanism, principal, \ + domain, datacenter = _get_esxdatacenter_proxy_details() + + reference = salt.utils.vmware.get_datacenter(service_instance, + datacenter) + log.trace('reference = {0}'.format(reference)) + return reference + + def _get_esxdatacenter_proxy_details(): ''' Returns the running esxdatacenter's proxy details From 15a2499d278ebe8d17aa9bfa86a5988bde7905b0 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 04:47:22 -0400 Subject: [PATCH 508/639] Added tests for salt.vsphere._get_proxy_target --- tests/unit/modules/test_vsphere.py | 87 ++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) diff --git a/tests/unit/modules/test_vsphere.py b/tests/unit/modules/test_vsphere.py index 
af5f241cdc..3162251f5b 100644 --- a/tests/unit/modules/test_vsphere.py +++ b/tests/unit/modules/test_vsphere.py @@ -1151,3 +1151,90 @@ class CreateDatacenterTestCase(TestCase, LoaderModuleMockMixin): def test_returned_value(self): res = vsphere.create_datacenter('fake_dc1') self.assertEqual(res, {'create_datacenter': True}) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +class _GetProxyTargetTestCase(TestCase, LoaderModuleMockMixin): + '''Tests for salt.modules.vsphere._get_proxy_target''' + def setup_loader_modules(self): + return { + vsphere: { + '__virtual__': MagicMock(return_value='vsphere'), + '_get_proxy_connection_details': MagicMock(), + 'get_proxy_type': MagicMock(return_value='esxdatacenter') + } + } + + def setUp(self): + attrs = (('mock_si', MagicMock()), + ('mock_dc', MagicMock()), + ('mock_cl', MagicMock())) + for attr, mock_obj in attrs: + setattr(self, attr, mock_obj) + self.addCleanup(delattr, self, attr) + attrs = (('mock_get_datacenter', MagicMock(return_value=self.mock_dc)), + ('mock_get_cluster', MagicMock(return_value=self.mock_cl))) + for attr, mock_obj in attrs: + setattr(self, attr, mock_obj) + self.addCleanup(delattr, self, attr) + patches = ( + ('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value='esxcluster')), + ('salt.utils.vmware.is_connection_to_a_vcenter', + MagicMock(return_value=True)), + ('salt.modules.vsphere._get_esxcluster_proxy_details', + MagicMock(return_value=(None, None, None, None, None, None, None, + None, 'datacenter', 'cluster'))), + ('salt.modules.vsphere._get_esxdatacenter_proxy_details', + MagicMock(return_value=(None, None, None, None, None, None, None, + None, 'datacenter'))), + ('salt.utils.vmware.get_datacenter', self.mock_get_datacenter), + ('salt.utils.vmware.get_cluster', self.mock_get_cluster)) + for module, mock_obj in patches: + patcher = patch(module, mock_obj) + patcher.start() + self.addCleanup(patcher.stop) + + def test_supported_proxies(self): + supported_proxies = ['esxcluster', 'esxdatacenter'] 
+ for proxy_type in supported_proxies: + with patch('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value=proxy_type)): + vsphere._get_proxy_target(self.mock_si) + + def test_connected_to_esxi(self): + with patch('salt.utils.vmware.is_connection_to_a_vcenter', + MagicMock(return_value=False)): + with self.assertRaises(CommandExecutionError) as excinfo: + vsphere._get_proxy_target(self.mock_si) + self.assertEqual(excinfo.exception.strerror, + '\'_get_proxy_target\' not supported when ' + 'connected via the ESXi host') + + def test_get_cluster_call(self): + #with patch('salt.modules.vsphere.get_proxy_type', + # MagicMock(return_value='esxcluster')): + vsphere._get_proxy_target(self.mock_si) + self.mock_get_datacenter.assert_called_once_with(self.mock_si, + 'datacenter') + self.mock_get_cluster.assert_called_once_with(self.mock_dc, 'cluster') + + def test_esxcluster_proxy_return(self): + with patch('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value='esxcluster')): + ret = vsphere._get_proxy_target(self.mock_si) + self.assertEqual(ret, self.mock_cl) + + def test_get_datacenter_call(self): + with patch('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value='esxdatacenter')): + vsphere._get_proxy_target(self.mock_si) + self.mock_get_datacenter.assert_called_once_with(self.mock_si, + 'datacenter') + self.assertEqual(self.mock_get_cluster.call_count, 0) + + def test_esxdatacenter_proxy_return(self): + with patch('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value='esxdatacenter')): + ret = vsphere._get_proxy_target(self.mock_si) + self.assertEqual(ret, self.mock_dc) From e6ab4d5690f2ffd70f8f3e5b7ad7d8b843bb01e3 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 04:50:51 -0400 Subject: [PATCH 509/639] Added modules.vsphere._get_cluster_dict that converts a cluster VMware object to a dictionary --- salt/modules/vsphere.py | 82 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git 
a/salt/modules/vsphere.py b/salt/modules/vsphere.py index fe3e2f0750..4419b55722 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -3661,6 +3661,88 @@ def create_datacenter(datacenter_name, service_instance=None): return {'create_datacenter': True} +def _get_cluster_dict(cluster_name, cluster_ref): + ''' + Returns a cluster dict representation from + a vim.ClusterComputeResource object. + + cluster_name + Name of the cluster + + cluster_ref + Reference to the cluster + ''' + + log.trace('Building a dictionary representation of cluster ' + '\'{0}\''.format(cluster_name)) + props = salt.utils.vmware.get_properties_of_managed_object( + cluster_ref, + properties=['configurationEx']) + res = {'ha': {'enabled': props['configurationEx'].dasConfig.enabled}, + 'drs': {'enabled': props['configurationEx'].drsConfig.enabled}} + # Convert HA properties of interest + ha_conf = props['configurationEx'].dasConfig + log.trace('ha_conf = {0}'.format(ha_conf)) + res['ha']['admission_control_enabled'] = ha_conf.admissionControlEnabled + if ha_conf.admissionControlPolicy and \ + isinstance(ha_conf.admissionControlPolicy, + vim.ClusterFailoverResourcesAdmissionControlPolicy): + pol = ha_conf.admissionControlPolicy + res['ha']['admission_control_policy'] = \ + {'cpu_failover_percent': pol.cpuFailoverResourcesPercent, + 'memory_failover_percent': pol.memoryFailoverResourcesPercent} + if ha_conf.defaultVmSettings: + def_vm_set = ha_conf.defaultVmSettings + res['ha']['default_vm_settings'] = \ + {'isolation_response': def_vm_set.isolationResponse, + 'restart_priority': def_vm_set.restartPriority} + res['ha']['hb_ds_candidate_policy'] = \ + ha_conf.hBDatastoreCandidatePolicy + if ha_conf.hostMonitoring: + res['ha']['host_monitoring'] = ha_conf.hostMonitoring + if ha_conf.option: + res['ha']['options'] = [{'key': o.key, 'value': o.value} + for o in ha_conf.option] + res['ha']['vm_monitoring'] = ha_conf.vmMonitoring + # Convert DRS properties + drs_conf = 
props['configurationEx'].drsConfig + log.trace('drs_conf = {0}'.format(drs_conf)) + res['drs']['vmotion_rate'] = 6 - drs_conf.vmotionRate + res['drs']['default_vm_behavior'] = drs_conf.defaultVmBehavior + # vm_swap_placement + res['vm_swap_placement'] = props['configurationEx'].vmSwapPlacement + # Convert VSAN properties + si = salt.utils.vmware.get_service_instance_from_managed_object( + cluster_ref) + + if salt.utils.vsan.vsan_supported(si): + # XXX The correct way of retrieving the VSAN data (on the if branch) + # is not supported before 60u2 vcenter + vcenter_info = salt.utils.vmware.get_service_info(si) + if int(vcenter_info.build) >= 3634794: # 60u2 + # VSAN API is fully supported by the VC starting with 60u2 + vsan_conf = salt.utils.vsan.get_cluster_vsan_info(cluster_ref) + log.trace('vsan_conf = {0}'.format(vsan_conf)) + res['vsan'] = {'enabled': vsan_conf.enabled, + 'auto_claim_storage': + vsan_conf.defaultConfig.autoClaimStorage} + if vsan_conf.dataEfficiencyConfig: + data_eff = vsan_conf.dataEfficiencyConfig + res['vsan'].update({ + # We force compression_enabled to be True/False + 'compression_enabled': + data_eff.compressionEnabled or False, + 'dedup_enabled': data_eff.dedupEnabled}) + else: # before 60u2 (no advanced vsan info) + if props['configurationEx'].vsanConfigInfo: + default_config = \ + props['configurationEx'].vsanConfigInfo.defaultConfig + res['vsan'] = { + 'enabled': props['configurationEx'].vsanConfigInfo.enabled, + 'auto_claim_storage': default_config.autoClaimStorage} + return res + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From 75abf0a2a9cd65e2478075746777e93c699a66dd Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 14 Sep 2017 11:01:06 -0400 Subject: [PATCH 510/639] Test fix: update mock util path --- tests/unit/modules/test_genesis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/modules/test_genesis.py 
b/tests/unit/modules/test_genesis.py index 0ab9eb03d9..ae9f3e7b16 100644 --- a/tests/unit/modules/test_genesis.py +++ b/tests/unit/modules/test_genesis.py @@ -93,7 +93,7 @@ class GenesisTestCase(TestCase, LoaderModuleMockMixin): 'file.directory_exists': MagicMock(), 'cmd.run': MagicMock(), 'disk.blkid': MagicMock(return_value={})}): - with patch('salt.modules.genesis.salt.utils.which', return_value=True): + with patch('salt.modules.genesis.salt.utils.path.which', return_value=True): with patch('salt.modules.genesis.salt.utils.validate.path.is_executable', return_value=True): param_set['params'].update(common_parms) From 728690b783e5eeb9663c747f27c24d2b07dc60fe Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 04:52:00 -0400 Subject: [PATCH 511/639] Added modules.vsphere.list_cluster that lists a dict representation of a VMware cluster --- salt/modules/vsphere.py | 50 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 4419b55722..99289326ef 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -176,7 +176,9 @@ import salt.utils.dictupdate as dictupdate import salt.utils.http import salt.utils.path import salt.utils.vmware -from salt.exceptions import CommandExecutionError, VMwareSaltError +import salt.utils.vsan +from salt.exceptions import CommandExecutionError, VMwareSaltError, \ + ArgumentValueError from salt.utils.decorators import depends, ignores_kwargs # Import Third Party Libs @@ -3743,6 +3745,52 @@ def _get_cluster_dict(cluster_name, cluster_ref): return res +@depends(HAS_PYVMOMI) +@supports_proxies('esxcluster', 'esxdatacenter') +@gets_service_instance_via_proxy +def list_cluster(datacenter=None, cluster=None, service_instance=None): + ''' + Returns a dict representation of an ESX cluster. + + datacenter + Name of datacenter containing the cluster. + Ignored if already contained by proxy details. 
+ Default value is None. + + cluster + Name of cluster. + Ignored if already contained by proxy details. + Default value is None. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. code-block:: bash + + # vcenter proxy + salt '*' vsphere.list_cluster datacenter=dc1 cluster=cl1 + + # esxdatacenter proxy + salt '*' vsphere.list_cluster cluster=cl1 + + # esxcluster proxy + salt '*' vsphere.list_cluster + ''' + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + dc_ref = _get_proxy_target(service_instance) + if not cluster: + raise ArgumentValueError('\'cluster\' needs to be specified') + cluster_ref = salt.utils.vmware.get_cluster(dc_ref, cluster) + elif proxy_type == 'esxcluster': + cluster_ref = _get_proxy_target(service_instance) + cluster = __salt__['esxcluster.get_details']()['cluster'] + log.trace('Retrieving representation of cluster \'{0}\' in a ' + '{1} proxy'.format(cluster, proxy_type)) + return _get_cluster_dict(cluster, cluster_ref) + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From c3d6af502b3c9e17688915af08b84300c0b43818 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 04:53:18 -0400 Subject: [PATCH 512/639] Added tests for modules.vsphere.list_cluster --- tests/unit/modules/test_vsphere.py | 92 +++++++++++++++++++++++++++++- 1 file changed, 91 insertions(+), 1 deletion(-) diff --git a/tests/unit/modules/test_vsphere.py b/tests/unit/modules/test_vsphere.py index 3162251f5b..ce9f813094 100644 --- a/tests/unit/modules/test_vsphere.py +++ b/tests/unit/modules/test_vsphere.py @@ -11,7 +11,8 @@ from __future__ import absolute_import # Import Salt Libs import salt.modules.vsphere as vsphere -from salt.exceptions import CommandExecutionError, VMwareSaltError +from salt.exceptions import CommandExecutionError, VMwareSaltError, \ + ArgumentValueError # Import Salt Testing Libs 
from tests.support.mixins import LoaderModuleMockMixin @@ -1153,6 +1154,95 @@ class CreateDatacenterTestCase(TestCase, LoaderModuleMockMixin): self.assertEqual(res, {'create_datacenter': True}) +@skipIf(NO_MOCK, NO_MOCK_REASON) +class ListClusterTestCase(TestCase, LoaderModuleMockMixin): + '''Tests for salt.modules.vsphere.list_cluster''' + def setup_loader_modules(self): + return { + vsphere: { + '__virtual__': MagicMock(return_value='vsphere'), + '_get_proxy_connection_details': MagicMock(), + '__salt__': {} + } + } + + def setUp(self): + attrs = (('mock_si', MagicMock()), + ('mock_dc', MagicMock()), + ('mock_cl', MagicMock()), + ('mock__get_cluster_dict', MagicMock())) + for attr, mock_obj in attrs: + setattr(self, attr, mock_obj) + self.addCleanup(delattr, self, attr) + attrs = (('mock_get_cluster', MagicMock(return_value=self.mock_cl)),) + for attr, mock_obj in attrs: + setattr(self, attr, mock_obj) + self.addCleanup(delattr, self, attr) + patches = ( + ('salt.utils.vmware.get_service_instance', + MagicMock(return_value=self.mock_si)), + ('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value='esxcluster')), + ('salt.modules.vsphere._get_proxy_target', + MagicMock(return_value=self.mock_cl)), + ('salt.utils.vmware.get_cluster', self.mock_get_cluster), + ('salt.modules.vsphere._get_cluster_dict', + self.mock__get_cluster_dict)) + for module, mock_obj in patches: + patcher = patch(module, mock_obj) + patcher.start() + self.addCleanup(patcher.stop) + # Patch __salt__ dunder + patcher = patch.dict(vsphere.__salt__, + {'esxcluster.get_details': + MagicMock(return_value={'cluster': 'cl'})}) + patcher.start() + self.addCleanup(patcher.stop) + + def test_supported_proxies(self): + supported_proxies = ['esxcluster', 'esxdatacenter'] + for proxy_type in supported_proxies: + with patch('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value=proxy_type)): + vsphere.list_cluster(cluster='cl') + + def test_default_service_instance(self): + 
mock__get_proxy_target = MagicMock() + with patch('salt.modules.vsphere._get_proxy_target', + mock__get_proxy_target): + vsphere.list_cluster() + mock__get_proxy_target.assert_called_once_with(self.mock_si) + + def test_defined_service_instance(self): + mock_si = MagicMock() + mock__get_proxy_target = MagicMock() + with patch('salt.modules.vsphere._get_proxy_target', + mock__get_proxy_target): + vsphere.list_cluster(service_instance=mock_si) + mock__get_proxy_target.assert_called_once_with(mock_si) + + def test_no_cluster_raises_argument_value_error(self): + with patch('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value='esxdatacenter')): + with patch('salt.modules.vsphere._get_proxy_target', MagicMock()): + with self.assertRaises(ArgumentValueError) as excinfo: + vsphere.list_cluster() + self.assertEqual(excinfo.exception.strerror, + '\'cluster\' needs to be specified') + + def test_get_cluster_call(self): + with patch('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value='esxdatacenter')): + with patch('salt.modules.vsphere._get_proxy_target', + MagicMock(return_value=self.mock_dc)): + vsphere.list_cluster(cluster='cl') + self.mock_get_cluster.assert_called_once_with(self.mock_dc, 'cl') + + def test__get_cluster_dict_call(self): + vsphere.list_cluster() + self.mock__get_cluster_dict.assert_called_once_with('cl', self.mock_cl) + + @skipIf(NO_MOCK, NO_MOCK_REASON) class _GetProxyTargetTestCase(TestCase, LoaderModuleMockMixin): '''Tests for salt.modules.vsphere._get_proxy_target''' From 324883993f846dd48f59c93101f74ac7ad5911e6 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 05:14:36 -0400 Subject: [PATCH 513/639] Added external VMware VSAN libraries --- salt/ext/vsan/__init__.py | 7 ++ salt/ext/vsan/vsanapiutils.py | 165 +++++++++++++++++++++++++++++++ salt/ext/vsan/vsanmgmtObjects.py | 142 ++++++++++++++++++++++++++ 3 files changed, 314 insertions(+) create mode 100644 salt/ext/vsan/__init__.py create mode 100644 
salt/ext/vsan/vsanapiutils.py create mode 100644 salt/ext/vsan/vsanmgmtObjects.py diff --git a/salt/ext/vsan/__init__.py b/salt/ext/vsan/__init__.py new file mode 100644 index 0000000000..84c0a7eb58 --- /dev/null +++ b/salt/ext/vsan/__init__.py @@ -0,0 +1,7 @@ +# coding: utf-8 -*- +''' +This directory contains the object model and utils for the vsan VMware SDK +extension. + +They are governed under their respective licenses. +''' diff --git a/salt/ext/vsan/vsanapiutils.py b/salt/ext/vsan/vsanapiutils.py new file mode 100644 index 0000000000..2e4c79a498 --- /dev/null +++ b/salt/ext/vsan/vsanapiutils.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright 2016 VMware, Inc. All rights reserved. + +This module defines basic helper functions used in the sampe codes +""" + +__author__ = 'VMware, Inc' + +from pyVmomi import vim, vmodl, SoapStubAdapter +#import the VSAN API python bindings +import vsanmgmtObjects + +VSAN_API_VC_SERVICE_ENDPOINT = '/vsanHealth' +VSAN_API_ESXI_SERVICE_ENDPOINT = '/vsan' + +#Constuct a stub for VSAN API access using VC or ESXi sessions from existing +#stubs. Correspoding VC or ESXi service endpoint is required. 
VC service +#endpoint is used as default +def _GetVsanStub( + stub, endpoint=VSAN_API_VC_SERVICE_ENDPOINT, + context=None, version='vim.version.version10' + ): + + hostname = stub.host.split(':')[0] + vsanStub = SoapStubAdapter( + host=hostname, + path=endpoint, + version=version, + sslContext=context + ) + vsanStub.cookie = stub.cookie + return vsanStub + +#Construct a stub for access VC side VSAN APIs +def GetVsanVcStub(stub, context=None): + return _GetVsanStub(stub, endpoint=VSAN_API_VC_SERVICE_ENDPOINT, + context=context) + +#Construct a stub for access ESXi side VSAN APIs +def GetVsanEsxStub(stub, context=None): + return _GetVsanStub(stub, endpoint=VSAN_API_ESXI_SERVICE_ENDPOINT, + context=context) + +#Construct a stub for access ESXi side VSAN APIs +def GetVsanVcMos(vcStub, context=None): + vsanStub = GetVsanVcStub(vcStub, context) + vcMos = { + 'vsan-disk-management-system' : vim.cluster.VsanVcDiskManagementSystem( + 'vsan-disk-management-system', + vsanStub + ), + 'vsan-stretched-cluster-system' : vim.cluster.VsanVcStretchedClusterSystem( + 'vsan-stretched-cluster-system', + vsanStub + ), + 'vsan-cluster-config-system' : vim.cluster.VsanVcClusterConfigSystem( + 'vsan-cluster-config-system', + vsanStub + ), + 'vsan-performance-manager' : vim.cluster.VsanPerformanceManager( + 'vsan-performance-manager', + vsanStub + ), + 'vsan-cluster-health-system' : vim.cluster.VsanVcClusterHealthSystem( + 'vsan-cluster-health-system', + vsanStub + ), + 'vsan-upgrade-systemex' : vim.VsanUpgradeSystemEx( + 'vsan-upgrade-systemex', + vsanStub + ), + 'vsan-cluster-space-report-system' : vim.cluster.VsanSpaceReportSystem( + 'vsan-cluster-space-report-system', + vsanStub + ), + + 'vsan-cluster-object-system' : vim.cluster.VsanObjectSystem( + 'vsan-cluster-object-system', + vsanStub + ), + } + + return vcMos + +#Construct a stub for access ESXi side VSAN APIs +def GetVsanEsxMos(esxStub, context=None): + vsanStub = GetVsanEsxStub(esxStub, context) + esxMos = { + 
'vsan-performance-manager' : vim.cluster.VsanPerformanceManager( + 'vsan-performance-manager', + vsanStub + ), + 'ha-vsan-health-system' : vim.host.VsanHealthSystem( + 'ha-vsan-health-system', + vsanStub + ), + 'vsan-object-system' : vim.cluster.VsanObjectSystem( + 'vsan-object-system', + vsanStub + ), + } + + return esxMos + +#Convert a VSAN Task to a Task MO binding to VC service +#@param vsanTask the VSAN Task MO +#@param stub the stub for the VC API +def ConvertVsanTaskToVcTask(vsanTask, vcStub): + vcTask = vim.Task(vsanTask._moId, vcStub) + return vcTask + +def WaitForTasks(tasks, si): + """ + Given the service instance si and tasks, it returns after all the + tasks are complete + """ + + pc = si.content.propertyCollector + + taskList = [str(task) for task in tasks] + + # Create filter + objSpecs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task) + for task in tasks] + propSpec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task, + pathSet=[], all=True) + filterSpec = vmodl.query.PropertyCollector.FilterSpec() + filterSpec.objectSet = objSpecs + filterSpec.propSet = [propSpec] + filter = pc.CreateFilter(filterSpec, True) + + try: + version, state = None, None + + # Loop looking for updates till the state moves to a completed state. 
+ while len(taskList): + update = pc.WaitForUpdates(version) + for filterSet in update.filterSet: + for objSet in filterSet.objectSet: + task = objSet.obj + for change in objSet.changeSet: + if change.name == 'info': + state = change.val.state + elif change.name == 'info.state': + state = change.val + else: + continue + + if not str(task) in taskList: + continue + + if state == vim.TaskInfo.State.success: + # Remove task from taskList + taskList.remove(str(task)) + elif state == vim.TaskInfo.State.error: + raise task.info.error + # Move to next version + version = update.version + finally: + if filter: + filter.Destroy() + diff --git a/salt/ext/vsan/vsanmgmtObjects.py b/salt/ext/vsan/vsanmgmtObjects.py new file mode 100644 index 0000000000..ebad265adb --- /dev/null +++ b/salt/ext/vsan/vsanmgmtObjects.py @@ -0,0 +1,142 @@ +from pyVmomi.VmomiSupport import CreateDataType, CreateManagedType, CreateEnumType, AddVersion, AddVersionParent, F_LINK, F_LINKABLE, F_OPTIONAL + +CreateManagedType('vim.cluster.VsanPerformanceManager', 'VsanPerformanceManager', 'vmodl.ManagedObject', 'vim.version.version9', [], [('setStatsObjectPolicy', 'VsanPerfSetStatsObjectPolicy', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('profile', 'vim.vm.ProfileSpec', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('deleteStatsObject', 'VsanPerfDeleteStatsObject', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('createStatsObjectTask', 'VsanPerfCreateStatsObjectTask', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('profile', 'vim.vm.ProfileSpec', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('deleteStatsObjectTask', 'VsanPerfDeleteStatsObjectTask', 
'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('queryClusterHealth', 'VsanPerfQueryClusterHealth', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'vmodl.DynamicData[]', 'vmodl.DynamicData[]'), 'System.Read', None), ('queryStatsObjectInformation', 'VsanPerfQueryStatsObjectInformation', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanObjectInformation', 'vim.cluster.VsanObjectInformation'), 'System.Read', None), ('queryNodeInformation', 'VsanPerfQueryNodeInformation', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanPerfNodeInformation[]', 'vim.cluster.VsanPerfNodeInformation[]'), 'System.Read', None), ('queryVsanPerf', 'VsanPerfQueryPerf', 'vim.version.version9', (('querySpecs', 'vim.cluster.VsanPerfQuerySpec[]', 'vim.version.version9', 0, None), ('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanPerfEntityMetricCSV[]', 'vim.cluster.VsanPerfEntityMetricCSV[]'), 'System.Read', None), ('getSupportedEntityTypes', 'VsanPerfGetSupportedEntityTypes', 'vim.version.version9', tuple(), (0 | F_OPTIONAL, 'vim.cluster.VsanPerfEntityType[]', 'vim.cluster.VsanPerfEntityType[]'), 'System.Read', None), ('createStatsObject', 'VsanPerfCreateStatsObject', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('profile', 'vim.vm.ProfileSpec', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'string', 'string'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanVcDiskManagementSystem', 'VimClusterVsanVcDiskManagementSystem', 'vmodl.ManagedObject', 'vim.version.version10', [], [('initializeDiskMappings', 
'InitializeDiskMappings', 'vim.version.version10', (('spec', 'vim.vsan.host.DiskMappingCreationSpec', 'vim.version.version10', 0, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('retrieveAllFlashCapabilities', 'RetrieveAllFlashCapabilities', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0 | F_OPTIONAL, 'vim.vsan.host.VsanHostCapability[]', 'vim.vsan.host.VsanHostCapability[]'), 'System.Read', None), ('queryDiskMappings', 'QueryDiskMappings', 'vim.version.version10', (('host', 'vim.HostSystem', 'vim.version.version10', 0, None), ), (0 | F_OPTIONAL, 'vim.vsan.host.DiskMapInfoEx[]', 'vim.vsan.host.DiskMapInfoEx[]'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanObjectSystem', 'VsanObjectSystem', 'vmodl.ManagedObject', 'vim.version.version9', [], [('setVsanObjectPolicy', 'VosSetVsanObjectPolicy', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('vsanObjectUuid', 'string', 'vim.version.version9', 0, None), ('profile', 'vim.vm.ProfileSpec', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('queryObjectIdentities', 'VsanQueryObjectIdentities', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('objUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeHealth', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeObjIdentity', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeSpaceSummary', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanObjectIdentityAndHealth', 'vim.cluster.VsanObjectIdentityAndHealth'), 'System.Read', None), ('queryVsanObjectInformation', 'VosQueryVsanObjectInformation', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('vsanObjectQuerySpecs', 
'vim.cluster.VsanObjectQuerySpec[]', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanObjectInformation[]', 'vim.cluster.VsanObjectInformation[]'), 'System.Read', None), ]) +CreateManagedType('vim.host.VsanStretchedClusterSystem', 'VimHostVsanStretchedClusterSystem', 'vmodl.ManagedObject', 'vim.version.version10', [], [('getStretchedClusterInfoFromCmmds', 'VSANHostGetStretchedClusterInfoFromCmmds', 'vim.version.version10', tuple(), (0 | F_OPTIONAL, 'vim.host.VSANStretchedClusterHostInfo[]', 'vim.host.VSANStretchedClusterHostInfo[]'), 'System.Read', None), ('witnessJoinVsanCluster', 'VSANWitnessJoinVsanCluster', 'vim.version.version10', (('clusterUuid', 'string', 'vim.version.version10', 0, None), ('preferredFd', 'string', 'vim.version.version10', 0, None), ('disableVsanAllowed', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'void', 'void'), 'System.Read', None), ('witnessSetPreferredFaultDomain', 'VSANWitnessSetPreferredFaultDomain', 'vim.version.version10', (('preferredFd', 'string', 'vim.version.version10', 0, None), ), (0, 'void', 'void'), 'System.Read', None), ('addUnicastAgent', 'VSANHostAddUnicastAgent', 'vim.version.version10', (('witnessAddress', 'string', 'vim.version.version10', 0, None), ('witnessPort', 'int', 'vim.version.version10', 0 | F_OPTIONAL, None), ('overwrite', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'void', 'void'), 'System.Read', None), ('clusterGetPreferredFaultDomain', 'VSANClusterGetPreferredFaultDomain', 'vim.version.version10', tuple(), (0 | F_OPTIONAL, 'vim.host.VSANCmmdsPreferredFaultDomainInfo', 'vim.host.VSANCmmdsPreferredFaultDomainInfo'), 'System.Read', None), ('witnessLeaveVsanCluster', 'VSANWitnessLeaveVsanCluster', 'vim.version.version10', tuple(), (0, 'void', 'void'), 'System.Read', None), ('getStretchedClusterCapability', 'VSANHostGetStretchedClusterCapability', 'vim.version.version10', tuple(), (0, 'vim.host.VSANStretchedClusterHostCapability', 
'vim.host.VSANStretchedClusterHostCapability'), 'System.Read', None), ('removeUnicastAgent', 'VSANHostRemoveUnicastAgent', 'vim.version.version10', (('witnessAddress', 'string', 'vim.version.version10', 0, None), ('ignoreExistence', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'void', 'void'), 'System.Read', None), ('listUnicastAgent', 'VSANHostListUnicastAgent', 'vim.version.version10', tuple(), (0, 'string', 'string'), 'System.Read', None), ]) +CreateManagedType('vim.VsanUpgradeSystemEx', 'VsanUpgradeSystemEx', 'vmodl.ManagedObject', 'vim.version.version10', [], [('performUpgrade', 'PerformVsanUpgradeEx', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('performObjectUpgrade', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ('downgradeFormat', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ('allowReducedRedundancy', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ('excludeHosts', 'vim.HostSystem[]', 'vim.version.version10', 0 | F_OPTIONAL, None), ('spec', 'vim.cluster.VsanDiskFormatConversionSpec', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('performUpgradePreflightCheck', 'PerformVsanUpgradePreflightCheckEx', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('downgradeFormat', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ('spec', 'vim.cluster.VsanDiskFormatConversionSpec', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanDiskFormatConversionCheckResult', 'vim.cluster.VsanDiskFormatConversionCheckResult'), 'System.Read', None), ('retrieveSupportedFormatVersion', 'RetrieveSupportedVsanFormatVersion', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0, 'int', 'int'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanCapabilitySystem', 
'VsanCapabilitySystem', 'vmodl.ManagedObject', 'vim.version.version10', [], [('getCapabilities', 'VsanGetCapabilities', 'vim.version.version10', (('targets', 'vmodl.ManagedObject[]', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanCapability[]', 'vim.cluster.VsanCapability[]'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanSpaceReportSystem', 'VsanSpaceReportSystem', 'vmodl.ManagedObject', 'vim.version.version9', [], [('querySpaceUsage', 'VsanQuerySpaceUsage', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanSpaceUsage', 'vim.cluster.VsanSpaceUsage'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanVcClusterConfigSystem', 'VsanVcClusterConfigSystem', 'vmodl.ManagedObject', 'vim.version.version10', [], [('getConfigInfoEx', 'VsanClusterGetConfig', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0, 'vim.vsan.ConfigInfoEx', 'vim.vsan.ConfigInfoEx'), 'System.Read', None), ('reconfigureEx', 'VsanClusterReconfig', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('vsanReconfigSpec', 'vim.vsan.ReconfigSpec', 'vim.version.version10', 0, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ]) +CreateManagedType('vim.host.VsanHealthSystem', 'HostVsanHealthSystem', 'vmodl.ManagedObject', 'vim.version.version9', [], [('queryAdvCfg', 'VsanHostQueryAdvCfg', 'vim.version.version9', (('options', 'string[]', 'vim.version.version9', 0, None), ), (0, 'vim.option.OptionValue[]', 'vim.option.OptionValue[]'), 'System.Read', None), ('queryPhysicalDiskHealthSummary', 'VsanHostQueryPhysicalDiskHealthSummary', 'vim.version.version9', tuple(), (0, 'vim.host.VsanPhysicalDiskHealthSummary', 'vim.host.VsanPhysicalDiskHealthSummary'), 'System.Read', None), ('startProactiveRebalance', 'VsanStartProactiveRebalance', 'vim.version.version9', (('timeSpan', 'int', 
'vim.version.version9', 0 | F_OPTIONAL, None), ('varianceThreshold', 'float', 'vim.version.version9', 0 | F_OPTIONAL, None), ('timeThreshold', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('rateThreshold', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('queryHostInfoByUuids', 'VsanHostQueryHostInfoByUuids', 'vim.version.version9', (('uuids', 'string[]', 'vim.version.version9', 0, None), ), (0, 'vim.host.VsanQueryResultHostInfo[]', 'vim.host.VsanQueryResultHostInfo[]'), 'System.Read', None), ('queryVersion', 'VsanHostQueryHealthSystemVersion', 'vim.version.version9', tuple(), (0, 'string', 'string'), 'System.Read', None), ('queryVerifyNetworkSettings', 'VsanHostQueryVerifyNetworkSettings', 'vim.version.version9', (('peers', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.host.VsanNetworkHealthResult', 'vim.host.VsanNetworkHealthResult'), 'System.Read', None), ('queryRunIperfClient', 'VsanHostQueryRunIperfClient', 'vim.version.version9', (('multicast', 'boolean', 'vim.version.version9', 0, None), ('serverIp', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.host.VsanNetworkLoadTestResult', 'vim.host.VsanNetworkLoadTestResult'), 'System.Read', None), ('runVmdkLoadTest', 'VsanHostRunVmdkLoadTest', 'vim.version.version9', (('runname', 'string', 'vim.version.version9', 0, None), ('durationSec', 'int', 'vim.version.version9', 0, None), ('specs', 'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0, None), ), (0, 'vim.host.VsanVmdkLoadTestResult[]', 'vim.host.VsanVmdkLoadTestResult[]'), 'System.Read', None), ('queryObjectHealthSummary', 'VsanHostQueryObjectHealthSummary', 'vim.version.version9', (('objUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeObjUuids', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('localHostOnly', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.host.VsanObjectOverallHealth', 
'vim.host.VsanObjectOverallHealth'), 'System.Read', None), ('getHclInfo', 'VsanGetHclInfo', 'vim.version.version9', tuple(), (0, 'vim.host.VsanHostHclInfo', 'vim.host.VsanHostHclInfo'), 'System.Read', None), ('cleanupVmdkLoadTest', 'VsanHostCleanupVmdkLoadTest', 'vim.version.version9', (('runname', 'string', 'vim.version.version9', 0, None), ('specs', 'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'string', 'string'), 'System.Read', None), ('waitForVsanHealthGenerationIdChange', 'VsanWaitForVsanHealthGenerationIdChange', 'vim.version.version9', (('timeout', 'int', 'vim.version.version9', 0, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('stopProactiveRebalance', 'VsanStopProactiveRebalance', 'vim.version.version9', tuple(), (0, 'boolean', 'boolean'), 'System.Read', None), ('repairImmediateObjects', 'VsanHostRepairImmediateObjects', 'vim.version.version9', (('uuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('repairType', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.host.VsanRepairObjectsResult', 'vim.host.VsanRepairObjectsResult'), 'System.Read', None), ('prepareVmdkLoadTest', 'VsanHostPrepareVmdkLoadTest', 'vim.version.version9', (('runname', 'string', 'vim.version.version9', 0, None), ('specs', 'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0, None), ), (0, 'string', 'string'), 'System.Read', None), ('queryRunIperfServer', 'VsanHostQueryRunIperfServer', 'vim.version.version9', (('multicast', 'boolean', 'vim.version.version9', 0, None), ('serverIp', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.host.VsanNetworkLoadTestResult', 'vim.host.VsanNetworkLoadTestResult'), 'System.Read', None), ('queryCheckLimits', 'VsanHostQueryCheckLimits', 'vim.version.version9', tuple(), (0, 'vim.host.VsanLimitHealthResult', 'vim.host.VsanLimitHealthResult'), 'System.Read', None), ('getProactiveRebalanceInfo', 'VsanGetProactiveRebalanceInfo', 
'vim.version.version9', tuple(), (0, 'vim.host.VsanProactiveRebalanceInfoEx', 'vim.host.VsanProactiveRebalanceInfoEx'), 'System.Read', None), ('checkClomdLiveness', 'VsanHostClomdLiveness', 'vim.version.version9', tuple(), (0, 'boolean', 'boolean'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanVcClusterHealthSystem', 'VsanVcClusterHealthSystem', 'vmodl.ManagedObject', 'vim.version.version9', [], [('queryClusterCreateVmHealthHistoryTest', 'VsanQueryVcClusterCreateVmHealthHistoryTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('count', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanClusterCreateVmHealthTestResult[]', 'vim.cluster.VsanClusterCreateVmHealthTestResult[]'), 'System.Read', None), ('setLogLevel', 'VsanHealthSetLogLevel', 'vim.version.version9', (('level', 'vim.cluster.VsanHealthLogLevelEnum', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'void', 'void'), 'System.Read', None), ('testVsanClusterTelemetryProxy', 'VsanHealthTestVsanClusterTelemetryProxy', 'vim.version.version9', (('proxyConfig', 'vim.cluster.VsanClusterTelemetryProxyConfig', 'vim.version.version9', 0, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('uploadHclDb', 'VsanVcUploadHclDb', 'vim.version.version9', (('db', 'string', 'vim.version.version9', 0, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('updateHclDbFromWeb', 'VsanVcUpdateHclDbFromWeb', 'vim.version.version9', (('url', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('repairClusterObjectsImmediate', 'VsanHealthRepairClusterObjectsImmediate', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('uuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('queryClusterNetworkPerfTest', 
'VsanQueryVcClusterNetworkPerfTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('multicast', 'boolean', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterNetworkLoadTestResult', 'vim.cluster.VsanClusterNetworkLoadTestResult'), 'System.Read', None), ('queryClusterVmdkLoadHistoryTest', 'VsanQueryVcClusterVmdkLoadHistoryTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('count', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('taskId', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanClusterVmdkLoadTestResult[]', 'vim.cluster.VsanClusterVmdkLoadTestResult[]'), 'System.Read', None), ('queryVsanClusterHealthCheckInterval', 'VsanHealthQueryVsanClusterHealthCheckInterval', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'int', 'int'), 'System.Read', None), ('queryClusterCreateVmHealthTest', 'VsanQueryVcClusterCreateVmHealthTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('timeout', 'int', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterCreateVmHealthTestResult', 'vim.cluster.VsanClusterCreateVmHealthTestResult'), 'System.Read', None), ('getClusterHclInfo', 'VsanVcClusterGetHclInfo', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('includeHostsResult', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanClusterHclInfo', 'vim.cluster.VsanClusterHclInfo'), 'System.Read', None), ('queryAttachToSrHistory', 'VsanQueryAttachToSrHistory', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('count', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('taskId', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | 
F_OPTIONAL, 'vim.cluster.VsanAttachToSrOperation[]', 'vim.cluster.VsanAttachToSrOperation[]'), 'System.Read', None), ('rebalanceCluster', 'VsanRebalanceCluster', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('targetHosts', 'vim.HostSystem[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('runVmdkLoadTest', 'VsanVcClusterRunVmdkLoadTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('runname', 'string', 'vim.version.version9', 0, None), ('durationSec', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('specs', 'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('action', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('sendVsanTelemetry', 'VsanHealthSendVsanTelemetry', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'void', 'void'), 'System.Read', None), ('queryClusterNetworkPerfHistoryTest', 'VsanQueryVcClusterNetworkPerfHistoryTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('count', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanClusterNetworkLoadTestResult[]', 'vim.cluster.VsanClusterNetworkLoadTestResult[]'), 'System.Read', None), ('queryClusterHealthSummary', 'VsanQueryVcClusterHealthSummary', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('vmCreateTimeout', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('objUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeObjUuids', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('fields', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('fetchFromCache', 'boolean', 'vim.version.version9', 0 
| F_OPTIONAL, None), ), (0, 'vim.cluster.VsanClusterHealthSummary', 'vim.cluster.VsanClusterHealthSummary'), 'System.Read', None), ('stopRebalanceCluster', 'VsanStopRebalanceCluster', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('targetHosts', 'vim.HostSystem[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('queryVsanClusterHealthConfig', 'VsanHealthQueryVsanClusterHealthConfig', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterHealthConfigs', 'vim.cluster.VsanClusterHealthConfigs'), 'System.Read', None), ('attachVsanSupportBundleToSr', 'VsanAttachVsanSupportBundleToSr', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('srNumber', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('queryClusterVmdkWorkloadTypes', 'VsanQueryVcClusterVmdkWorkloadTypes', 'vim.version.version9', tuple(), (0, 'vim.cluster.VsanStorageWorkloadType[]', 'vim.cluster.VsanStorageWorkloadType[]'), 'System.Read', None), ('queryVerifyClusterHealthSystemVersions', 'VsanVcClusterQueryVerifyHealthSystemVersions', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterHealthSystemVersionResult', 'vim.cluster.VsanClusterHealthSystemVersionResult'), 'System.Read', None), ('isRebalanceRunning', 'VsanHealthIsRebalanceRunning', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('targetHosts', 'vim.HostSystem[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('setVsanClusterHealthCheckInterval', 'VsanHealthSetVsanClusterHealthCheckInterval', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, 
None), ('vsanClusterHealthCheckInterval', 'int', 'vim.version.version9', 0, None), ), (0, 'void', 'void'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanVcStretchedClusterSystem', 'VimClusterVsanVcStretchedClusterSystem', 'vmodl.ManagedObject', 'vim.version.version10', [], [('isWitnessHost', 'VSANVcIsWitnessHost', 'vim.version.version10', (('host', 'vim.HostSystem', 'vim.version.version10', 0, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('setPreferredFaultDomain', 'VSANVcSetPreferredFaultDomain', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('preferredFd', 'string', 'vim.version.version10', 0, None), ('witnessHost', 'vim.HostSystem', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('getPreferredFaultDomain', 'VSANVcGetPreferredFaultDomain', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0 | F_OPTIONAL, 'vim.cluster.VSANPreferredFaultDomainInfo', 'vim.cluster.VSANPreferredFaultDomainInfo'), 'System.Read', None), ('getWitnessHosts', 'VSANVcGetWitnessHosts', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0 | F_OPTIONAL, 'vim.cluster.VSANWitnessHostInfo[]', 'vim.cluster.VSANWitnessHostInfo[]'), 'System.Read', None), ('retrieveStretchedClusterVcCapability', 'VSANVcRetrieveStretchedClusterVcCapability', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('verifyAllConnected', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VSANStretchedClusterCapability[]', 'vim.cluster.VSANStretchedClusterCapability[]'), 'System.Read', None), ('convertToStretchedCluster', 'VSANVcConvertToStretchedCluster', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('faultDomainConfig', 
'vim.cluster.VSANStretchedClusterFaultDomainConfig', 'vim.version.version10', 0, None), ('witnessHost', 'vim.HostSystem', 'vim.version.version10', 0, None), ('preferredFd', 'string', 'vim.version.version10', 0, None), ('diskMapping', 'vim.vsan.host.DiskMapping', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('removeWitnessHost', 'VSANVcRemoveWitnessHost', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('witnessHost', 'vim.HostSystem', 'vim.version.version10', 0 | F_OPTIONAL, None), ('witnessAddress', 'string', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanClusterHealthSystem', 'VsanClusterHealthSystem', 'vmodl.ManagedObject', 'vim.version.version9', [], [('queryPhysicalDiskHealthSummary', 'VsanQueryClusterPhysicalDiskHealthSummary', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.host.VsanPhysicalDiskHealthSummary[]', 'vim.host.VsanPhysicalDiskHealthSummary[]'), 'System.Read', None), ('queryClusterNetworkPerfTest', 'VsanQueryClusterNetworkPerfTest', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ('multicast', 'boolean', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterNetworkLoadTestResult', 'vim.cluster.VsanClusterNetworkLoadTestResult'), 'System.Read', None), ('queryAdvCfgSync', 'VsanQueryClusterAdvCfgSync', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterAdvCfgSyncResult[]', 'vim.cluster.VsanClusterAdvCfgSyncResult[]'), 'System.Read', None), ('repairClusterImmediateObjects', 'VsanRepairClusterImmediateObjects', 
'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ('uuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanClusterHealthSystemObjectsRepairResult', 'vim.cluster.VsanClusterHealthSystemObjectsRepairResult'), 'System.Read', None), ('queryVerifyClusterNetworkSettings', 'VsanQueryVerifyClusterNetworkSettings', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterNetworkHealthResult', 'vim.cluster.VsanClusterNetworkHealthResult'), 'System.Read', None), ('queryClusterCreateVmHealthTest', 'VsanQueryClusterCreateVmHealthTest', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ('timeout', 'int', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterCreateVmHealthTestResult', 'vim.cluster.VsanClusterCreateVmHealthTestResult'), 'System.Read', None), ('queryClusterHealthSystemVersions', 'VsanQueryClusterHealthSystemVersions', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterHealthSystemVersionResult', 'vim.cluster.VsanClusterHealthSystemVersionResult'), 'System.Read', None), ('getClusterHclInfo', 'VsanClusterGetHclInfo', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterHclInfo', 'vim.cluster.VsanClusterHclInfo'), 'System.Read', None), ('queryCheckLimits', 'VsanQueryClusterCheckLimits', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 
'vim.cluster.VsanClusterLimitHealthResult', 'vim.cluster.VsanClusterLimitHealthResult'), 'System.Read', None), ('queryCaptureVsanPcap', 'VsanQueryClusterCaptureVsanPcap', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ('duration', 'int', 'vim.version.version9', 0, None), ('vmknic', 'vim.cluster.VsanClusterHostVmknicMapping[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeRawPcap', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeIgmp', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('cmmdsMsgTypeFilter', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('cmmdsPorts', 'int[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('clusterUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanVsanClusterPcapResult', 'vim.cluster.VsanVsanClusterPcapResult'), 'System.Read', None), ('checkClusterClomdLiveness', 'VsanCheckClusterClomdLiveness', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterClomdLivenessResult', 'vim.cluster.VsanClusterClomdLivenessResult'), 'System.Read', None), ]) +CreateDataType('vim.host.VSANCmmdsNodeInfo', 'VimHostVSANCmmdsNodeInfo', 'vmodl.DynamicData', 'vim.version.version10', [('nodeUuid', 'string', 'vim.version.version10', 0), ('isWitness', 'boolean', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VsanPhysicalDiskHealth', 'VsanPhysicalDiskHealth', 'vmodl.DynamicData', 'vim.version.version9', [('name', 'string', 'vim.version.version9', 0), ('uuid', 'string', 'vim.version.version9', 0), ('inCmmds', 'boolean', 'vim.version.version9', 0), ('inVsi', 'boolean', 'vim.version.version9', 0), ('dedupScope', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('formatVersion', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('isAllFlash', 'int', 
'vim.version.version9', 0 | F_OPTIONAL), ('congestionValue', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('congestionArea', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('congestionHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('metadataHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('operationalHealthDescription', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('operationalHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('dedupUsageHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('capacityHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('summaryHealth', 'string', 'vim.version.version9', 0), ('capacity', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('usedCapacity', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('reservedCapacity', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('totalBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('freeBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('hashedBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('dedupedBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('scsiDisk', 'vim.host.ScsiDisk', 'vim.version.version9', 0 | F_OPTIONAL), ('usedComponents', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('maxComponents', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('compLimitHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.DataEfficiencyConfig', 'VsanDataEfficiencyConfig', 'vmodl.DynamicData', 'vim.version.version10', [('dedupEnabled', 'boolean', 'vim.version.version10', 0), ('compressionEnabled', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.StorageComplianceResult', 'VsanStorageComplianceResult', 'vmodl.DynamicData', 'vim.version.version9', [('checkTime', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('profile', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('objectUUID', 'string', 'vim.version.version9', 0 | 
F_OPTIONAL), ('complianceStatus', 'vim.cluster.StorageComplianceStatus', 'vim.version.version9', 0), ('mismatch', 'boolean', 'vim.version.version9', 0), ('violatedPolicies', 'vim.cluster.StoragePolicyStatus[]', 'vim.version.version9', 0 | F_OPTIONAL), ('operationalStatus', 'vim.cluster.StorageOperationalStatus', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthGroup', 'VsanClusterHealthGroup', 'vmodl.DynamicData', 'vim.version.version9', [('groupId', 'string', 'vim.version.version9', 0), ('groupName', 'string', 'vim.version.version9', 0), ('groupHealth', 'string', 'vim.version.version9', 0), ('groupTests', 'vim.cluster.VsanClusterHealthTest[]', 'vim.version.version9', 0 | F_OPTIONAL), ('groupDetails', 'vim.cluster.VsanClusterHealthResultBase[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanSpaceUsageDetailResult', 'VsanSpaceUsageDetailResult', 'vmodl.DynamicData', 'vim.version.version9', [('spaceUsageByObjectType', 'vim.cluster.VsanObjectSpaceSummary[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanAttachToSrOperation', 'VsanAttachToSrOperation', 'vmodl.DynamicData', 'vim.version.version9', [('task', 'vim.Task', 'vim.version.version9', 0 | F_OPTIONAL), ('success', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('timestamp', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('srNumber', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanObjectSpaceSummary', 'VsanObjectSpaceSummary', 'vmodl.DynamicData', 'vim.version.version9', [('objType', 'vim.cluster.VsanObjectTypeEnum', 'vim.version.version9', 0 | F_OPTIONAL), ('overheadB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('temporaryOverheadB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('primaryCapacityB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('provisionCapacityB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('reservedCapacityB', 'long', 
'vim.version.version9', 0 | F_OPTIONAL), ('overReservedB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('physicalUsedB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('usedB', 'long', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHclInfo', 'VsanClusterHclInfo', 'vmodl.DynamicData', 'vim.version.version9', [('hclDbLastUpdate', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('hclDbAgeHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('hostResults', 'vim.host.VsanHostHclInfo[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfGraph', 'VsanPerfGraph', 'vmodl.DynamicData', 'vim.version.version9', [('id', 'string', 'vim.version.version9', 0), ('metrics', 'vim.cluster.VsanPerfMetricId[]', 'vim.version.version9', 0), ('unit', 'vim.cluster.VsanPerfStatsUnitType', 'vim.version.version9', 0), ('threshold', 'vim.cluster.VsanPerfThreshold', 'vim.version.version9', 0 | F_OPTIONAL), ('name', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthResultBase', 'VsanClusterHealthResultBase', 'vmodl.DynamicData', 'vim.version.version9', [('label', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfTopEntity', 'VsanPerfTopEntity', 'vmodl.DynamicData', 'vim.version.version9', [('entityRefId', 'string', 'vim.version.version9', 0), ('value', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanClusterBalancePerDiskInfo', 'VsanClusterBalancePerDiskInfo', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('fullness', 'long', 'vim.version.version9', 0), ('variance', 'long', 'vim.version.version9', 0), ('fullnessAboveThreshold', 'long', 'vim.version.version9', 0), ('dataToMoveB', 'long', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanClusterHealthTest', 
'VsanClusterHealthTest', 'vmodl.DynamicData', 'vim.version.version9', [('testId', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('testName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('testDescription', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('testShortDescription', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('testHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('testDetails', 'vim.cluster.VsanClusterHealthResultBase[]', 'vim.version.version9', 0 | F_OPTIONAL), ('testActions', 'vim.cluster.VsanClusterHealthAction[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.StoragePolicyStatus', 'VsanStoragePolicyStatus', 'vmodl.DynamicData', 'vim.version.version9', [('id', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('expectedValue', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('currentValue', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfMemberInfo', 'VsanPerfMemberInfo', 'vmodl.DynamicData', 'vim.version.version9', [('thumbprint', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanPerfMetricId', 'VsanPerfMetricId', 'vmodl.DynamicData', 'vim.version.version9', [('label', 'string', 'vim.version.version9', 0), ('group', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('rollupType', 'vim.cluster.VsanPerfSummaryType', 'vim.version.version9', 0 | F_OPTIONAL), ('statsType', 'vim.cluster.VsanPerfStatsType', 'vim.version.version9', 0 | F_OPTIONAL), ('name', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('metricsCollectInterval', 'int', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VSANWitnessHostInfo', 'VimClusterVSANWitnessHostInfo', 'vmodl.DynamicData', 'vim.version.version10', [('nodeUuid', 'string', 'vim.version.version10', 0), ('faultDomainName', 'string', 'vim.version.version10', 0 | F_OPTIONAL), ('preferredFdName', 'string', 
'vim.version.version10', 0 | F_OPTIONAL), ('preferredFdUuid', 'string', 'vim.version.version10', 0 | F_OPTIONAL), ('unicastAgentAddr', 'string', 'vim.version.version10', 0 | F_OPTIONAL), ('host', 'vim.HostSystem', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanHealthExtMgmtPreCheckResult', 'VsanHealthExtMgmtPreCheckResult', 'vmodl.DynamicData', 'vim.version.version9', [('overallResult', 'boolean', 'vim.version.version9', 0), ('esxVersionCheckPassed', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('drsCheckPassed', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('eamConnectionCheckPassed', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('installStateCheckPassed', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('results', 'vim.cluster.VsanClusterHealthTest[]', 'vim.version.version9', 0), ('vumRegistered', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.upgradesystem.HostWithHybridDiskgroupIssue', 'VsanHostWithHybridDiskgroupIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('hosts', 'vim.HostSystem[]', 'vim.version.version10', 0)]) +CreateDataType('vim.cluster.VsanPerfMetricSeriesCSV', 'VsanPerfMetricSeriesCSV', 'vmodl.DynamicData', 'vim.version.version9', [('metricId', 'vim.cluster.VsanPerfMetricId', 'vim.version.version9', 0), ('values', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfQuerySpec', 'VsanPerfQuerySpec', 'vmodl.DynamicData', 'vim.version.version9', [('entityRefId', 'string', 'vim.version.version9', 0), ('startTime', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('endTime', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('group', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('labels', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('interval', 'int', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanRepairObjectsResult', 'VsanRepairObjectsResult', 
'vmodl.DynamicData', 'vim.version.version9', [('inQueueObjects', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('failedRepairObjects', 'vim.host.VsanFailedRepairObjectResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('notInQueueObjects', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterNetworkPartitionInfo', 'VsanClusterNetworkPartitionInfo', 'vmodl.DynamicData', 'vim.version.version9', [('hosts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.upgradesystem.MixedEsxVersionIssue', 'VsanMixedEsxVersionIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', []) +CreateDataType('vim.cluster.VsanClusterClomdLivenessResult', 'VsanClusterClomdLivenessResult', 'vmodl.DynamicData', 'vim.version.version9', [('clomdLivenessResult', 'vim.cluster.VsanHostClomdLivenessResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanVsanClusterPcapResult', 'VsanVsanClusterPcapResult', 'vmodl.DynamicData', 'vim.version.version9', [('pkts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('groups', 'vim.cluster.VsanVsanClusterPcapGroup[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issues', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostResults', 'vim.host.VsanVsanPcapResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfMasterInformation', 'VsanPerfMasterInformation', 'vmodl.DynamicData', 'vim.version.version9', [('secSinceLastStatsWrite', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('secSinceLastStatsCollect', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('statsIntervalSec', 'long', 'vim.version.version9', 0), ('collectionFailureHostUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('renamedStatsDirectories', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('statsDirectoryPercentFree', 'long', 'vim.version.version9', 0 | 
F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanHostCreateVmHealthTestResult', 'VsanHostCreateVmHealthTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('state', 'string', 'vim.version.version9', 0), ('fault', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanDiskFormatConversionCheckResult', 'VsanDiskFormatConversionCheckResult', 'vim.VsanUpgradeSystem.PreflightCheckResult', 'vim.version.version10', [('isSupported', 'boolean', 'vim.version.version10', 0), ('targetVersion', 'int', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthSystemObjectsRepairResult', 'VsanClusterHealthSystemObjectsRepairResult', 'vmodl.DynamicData', 'vim.version.version9', [('inRepairingQueueObjects', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('failedRepairObjects', 'vim.host.VsanFailedRepairObjectResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0)]) +CreateDataType('vim.host.VsanHostHclInfo', 'VsanHostHclInfo', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('hclChecked', 'boolean', 'vim.version.version9', 0), ('releaseName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL), ('controllers', 'vim.host.VsanHclControllerInfo[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VSANStretchedClusterCapability', 'VimClusterVSANStretchedClusterCapability', 'vmodl.DynamicData', 'vim.version.version10', [('hostMoId', 'string', 'vim.version.version10', 0), ('connStatus', 'string', 'vim.version.version10', 0 | F_OPTIONAL), ('isSupported', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL), ('hostCapability', 'vim.host.VSANStretchedClusterHostCapability', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanDiskMappingsConfigSpec', 
'VimClusterVsanDiskMappingsConfigSpec', 'vmodl.DynamicData', 'vim.version.version10', [('hostDiskMappings', 'vim.cluster.VsanHostDiskMapping[]', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VsanHostVmdkLoadTestResult', 'VsanHostVmdkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('issueFound', 'boolean', 'vim.version.version9', 0), ('faultMessage', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vmdkResults', 'vim.host.VsanVmdkLoadTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.ReconfigSpec', 'VimVsanReconfigSpec', 'vmodl.DynamicData', 'vim.version.version10', [('vsanClusterConfig', 'vim.vsan.cluster.ConfigInfo', 'vim.version.version10', 0 | F_OPTIONAL), ('dataEfficiencyConfig', 'vim.vsan.DataEfficiencyConfig', 'vim.version.version10', 0 | F_OPTIONAL), ('diskMappingSpec', 'vim.cluster.VsanDiskMappingsConfigSpec', 'vim.version.version10', 0 | F_OPTIONAL), ('faultDomainsSpec', 'vim.cluster.VsanFaultDomainsConfigSpec', 'vim.version.version10', 0 | F_OPTIONAL), ('modify', 'boolean', 'vim.version.version10', 0), ('allowReducedRedundancy', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanNetworkPeerHealthResult', 'VsanNetworkPeerHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('peer', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('peerHostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('peerVmknicName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('smallPingTestSuccessPct', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('largePingTestSuccessPct', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('maxLatencyUs', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('onSameIpSubnet', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('sourceVmknicName', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanWitnessSpec', 'VimClusterVsanWitnessSpec', 
'vmodl.DynamicData', 'vim.version.version10', [('host', 'vim.HostSystem', 'vim.version.version10', 0), ('preferredFaultDomainName', 'string', 'vim.version.version10', 0), ('diskMapping', 'vim.vsan.host.DiskMapping', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.host.DiskMappingCreationSpec', 'VimVsanHostDiskMappingCreationSpec', 'vmodl.DynamicData', 'vim.version.version10', [('host', 'vim.HostSystem', 'vim.version.version10', 0), ('cacheDisks', 'vim.host.ScsiDisk[]', 'vim.version.version10', 0 | F_OPTIONAL), ('capacityDisks', 'vim.host.ScsiDisk[]', 'vim.version.version10', 0), ('creationType', 'vim.vsan.host.DiskMappingCreationType', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VsanLimitHealthResult', 'VsanLimitHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0), ('maxComponents', 'int', 'vim.version.version9', 0), ('freeComponents', 'int', 'vim.version.version9', 0), ('componentLimitHealth', 'string', 'vim.version.version9', 0), ('lowestFreeDiskSpacePct', 'int', 'vim.version.version9', 0), ('usedDiskSpaceB', 'long', 'vim.version.version9', 0), ('totalDiskSpaceB', 'long', 'vim.version.version9', 0), ('diskFreeSpaceHealth', 'string', 'vim.version.version9', 0), ('reservedRcSizeB', 'long', 'vim.version.version9', 0), ('totalRcSizeB', 'long', 'vim.version.version9', 0), ('rcFreeReservationHealth', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VSANPreferredFaultDomainInfo', 'VimClusterVSANPreferredFaultDomainInfo', 'vmodl.DynamicData', 'vim.version.version10', [('preferredFaultDomainName', 'string', 'vim.version.version10', 0), ('preferredFaultDomainId', 'string', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VsanObjectOverallHealth', 'VsanObjectOverallHealth', 'vmodl.DynamicData', 'vim.version.version9', [('objectHealthDetail', 'vim.host.VsanObjectHealth[]', 
'vim.version.version9', 0 | F_OPTIONAL), ('objectVersionCompliance', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanVsanClusterPcapGroup', 'VsanVsanClusterPcapGroup', 'vmodl.DynamicData', 'vim.version.version9', [('master', 'string', 'vim.version.version9', 0), ('members', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthResultColumnInfo', 'VsanClusterHealthResultColumnInfo', 'vmodl.DynamicData', 'vim.version.version9', [('label', 'string', 'vim.version.version9', 0), ('type', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanClusterNetworkHealthResult', 'VsanClusterNetworkHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostResults', 'vim.host.VsanNetworkHealthResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanVmknicPresent', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('matchingMulticastConfig', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('matchingIpSubnets', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('pingTestSuccess', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('largePingTestSuccess', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('potentialMulticastIssue', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('otherHostsInVsanCluster', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('partitions', 'vim.cluster.VsanClusterNetworkPartitionInfo[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsWithVsanDisabled', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsDisconnected', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsCommFailure', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsInEsxMaintenanceMode', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsInVsanMaintenanceMode', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('infoAboutUnexpectedHosts', 
'vim.host.VsanQueryResultHostInfo[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfNodeInformation', 'VsanPerfNodeInformation', 'vmodl.DynamicData', 'vim.version.version9', [('version', 'string', 'vim.version.version9', 0), ('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL), ('isCmmdsMaster', 'boolean', 'vim.version.version9', 0), ('isStatsMaster', 'boolean', 'vim.version.version9', 0), ('vsanMasterUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanNodeUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('masterInfo', 'vim.cluster.VsanPerfMasterInformation', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfEntityMetricCSV', 'VsanPerfEntityMetricCSV', 'vmodl.DynamicData', 'vim.version.version9', [('entityRefId', 'string', 'vim.version.version9', 0), ('sampleInfo', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('value', 'vim.cluster.VsanPerfMetricSeriesCSV[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.upgradesystem.DiskUnhealthIssue', 'VsanDiskUnhealthIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('uuids', 'string[]', 'vim.version.version10', 0)]) +CreateDataType('vim.cluster.VsanFaultDomainSpec', 'VimClusterVsanFaultDomainSpec', 'vmodl.DynamicData', 'vim.version.version10', [('hosts', 'vim.HostSystem[]', 'vim.version.version10', 0), ('name', 'string', 'vim.version.version10', 0)]) +CreateDataType('vim.vsan.upgradesystem.ObjectInaccessibleIssue', 'VsanObjectInaccessibleIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('uuids', 'string[]', 'vim.version.version10', 0)]) +CreateDataType('vim.cluster.VsanDiskFormatConversionSpec', 'VsanDiskFormatConversionSpec', 'vmodl.DynamicData', 'vim.version.version10', [('dataEfficiencyConfig', 'vim.vsan.DataEfficiencyConfig', 'vim.version.version10', 0 | F_OPTIONAL)]) 
+CreateDataType('vim.cluster.VsanClusterHealthAction', 'VsanClusterHealthAction', 'vmodl.DynamicData', 'vim.version.version9', [('actionId', 'vim.cluster.VsanClusterHealthActionIdEnum', 'vim.version.version9', 0), ('actionLabel', 'vmodl.LocalizableMessage', 'vim.version.version9', 0), ('actionDescription', 'vmodl.LocalizableMessage', 'vim.version.version9', 0), ('enabled', 'boolean', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanClusterHealthSystemVersionResult', 'VsanClusterHealthSystemVersionResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostResults', 'vim.cluster.VsanHostHealthSystemVersionResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('vcVersion', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanClusterHealthResultRow', 'VsanClusterHealthResultRow', 'vmodl.DynamicData', 'vim.version.version9', [('values', 'string[]', 'vim.version.version9', 0), ('nestedRows', 'vim.cluster.VsanClusterHealthResultRow[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthSystemStatusResult', 'VsanClusterHealthSystemStatusResult', 'vmodl.DynamicData', 'vim.version.version9', [('status', 'string', 'vim.version.version9', 0), ('goalState', 'string', 'vim.version.version9', 0), ('untrackedHosts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('trackedHostsStatus', 'vim.host.VsanHostHealthSystemStatusResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanHostDiskMapping', 'VimClusterVsanHostDiskMapping', 'vmodl.DynamicData', 'vim.version.version10', [('host', 'vim.HostSystem', 'vim.version.version10', 0), ('cacheDisks', 'vim.host.ScsiDisk[]', 'vim.version.version10', 0 | F_OPTIONAL), ('capacityDisks', 'vim.host.ScsiDisk[]', 'vim.version.version10', 0), ('type', 'vim.cluster.VsanDiskGroupCreationType', 'vim.version.version10', 0)]) 
+CreateDataType('vim.cluster.VSANStretchedClusterFaultDomainConfig', 'VimClusterVSANStretchedClusterFaultDomainConfig', 'vmodl.DynamicData', 'vim.version.version10', [('firstFdName', 'string', 'vim.version.version10', 0), ('firstFdHosts', 'vim.HostSystem[]', 'vim.version.version10', 0), ('secondFdName', 'string', 'vim.version.version10', 0), ('secondFdHosts', 'vim.HostSystem[]', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VSANStretchedClusterHostInfo', 'VimHostVSANStretchedClusterHostInfo', 'vmodl.DynamicData', 'vim.version.version10', [('nodeInfo', 'vim.host.VSANCmmdsNodeInfo', 'vim.version.version10', 0), ('faultDomainInfo', 'vim.host.VSANCmmdsFaultDomainInfo', 'vim.version.version10', 0 | F_OPTIONAL), ('preferredFaultDomainInfo', 'vim.host.VSANCmmdsPreferredFaultDomainInfo', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.upgradesystem.HigherObjectsPresentDuringDowngradeIssue', 'VsanHigherObjectsPresentDuringDowngradeIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('uuids', 'string[]', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VSANCmmdsFaultDomainInfo', 'VimHostVSANCmmdsFaultDomainInfo', 'vmodl.DynamicData', 'vim.version.version10', [('faultDomainId', 'string', 'vim.version.version10', 0), ('faultDomainName', 'string', 'vim.version.version10', 0)]) +CreateDataType('vim.fault.VsanNodeNotMaster', 'VsanNodeNotMaster', 'vim.fault.VimFault', 'vim.version.version9', [('vsanMasterUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('cmmdsMasterButNotStatsMaster', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanHostHealthSystemVersionResult', 'VsanHostHealthSystemVersionResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('version', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL)]) 
+CreateDataType('vim.cluster.VsanClusterHealthConfigs', 'VsanClusterHealthConfigs', 'vmodl.DynamicData', 'vim.version.version9', [('enableVsanTelemetry', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanTelemetryInterval', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanTelemetryProxy', 'vim.cluster.VsanClusterTelemetryProxyConfig', 'vim.version.version9', 0 | F_OPTIONAL), ('configs', 'vim.cluster.VsanClusterHealthResultKeyValuePair[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterWhatifHostFailuresResult', 'VsanClusterWhatifHostFailuresResult', 'vmodl.DynamicData', 'vim.version.version9', [('numFailures', 'long', 'vim.version.version9', 0), ('totalUsedCapacityB', 'long', 'vim.version.version9', 0), ('totalCapacityB', 'long', 'vim.version.version9', 0), ('totalRcReservationB', 'long', 'vim.version.version9', 0), ('totalRcSizeB', 'long', 'vim.version.version9', 0), ('usedComponents', 'long', 'vim.version.version9', 0), ('totalComponents', 'long', 'vim.version.version9', 0), ('componentLimitHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('diskFreeSpaceHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('rcFreeReservationHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanObjectIdentityAndHealth', 'VsanObjectIdentityAndHealth', 'vmodl.DynamicData', 'vim.version.version9', [('identities', 'vim.cluster.VsanObjectIdentity[]', 'vim.version.version9', 0 | F_OPTIONAL), ('health', 'vim.host.VsanObjectOverallHealth', 'vim.version.version9', 0 | F_OPTIONAL), ('spaceSummary', 'vim.cluster.VsanObjectSpaceSummary[]', 'vim.version.version9', 0 | F_OPTIONAL), ('rawData', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanHclControllerInfo', 'VsanHclControllerInfo', 'vmodl.DynamicData', 'vim.version.version9', [('deviceName', 'string', 'vim.version.version9', 0), ('deviceDisplayName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), 
('driverName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('driverVersion', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vendorId', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('deviceId', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('subVendorId', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('subDeviceId', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('extraInfo', 'vim.KeyValue[]', 'vim.version.version9', 0 | F_OPTIONAL), ('deviceOnHcl', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('releaseSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('releasesOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('driverVersionsOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('driverVersionSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('fwVersionSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('fwVersionOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('cacheConfigSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('cacheConfigOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('raidConfigSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('raidConfigOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('fwVersion', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('raidConfig', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('cacheConfig', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('cimProviderInfo', 'vim.host.VsanHostCimProviderInfo', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthResultKeyValuePair', 'VsanClusterHealthResultKeyValuePair', 'vmodl.DynamicData', 'vim.version.version9', [('key', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('value', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.StorageOperationalStatus', 'VsanStorageOperationalStatus', 'vmodl.DynamicData', 'vim.version.version9', [('healthy', 
'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('operationETA', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('operationProgress', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('transitional', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanSpaceUsage', 'VsanSpaceUsage', 'vmodl.DynamicData', 'vim.version.version9', [('totalCapacityB', 'long', 'vim.version.version9', 0), ('freeCapacityB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('spaceOverview', 'vim.cluster.VsanObjectSpaceSummary', 'vim.version.version9', 0 | F_OPTIONAL), ('spaceDetail', 'vim.cluster.VsanSpaceUsageDetailResult', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthResultTable', 'VsanClusterHealthResultTable', 'vim.cluster.VsanClusterHealthResultBase', 'vim.version.version9', [('columns', 'vim.cluster.VsanClusterHealthResultColumnInfo[]', 'vim.version.version9', 0 | F_OPTIONAL), ('rows', 'vim.cluster.VsanClusterHealthResultRow[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterConfig', 'VsanClusterConfig', 'vmodl.DynamicData', 'vim.version.version9', [('config', 'vim.vsan.cluster.ConfigInfo', 'vim.version.version9', 0), ('name', 'string', 'vim.version.version9', 0), ('hosts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.host.VsanHostCapability', 'VimVsanHostVsanHostCapability', 'vmodl.DynamicData', 'vim.version.version10', [('host', 'vim.HostSystem', 'vim.version.version10', 0), ('isSupported', 'boolean', 'vim.version.version10', 0), ('isLicensed', 'boolean', 'vim.version.version10', 0)]) +CreateDataType('vim.cluster.VsanPerfThreshold', 'VsanPerfThreshold', 'vmodl.DynamicData', 'vim.version.version9', [('direction', 'vim.cluster.VsanPerfThresholdDirectionType', 'vim.version.version9', 0), ('yellow', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('red', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) 
+CreateDataType('vim.host.VsanNetworkHealthResult', 'VsanNetworkHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('host', 'vim.HostSystem', 'vim.version.version9', 0 | F_OPTIONAL), ('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanVmknicPresent', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('ipSubnets', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('peerHealth', 'vim.host.VsanNetworkPeerHealthResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('multicastConfig', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.ConfigInfoEx', 'VsanConfigInfoEx', 'vim.vsan.cluster.ConfigInfo', 'vim.version.version10', [('dataEfficiencyConfig', 'vim.vsan.DataEfficiencyConfig', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanVmdkLoadTestResult', 'VsanVmdkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('success', 'boolean', 'vim.version.version9', 0), ('faultMessage', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('spec', 'vim.host.VsanVmdkLoadTestSpec', 'vim.version.version9', 0), ('actualDurationSec', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('totalBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('iops', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('tputBps', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('avgLatencyUs', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('maxLatencyUs', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('numIoAboveLatencyThreshold', 'long', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterVMsHealthOverallResult', 'VsanClusterVMsHealthOverAllResult', 'vmodl.DynamicData', 'vim.version.version9', [('healthStateList', 'vim.cluster.VsanClusterVMsHealthSummaryResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('overallHealthState', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) 
+CreateDataType('vim.host.VsanHostHealthSystemStatusResult', 'VsanHostHealthSystemStatusResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('status', 'string', 'vim.version.version9', 0), ('issues', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterAdvCfgSyncResult', 'VsanClusterAdvCfgSyncResult', 'vmodl.DynamicData', 'vim.version.version9', [('inSync', 'boolean', 'vim.version.version9', 0), ('name', 'string', 'vim.version.version9', 0), ('hostValues', 'vim.cluster.VsanClusterAdvCfgSyncHostResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanQueryResultHostInfo', 'VsanQueryResultHostInfo', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('hostnameInCmmds', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanIpv4Addresses', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.host.DiskMapInfoEx', 'VimVsanHostDiskMapInfoEx', 'vmodl.DynamicData', 'vim.version.version10', [('mapping', 'vim.vsan.host.DiskMapping', 'vim.version.version10', 0), ('isMounted', 'boolean', 'vim.version.version10', 0), ('isAllFlash', 'boolean', 'vim.version.version10', 0), ('isDataEfficiency', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanVmdkLoadTestSpec', 'VsanVmdkLoadTestSpec', 'vmodl.DynamicData', 'vim.version.version9', [('vmdkCreateSpec', 'vim.VirtualDiskManager.FileBackedVirtualDiskSpec', 'vim.version.version9', 0 | F_OPTIONAL), ('vmdkIOSpec', 'vim.host.VsanVmdkIOLoadSpec', 'vim.version.version9', 0 | F_OPTIONAL), ('vmdkIOSpecSequence', 'vim.host.VsanVmdkIOLoadSpec[]', 'vim.version.version9', 0 | F_OPTIONAL), ('stepDurationSec', 'long', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthSummary', 'VsanClusterHealthSummary', 'vmodl.DynamicData', 'vim.version.version9', [('clusterStatus', 
'vim.cluster.VsanClusterHealthSystemStatusResult', 'vim.version.version9', 0 | F_OPTIONAL), ('timestamp', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('clusterVersions', 'vim.cluster.VsanClusterHealthSystemVersionResult', 'vim.version.version9', 0 | F_OPTIONAL), ('objectHealth', 'vim.host.VsanObjectOverallHealth', 'vim.version.version9', 0 | F_OPTIONAL), ('vmHealth', 'vim.cluster.VsanClusterVMsHealthOverallResult', 'vim.version.version9', 0 | F_OPTIONAL), ('networkHealth', 'vim.cluster.VsanClusterNetworkHealthResult', 'vim.version.version9', 0 | F_OPTIONAL), ('limitHealth', 'vim.cluster.VsanClusterLimitHealthResult', 'vim.version.version9', 0 | F_OPTIONAL), ('advCfgSync', 'vim.cluster.VsanClusterAdvCfgSyncResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('createVmHealth', 'vim.cluster.VsanHostCreateVmHealthTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('physicalDisksHealth', 'vim.host.VsanPhysicalDiskHealthSummary[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hclInfo', 'vim.cluster.VsanClusterHclInfo', 'vim.version.version9', 0 | F_OPTIONAL), ('groups', 'vim.cluster.VsanClusterHealthGroup[]', 'vim.version.version9', 0 | F_OPTIONAL), ('overallHealth', 'string', 'vim.version.version9', 0), ('overallHealthDescription', 'string', 'vim.version.version9', 0), ('clomdLiveness', 'vim.cluster.VsanClusterClomdLivenessResult', 'vim.version.version9', 0 | F_OPTIONAL), ('diskBalance', 'vim.cluster.VsanClusterBalanceSummary', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfEntityType', 'VsanPerfEntityType', 'vmodl.DynamicData', 'vim.version.version9', [('name', 'string', 'vim.version.version9', 0), ('id', 'string', 'vim.version.version9', 0), ('graphs', 'vim.cluster.VsanPerfGraph[]', 'vim.version.version9', 0), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanNetworkLoadTestResult', 'VsanNetworkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 
'string', 'vim.version.version9', 0), ('status', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('client', 'boolean', 'vim.version.version9', 0), ('bandwidthBps', 'long', 'vim.version.version9', 0), ('totalBytes', 'long', 'vim.version.version9', 0), ('lostDatagrams', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('lossPct', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('sentDatagrams', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('jitterMs', 'float', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanPhysicalDiskHealthSummary', 'VsanPhysicalDiskHealthSummary', 'vmodl.DynamicData', 'vim.version.version9', [('overallHealth', 'string', 'vim.version.version9', 0), ('heapsWithIssues', 'vim.host.VsanResourceHealth[]', 'vim.version.version9', 0 | F_OPTIONAL), ('slabsWithIssues', 'vim.host.VsanResourceHealth[]', 'vim.version.version9', 0 | F_OPTIONAL), ('disks', 'vim.host.VsanPhysicalDiskHealth[]', 'vim.version.version9', 0 | F_OPTIONAL), ('componentsWithIssues', 'vim.host.VsanResourceHealth[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('hostDedupScope', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.host.VsanDiskManagementSystemCapability', 'VimVsanHostVsanDiskManagementSystemCapability', 'vmodl.DynamicData', 'vim.version.version10', [('version', 'string', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VsanHostCimProviderInfo', 'VsanHostCimProviderInfo', 'vmodl.DynamicData', 'vim.version.version9', [('cimProviderSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('installedCIMProvider', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('cimProviderOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanObjectInformation', 'VsanObjectInformation', 'vmodl.DynamicData', 'vim.version.version9', [('directoryName', 
'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanObjectUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('policyAttributes', 'vim.KeyValue[]', 'vim.version.version9', 0 | F_OPTIONAL), ('spbmProfileUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('spbmProfileGenerationId', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('spbmComplianceResult', 'vim.cluster.StorageComplianceResult', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanObjectIdentity', 'VsanObjectIdentity', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 'string', 'vim.version.version9', 0), ('type', 'string', 'vim.version.version9', 0), ('vmInstanceUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vmNsObjectUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vm', 'vim.VirtualMachine', 'vim.version.version9', 0 | F_OPTIONAL), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanResourceHealth', 'VsanResourceHealth', 'vmodl.DynamicData', 'vim.version.version9', [('resource', 'string', 'vim.version.version9', 0), ('health', 'string', 'vim.version.version9', 0), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanCapability', 'VsanCapability', 'vmodl.DynamicData', 'vim.version.version10', [('target', 'vmodl.ManagedObject', 'vim.version.version10', 0 | F_OPTIONAL), ('capabilities', 'string[]', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanHostClomdLivenessResult', 'VsanHostClomdLivenessResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('clomdStat', 'string', 'vim.version.version9', 0), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanObjectQuerySpec', 'VsanObjectQuerySpec', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 
'string', 'vim.version.version9', 0), ('spbmProfileGenerationId', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterLimitHealthResult', 'VsanClusterLimitHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('issueFound', 'boolean', 'vim.version.version9', 0), ('componentLimitHealth', 'string', 'vim.version.version9', 0), ('diskFreeSpaceHealth', 'string', 'vim.version.version9', 0), ('rcFreeReservationHealth', 'string', 'vim.version.version9', 0), ('hostResults', 'vim.host.VsanLimitHealthResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('whatifHostFailures', 'vim.cluster.VsanClusterWhatifHostFailuresResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsCommFailure', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanStorageWorkloadType', 'VsanStorageWorkloadType', 'vmodl.DynamicData', 'vim.version.version9', [('specs', 'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0), ('typeId', 'string', 'vim.version.version9', 0), ('name', 'string', 'vim.version.version9', 0), ('description', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanClusterAdvCfgSyncHostResult', 'VsanClusterAdvCfgSyncHostResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('value', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.vsan.upgradesystem.ObjectPolicyIssue', 'VsanObjectPolicyIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('uuids', 'string[]', 'vim.version.version10', 0)]) +CreateDataType('vim.cluster.VsanPerfTopEntities', 'VsanPerfTopEntities', 'vmodl.DynamicData', 'vim.version.version9', [('metricId', 'vim.cluster.VsanPerfMetricId', 'vim.version.version9', 0), ('entities', 'vim.cluster.VsanPerfTopEntity[]', 'vim.version.version9', 0)]) +CreateDataType('vim.host.VsanProactiveRebalanceInfoEx', 'VsanProactiveRebalanceInfoEx', 'vmodl.DynamicData', 'vim.version.version9', 
[('running', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('startTs', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('stopTs', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('varianceThreshold', 'float', 'vim.version.version9', 0 | F_OPTIONAL), ('timeThreshold', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('rateThreshold', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterProactiveTestResult', 'VsanClusterProactiveTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('overallStatus', 'string', 'vim.version.version9', 0), ('overallStatusDescription', 'string', 'vim.version.version9', 0), ('timestamp', 'vmodl.DateTime', 'vim.version.version9', 0), ('healthTest', 'vim.cluster.VsanClusterHealthTest', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VSANCmmdsPreferredFaultDomainInfo', 'VimHostVSANCmmdsPreferredFaultDomainInfo', 'vmodl.DynamicData', 'vim.version.version10', [('preferredFaultDomainId', 'string', 'vim.version.version10', 0), ('preferredFaultDomainName', 'string', 'vim.version.version10', 0)]) +CreateDataType('vim.cluster.VsanFaultDomainsConfigSpec', 'VimClusterVsanFaultDomainsConfigSpec', 'vmodl.DynamicData', 'vim.version.version10', [('faultDomains', 'vim.cluster.VsanFaultDomainSpec[]', 'vim.version.version10', 0), ('witness', 'vim.cluster.VsanWitnessSpec', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHostVmknicMapping', 'VsanClusterHostVmknicMapping', 'vmodl.DynamicData', 'vim.version.version9', [('host', 'string', 'vim.version.version9', 0), ('vmknic', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanClusterVmdkLoadTestResult', 'VsanClusterVmdkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('task', 'vim.Task', 'vim.version.version9', 0 | 
F_OPTIONAL), ('clusterResult', 'vim.cluster.VsanClusterProactiveTestResult', 'vim.version.version9', 0 | F_OPTIONAL), ('hostResults', 'vim.host.VsanHostVmdkLoadTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterVMsHealthSummaryResult', 'VsanClusterVMsHealthSummaryResult', 'vmodl.DynamicData', 'vim.version.version9', [('numVMs', 'int', 'vim.version.version9', 0), ('state', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('health', 'string', 'vim.version.version9', 0), ('vmInstanceUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VSANStretchedClusterHostCapability', 'VimHostVSANStretchedClusterHostCapability', 'vmodl.DynamicData', 'vim.version.version10', [('featureVersion', 'string', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VsanFailedRepairObjectResult', 'VsanFailedRepairObjectResult', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 'string', 'vim.version.version9', 0), ('errMessage', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterCreateVmHealthTestResult', 'VsanClusterCreateVmHealthTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('clusterResult', 'vim.cluster.VsanClusterProactiveTestResult', 'vim.version.version9', 0), ('hostResults', 'vim.cluster.VsanHostCreateVmHealthTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanObjectHealth', 'VsanObjectHealth', 'vmodl.DynamicData', 'vim.version.version9', [('numObjects', 'int', 'vim.version.version9', 0), ('health', 'vim.host.VsanObjectHealthState', 'vim.version.version9', 0), ('objUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterBalanceSummary', 'VsanClusterBalanceSummary', 'vmodl.DynamicData', 'vim.version.version9', [('varianceThreshold', 'long', 'vim.version.version9', 0), ('disks', 'vim.cluster.VsanClusterBalancePerDiskInfo[]', 'vim.version.version9', 0 | F_OPTIONAL)]) 
+CreateDataType('vim.cluster.VsanClusterTelemetryProxyConfig', 'VsanClusterTelemetryProxyConfig', 'vmodl.DynamicData', 'vim.version.version9', [('host', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('port', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('user', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('password', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('autoDiscovered', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanVmdkIOLoadSpec', 'VsanVmdkIOLoadSpec', 'vmodl.DynamicData', 'vim.version.version9', [('readPct', 'int', 'vim.version.version9', 0), ('oio', 'int', 'vim.version.version9', 0), ('iosizeB', 'int', 'vim.version.version9', 0), ('dataSizeMb', 'long', 'vim.version.version9', 0), ('random', 'boolean', 'vim.version.version9', 0), ('startOffsetB', 'long', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanVsanPcapResult', 'VsanVsanPcapResult', 'vmodl.DynamicData', 'vim.version.version9', [('calltime', 'float', 'vim.version.version9', 0), ('vmknic', 'string', 'vim.version.version9', 0), ('tcpdumpFilter', 'string', 'vim.version.version9', 0), ('snaplen', 'int', 'vim.version.version9', 0), ('pkts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('pcap', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL), ('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterNetworkLoadTestResult', 'VsanClusterNetworkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('clusterResult', 'vim.cluster.VsanClusterProactiveTestResult', 'vim.version.version9', 0), ('hostResults', 'vim.host.VsanNetworkLoadTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.upgradesystem.HostPropertyRetrieveIssue', 'VsanHostPropertyRetrieveIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('hosts', 'vim.HostSystem[]', 
'vim.version.version10', 0)]) +CreateEnumType('vim.host.VsanObjectHealthState', 'VsanObjectHealthState', 'vim.version.version9', ['inaccessible' ,'reducedavailabilitywithnorebuild' ,'reducedavailabilitywithnorebuilddelaytimer' ,'reducedavailabilitywithactiverebuild' ,'datamove' ,'nonavailabilityrelatedreconfig' ,'nonavailabilityrelatedincompliance' ,'healthy' ,]) +CreateEnumType('vim.cluster.VsanObjectTypeEnum', 'VsanObjectTypeEnum', 'vim.version.version9', ['vmswap' ,'vdisk' ,'namespace' ,'vmem' ,'statsdb' ,'iscsi' ,'other' ,'fileSystemOverhead' ,'dedupOverhead' ,'checksumOverhead' ,]) +CreateEnumType('vim.cluster.VsanCapabilityType', 'VsanCapabilityType', 'vim.version.version10', ['capability' ,'allflash' ,'stretchedcluster' ,'dataefficiency' ,'clusterconfig' ,'upgrade' ,'objectidentities' ,]) +CreateEnumType('vim.cluster.VsanHealthLogLevelEnum', 'VsanHealthLogLevelEnum', 'vim.version.version9', ['INFO' ,'WARNING' ,'ERROR' ,'DEBUG' ,'CRITICAL' ,]) +CreateEnumType('vim.cluster.VsanPerfSummaryType', 'VsanPerfSummaryType', 'vim.version.version9', ['average' ,'maximum' ,'minimum' ,'latest' ,'summation' ,'none' ,]) +CreateEnumType('vim.cluster.StorageComplianceStatus', 'VsanStorageComplianceStatus', 'vim.version.version9', ['compliant' ,'nonCompliant' ,'unknown' ,'notApplicable' ,]) +CreateEnumType('vim.cluster.VsanPerfStatsUnitType', 'VsanPerfStatsUnitType', 'vim.version.version9', ['number' ,'time_ms' ,'percentage' ,'size_bytes' ,'rate_bytes' ,]) +CreateEnumType('vim.cluster.VsanPerfThresholdDirectionType', 'VsanPerfThresholdDirectionType', 'vim.version.version9', ['upper' ,'lower' ,]) +CreateEnumType('vim.cluster.VsanPerfStatsType', 'VsanPerfStatsType', 'vim.version.version9', ['absolute' ,'delta' ,'rate' ,]) +CreateEnumType('vim.vsan.host.DiskMappingCreationType', 'VimVsanHostDiskMappingCreationType', 'vim.version.version10', ['hybrid' ,'allFlash' ,]) +CreateEnumType('vim.cluster.VsanClusterHealthActionIdEnum', 'VsanClusterHealthActionIdEnum', 
'vim.version.version9', ['RepairClusterObjectsAction' ,'UploadHclDb' ,'UpdateHclDbFromInternet' ,'EnableHealthService' ,'DiskBalance' ,'StopDiskBalance' ,'RemediateDedup' ,'UpgradeVsanDiskFormat' ,]) +CreateEnumType('vim.cluster.VsanDiskGroupCreationType', 'VimClusterVsanDiskGroupCreationType', 'vim.version.version10', ['allflash' ,'hybrid' ,]) \ No newline at end of file From 88e445a0d0c87ee99aa5e3e39dd31ae6458b0f6a Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 05:34:15 -0400 Subject: [PATCH 514/639] Created utils.vsan and added dependencies and __virtual__ function --- salt/utils/vsan.py | 83 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) create mode 100644 salt/utils/vsan.py diff --git a/salt/utils/vsan.py b/salt/utils/vsan.py new file mode 100644 index 0000000000..5a02fa2cac --- /dev/null +++ b/salt/utils/vsan.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +''' +Connection library for VMware vSAN endpoint + +This library used the vSAN extension of the VMware SDK +used to manage vSAN related objects + +:codeauthor: Alexandru Bleotu + +Dependencies +~~~~~~~~~~~~ + +- pyVmomi Python Module + +pyVmomi +------- + +PyVmomi can be installed via pip: + +.. code-block:: bash + + pip install pyVmomi + +.. note:: + + versions of Python. If using version 6.0 of pyVmomi, Python 2.6, + Python 2.7.9, or newer must be present. This is due to an upstream dependency + in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the + version of Python is not in the supported range, you will need to install an + earlier version of pyVmomi. See `Issue #29537`_ for more information. + +.. _Issue #29537: https://github.com/saltstack/salt/issues/29537 + +Based on the note above, to install an earlier version of pyVmomi than the +version currently listed in PyPi, run the following: + +.. 
code-block:: bash + + pip install pyVmomi==5.5.0.2014.1.1 + +The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file +was developed against. +''' + +# Import Python Libs +from __future__ import absolute_import +import sys +import atexit +import logging +import time +import re +import ssl + +# Import Salt Libs +from salt.exceptions import VMwareApiError, VMwareRuntimeError +import salt.utils.vmware + +try: + from pyVmomi import VmomiSupport, SoapStubAdapter, vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +try: + from salt.ext.vsan import vsanmgmtObjects, vsanapiutils + HAS_PYVSAN = True +except ImportError: + HAS_PYVSAN = False + +# Get Logging Started +log = logging.getLogger(__name__) + + +def __virtual__(): + ''' + Only load if PyVmomi is installed. + ''' + if HAS_PYVSAN and HAS_PYVMOMI: + return True + else: + return False, 'Missing dependency: The salt.utils.vsan module ' \ + 'requires pyvmomi and the pyvsan extension library' From 3912e4e338def330cd7aa69fc0f17fe5e091cbae Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 05:37:18 -0400 Subject: [PATCH 515/639] Added utils.vsan.vsan_supported that returns whether the VSAN API is supported --- salt/utils/vsan.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/salt/utils/vsan.py b/salt/utils/vsan.py index 5a02fa2cac..244889b324 100644 --- a/salt/utils/vsan.py +++ b/salt/utils/vsan.py @@ -81,3 +81,28 @@ def __virtual__(): else: return False, 'Missing dependency: The salt.utils.vsan module ' \ 'requires pyvmomi and the pyvsan extension library' + + +def vsan_supported(service_instance): + ''' + Returns whether vsan is supported on the vCenter: + api version needs to be 6 or higher + + service_instance + Service instance to the host or vCenter + ''' + try: + api_version = service_instance.content.about.apiVersion + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough 
permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + if int(api_version.split('.')[0]) < 6: + return False + return True From a2f84e6f39a2fe312a5c0d86313688f9fb6fdcf7 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 06:20:12 -0400 Subject: [PATCH 516/639] Added tests for utils.vsan.vsan_supported --- tests/unit/utils/test_vsan.py | 78 +++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 tests/unit/utils/test_vsan.py diff --git a/tests/unit/utils/test_vsan.py b/tests/unit/utils/test_vsan.py new file mode 100644 index 0000000000..8f5749a08b --- /dev/null +++ b/tests/unit/utils/test_vsan.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Alexandru Bleotu ` + + Tests functions in salt.utils.vsan +''' + +# Import python libraries +from __future__ import absolute_import +import logging + +# Import Salt testing libraries +from tests.support.unit import TestCase, skipIf +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call, \ + PropertyMock + +# Import Salt libraries +from salt.exceptions import VMwareApiError, VMwareRuntimeError +from salt.utils import vsan + +try: + from pyVmomi import VmomiSupport, SoapStubAdapter, vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False +HAS_PYVSAN = vsan.HAS_PYVSAN + + +# Get Logging Started +log = logging.getLogger(__name__) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +@skipIf(not HAS_PYVSAN, 'The \'vsan\' ext library is missing') +class VsanSupportedTestCase(TestCase): + '''Tests for salt.utils.vsan.vsan_supported''' + + def test_supported_api_version(self): + mock_si = MagicMock(content=MagicMock(about=MagicMock())) + type(mock_si.content.about).apiVersion = 
\ + PropertyMock(return_value='6.0') + self.assertTrue(vsan.vsan_supported(mock_si)) + + def test_unsupported_api_version(self): + mock_si = MagicMock(content=MagicMock(about=MagicMock())) + type(mock_si.content.about).apiVersion = \ + PropertyMock(return_value='5.0') + self.assertFalse(vsan.vsan_supported(mock_si)) + + def test_api_version_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + mock_si = MagicMock(content=MagicMock(about=MagicMock())) + type(mock_si.content.about).apiVersion = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vsan.vsan_supported(mock_si) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_api_version_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + mock_si = MagicMock(content=MagicMock(about=MagicMock())) + type(mock_si.content.about).apiVersion = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + vsan.vsan_supported(mock_si) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_api_version_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + mock_si = MagicMock(content=MagicMock(about=MagicMock())) + type(mock_si.content.about).apiVersion = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + vsan.vsan_supported(mock_si) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') From fface2f38511c7582fdcfd636782108da8ce13e3 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 07:04:08 -0400 Subject: [PATCH 517/639] Added utils.vsan.get_vsan_cluster_config_system that retrieves the VSAN cluster configuration system --- salt/utils/vsan.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/salt/utils/vsan.py b/salt/utils/vsan.py index 244889b324..ac21544bef 100644 --- 
a/salt/utils/vsan.py +++ b/salt/utils/vsan.py @@ -106,3 +106,27 @@ def vsan_supported(service_instance): if int(api_version.split('.')[0]) < 6: return False return True + + +def get_vsan_cluster_config_system(service_instance): + ''' + Returns a vim.cluster.VsanVcClusterConfigSystem object + + service_instance + Service instance to the host or vCenter + ''' + + #TODO Replace when better connection mechanism is available + + #For python 2.7.9 and later, the defaul SSL conext has more strict + #connection handshaking rule. We may need turn of the hostname checking + #and client side cert verification + context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + stub = service_instance._stub + vc_mos = vsanapiutils.GetVsanVcMos(stub, context=context) + return vc_mos['vsan-cluster-config-system'] From e811eb9c853b6cd996635c5c42f995bf075f33cb Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 07:05:57 -0400 Subject: [PATCH 518/639] Added tests for utils.vsan.get_vsan_cluster_config_system --- tests/unit/utils/test_vsan.py | 61 ++++++++++++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/tests/unit/utils/test_vsan.py b/tests/unit/utils/test_vsan.py index 8f5749a08b..54c5df99e2 100644 --- a/tests/unit/utils/test_vsan.py +++ b/tests/unit/utils/test_vsan.py @@ -10,6 +10,7 @@ from __future__ import absolute_import import logging # Import Salt testing libraries +from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call, \ PropertyMock @@ -19,7 +20,7 @@ from salt.exceptions import VMwareApiError, VMwareRuntimeError from salt.utils import vsan try: - from pyVmomi import VmomiSupport, SoapStubAdapter, vim, vmodl + from pyVmomi import vim, vmodl HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = 
False @@ -76,3 +77,61 @@ class VsanSupportedTestCase(TestCase): with self.assertRaises(VMwareRuntimeError) as excinfo: vsan.vsan_supported(mock_si) self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +@skipIf(not HAS_PYVSAN, 'The \'vsan\' ext library is missing') +class GetVsanClusterConfigSystemTestCase(TestCase, LoaderModuleMockMixin): + '''Tests for salt.utils.vsan.get_vsan_cluster_config_system''' + def setup_loader_modules(self): + return {vsan: { + '__virtual__': MagicMock(return_value='vsan'), + 'sys': MagicMock(), + 'ssl': MagicMock()}} + + def setUp(self): + self.mock_si = MagicMock() + self.mock_ret = MagicMock() + patches = (('salt.utils.vsan.vsanapiutils.GetVsanVcMos', + MagicMock( + return_value={'vsan-cluster-config-system': + self.mock_ret})),) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + type(vsan.sys).version_info = PropertyMock(return_value=(2,7,9)) + self.mock_context = MagicMock() + self.mock_create_default_context = \ + MagicMock(return_value=self.mock_context) + vsan.ssl.create_default_context = self.mock_create_default_context + + def tearDown(self): + for attr in ('mock_si', 'mock_ret', 'mock_context', + 'mock_create_default_context'): + delattr(self, attr) + + def test_ssl_default_context_loaded(self): + vsan.get_vsan_cluster_config_system(self.mock_si) + self.mock_create_default_context.assert_called_once_with() + self.assertFalse(self.mock_context.check_hostname) + self.assertEqual(self.mock_context.verify_mode, vsan.ssl.CERT_NONE) + + def test_ssl_default_context_not_loaded(self): + type(vsan.sys).version_info = PropertyMock(return_value=(2,7,8)) + vsan.get_vsan_cluster_config_system(self.mock_si) + self.assertEqual(self.mock_create_default_context.call_count, 0) + + def test_GetVsanVcMos_call(self): + mock_get_vsan_vc_mos = MagicMock() + with 
patch('salt.utils.vsan.vsanapiutils.GetVsanVcMos', + mock_get_vsan_vc_mos): + vsan.get_vsan_cluster_config_system(self.mock_si) + mock_get_vsan_vc_mos.assert_called_once_with(self.mock_si._stub, + context=self.mock_context) + + def test_return(self): + ret = vsan.get_vsan_cluster_config_system(self.mock_si) + self.assertEqual(ret, self.mock_ret) From ed9469f6012da1dc1a42ed506844dc6f8b66b84b Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 07:48:12 -0400 Subject: [PATCH 519/639] Added utils.vsan.get_cluster_vsan_info that returns VSAN info about clusters --- salt/utils/vsan.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/salt/utils/vsan.py b/salt/utils/vsan.py index ac21544bef..5756f7f464 100644 --- a/salt/utils/vsan.py +++ b/salt/utils/vsan.py @@ -130,3 +130,32 @@ def get_vsan_cluster_config_system(service_instance): stub = service_instance._stub vc_mos = vsanapiutils.GetVsanVcMos(stub, context=context) return vc_mos['vsan-cluster-config-system'] + + +def get_cluster_vsan_info(cluster_ref): + ''' + Returns the extended cluster vsan configuration object + (vim.VsanConfigInfoEx). + + cluster_ref + Reference to the cluster + ''' + + cluster_name = salt.utils.vmware.get_managed_object_name(cluster_ref) + log.trace('Retrieving cluster vsan info of cluster ' + '\'{0}\''.format(cluster_name)) + si = salt.utils.vmware.get_service_instance_from_managed_object( + cluster_ref) + vsan_cl_conf_sys = get_vsan_cluster_config_system(si) + try: + return vsan_cl_conf_sys.VsanClusterGetConfig(cluster_ref) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) From 2c3b4434f8a267d234a42f95c8ad425f13ad1a8a Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 07:49:13 -0400 Subject: [PATCH 520/639] Added tests for utils.vsan.get_cluster_vsan_info --- tests/unit/utils/test_vsan.py | 81 +++++++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/tests/unit/utils/test_vsan.py b/tests/unit/utils/test_vsan.py index 54c5df99e2..ccd6e471e6 100644 --- a/tests/unit/utils/test_vsan.py +++ b/tests/unit/utils/test_vsan.py @@ -135,3 +135,84 @@ class GetVsanClusterConfigSystemTestCase(TestCase, LoaderModuleMockMixin): def test_return(self): ret = vsan.get_vsan_cluster_config_system(self.mock_si) self.assertEqual(ret, self.mock_ret) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +@skipIf(not HAS_PYVSAN, 'The \'vsan\' ext library is missing') +class GetClusterVsanInfoTestCase(TestCase, LoaderModuleMockMixin): + '''Tests for salt.utils.vsan.get_cluster_vsan_info''' + def setup_loader_modules(self): + return {vsan: { + '__virtual__': MagicMock(return_value='vsan')}} + + def setUp(self): + self.mock_cl_ref = MagicMock() + self.mock_si = MagicMock() + patches = ( + ('salt.utils.vmware.get_managed_object_name', MagicMock()), + ('salt.utils.vmware.get_service_instance_from_managed_object', + MagicMock(return_value=self.mock_si)), + ('salt.utils.vsan.get_vsan_cluster_config_system', MagicMock())) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_cl_ref'): + delattr(self, attr) + + def test_get_managed_object_name_call(self): + mock_get_managed_object_name = MagicMock() + with 
patch('salt.utils.vmware.get_managed_object_name', + mock_get_managed_object_name): + vsan.get_cluster_vsan_info(self.mock_cl_ref) + mock_get_managed_object_name.assert_called_once_with(self.mock_cl_ref) + + def test_get_vsan_cluster_config_system_call(self): + mock_get_vsan_cl_syst = MagicMock() + with patch('salt.utils.vsan.get_vsan_cluster_config_system', + mock_get_vsan_cl_syst): + vsan.get_cluster_vsan_info(self.mock_cl_ref) + mock_get_vsan_cl_syst.assert_called_once_with(self.mock_si) + + def test_VsanClusterGetConfig_call(self): + mock_vsan_sys = MagicMock() + with patch('salt.utils.vsan.get_vsan_cluster_config_system', + MagicMock(return_value=mock_vsan_sys)): + vsan.get_cluster_vsan_info(self.mock_cl_ref) + mock_vsan_sys.VsanClusterGetConfig.assert_called_once_with( + self.mock_cl_ref) + + def test_VsanClusterGetConfig_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + with patch('salt.utils.vsan.get_vsan_cluster_config_system', + MagicMock(return_value=MagicMock( + VsanClusterGetConfig=MagicMock(side_effect=exc)))): + with self.assertRaises(VMwareApiError) as excinfo: + vsan.get_cluster_vsan_info(self.mock_cl_ref) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_VsanClusterGetConfig_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + with patch('salt.utils.vsan.get_vsan_cluster_config_system', + MagicMock(return_value=MagicMock( + VsanClusterGetConfig=MagicMock(side_effect=exc)))): + with self.assertRaises(VMwareApiError) as excinfo: + vsan.get_cluster_vsan_info(self.mock_cl_ref) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_VsanClusterGetConfig_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + with patch('salt.utils.vsan.get_vsan_cluster_config_system', + MagicMock(return_value=MagicMock( + VsanClusterGetConfig=MagicMock(side_effect=exc)))): + with self.assertRaises(VMwareRuntimeError) as excinfo: + vsan.get_cluster_vsan_info(self.mock_cl_ref) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') From 07bcfd23aa6a13749800a355b813465912e31a8c Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 09:37:49 -0400 Subject: [PATCH 521/639] Fix email in salt.utils.vmware --- salt/utils/vmware.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index bfe9a14353..2caf08488f 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -8,7 +8,7 @@ This is a base library used by a number of VMware services such as VMware ESX, ESXi, and vCenter servers. 
:codeauthor: Nitin Madhok -:codeauthor: Alexandru Bleotu +:codeauthor: Alexandru Bleotu Dependencies ~~~~~~~~~~~~ From 719ea14f38bbaf34dfd62bb14f021c232b09f4ac Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 10:07:33 -0400 Subject: [PATCH 522/639] Added utils.vmware.get_service_info that retrieves about info about the service --- salt/utils/vmware.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 2caf08488f..6d2ff92e81 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -479,6 +479,28 @@ def is_connection_to_a_vcenter(service_instance): '\'VirtualCenter/HostAgent\''.format(api_type)) +def get_service_info(service_instance): + ''' + Returns information of the vCenter or ESXi host + + service_instance + The Service Instance from which to obtain managed object references. + ''' + try: + return service_instance.content.about + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + + def _get_dvs(service_instance, dvs_name): ''' Return a reference to a Distributed Virtual Switch object. 
From caf9a0578ba038d46ae8904e26c7e69031541af4 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 10:08:33 -0400 Subject: [PATCH 523/639] Added tests for utils.vmware.get_service_info --- tests/unit/utils/vmware/test_common.py | 47 ++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/tests/unit/utils/vmware/test_common.py b/tests/unit/utils/vmware/test_common.py index fd50ed12d0..01787db2ba 100644 --- a/tests/unit/utils/vmware/test_common.py +++ b/tests/unit/utils/vmware/test_common.py @@ -900,3 +900,50 @@ class GetRootFolderTestCase(TestCase): def test_return(self): ret = salt.utils.vmware.get_root_folder(self.mock_si) self.assertEqual(ret, self.mock_root_folder) + + +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetServiceInfoTestCase(TestCase): + '''Tests for salt.utils.vmware.get_service_info''' + def setUp(self): + self.mock_about = MagicMock() + self.mock_si = MagicMock(content=MagicMock()) + type(self.mock_si.content).about = \ + PropertyMock(return_value=self.mock_about) + + def tearDown(self): + for attr in ('mock_si', 'mock_about'): + delattr(self, attr) + + def test_about_ret(self): + ret = salt.utils.vmware.get_service_info(self.mock_si) + self.assertEqual(ret, self.mock_about) + + def test_about_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + type(self.mock_si.content).about = \ + PropertyMock(side_effect=exc) + with self.assertRaises(excs.VMwareApiError) as excinfo: + salt.utils.vmware.get_service_info(self.mock_si) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_about_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + type(self.mock_si.content).about = \ + PropertyMock(side_effect=exc) + with self.assertRaises(excs.VMwareApiError) as excinfo: + salt.utils.vmware.get_service_info(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_about_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + type(self.mock_si.content).about = \ + PropertyMock(side_effect=exc) + with self.assertRaises(excs.VMwareRuntimeError) as excinfo: + salt.utils.vmware.get_service_info(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') From d9d78fa35cb6122fd6f6ebf4331bef1b91f3574c Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 10:57:08 -0400 Subject: [PATCH 524/639] Added missing decorators in utils.vmware tests --- tests/unit/utils/vmware/test_common.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/unit/utils/vmware/test_common.py b/tests/unit/utils/vmware/test_common.py index 01787db2ba..5a946e8aa9 100644 --- a/tests/unit/utils/vmware/test_common.py +++ b/tests/unit/utils/vmware/test_common.py @@ -547,6 +547,8 @@ class GetPropertiesOfManagedObjectTestCase(TestCase): 'retrieved', excinfo.exception.strerror) +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') class GetManagedObjectName(TestCase): '''Tests for salt.utils.get_managed_object_name''' @@ -902,6 +904,7 @@ class GetRootFolderTestCase(TestCase): self.assertEqual(ret, self.mock_root_folder) +@skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') class GetServiceInfoTestCase(TestCase): '''Tests for salt.utils.vmware.get_service_info''' From ed7ac43d89365d248b5aa3831073575bd292d5cc Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 13 Sep 2017 14:08:44 -0400 Subject: [PATCH 525/639] 
Added required exceptions --- salt/exceptions.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/exceptions.py b/salt/exceptions.py index 7df94e17bb..6283ddf729 100644 --- a/salt/exceptions.py +++ b/salt/exceptions.py @@ -393,7 +393,13 @@ class TemplateError(SaltException): # Validation related exceptions class InvalidConfigError(CommandExecutionError): ''' - Used when the input is invalid + Used when the config is invalid + ''' + + +class ArgumentValueError(CommandExecutionError): + ''' + Used when an invalid argument was passed to a command execution ''' From 69607d1f7b056db8db04c415811c1909372ff096 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Thu, 14 Sep 2017 12:22:22 -0700 Subject: [PATCH 526/639] Making various options more clear, updating documentation and removing unnecessary imports. --- salt/modules/mount.py | 11 +++++++++-- salt/utils/mount.py | 9 +-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/salt/modules/mount.py b/salt/modules/mount.py index c5b85dae5f..fbcc2763ce 100644 --- a/salt/modules/mount.py +++ b/salt/modules/mount.py @@ -1289,17 +1289,24 @@ def write_mount_cache(real_name, device, mkmnt, fstype, - opts): + mount_opts): ''' .. versionadded:: Oxygen Provide information if the path is mounted + :param real_name: The real name of the mount point where the device is mounted. + :param device: The device that is being mounted. + :param mkmnt: Whether or not the mount point should be created. + :param fstype: The file system that is used. + :param mount_opts: Additional options used when mounting the device. + :return: Boolean if message was sent successfully. + CLI Example: .. 
code-block:: bash - salt '*' mount.write_mount_cache /mnt/share + salt '*' mount.write_mount_cache /mnt/share /dev/sda1 False ext4 defaults,nosuid ''' cache = salt.utils.mount.read_cache(__opts__) diff --git a/salt/utils/mount.py b/salt/utils/mount.py index ba782c63d5..09dbc8bfc1 100644 --- a/salt/utils/mount.py +++ b/salt/utils/mount.py @@ -10,8 +10,8 @@ import os import yaml # Import Salt libs -import salt.utils # Can be removed once is_true is moved import salt.utils.files +import salt.utils.stringutils import salt.utils.versions from salt.utils.yamldumper import SafeOrderedDumper @@ -19,13 +19,6 @@ from salt.utils.yamldumper import SafeOrderedDumper log = logging.getLogger(__name__) -def __virtual__(): - ''' - Confine this module to Debian based distros - ''' - return True - - def _read_file(path): ''' Reads and returns the contents of a text file From cbe3889dd86c9b7c1a2d69facfc2fee05bc4459d Mon Sep 17 00:00:00 2001 From: spenceation Date: Thu, 14 Sep 2017 15:37:09 -0400 Subject: [PATCH 527/639] - Added get_security_rule function to PANOS execution module. - Added security_rule_exists function to PANOS state module. --- salt/modules/panos.py | 24 ++ salt/states/panos.py | 572 ++++++++++++++++++++++++++++++++++++++---- 2 files changed, 543 insertions(+), 53 deletions(-) diff --git a/salt/modules/panos.py b/salt/modules/panos.py index 5543c18132..aecf93fffe 100644 --- a/salt/modules/panos.py +++ b/salt/modules/panos.py @@ -926,6 +926,30 @@ def get_platform(): return __proxy__['panos.call'](query) +def get_security_rule(rulename=None, vsys='1'): + ''' + Get the candidate configuration for the specified rule. + + rulename(str): The name of the security rule. + + vsys(str): The string representation of the VSYS ID. + + CLI Example: + + .. 
code-block:: bash + + salt '*' panos.get_security_rule rule01 + salt '*' panos.get_security_rule rule01 3 + + ''' + query = {'type': 'config', + 'action': 'get', + 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/' + 'rulebase/security/rules/entry[@name=\'{1}\']'.format(vsys, rulename)} + + return __proxy__['panos.call'](query) + + def get_session_info(): ''' Show device session statistics. diff --git a/salt/states/panos.py b/salt/states/panos.py index c453cace5c..f941ff157a 100644 --- a/salt/states/panos.py +++ b/salt/states/panos.py @@ -94,6 +94,24 @@ def __virtual__(): return 'panos.commit' in __salt__ +def _build_members(members, anycheck=False): + ''' + Builds a member formatted string for XML operation. + + ''' + if isinstance(members, list): + + # This check will strip down members to a single any statement + if anycheck and 'any' in members: + return "any" + response = "" + for m in members: + response += "{0}".format(m) + return response + else: + return "{0}".format(members) + + def _default_ret(name): ''' Set the default response values. @@ -109,6 +127,135 @@ def _default_ret(name): return ret +def _edit_config(xpath, element): + ''' + Sends an edit request to the device. + + ''' + query = {'type': 'config', + 'action': 'edit', + 'xpath': xpath, + 'element': element} + + response = __proxy__['panos.call'](query) + + return _validate_response(response) + + +def _get_config(xpath): + ''' + Retrieves an xpath from the device. + + ''' + query = {'type': 'config', + 'action': 'get', + 'xpath': xpath} + + response = __proxy__['panos.call'](query) + + return response + + +def _move_after(xpath, target): + ''' + Moves an xpath to the after of its section. 
+ + ''' + query = {'type': 'config', + 'action': 'move', + 'xpath': xpath, + 'where': 'after', + 'dst': target} + + response = __proxy__['panos.call'](query) + + return _validate_response(response) + + +def _move_before(xpath, target): + ''' + Moves an xpath to the bottom of its section. + + ''' + query = {'type': 'config', + 'action': 'move', + 'xpath': xpath, + 'where': 'before', + 'dst': target} + + response = __proxy__['panos.call'](query) + + return _validate_response(response) + + +def _move_bottom(xpath): + ''' + Moves an xpath to the bottom of its section. + + ''' + query = {'type': 'config', + 'action': 'move', + 'xpath': xpath, + 'where': 'bottom'} + + response = __proxy__['panos.call'](query) + + return _validate_response(response) + + +def _move_top(xpath): + ''' + Moves an xpath to the top of its section. + + ''' + query = {'type': 'config', + 'action': 'move', + 'xpath': xpath, + 'where': 'top'} + + response = __proxy__['panos.call'](query) + + return _validate_response(response) + + +def _set_config(xpath, element): + ''' + Sends a set request to the device. + + ''' + query = {'type': 'config', + 'action': 'set', + 'xpath': xpath, + 'element': element} + + response = __proxy__['panos.call'](query) + + return _validate_response(response) + + +def _validate_response(response): + ''' + Validates a response from a Palo Alto device. Used to verify success of commands. + + ''' + if not response: + return False, "Error during move configuration. Verify connectivity to device." + elif 'msg' in response: + if response['msg'] == 'command succeeded': + return True, response['msg'] + else: + return False, response['msg'] + elif 'line' in response: + if response['line'] == 'already at the top': + return True, response['line'] + elif response['line'] == 'already at the bottom': + return True, response['line'] + else: + return False, response['line'] + else: + return False, "Error during move configuration. Verify connectivity to device." 
+ + def add_config_lock(name): ''' Prevent other users from changing configuration until the lock is released. @@ -186,7 +333,7 @@ def clone_config(name, xpath=None, newname=None, commit=False): return ret -def commit(name): +def commit_config(name): ''' Commits the candidate configuration to the running configuration. @@ -197,7 +344,7 @@ def commit(name): .. code-block:: yaml panos/commit: - panos.commit + panos.commit_config ''' ret = _default_ret(name) @@ -338,6 +485,8 @@ def edit_config(name, xpath=None, value=None, commit=False): You can replace an existing object hierarchy at a specified location in the configuration with a new value. Use the xpath parameter to specify the location of the object, including the node to be replaced. + This is the recommended state to enforce configurations on a xpath. + name: The name of the module function to execute. xpath(str): The XPATH of the configuration API tree to control. @@ -359,24 +508,17 @@ def edit_config(name, xpath=None, value=None, commit=False): ''' ret = _default_ret(name) - if not xpath: - return ret - - if not value: - return ret - - query = {'type': 'config', - 'action': 'edit', - 'xpath': xpath, - 'element': value} - - response = __proxy__['panos.call'](query) + result, msg = _edit_config(xpath, value) ret.update({ - 'changes': response, - 'result': True + 'comment': msg, + 'result': result }) + # Ensure we do not commit after a failed action + if not result: + return ret + if commit is True: ret.update({ 'commit': __salt__['panos.commit'](), @@ -404,7 +546,8 @@ def move_config(name, xpath=None, where=None, dst=None, commit=False): dst(str): Optional. Specifies the destination to utilize for a move action. This is ignored for the top or bottom action. - commit(bool): If true the firewall will commit the changes, if false do not commit changes. + commit(bool): If true the firewall will commit the changes, if false do not commit changes. If the operation is + not successful, it will not commit. 
SLS Example: @@ -433,35 +576,21 @@ def move_config(name, xpath=None, where=None, dst=None, commit=False): return ret if where == 'after': - query = {'type': 'config', - 'action': 'move', - 'xpath': xpath, - 'where': 'after', - 'dst': dst} + result, msg = _move_after(xpath, dst) elif where == 'before': - query = {'type': 'config', - 'action': 'move', - 'xpath': xpath, - 'where': 'before', - 'dst': dst} + result, msg = _move_before(xpath, dst) elif where == 'top': - query = {'type': 'config', - 'action': 'move', - 'xpath': xpath, - 'where': 'top'} + result, msg = _move_top(xpath) elif where == 'bottom': - query = {'type': 'config', - 'action': 'move', - 'xpath': xpath, - 'where': 'bottom'} - - response = __proxy__['panos.call'](query) + result, msg = _move_bottom(xpath) ret.update({ - 'changes': response, - 'result': True + 'result': result }) + if not result: + return ret + if commit is True: ret.update({ 'commit': __salt__['panos.commit'](), @@ -547,6 +676,350 @@ def rename_config(name, xpath=None, newname=None, commit=False): return ret +def security_rule_exists(name, + rulename=None, + vsys='1', + action=None, + disabled=None, + sourcezone=None, + destinationzone=None, + source=None, + destination=None, + application=None, + service=None, + description=None, + logsetting=None, + logstart=None, + logend=None, + negatesource=None, + negatedestination=None, + profilegroup=None, + datafilter=None, + fileblock=None, + spyware=None, + urlfilter=None, + virus=None, + vulnerability=None, + wildfire=None, + move=None, + movetarget=None, + commit=False): + ''' + Ensures that a security rule exists on the device. Also, ensure that all configurations are set appropriately. + + This method will create the rule if it does not exist. If the rule does exist, it will ensure that the + configurations are set appropriately. + + If the rule does not exist and is created, any value that is not provided will be provided as the default. 
+ The action, to, from, source, destination, application, and service fields are mandatory and must be provided. + + This will enforce the exact match of the rule. For example, if the rule is currently configured with the log-end + option, but this option is not specified in the state method, it will be removed and reset to the system default. + + It is strongly recommended to specify all options to ensure proper operation. + + When defining the profile group settings, the device can only support either a profile group or individual settings. + If both are specified, the profile group will be preferred and the individual settings are ignored. If neither are + specified, the value will be set to system default of none. + + name: The name of the module function to execute. + + rulename(str): The name of the security rule. The name is case-sensitive and can have up to 31 characters, which + can be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on Panorama, + unique within its device group and any ancestor or descendant device groups. + + vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1. + + action(str): The action that the security rule will enforce. Valid options are: allow, deny, drop, reset-client, + reset-server, reset-both. + + disabled(bool): Controls if the rule is disabled. Set 'True' to disable and 'False' to enable. + + sourcezone(str, list): The source zone(s). The value 'any' will match all zones. + + destinationzone(str, list): The destination zone(s). The value 'any' will match all zones. + + source(str, list): The source address(es). The value 'any' will match all addresses. + + destination(str, list): The destination address(es). The value 'any' will match all addresses. + + application(str, list): The application(s) matched. The value 'any' will match all applications. + + service(str, list): The service(s) matched. The value 'any' will match all services. 
The value + 'application-default' will match based upon the application defined ports. + + description(str): A description for the policy (up to 255 characters). + + logsetting(str): The name of a valid log forwarding profile. + + logstart(bool): Generates a traffic log entry for the start of a session (disabled by default). + + logend(bool): Generates a traffic log entry for the end of a session (enabled by default). + + negatesource(bool): Match all but the specified source addresses. + + negatedestination(bool): Match all but the specified destination addresses. + + profilegroup(str): A valid profile group name. + + datafilter(str): A valid data filter profile name. Ignored with the profilegroup option set. + + fileblock(str): A valid file blocking profile name. Ignored with the profilegroup option set. + + spyware(str): A valid spyware profile name. Ignored with the profilegroup option set. + + urlfilter(str): A valid URL filtering profile name. Ignored with the profilegroup option set. + + virus(str): A valid virus profile name. Ignored with the profilegroup option set. + + vulnerability(str): A valid vulnerability profile name. Ignored with the profilegroup option set. + + wildfire(str): A valid vulnerability profile name. Ignored with the profilegroup option set. + + move(str): An optional argument that ensure the rule is moved to a specific location. Valid options are 'top', + 'bottom', 'before', or 'after'. The 'before' and 'after' options require the use of the 'movetarget' argument + to define the location of the move request. + + movetarget(str): An optional argument that defines the target of the move operation if the move argument is + set to 'before' or 'after'. + + commit(bool): If true the firewall will commit the changes, if false do not commit changes. + + SLS Example: + + .. 
code-block:: yaml + + panos/rulebase/security/rule01: + panos.security_rule_exists: + - rulename: rule01 + - vsys: 1 + - action: allow + - disabled: False + - sourcezone: untrust + - destinationzone: trust + - source: + - 10.10.10.0/24 + - 1.1.1.1 + - destination: + - 2.2.2.2-2.2.2.4 + - application: + - any + - service: + - tcp-25 + - description: My test security rule + - logsetting: logprofile + - logstart: False + - logend: True + - negatesource: False + - negatedestination: False + - profilegroup: myprofilegroup + - move: top + - commit: False + + panos/rulebase/security/rule01: + panos.security_rule_exists: + - rulename: rule01 + - vsys: 1 + - action: allow + - disabled: False + - sourcezone: untrust + - destinationzone: trust + - source: + - 10.10.10.0/24 + - 1.1.1.1 + - destination: + - 2.2.2.2-2.2.2.4 + - application: + - any + - service: + - tcp-25 + - description: My test security rule + - logsetting: logprofile + - logstart: False + - logend: False + - datafilter: foobar + - fileblock: foobar + - spyware: foobar + - urlfilter: foobar + - virus: foobar + - vulnerability: foobar + - wildfire: foobar + - move: after + - movetarget: rule02 + - commit: False + ''' + ret = _default_ret(name) + + if not rulename: + return ret + + # Check if rule currently exists + rule = __salt__['panos.get_security_rule'](rulename, vsys) + + # Build the rule element + element = "" + if sourcezone: + element += "{0}".format(_build_members(sourcezone, True)) + else: + ret.update({'comment': "The sourcezone field must be provided."}) + return ret + + if destinationzone: + element += "{0}".format(_build_members(destinationzone, True)) + else: + ret.update({'comment': "The destinationzone field must be provided."}) + return ret + + if source: + element += "{0}".format(_build_members(source, True)) + else: + ret.update({'comment': "The source field must be provided."}) + return + + if destination: + element += "{0}".format(_build_members(destination, True)) + else: + 
ret.update({'comment': "The destination field must be provided."}) + return ret + + if application: + element += "{0}".format(_build_members(application, True)) + else: + ret.update({'comment': "The application field must be provided."}) + return ret + + if service: + element += "{0}".format(_build_members(service, True)) + else: + ret.update({'comment': "The service field must be provided."}) + return ret + + if action: + element += "{0}".format(action) + else: + ret.update({'comment': "The action field must be provided."}) + return ret + + if disabled is not None: + if disabled: + element += "yes" + else: + element += "no" + + if description: + element += "{0}".format(description) + + if logsetting: + element += "{0}".format(logsetting) + + if logstart is not None: + if logstart: + element += "yes" + else: + element += "no" + + if logend is not None: + if logend: + element += "yes" + else: + element += "no" + + if negatesource is not None: + if negatesource: + element += "yes" + else: + element += "no" + + if negatedestination is not None: + if negatedestination: + element += "yes" + else: + element += "no" + + # Build the profile settings + profile_string = None + if profilegroup: + profile_string = "{0}".format(profilegroup) + else: + member_string = "" + if datafilter: + member_string += "{0}".format(datafilter) + if fileblock: + member_string += "{0}".format(fileblock) + if spyware: + member_string += "{0}".format(spyware) + if urlfilter: + member_string += "{0}".format(urlfilter) + if virus: + member_string += "{0}".format(virus) + if vulnerability: + member_string += "{0}".format(vulnerability) + if wildfire: + member_string += "{0}".format(wildfire) + if member_string != "": + profile_string = "{0}".format(member_string) + + if profile_string: + element += "{0}".format(profile_string) + + full_element = "{1}".format(rulename, element) + + create_rule = False + + if 'result' in rule: + if rule['result'] == "None": + create_rule = True + + if create_rule: + 
xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/rulebase/" \ + "security/rules".format(vsys) + + result, msg = _set_config(xpath, full_element) + if not result: + ret['changes']['set'] = msg + return ret + else: + xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/rulebase/" \ + "security/rules/entry[@name=\'{1}\']".format(vsys, rulename) + + result, msg = _edit_config(xpath, full_element) + if not result: + ret['changes']['edit'] = msg + return ret + + if move: + movepath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/rulebase/" \ + "security/rules/entry[@name=\'{1}\']".format(vsys, rulename) + move_result = False + move_msg = '' + if move == "before" and movetarget: + move_result, move_msg = _move_before(movepath, movetarget) + elif move == "after": + move_result, move_msg = _move_after(movepath, movetarget) + elif move == "top": + move_result, move_msg = _move_top(movepath) + elif move == "bottom": + move_result, move_msg = _move_bottom(movepath) + + if not move_result: + ret['changes']['move'] = move_msg + return ret + + if commit is True: + ret.update({ + 'commit': __salt__['panos.commit'](), + 'comment': 'Security rule verified successfully.', + 'result': True + }) + else: + ret.update({ + 'comment': 'Security rule verified successfully.', + 'result': True + }) + + return ret + + def set_config(name, xpath=None, value=None, commit=False): ''' Sets a Palo Alto XPATH to a specific value. 
This will always overwrite the existing value, even if it is not @@ -576,24 +1049,17 @@ def set_config(name, xpath=None, value=None, commit=False): ''' ret = _default_ret(name) - if not xpath: - return ret - - if not value: - return ret - - query = {'type': 'config', - 'action': 'set', - 'xpath': xpath, - 'element': value} - - response = __proxy__['panos.call'](query) + result, msg = _set_config(xpath, value) ret.update({ - 'changes': response, - 'result': True + 'comment': msg, + 'result': result }) + # Ensure we do not commit after a failed action + if not result: + return ret + if commit is True: ret.update({ 'commit': __salt__['panos.commit'](), From e8e7b6cacd2d7f2b4b224863c9578349adb3b36a Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Thu, 14 Sep 2017 12:38:38 -0700 Subject: [PATCH 528/639] Better approach, less duplicated code. --- salt/modules/mount.py | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/salt/modules/mount.py b/salt/modules/mount.py index fbcc2763ce..9463283a4c 100644 --- a/salt/modules/mount.py +++ b/salt/modules/mount.py @@ -1310,25 +1310,17 @@ def write_mount_cache(real_name, ''' cache = salt.utils.mount.read_cache(__opts__) - if cache: - if 'mounts' in cache: - cache['mounts'][real_name] = {'device': device, - 'fstype': fstype, - 'mkmnt': mkmnt, - 'opts': opts} - else: - cache['mounts'] = {} - cache['mounts'][real_name] = {'device': device, - 'fstype': fstype, - 'mkmnt': mkmnt, - 'opts': opts} - else: + if not cache: cache = {} cache['mounts'] = {} - cache['mounts'][real_name] = {'device': device, - 'fstype': fstype, - 'mkmnt': mkmnt, - 'opts': opts} + else: + if 'mounts' not in cache: + cache['mounts'] = {} + + cache['mounts'][real_name] = {'device': device, + 'fstype': fstype, + 'mkmnt': mkmnt, + 'opts': mount_opts} cache_write = salt.utils.mount.write_cache(cache, __opts__) if cache_write: From 4421744882e0bbe3b06f583276b2fd963bfd6340 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu 
Date: Thu, 14 Sep 2017 16:22:31 -0400 Subject: [PATCH 529/639] pylint on own code --- salt/ext/vsan/__init__.py | 4 ++-- salt/modules/vsphere.py | 4 ++-- salt/utils/vsan.py | 9 +++------ tests/unit/utils/test_vsan.py | 6 +++--- 4 files changed, 10 insertions(+), 13 deletions(-) diff --git a/salt/ext/vsan/__init__.py b/salt/ext/vsan/__init__.py index 84c0a7eb58..cf17f9d75e 100644 --- a/salt/ext/vsan/__init__.py +++ b/salt/ext/vsan/__init__.py @@ -1,7 +1,7 @@ # coding: utf-8 -*- ''' This directory contains the object model and utils for the vsan VMware SDK -extension. +extension. -They are governed under their respective licenses. +They are governed under their respective licenses. ''' diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 99289326ef..c0745a4a59 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -3721,7 +3721,7 @@ def _get_cluster_dict(cluster_name, cluster_ref): # XXX The correct way of retrieving the VSAN data (on the if branch) # is not supported before 60u2 vcenter vcenter_info = salt.utils.vmware.get_service_info(si) - if int(vcenter_info.build) >= 3634794: # 60u2 + if int(vcenter_info.build) >= 3634794: # 60u2 # VSAN API is fully supported by the VC starting with 60u2 vsan_conf = salt.utils.vsan.get_cluster_vsan_info(cluster_ref) log.trace('vsan_conf = {0}'.format(vsan_conf)) @@ -3735,7 +3735,7 @@ def _get_cluster_dict(cluster_name, cluster_ref): 'compression_enabled': data_eff.compressionEnabled or False, 'dedup_enabled': data_eff.dedupEnabled}) - else: # before 60u2 (no advanced vsan info) + else: # before 60u2 (no advanced vsan info) if props['configurationEx'].vsanConfigInfo: default_config = \ props['configurationEx'].vsanConfigInfo.defaultConfig diff --git a/salt/utils/vsan.py b/salt/utils/vsan.py index 5756f7f464..1b9f796efe 100644 --- a/salt/utils/vsan.py +++ b/salt/utils/vsan.py @@ -45,10 +45,7 @@ was developed against. 
# Import Python Libs from __future__ import absolute_import import sys -import atexit import logging -import time -import re import ssl # Import Salt Libs @@ -56,14 +53,14 @@ from salt.exceptions import VMwareApiError, VMwareRuntimeError import salt.utils.vmware try: - from pyVmomi import VmomiSupport, SoapStubAdapter, vim, vmodl + from pyVmomi import vim, vmodl HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False try: - from salt.ext.vsan import vsanmgmtObjects, vsanapiutils + from salt.ext.vsan import vsanapiutils HAS_PYVSAN = True except ImportError: HAS_PYVSAN = False @@ -122,7 +119,7 @@ def get_vsan_cluster_config_system(service_instance): #connection handshaking rule. We may need turn of the hostname checking #and client side cert verification context = None - if sys.version_info[:3] > (2,7,8): + if sys.version_info[:3] > (2, 7, 8): context = ssl.create_default_context() context.check_hostname = False context.verify_mode = ssl.CERT_NONE diff --git a/tests/unit/utils/test_vsan.py b/tests/unit/utils/test_vsan.py index ccd6e471e6..197ba517de 100644 --- a/tests/unit/utils/test_vsan.py +++ b/tests/unit/utils/test_vsan.py @@ -12,7 +12,7 @@ import logging # Import Salt testing libraries from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase, skipIf -from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call, \ +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, \ PropertyMock # Import Salt libraries @@ -102,7 +102,7 @@ class GetVsanClusterConfigSystemTestCase(TestCase, LoaderModuleMockMixin): patcher.start() self.addCleanup(patcher.stop) - type(vsan.sys).version_info = PropertyMock(return_value=(2,7,9)) + type(vsan.sys).version_info = PropertyMock(return_value=(2, 7, 9)) self.mock_context = MagicMock() self.mock_create_default_context = \ MagicMock(return_value=self.mock_context) @@ -120,7 +120,7 @@ class GetVsanClusterConfigSystemTestCase(TestCase, LoaderModuleMockMixin): 
self.assertEqual(self.mock_context.verify_mode, vsan.ssl.CERT_NONE) def test_ssl_default_context_not_loaded(self): - type(vsan.sys).version_info = PropertyMock(return_value=(2,7,8)) + type(vsan.sys).version_info = PropertyMock(return_value=(2, 7, 8)) vsan.get_vsan_cluster_config_system(self.mock_si) self.assertEqual(self.mock_create_default_context.call_count, 0) From 3bd158f2ed3b6a00b89692d90f02b29b8ad144d1 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 16:23:14 -0400 Subject: [PATCH 530/639] Removed vsanmgmtObjects ext vsan library --- salt/ext/vsan/vsanmgmtObjects.py | 142 ------------------------------- 1 file changed, 142 deletions(-) delete mode 100644 salt/ext/vsan/vsanmgmtObjects.py diff --git a/salt/ext/vsan/vsanmgmtObjects.py b/salt/ext/vsan/vsanmgmtObjects.py deleted file mode 100644 index ebad265adb..0000000000 --- a/salt/ext/vsan/vsanmgmtObjects.py +++ /dev/null @@ -1,142 +0,0 @@ -from pyVmomi.VmomiSupport import CreateDataType, CreateManagedType, CreateEnumType, AddVersion, AddVersionParent, F_LINK, F_LINKABLE, F_OPTIONAL - -CreateManagedType('vim.cluster.VsanPerformanceManager', 'VsanPerformanceManager', 'vmodl.ManagedObject', 'vim.version.version9', [], [('setStatsObjectPolicy', 'VsanPerfSetStatsObjectPolicy', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('profile', 'vim.vm.ProfileSpec', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('deleteStatsObject', 'VsanPerfDeleteStatsObject', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('createStatsObjectTask', 'VsanPerfCreateStatsObjectTask', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('profile', 'vim.vm.ProfileSpec', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 
'System.Read', None), ('deleteStatsObjectTask', 'VsanPerfDeleteStatsObjectTask', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('queryClusterHealth', 'VsanPerfQueryClusterHealth', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'vmodl.DynamicData[]', 'vmodl.DynamicData[]'), 'System.Read', None), ('queryStatsObjectInformation', 'VsanPerfQueryStatsObjectInformation', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanObjectInformation', 'vim.cluster.VsanObjectInformation'), 'System.Read', None), ('queryNodeInformation', 'VsanPerfQueryNodeInformation', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanPerfNodeInformation[]', 'vim.cluster.VsanPerfNodeInformation[]'), 'System.Read', None), ('queryVsanPerf', 'VsanPerfQueryPerf', 'vim.version.version9', (('querySpecs', 'vim.cluster.VsanPerfQuerySpec[]', 'vim.version.version9', 0, None), ('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanPerfEntityMetricCSV[]', 'vim.cluster.VsanPerfEntityMetricCSV[]'), 'System.Read', None), ('getSupportedEntityTypes', 'VsanPerfGetSupportedEntityTypes', 'vim.version.version9', tuple(), (0 | F_OPTIONAL, 'vim.cluster.VsanPerfEntityType[]', 'vim.cluster.VsanPerfEntityType[]'), 'System.Read', None), ('createStatsObject', 'VsanPerfCreateStatsObject', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('profile', 'vim.vm.ProfileSpec', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'string', 'string'), 'System.Read', None), ]) -CreateManagedType('vim.cluster.VsanVcDiskManagementSystem', 'VimClusterVsanVcDiskManagementSystem', 
'vmodl.ManagedObject', 'vim.version.version10', [], [('initializeDiskMappings', 'InitializeDiskMappings', 'vim.version.version10', (('spec', 'vim.vsan.host.DiskMappingCreationSpec', 'vim.version.version10', 0, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('retrieveAllFlashCapabilities', 'RetrieveAllFlashCapabilities', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0 | F_OPTIONAL, 'vim.vsan.host.VsanHostCapability[]', 'vim.vsan.host.VsanHostCapability[]'), 'System.Read', None), ('queryDiskMappings', 'QueryDiskMappings', 'vim.version.version10', (('host', 'vim.HostSystem', 'vim.version.version10', 0, None), ), (0 | F_OPTIONAL, 'vim.vsan.host.DiskMapInfoEx[]', 'vim.vsan.host.DiskMapInfoEx[]'), 'System.Read', None), ]) -CreateManagedType('vim.cluster.VsanObjectSystem', 'VsanObjectSystem', 'vmodl.ManagedObject', 'vim.version.version9', [], [('setVsanObjectPolicy', 'VosSetVsanObjectPolicy', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('vsanObjectUuid', 'string', 'vim.version.version9', 0, None), ('profile', 'vim.vm.ProfileSpec', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('queryObjectIdentities', 'VsanQueryObjectIdentities', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('objUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeHealth', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeObjIdentity', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeSpaceSummary', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanObjectIdentityAndHealth', 'vim.cluster.VsanObjectIdentityAndHealth'), 'System.Read', None), ('queryVsanObjectInformation', 'VosQueryVsanObjectInformation', 'vim.version.version9', (('cluster', 
'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('vsanObjectQuerySpecs', 'vim.cluster.VsanObjectQuerySpec[]', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanObjectInformation[]', 'vim.cluster.VsanObjectInformation[]'), 'System.Read', None), ]) -CreateManagedType('vim.host.VsanStretchedClusterSystem', 'VimHostVsanStretchedClusterSystem', 'vmodl.ManagedObject', 'vim.version.version10', [], [('getStretchedClusterInfoFromCmmds', 'VSANHostGetStretchedClusterInfoFromCmmds', 'vim.version.version10', tuple(), (0 | F_OPTIONAL, 'vim.host.VSANStretchedClusterHostInfo[]', 'vim.host.VSANStretchedClusterHostInfo[]'), 'System.Read', None), ('witnessJoinVsanCluster', 'VSANWitnessJoinVsanCluster', 'vim.version.version10', (('clusterUuid', 'string', 'vim.version.version10', 0, None), ('preferredFd', 'string', 'vim.version.version10', 0, None), ('disableVsanAllowed', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'void', 'void'), 'System.Read', None), ('witnessSetPreferredFaultDomain', 'VSANWitnessSetPreferredFaultDomain', 'vim.version.version10', (('preferredFd', 'string', 'vim.version.version10', 0, None), ), (0, 'void', 'void'), 'System.Read', None), ('addUnicastAgent', 'VSANHostAddUnicastAgent', 'vim.version.version10', (('witnessAddress', 'string', 'vim.version.version10', 0, None), ('witnessPort', 'int', 'vim.version.version10', 0 | F_OPTIONAL, None), ('overwrite', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'void', 'void'), 'System.Read', None), ('clusterGetPreferredFaultDomain', 'VSANClusterGetPreferredFaultDomain', 'vim.version.version10', tuple(), (0 | F_OPTIONAL, 'vim.host.VSANCmmdsPreferredFaultDomainInfo', 'vim.host.VSANCmmdsPreferredFaultDomainInfo'), 'System.Read', None), ('witnessLeaveVsanCluster', 'VSANWitnessLeaveVsanCluster', 'vim.version.version10', tuple(), (0, 'void', 'void'), 'System.Read', None), ('getStretchedClusterCapability', 'VSANHostGetStretchedClusterCapability', 
'vim.version.version10', tuple(), (0, 'vim.host.VSANStretchedClusterHostCapability', 'vim.host.VSANStretchedClusterHostCapability'), 'System.Read', None), ('removeUnicastAgent', 'VSANHostRemoveUnicastAgent', 'vim.version.version10', (('witnessAddress', 'string', 'vim.version.version10', 0, None), ('ignoreExistence', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'void', 'void'), 'System.Read', None), ('listUnicastAgent', 'VSANHostListUnicastAgent', 'vim.version.version10', tuple(), (0, 'string', 'string'), 'System.Read', None), ]) -CreateManagedType('vim.VsanUpgradeSystemEx', 'VsanUpgradeSystemEx', 'vmodl.ManagedObject', 'vim.version.version10', [], [('performUpgrade', 'PerformVsanUpgradeEx', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('performObjectUpgrade', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ('downgradeFormat', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ('allowReducedRedundancy', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ('excludeHosts', 'vim.HostSystem[]', 'vim.version.version10', 0 | F_OPTIONAL, None), ('spec', 'vim.cluster.VsanDiskFormatConversionSpec', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('performUpgradePreflightCheck', 'PerformVsanUpgradePreflightCheckEx', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('downgradeFormat', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ('spec', 'vim.cluster.VsanDiskFormatConversionSpec', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanDiskFormatConversionCheckResult', 'vim.cluster.VsanDiskFormatConversionCheckResult'), 'System.Read', None), ('retrieveSupportedFormatVersion', 'RetrieveSupportedVsanFormatVersion', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0, 'int', 'int'), 
'System.Read', None), ]) -CreateManagedType('vim.cluster.VsanCapabilitySystem', 'VsanCapabilitySystem', 'vmodl.ManagedObject', 'vim.version.version10', [], [('getCapabilities', 'VsanGetCapabilities', 'vim.version.version10', (('targets', 'vmodl.ManagedObject[]', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanCapability[]', 'vim.cluster.VsanCapability[]'), 'System.Read', None), ]) -CreateManagedType('vim.cluster.VsanSpaceReportSystem', 'VsanSpaceReportSystem', 'vmodl.ManagedObject', 'vim.version.version9', [], [('querySpaceUsage', 'VsanQuerySpaceUsage', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanSpaceUsage', 'vim.cluster.VsanSpaceUsage'), 'System.Read', None), ]) -CreateManagedType('vim.cluster.VsanVcClusterConfigSystem', 'VsanVcClusterConfigSystem', 'vmodl.ManagedObject', 'vim.version.version10', [], [('getConfigInfoEx', 'VsanClusterGetConfig', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0, 'vim.vsan.ConfigInfoEx', 'vim.vsan.ConfigInfoEx'), 'System.Read', None), ('reconfigureEx', 'VsanClusterReconfig', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('vsanReconfigSpec', 'vim.vsan.ReconfigSpec', 'vim.version.version10', 0, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ]) -CreateManagedType('vim.host.VsanHealthSystem', 'HostVsanHealthSystem', 'vmodl.ManagedObject', 'vim.version.version9', [], [('queryAdvCfg', 'VsanHostQueryAdvCfg', 'vim.version.version9', (('options', 'string[]', 'vim.version.version9', 0, None), ), (0, 'vim.option.OptionValue[]', 'vim.option.OptionValue[]'), 'System.Read', None), ('queryPhysicalDiskHealthSummary', 'VsanHostQueryPhysicalDiskHealthSummary', 'vim.version.version9', tuple(), (0, 'vim.host.VsanPhysicalDiskHealthSummary', 'vim.host.VsanPhysicalDiskHealthSummary'), 'System.Read', None), 
('startProactiveRebalance', 'VsanStartProactiveRebalance', 'vim.version.version9', (('timeSpan', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('varianceThreshold', 'float', 'vim.version.version9', 0 | F_OPTIONAL, None), ('timeThreshold', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('rateThreshold', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('queryHostInfoByUuids', 'VsanHostQueryHostInfoByUuids', 'vim.version.version9', (('uuids', 'string[]', 'vim.version.version9', 0, None), ), (0, 'vim.host.VsanQueryResultHostInfo[]', 'vim.host.VsanQueryResultHostInfo[]'), 'System.Read', None), ('queryVersion', 'VsanHostQueryHealthSystemVersion', 'vim.version.version9', tuple(), (0, 'string', 'string'), 'System.Read', None), ('queryVerifyNetworkSettings', 'VsanHostQueryVerifyNetworkSettings', 'vim.version.version9', (('peers', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.host.VsanNetworkHealthResult', 'vim.host.VsanNetworkHealthResult'), 'System.Read', None), ('queryRunIperfClient', 'VsanHostQueryRunIperfClient', 'vim.version.version9', (('multicast', 'boolean', 'vim.version.version9', 0, None), ('serverIp', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.host.VsanNetworkLoadTestResult', 'vim.host.VsanNetworkLoadTestResult'), 'System.Read', None), ('runVmdkLoadTest', 'VsanHostRunVmdkLoadTest', 'vim.version.version9', (('runname', 'string', 'vim.version.version9', 0, None), ('durationSec', 'int', 'vim.version.version9', 0, None), ('specs', 'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0, None), ), (0, 'vim.host.VsanVmdkLoadTestResult[]', 'vim.host.VsanVmdkLoadTestResult[]'), 'System.Read', None), ('queryObjectHealthSummary', 'VsanHostQueryObjectHealthSummary', 'vim.version.version9', (('objUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeObjUuids', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('localHostOnly', 
'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.host.VsanObjectOverallHealth', 'vim.host.VsanObjectOverallHealth'), 'System.Read', None), ('getHclInfo', 'VsanGetHclInfo', 'vim.version.version9', tuple(), (0, 'vim.host.VsanHostHclInfo', 'vim.host.VsanHostHclInfo'), 'System.Read', None), ('cleanupVmdkLoadTest', 'VsanHostCleanupVmdkLoadTest', 'vim.version.version9', (('runname', 'string', 'vim.version.version9', 0, None), ('specs', 'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'string', 'string'), 'System.Read', None), ('waitForVsanHealthGenerationIdChange', 'VsanWaitForVsanHealthGenerationIdChange', 'vim.version.version9', (('timeout', 'int', 'vim.version.version9', 0, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('stopProactiveRebalance', 'VsanStopProactiveRebalance', 'vim.version.version9', tuple(), (0, 'boolean', 'boolean'), 'System.Read', None), ('repairImmediateObjects', 'VsanHostRepairImmediateObjects', 'vim.version.version9', (('uuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('repairType', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.host.VsanRepairObjectsResult', 'vim.host.VsanRepairObjectsResult'), 'System.Read', None), ('prepareVmdkLoadTest', 'VsanHostPrepareVmdkLoadTest', 'vim.version.version9', (('runname', 'string', 'vim.version.version9', 0, None), ('specs', 'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0, None), ), (0, 'string', 'string'), 'System.Read', None), ('queryRunIperfServer', 'VsanHostQueryRunIperfServer', 'vim.version.version9', (('multicast', 'boolean', 'vim.version.version9', 0, None), ('serverIp', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.host.VsanNetworkLoadTestResult', 'vim.host.VsanNetworkLoadTestResult'), 'System.Read', None), ('queryCheckLimits', 'VsanHostQueryCheckLimits', 'vim.version.version9', tuple(), (0, 'vim.host.VsanLimitHealthResult', 
'vim.host.VsanLimitHealthResult'), 'System.Read', None), ('getProactiveRebalanceInfo', 'VsanGetProactiveRebalanceInfo', 'vim.version.version9', tuple(), (0, 'vim.host.VsanProactiveRebalanceInfoEx', 'vim.host.VsanProactiveRebalanceInfoEx'), 'System.Read', None), ('checkClomdLiveness', 'VsanHostClomdLiveness', 'vim.version.version9', tuple(), (0, 'boolean', 'boolean'), 'System.Read', None), ]) -CreateManagedType('vim.cluster.VsanVcClusterHealthSystem', 'VsanVcClusterHealthSystem', 'vmodl.ManagedObject', 'vim.version.version9', [], [('queryClusterCreateVmHealthHistoryTest', 'VsanQueryVcClusterCreateVmHealthHistoryTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('count', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanClusterCreateVmHealthTestResult[]', 'vim.cluster.VsanClusterCreateVmHealthTestResult[]'), 'System.Read', None), ('setLogLevel', 'VsanHealthSetLogLevel', 'vim.version.version9', (('level', 'vim.cluster.VsanHealthLogLevelEnum', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'void', 'void'), 'System.Read', None), ('testVsanClusterTelemetryProxy', 'VsanHealthTestVsanClusterTelemetryProxy', 'vim.version.version9', (('proxyConfig', 'vim.cluster.VsanClusterTelemetryProxyConfig', 'vim.version.version9', 0, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('uploadHclDb', 'VsanVcUploadHclDb', 'vim.version.version9', (('db', 'string', 'vim.version.version9', 0, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('updateHclDbFromWeb', 'VsanVcUpdateHclDbFromWeb', 'vim.version.version9', (('url', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('repairClusterObjectsImmediate', 'VsanHealthRepairClusterObjectsImmediate', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('uuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, 
None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('queryClusterNetworkPerfTest', 'VsanQueryVcClusterNetworkPerfTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('multicast', 'boolean', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterNetworkLoadTestResult', 'vim.cluster.VsanClusterNetworkLoadTestResult'), 'System.Read', None), ('queryClusterVmdkLoadHistoryTest', 'VsanQueryVcClusterVmdkLoadHistoryTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('count', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('taskId', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanClusterVmdkLoadTestResult[]', 'vim.cluster.VsanClusterVmdkLoadTestResult[]'), 'System.Read', None), ('queryVsanClusterHealthCheckInterval', 'VsanHealthQueryVsanClusterHealthCheckInterval', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'int', 'int'), 'System.Read', None), ('queryClusterCreateVmHealthTest', 'VsanQueryVcClusterCreateVmHealthTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('timeout', 'int', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterCreateVmHealthTestResult', 'vim.cluster.VsanClusterCreateVmHealthTestResult'), 'System.Read', None), ('getClusterHclInfo', 'VsanVcClusterGetHclInfo', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('includeHostsResult', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanClusterHclInfo', 'vim.cluster.VsanClusterHclInfo'), 'System.Read', None), ('queryAttachToSrHistory', 'VsanQueryAttachToSrHistory', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('count', 'int', 'vim.version.version9', 0 | 
F_OPTIONAL, None), ('taskId', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanAttachToSrOperation[]', 'vim.cluster.VsanAttachToSrOperation[]'), 'System.Read', None), ('rebalanceCluster', 'VsanRebalanceCluster', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('targetHosts', 'vim.HostSystem[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('runVmdkLoadTest', 'VsanVcClusterRunVmdkLoadTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('runname', 'string', 'vim.version.version9', 0, None), ('durationSec', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('specs', 'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('action', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('sendVsanTelemetry', 'VsanHealthSendVsanTelemetry', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'void', 'void'), 'System.Read', None), ('queryClusterNetworkPerfHistoryTest', 'VsanQueryVcClusterNetworkPerfHistoryTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('count', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanClusterNetworkLoadTestResult[]', 'vim.cluster.VsanClusterNetworkLoadTestResult[]'), 'System.Read', None), ('queryClusterHealthSummary', 'VsanQueryVcClusterHealthSummary', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('vmCreateTimeout', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('objUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeObjUuids', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('fields', 'string[]', 
'vim.version.version9', 0 | F_OPTIONAL, None), ('fetchFromCache', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanClusterHealthSummary', 'vim.cluster.VsanClusterHealthSummary'), 'System.Read', None), ('stopRebalanceCluster', 'VsanStopRebalanceCluster', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('targetHosts', 'vim.HostSystem[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('queryVsanClusterHealthConfig', 'VsanHealthQueryVsanClusterHealthConfig', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterHealthConfigs', 'vim.cluster.VsanClusterHealthConfigs'), 'System.Read', None), ('attachVsanSupportBundleToSr', 'VsanAttachVsanSupportBundleToSr', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('srNumber', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('queryClusterVmdkWorkloadTypes', 'VsanQueryVcClusterVmdkWorkloadTypes', 'vim.version.version9', tuple(), (0, 'vim.cluster.VsanStorageWorkloadType[]', 'vim.cluster.VsanStorageWorkloadType[]'), 'System.Read', None), ('queryVerifyClusterHealthSystemVersions', 'VsanVcClusterQueryVerifyHealthSystemVersions', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterHealthSystemVersionResult', 'vim.cluster.VsanClusterHealthSystemVersionResult'), 'System.Read', None), ('isRebalanceRunning', 'VsanHealthIsRebalanceRunning', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('targetHosts', 'vim.HostSystem[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('setVsanClusterHealthCheckInterval', 
'VsanHealthSetVsanClusterHealthCheckInterval', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('vsanClusterHealthCheckInterval', 'int', 'vim.version.version9', 0, None), ), (0, 'void', 'void'), 'System.Read', None), ]) -CreateManagedType('vim.cluster.VsanVcStretchedClusterSystem', 'VimClusterVsanVcStretchedClusterSystem', 'vmodl.ManagedObject', 'vim.version.version10', [], [('isWitnessHost', 'VSANVcIsWitnessHost', 'vim.version.version10', (('host', 'vim.HostSystem', 'vim.version.version10', 0, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('setPreferredFaultDomain', 'VSANVcSetPreferredFaultDomain', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('preferredFd', 'string', 'vim.version.version10', 0, None), ('witnessHost', 'vim.HostSystem', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('getPreferredFaultDomain', 'VSANVcGetPreferredFaultDomain', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0 | F_OPTIONAL, 'vim.cluster.VSANPreferredFaultDomainInfo', 'vim.cluster.VSANPreferredFaultDomainInfo'), 'System.Read', None), ('getWitnessHosts', 'VSANVcGetWitnessHosts', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0 | F_OPTIONAL, 'vim.cluster.VSANWitnessHostInfo[]', 'vim.cluster.VSANWitnessHostInfo[]'), 'System.Read', None), ('retrieveStretchedClusterVcCapability', 'VSANVcRetrieveStretchedClusterVcCapability', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('verifyAllConnected', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VSANStretchedClusterCapability[]', 'vim.cluster.VSANStretchedClusterCapability[]'), 'System.Read', None), ('convertToStretchedCluster', 
'VSANVcConvertToStretchedCluster', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('faultDomainConfig', 'vim.cluster.VSANStretchedClusterFaultDomainConfig', 'vim.version.version10', 0, None), ('witnessHost', 'vim.HostSystem', 'vim.version.version10', 0, None), ('preferredFd', 'string', 'vim.version.version10', 0, None), ('diskMapping', 'vim.vsan.host.DiskMapping', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('removeWitnessHost', 'VSANVcRemoveWitnessHost', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('witnessHost', 'vim.HostSystem', 'vim.version.version10', 0 | F_OPTIONAL, None), ('witnessAddress', 'string', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ]) -CreateManagedType('vim.cluster.VsanClusterHealthSystem', 'VsanClusterHealthSystem', 'vmodl.ManagedObject', 'vim.version.version9', [], [('queryPhysicalDiskHealthSummary', 'VsanQueryClusterPhysicalDiskHealthSummary', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.host.VsanPhysicalDiskHealthSummary[]', 'vim.host.VsanPhysicalDiskHealthSummary[]'), 'System.Read', None), ('queryClusterNetworkPerfTest', 'VsanQueryClusterNetworkPerfTest', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ('multicast', 'boolean', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterNetworkLoadTestResult', 'vim.cluster.VsanClusterNetworkLoadTestResult'), 'System.Read', None), ('queryAdvCfgSync', 'VsanQueryClusterAdvCfgSync', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 
'vim.cluster.VsanClusterAdvCfgSyncResult[]', 'vim.cluster.VsanClusterAdvCfgSyncResult[]'), 'System.Read', None), ('repairClusterImmediateObjects', 'VsanRepairClusterImmediateObjects', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ('uuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanClusterHealthSystemObjectsRepairResult', 'vim.cluster.VsanClusterHealthSystemObjectsRepairResult'), 'System.Read', None), ('queryVerifyClusterNetworkSettings', 'VsanQueryVerifyClusterNetworkSettings', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterNetworkHealthResult', 'vim.cluster.VsanClusterNetworkHealthResult'), 'System.Read', None), ('queryClusterCreateVmHealthTest', 'VsanQueryClusterCreateVmHealthTest', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ('timeout', 'int', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterCreateVmHealthTestResult', 'vim.cluster.VsanClusterCreateVmHealthTestResult'), 'System.Read', None), ('queryClusterHealthSystemVersions', 'VsanQueryClusterHealthSystemVersions', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterHealthSystemVersionResult', 'vim.cluster.VsanClusterHealthSystemVersionResult'), 'System.Read', None), ('getClusterHclInfo', 'VsanClusterGetHclInfo', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterHclInfo', 'vim.cluster.VsanClusterHclInfo'), 'System.Read', None), ('queryCheckLimits', 'VsanQueryClusterCheckLimits', 
'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterLimitHealthResult', 'vim.cluster.VsanClusterLimitHealthResult'), 'System.Read', None), ('queryCaptureVsanPcap', 'VsanQueryClusterCaptureVsanPcap', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ('duration', 'int', 'vim.version.version9', 0, None), ('vmknic', 'vim.cluster.VsanClusterHostVmknicMapping[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeRawPcap', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeIgmp', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('cmmdsMsgTypeFilter', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('cmmdsPorts', 'int[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('clusterUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanVsanClusterPcapResult', 'vim.cluster.VsanVsanClusterPcapResult'), 'System.Read', None), ('checkClusterClomdLiveness', 'VsanCheckClusterClomdLiveness', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterClomdLivenessResult', 'vim.cluster.VsanClusterClomdLivenessResult'), 'System.Read', None), ]) -CreateDataType('vim.host.VSANCmmdsNodeInfo', 'VimHostVSANCmmdsNodeInfo', 'vmodl.DynamicData', 'vim.version.version10', [('nodeUuid', 'string', 'vim.version.version10', 0), ('isWitness', 'boolean', 'vim.version.version10', 0)]) -CreateDataType('vim.host.VsanPhysicalDiskHealth', 'VsanPhysicalDiskHealth', 'vmodl.DynamicData', 'vim.version.version9', [('name', 'string', 'vim.version.version9', 0), ('uuid', 'string', 'vim.version.version9', 0), ('inCmmds', 'boolean', 'vim.version.version9', 0), ('inVsi', 'boolean', 'vim.version.version9', 0), 
('dedupScope', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('formatVersion', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('isAllFlash', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('congestionValue', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('congestionArea', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('congestionHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('metadataHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('operationalHealthDescription', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('operationalHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('dedupUsageHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('capacityHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('summaryHealth', 'string', 'vim.version.version9', 0), ('capacity', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('usedCapacity', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('reservedCapacity', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('totalBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('freeBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('hashedBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('dedupedBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('scsiDisk', 'vim.host.ScsiDisk', 'vim.version.version9', 0 | F_OPTIONAL), ('usedComponents', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('maxComponents', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('compLimitHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.vsan.DataEfficiencyConfig', 'VsanDataEfficiencyConfig', 'vmodl.DynamicData', 'vim.version.version10', [('dedupEnabled', 'boolean', 'vim.version.version10', 0), ('compressionEnabled', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.StorageComplianceResult', 'VsanStorageComplianceResult', 'vmodl.DynamicData', 'vim.version.version9', [('checkTime', 'vmodl.DateTime', 
'vim.version.version9', 0 | F_OPTIONAL), ('profile', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('objectUUID', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('complianceStatus', 'vim.cluster.StorageComplianceStatus', 'vim.version.version9', 0), ('mismatch', 'boolean', 'vim.version.version9', 0), ('violatedPolicies', 'vim.cluster.StoragePolicyStatus[]', 'vim.version.version9', 0 | F_OPTIONAL), ('operationalStatus', 'vim.cluster.StorageOperationalStatus', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterHealthGroup', 'VsanClusterHealthGroup', 'vmodl.DynamicData', 'vim.version.version9', [('groupId', 'string', 'vim.version.version9', 0), ('groupName', 'string', 'vim.version.version9', 0), ('groupHealth', 'string', 'vim.version.version9', 0), ('groupTests', 'vim.cluster.VsanClusterHealthTest[]', 'vim.version.version9', 0 | F_OPTIONAL), ('groupDetails', 'vim.cluster.VsanClusterHealthResultBase[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanSpaceUsageDetailResult', 'VsanSpaceUsageDetailResult', 'vmodl.DynamicData', 'vim.version.version9', [('spaceUsageByObjectType', 'vim.cluster.VsanObjectSpaceSummary[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanAttachToSrOperation', 'VsanAttachToSrOperation', 'vmodl.DynamicData', 'vim.version.version9', [('task', 'vim.Task', 'vim.version.version9', 0 | F_OPTIONAL), ('success', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('timestamp', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('srNumber', 'string', 'vim.version.version9', 0)]) -CreateDataType('vim.cluster.VsanObjectSpaceSummary', 'VsanObjectSpaceSummary', 'vmodl.DynamicData', 'vim.version.version9', [('objType', 'vim.cluster.VsanObjectTypeEnum', 'vim.version.version9', 0 | F_OPTIONAL), ('overheadB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('temporaryOverheadB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('primaryCapacityB', 'long', 
'vim.version.version9', 0 | F_OPTIONAL), ('provisionCapacityB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('reservedCapacityB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('overReservedB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('physicalUsedB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('usedB', 'long', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterHclInfo', 'VsanClusterHclInfo', 'vmodl.DynamicData', 'vim.version.version9', [('hclDbLastUpdate', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('hclDbAgeHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('hostResults', 'vim.host.VsanHostHclInfo[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanPerfGraph', 'VsanPerfGraph', 'vmodl.DynamicData', 'vim.version.version9', [('id', 'string', 'vim.version.version9', 0), ('metrics', 'vim.cluster.VsanPerfMetricId[]', 'vim.version.version9', 0), ('unit', 'vim.cluster.VsanPerfStatsUnitType', 'vim.version.version9', 0), ('threshold', 'vim.cluster.VsanPerfThreshold', 'vim.version.version9', 0 | F_OPTIONAL), ('name', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterHealthResultBase', 'VsanClusterHealthResultBase', 'vmodl.DynamicData', 'vim.version.version9', [('label', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanPerfTopEntity', 'VsanPerfTopEntity', 'vmodl.DynamicData', 'vim.version.version9', [('entityRefId', 'string', 'vim.version.version9', 0), ('value', 'string', 'vim.version.version9', 0)]) -CreateDataType('vim.cluster.VsanClusterBalancePerDiskInfo', 'VsanClusterBalancePerDiskInfo', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('fullness', 'long', 'vim.version.version9', 0), ('variance', 'long', 'vim.version.version9', 0), ('fullnessAboveThreshold', 'long', 
'vim.version.version9', 0), ('dataToMoveB', 'long', 'vim.version.version9', 0)]) -CreateDataType('vim.cluster.VsanClusterHealthTest', 'VsanClusterHealthTest', 'vmodl.DynamicData', 'vim.version.version9', [('testId', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('testName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('testDescription', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('testShortDescription', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('testHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('testDetails', 'vim.cluster.VsanClusterHealthResultBase[]', 'vim.version.version9', 0 | F_OPTIONAL), ('testActions', 'vim.cluster.VsanClusterHealthAction[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.StoragePolicyStatus', 'VsanStoragePolicyStatus', 'vmodl.DynamicData', 'vim.version.version9', [('id', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('expectedValue', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('currentValue', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanPerfMemberInfo', 'VsanPerfMemberInfo', 'vmodl.DynamicData', 'vim.version.version9', [('thumbprint', 'string', 'vim.version.version9', 0)]) -CreateDataType('vim.cluster.VsanPerfMetricId', 'VsanPerfMetricId', 'vmodl.DynamicData', 'vim.version.version9', [('label', 'string', 'vim.version.version9', 0), ('group', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('rollupType', 'vim.cluster.VsanPerfSummaryType', 'vim.version.version9', 0 | F_OPTIONAL), ('statsType', 'vim.cluster.VsanPerfStatsType', 'vim.version.version9', 0 | F_OPTIONAL), ('name', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('metricsCollectInterval', 'int', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VSANWitnessHostInfo', 'VimClusterVSANWitnessHostInfo', 'vmodl.DynamicData', 'vim.version.version10', [('nodeUuid', 'string', 
'vim.version.version10', 0), ('faultDomainName', 'string', 'vim.version.version10', 0 | F_OPTIONAL), ('preferredFdName', 'string', 'vim.version.version10', 0 | F_OPTIONAL), ('preferredFdUuid', 'string', 'vim.version.version10', 0 | F_OPTIONAL), ('unicastAgentAddr', 'string', 'vim.version.version10', 0 | F_OPTIONAL), ('host', 'vim.HostSystem', 'vim.version.version10', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanHealthExtMgmtPreCheckResult', 'VsanHealthExtMgmtPreCheckResult', 'vmodl.DynamicData', 'vim.version.version9', [('overallResult', 'boolean', 'vim.version.version9', 0), ('esxVersionCheckPassed', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('drsCheckPassed', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('eamConnectionCheckPassed', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('installStateCheckPassed', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('results', 'vim.cluster.VsanClusterHealthTest[]', 'vim.version.version9', 0), ('vumRegistered', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.vsan.upgradesystem.HostWithHybridDiskgroupIssue', 'VsanHostWithHybridDiskgroupIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('hosts', 'vim.HostSystem[]', 'vim.version.version10', 0)]) -CreateDataType('vim.cluster.VsanPerfMetricSeriesCSV', 'VsanPerfMetricSeriesCSV', 'vmodl.DynamicData', 'vim.version.version9', [('metricId', 'vim.cluster.VsanPerfMetricId', 'vim.version.version9', 0), ('values', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanPerfQuerySpec', 'VsanPerfQuerySpec', 'vmodl.DynamicData', 'vim.version.version9', [('entityRefId', 'string', 'vim.version.version9', 0), ('startTime', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('endTime', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('group', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('labels', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), 
('interval', 'int', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.host.VsanRepairObjectsResult', 'VsanRepairObjectsResult', 'vmodl.DynamicData', 'vim.version.version9', [('inQueueObjects', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('failedRepairObjects', 'vim.host.VsanFailedRepairObjectResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('notInQueueObjects', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterNetworkPartitionInfo', 'VsanClusterNetworkPartitionInfo', 'vmodl.DynamicData', 'vim.version.version9', [('hosts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.vsan.upgradesystem.MixedEsxVersionIssue', 'VsanMixedEsxVersionIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', []) -CreateDataType('vim.cluster.VsanClusterClomdLivenessResult', 'VsanClusterClomdLivenessResult', 'vmodl.DynamicData', 'vim.version.version9', [('clomdLivenessResult', 'vim.cluster.VsanHostClomdLivenessResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0)]) -CreateDataType('vim.cluster.VsanVsanClusterPcapResult', 'VsanVsanClusterPcapResult', 'vmodl.DynamicData', 'vim.version.version9', [('pkts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('groups', 'vim.cluster.VsanVsanClusterPcapGroup[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issues', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostResults', 'vim.host.VsanVsanPcapResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanPerfMasterInformation', 'VsanPerfMasterInformation', 'vmodl.DynamicData', 'vim.version.version9', [('secSinceLastStatsWrite', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('secSinceLastStatsCollect', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('statsIntervalSec', 'long', 'vim.version.version9', 0), ('collectionFailureHostUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), 
('renamedStatsDirectories', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('statsDirectoryPercentFree', 'long', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanHostCreateVmHealthTestResult', 'VsanHostCreateVmHealthTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('state', 'string', 'vim.version.version9', 0), ('fault', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanDiskFormatConversionCheckResult', 'VsanDiskFormatConversionCheckResult', 'vim.VsanUpgradeSystem.PreflightCheckResult', 'vim.version.version10', [('isSupported', 'boolean', 'vim.version.version10', 0), ('targetVersion', 'int', 'vim.version.version10', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterHealthSystemObjectsRepairResult', 'VsanClusterHealthSystemObjectsRepairResult', 'vmodl.DynamicData', 'vim.version.version9', [('inRepairingQueueObjects', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('failedRepairObjects', 'vim.host.VsanFailedRepairObjectResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0)]) -CreateDataType('vim.host.VsanHostHclInfo', 'VsanHostHclInfo', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('hclChecked', 'boolean', 'vim.version.version9', 0), ('releaseName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL), ('controllers', 'vim.host.VsanHclControllerInfo[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VSANStretchedClusterCapability', 'VimClusterVSANStretchedClusterCapability', 'vmodl.DynamicData', 'vim.version.version10', [('hostMoId', 'string', 'vim.version.version10', 0), ('connStatus', 'string', 'vim.version.version10', 0 | F_OPTIONAL), ('isSupported', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL), ('hostCapability', 
'vim.host.VSANStretchedClusterHostCapability', 'vim.version.version10', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanDiskMappingsConfigSpec', 'VimClusterVsanDiskMappingsConfigSpec', 'vmodl.DynamicData', 'vim.version.version10', [('hostDiskMappings', 'vim.cluster.VsanHostDiskMapping[]', 'vim.version.version10', 0)]) -CreateDataType('vim.host.VsanHostVmdkLoadTestResult', 'VsanHostVmdkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('issueFound', 'boolean', 'vim.version.version9', 0), ('faultMessage', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vmdkResults', 'vim.host.VsanVmdkLoadTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.vsan.ReconfigSpec', 'VimVsanReconfigSpec', 'vmodl.DynamicData', 'vim.version.version10', [('vsanClusterConfig', 'vim.vsan.cluster.ConfigInfo', 'vim.version.version10', 0 | F_OPTIONAL), ('dataEfficiencyConfig', 'vim.vsan.DataEfficiencyConfig', 'vim.version.version10', 0 | F_OPTIONAL), ('diskMappingSpec', 'vim.cluster.VsanDiskMappingsConfigSpec', 'vim.version.version10', 0 | F_OPTIONAL), ('faultDomainsSpec', 'vim.cluster.VsanFaultDomainsConfigSpec', 'vim.version.version10', 0 | F_OPTIONAL), ('modify', 'boolean', 'vim.version.version10', 0), ('allowReducedRedundancy', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL)]) -CreateDataType('vim.host.VsanNetworkPeerHealthResult', 'VsanNetworkPeerHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('peer', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('peerHostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('peerVmknicName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('smallPingTestSuccessPct', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('largePingTestSuccessPct', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('maxLatencyUs', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('onSameIpSubnet', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), 
('sourceVmknicName', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanWitnessSpec', 'VimClusterVsanWitnessSpec', 'vmodl.DynamicData', 'vim.version.version10', [('host', 'vim.HostSystem', 'vim.version.version10', 0), ('preferredFaultDomainName', 'string', 'vim.version.version10', 0), ('diskMapping', 'vim.vsan.host.DiskMapping', 'vim.version.version10', 0 | F_OPTIONAL)]) -CreateDataType('vim.vsan.host.DiskMappingCreationSpec', 'VimVsanHostDiskMappingCreationSpec', 'vmodl.DynamicData', 'vim.version.version10', [('host', 'vim.HostSystem', 'vim.version.version10', 0), ('cacheDisks', 'vim.host.ScsiDisk[]', 'vim.version.version10', 0 | F_OPTIONAL), ('capacityDisks', 'vim.host.ScsiDisk[]', 'vim.version.version10', 0), ('creationType', 'vim.vsan.host.DiskMappingCreationType', 'vim.version.version10', 0)]) -CreateDataType('vim.host.VsanLimitHealthResult', 'VsanLimitHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0), ('maxComponents', 'int', 'vim.version.version9', 0), ('freeComponents', 'int', 'vim.version.version9', 0), ('componentLimitHealth', 'string', 'vim.version.version9', 0), ('lowestFreeDiskSpacePct', 'int', 'vim.version.version9', 0), ('usedDiskSpaceB', 'long', 'vim.version.version9', 0), ('totalDiskSpaceB', 'long', 'vim.version.version9', 0), ('diskFreeSpaceHealth', 'string', 'vim.version.version9', 0), ('reservedRcSizeB', 'long', 'vim.version.version9', 0), ('totalRcSizeB', 'long', 'vim.version.version9', 0), ('rcFreeReservationHealth', 'string', 'vim.version.version9', 0)]) -CreateDataType('vim.cluster.VSANPreferredFaultDomainInfo', 'VimClusterVSANPreferredFaultDomainInfo', 'vmodl.DynamicData', 'vim.version.version10', [('preferredFaultDomainName', 'string', 'vim.version.version10', 0), ('preferredFaultDomainId', 'string', 'vim.version.version10', 0)]) -CreateDataType('vim.host.VsanObjectOverallHealth', 
'VsanObjectOverallHealth', 'vmodl.DynamicData', 'vim.version.version9', [('objectHealthDetail', 'vim.host.VsanObjectHealth[]', 'vim.version.version9', 0 | F_OPTIONAL), ('objectVersionCompliance', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanVsanClusterPcapGroup', 'VsanVsanClusterPcapGroup', 'vmodl.DynamicData', 'vim.version.version9', [('master', 'string', 'vim.version.version9', 0), ('members', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterHealthResultColumnInfo', 'VsanClusterHealthResultColumnInfo', 'vmodl.DynamicData', 'vim.version.version9', [('label', 'string', 'vim.version.version9', 0), ('type', 'string', 'vim.version.version9', 0)]) -CreateDataType('vim.cluster.VsanClusterNetworkHealthResult', 'VsanClusterNetworkHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostResults', 'vim.host.VsanNetworkHealthResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanVmknicPresent', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('matchingMulticastConfig', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('matchingIpSubnets', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('pingTestSuccess', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('largePingTestSuccess', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('potentialMulticastIssue', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('otherHostsInVsanCluster', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('partitions', 'vim.cluster.VsanClusterNetworkPartitionInfo[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsWithVsanDisabled', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsDisconnected', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsCommFailure', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsInEsxMaintenanceMode', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), 
('hostsInVsanMaintenanceMode', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('infoAboutUnexpectedHosts', 'vim.host.VsanQueryResultHostInfo[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanPerfNodeInformation', 'VsanPerfNodeInformation', 'vmodl.DynamicData', 'vim.version.version9', [('version', 'string', 'vim.version.version9', 0), ('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL), ('isCmmdsMaster', 'boolean', 'vim.version.version9', 0), ('isStatsMaster', 'boolean', 'vim.version.version9', 0), ('vsanMasterUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanNodeUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('masterInfo', 'vim.cluster.VsanPerfMasterInformation', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanPerfEntityMetricCSV', 'VsanPerfEntityMetricCSV', 'vmodl.DynamicData', 'vim.version.version9', [('entityRefId', 'string', 'vim.version.version9', 0), ('sampleInfo', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('value', 'vim.cluster.VsanPerfMetricSeriesCSV[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.vsan.upgradesystem.DiskUnhealthIssue', 'VsanDiskUnhealthIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('uuids', 'string[]', 'vim.version.version10', 0)]) -CreateDataType('vim.cluster.VsanFaultDomainSpec', 'VimClusterVsanFaultDomainSpec', 'vmodl.DynamicData', 'vim.version.version10', [('hosts', 'vim.HostSystem[]', 'vim.version.version10', 0), ('name', 'string', 'vim.version.version10', 0)]) -CreateDataType('vim.vsan.upgradesystem.ObjectInaccessibleIssue', 'VsanObjectInaccessibleIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('uuids', 'string[]', 'vim.version.version10', 0)]) -CreateDataType('vim.cluster.VsanDiskFormatConversionSpec', 'VsanDiskFormatConversionSpec', 'vmodl.DynamicData', 
'vim.version.version10', [('dataEfficiencyConfig', 'vim.vsan.DataEfficiencyConfig', 'vim.version.version10', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterHealthAction', 'VsanClusterHealthAction', 'vmodl.DynamicData', 'vim.version.version9', [('actionId', 'vim.cluster.VsanClusterHealthActionIdEnum', 'vim.version.version9', 0), ('actionLabel', 'vmodl.LocalizableMessage', 'vim.version.version9', 0), ('actionDescription', 'vmodl.LocalizableMessage', 'vim.version.version9', 0), ('enabled', 'boolean', 'vim.version.version9', 0)]) -CreateDataType('vim.cluster.VsanClusterHealthSystemVersionResult', 'VsanClusterHealthSystemVersionResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostResults', 'vim.cluster.VsanHostHealthSystemVersionResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('vcVersion', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0)]) -CreateDataType('vim.cluster.VsanClusterHealthResultRow', 'VsanClusterHealthResultRow', 'vmodl.DynamicData', 'vim.version.version9', [('values', 'string[]', 'vim.version.version9', 0), ('nestedRows', 'vim.cluster.VsanClusterHealthResultRow[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterHealthSystemStatusResult', 'VsanClusterHealthSystemStatusResult', 'vmodl.DynamicData', 'vim.version.version9', [('status', 'string', 'vim.version.version9', 0), ('goalState', 'string', 'vim.version.version9', 0), ('untrackedHosts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('trackedHostsStatus', 'vim.host.VsanHostHealthSystemStatusResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanHostDiskMapping', 'VimClusterVsanHostDiskMapping', 'vmodl.DynamicData', 'vim.version.version10', [('host', 'vim.HostSystem', 'vim.version.version10', 0), ('cacheDisks', 'vim.host.ScsiDisk[]', 'vim.version.version10', 0 | F_OPTIONAL), ('capacityDisks', 'vim.host.ScsiDisk[]', 'vim.version.version10', 0), ('type', 
'vim.cluster.VsanDiskGroupCreationType', 'vim.version.version10', 0)]) -CreateDataType('vim.cluster.VSANStretchedClusterFaultDomainConfig', 'VimClusterVSANStretchedClusterFaultDomainConfig', 'vmodl.DynamicData', 'vim.version.version10', [('firstFdName', 'string', 'vim.version.version10', 0), ('firstFdHosts', 'vim.HostSystem[]', 'vim.version.version10', 0), ('secondFdName', 'string', 'vim.version.version10', 0), ('secondFdHosts', 'vim.HostSystem[]', 'vim.version.version10', 0)]) -CreateDataType('vim.host.VSANStretchedClusterHostInfo', 'VimHostVSANStretchedClusterHostInfo', 'vmodl.DynamicData', 'vim.version.version10', [('nodeInfo', 'vim.host.VSANCmmdsNodeInfo', 'vim.version.version10', 0), ('faultDomainInfo', 'vim.host.VSANCmmdsFaultDomainInfo', 'vim.version.version10', 0 | F_OPTIONAL), ('preferredFaultDomainInfo', 'vim.host.VSANCmmdsPreferredFaultDomainInfo', 'vim.version.version10', 0 | F_OPTIONAL)]) -CreateDataType('vim.vsan.upgradesystem.HigherObjectsPresentDuringDowngradeIssue', 'VsanHigherObjectsPresentDuringDowngradeIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('uuids', 'string[]', 'vim.version.version10', 0)]) -CreateDataType('vim.host.VSANCmmdsFaultDomainInfo', 'VimHostVSANCmmdsFaultDomainInfo', 'vmodl.DynamicData', 'vim.version.version10', [('faultDomainId', 'string', 'vim.version.version10', 0), ('faultDomainName', 'string', 'vim.version.version10', 0)]) -CreateDataType('vim.fault.VsanNodeNotMaster', 'VsanNodeNotMaster', 'vim.fault.VimFault', 'vim.version.version9', [('vsanMasterUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('cmmdsMasterButNotStatsMaster', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanHostHealthSystemVersionResult', 'VsanHostHealthSystemVersionResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('version', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 
'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterHealthConfigs', 'VsanClusterHealthConfigs', 'vmodl.DynamicData', 'vim.version.version9', [('enableVsanTelemetry', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanTelemetryInterval', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanTelemetryProxy', 'vim.cluster.VsanClusterTelemetryProxyConfig', 'vim.version.version9', 0 | F_OPTIONAL), ('configs', 'vim.cluster.VsanClusterHealthResultKeyValuePair[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterWhatifHostFailuresResult', 'VsanClusterWhatifHostFailuresResult', 'vmodl.DynamicData', 'vim.version.version9', [('numFailures', 'long', 'vim.version.version9', 0), ('totalUsedCapacityB', 'long', 'vim.version.version9', 0), ('totalCapacityB', 'long', 'vim.version.version9', 0), ('totalRcReservationB', 'long', 'vim.version.version9', 0), ('totalRcSizeB', 'long', 'vim.version.version9', 0), ('usedComponents', 'long', 'vim.version.version9', 0), ('totalComponents', 'long', 'vim.version.version9', 0), ('componentLimitHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('diskFreeSpaceHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('rcFreeReservationHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanObjectIdentityAndHealth', 'VsanObjectIdentityAndHealth', 'vmodl.DynamicData', 'vim.version.version9', [('identities', 'vim.cluster.VsanObjectIdentity[]', 'vim.version.version9', 0 | F_OPTIONAL), ('health', 'vim.host.VsanObjectOverallHealth', 'vim.version.version9', 0 | F_OPTIONAL), ('spaceSummary', 'vim.cluster.VsanObjectSpaceSummary[]', 'vim.version.version9', 0 | F_OPTIONAL), ('rawData', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.host.VsanHclControllerInfo', 'VsanHclControllerInfo', 'vmodl.DynamicData', 'vim.version.version9', [('deviceName', 'string', 'vim.version.version9', 0), ('deviceDisplayName', 'string', 
'vim.version.version9', 0 | F_OPTIONAL), ('driverName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('driverVersion', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vendorId', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('deviceId', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('subVendorId', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('subDeviceId', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('extraInfo', 'vim.KeyValue[]', 'vim.version.version9', 0 | F_OPTIONAL), ('deviceOnHcl', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('releaseSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('releasesOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('driverVersionsOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('driverVersionSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('fwVersionSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('fwVersionOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('cacheConfigSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('cacheConfigOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('raidConfigSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('raidConfigOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('fwVersion', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('raidConfig', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('cacheConfig', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('cimProviderInfo', 'vim.host.VsanHostCimProviderInfo', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterHealthResultKeyValuePair', 'VsanClusterHealthResultKeyValuePair', 'vmodl.DynamicData', 'vim.version.version9', [('key', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('value', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.StorageOperationalStatus', 'VsanStorageOperationalStatus', 'vmodl.DynamicData', 
'vim.version.version9', [('healthy', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('operationETA', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('operationProgress', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('transitional', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanSpaceUsage', 'VsanSpaceUsage', 'vmodl.DynamicData', 'vim.version.version9', [('totalCapacityB', 'long', 'vim.version.version9', 0), ('freeCapacityB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('spaceOverview', 'vim.cluster.VsanObjectSpaceSummary', 'vim.version.version9', 0 | F_OPTIONAL), ('spaceDetail', 'vim.cluster.VsanSpaceUsageDetailResult', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterHealthResultTable', 'VsanClusterHealthResultTable', 'vim.cluster.VsanClusterHealthResultBase', 'vim.version.version9', [('columns', 'vim.cluster.VsanClusterHealthResultColumnInfo[]', 'vim.version.version9', 0 | F_OPTIONAL), ('rows', 'vim.cluster.VsanClusterHealthResultRow[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterConfig', 'VsanClusterConfig', 'vmodl.DynamicData', 'vim.version.version9', [('config', 'vim.vsan.cluster.ConfigInfo', 'vim.version.version9', 0), ('name', 'string', 'vim.version.version9', 0), ('hosts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.vsan.host.VsanHostCapability', 'VimVsanHostVsanHostCapability', 'vmodl.DynamicData', 'vim.version.version10', [('host', 'vim.HostSystem', 'vim.version.version10', 0), ('isSupported', 'boolean', 'vim.version.version10', 0), ('isLicensed', 'boolean', 'vim.version.version10', 0)]) -CreateDataType('vim.cluster.VsanPerfThreshold', 'VsanPerfThreshold', 'vmodl.DynamicData', 'vim.version.version9', [('direction', 'vim.cluster.VsanPerfThresholdDirectionType', 'vim.version.version9', 0), ('yellow', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('red', 'string', 'vim.version.version9', 
0 | F_OPTIONAL)]) -CreateDataType('vim.host.VsanNetworkHealthResult', 'VsanNetworkHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('host', 'vim.HostSystem', 'vim.version.version9', 0 | F_OPTIONAL), ('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanVmknicPresent', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('ipSubnets', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('peerHealth', 'vim.host.VsanNetworkPeerHealthResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('multicastConfig', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.vsan.ConfigInfoEx', 'VsanConfigInfoEx', 'vim.vsan.cluster.ConfigInfo', 'vim.version.version10', [('dataEfficiencyConfig', 'vim.vsan.DataEfficiencyConfig', 'vim.version.version10', 0 | F_OPTIONAL)]) -CreateDataType('vim.host.VsanVmdkLoadTestResult', 'VsanVmdkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('success', 'boolean', 'vim.version.version9', 0), ('faultMessage', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('spec', 'vim.host.VsanVmdkLoadTestSpec', 'vim.version.version9', 0), ('actualDurationSec', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('totalBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('iops', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('tputBps', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('avgLatencyUs', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('maxLatencyUs', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('numIoAboveLatencyThreshold', 'long', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterVMsHealthOverallResult', 'VsanClusterVMsHealthOverAllResult', 'vmodl.DynamicData', 'vim.version.version9', [('healthStateList', 'vim.cluster.VsanClusterVMsHealthSummaryResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('overallHealthState', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) 
-CreateDataType('vim.host.VsanHostHealthSystemStatusResult', 'VsanHostHealthSystemStatusResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('status', 'string', 'vim.version.version9', 0), ('issues', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterAdvCfgSyncResult', 'VsanClusterAdvCfgSyncResult', 'vmodl.DynamicData', 'vim.version.version9', [('inSync', 'boolean', 'vim.version.version9', 0), ('name', 'string', 'vim.version.version9', 0), ('hostValues', 'vim.cluster.VsanClusterAdvCfgSyncHostResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.host.VsanQueryResultHostInfo', 'VsanQueryResultHostInfo', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('hostnameInCmmds', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanIpv4Addresses', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.vsan.host.DiskMapInfoEx', 'VimVsanHostDiskMapInfoEx', 'vmodl.DynamicData', 'vim.version.version10', [('mapping', 'vim.vsan.host.DiskMapping', 'vim.version.version10', 0), ('isMounted', 'boolean', 'vim.version.version10', 0), ('isAllFlash', 'boolean', 'vim.version.version10', 0), ('isDataEfficiency', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL)]) -CreateDataType('vim.host.VsanVmdkLoadTestSpec', 'VsanVmdkLoadTestSpec', 'vmodl.DynamicData', 'vim.version.version9', [('vmdkCreateSpec', 'vim.VirtualDiskManager.FileBackedVirtualDiskSpec', 'vim.version.version9', 0 | F_OPTIONAL), ('vmdkIOSpec', 'vim.host.VsanVmdkIOLoadSpec', 'vim.version.version9', 0 | F_OPTIONAL), ('vmdkIOSpecSequence', 'vim.host.VsanVmdkIOLoadSpec[]', 'vim.version.version9', 0 | F_OPTIONAL), ('stepDurationSec', 'long', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterHealthSummary', 'VsanClusterHealthSummary', 'vmodl.DynamicData', 'vim.version.version9', [('clusterStatus', 
'vim.cluster.VsanClusterHealthSystemStatusResult', 'vim.version.version9', 0 | F_OPTIONAL), ('timestamp', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('clusterVersions', 'vim.cluster.VsanClusterHealthSystemVersionResult', 'vim.version.version9', 0 | F_OPTIONAL), ('objectHealth', 'vim.host.VsanObjectOverallHealth', 'vim.version.version9', 0 | F_OPTIONAL), ('vmHealth', 'vim.cluster.VsanClusterVMsHealthOverallResult', 'vim.version.version9', 0 | F_OPTIONAL), ('networkHealth', 'vim.cluster.VsanClusterNetworkHealthResult', 'vim.version.version9', 0 | F_OPTIONAL), ('limitHealth', 'vim.cluster.VsanClusterLimitHealthResult', 'vim.version.version9', 0 | F_OPTIONAL), ('advCfgSync', 'vim.cluster.VsanClusterAdvCfgSyncResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('createVmHealth', 'vim.cluster.VsanHostCreateVmHealthTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('physicalDisksHealth', 'vim.host.VsanPhysicalDiskHealthSummary[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hclInfo', 'vim.cluster.VsanClusterHclInfo', 'vim.version.version9', 0 | F_OPTIONAL), ('groups', 'vim.cluster.VsanClusterHealthGroup[]', 'vim.version.version9', 0 | F_OPTIONAL), ('overallHealth', 'string', 'vim.version.version9', 0), ('overallHealthDescription', 'string', 'vim.version.version9', 0), ('clomdLiveness', 'vim.cluster.VsanClusterClomdLivenessResult', 'vim.version.version9', 0 | F_OPTIONAL), ('diskBalance', 'vim.cluster.VsanClusterBalanceSummary', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanPerfEntityType', 'VsanPerfEntityType', 'vmodl.DynamicData', 'vim.version.version9', [('name', 'string', 'vim.version.version9', 0), ('id', 'string', 'vim.version.version9', 0), ('graphs', 'vim.cluster.VsanPerfGraph[]', 'vim.version.version9', 0), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.host.VsanNetworkLoadTestResult', 'VsanNetworkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 
'string', 'vim.version.version9', 0), ('status', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('client', 'boolean', 'vim.version.version9', 0), ('bandwidthBps', 'long', 'vim.version.version9', 0), ('totalBytes', 'long', 'vim.version.version9', 0), ('lostDatagrams', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('lossPct', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('sentDatagrams', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('jitterMs', 'float', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.host.VsanPhysicalDiskHealthSummary', 'VsanPhysicalDiskHealthSummary', 'vmodl.DynamicData', 'vim.version.version9', [('overallHealth', 'string', 'vim.version.version9', 0), ('heapsWithIssues', 'vim.host.VsanResourceHealth[]', 'vim.version.version9', 0 | F_OPTIONAL), ('slabsWithIssues', 'vim.host.VsanResourceHealth[]', 'vim.version.version9', 0 | F_OPTIONAL), ('disks', 'vim.host.VsanPhysicalDiskHealth[]', 'vim.version.version9', 0 | F_OPTIONAL), ('componentsWithIssues', 'vim.host.VsanResourceHealth[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('hostDedupScope', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.vsan.host.VsanDiskManagementSystemCapability', 'VimVsanHostVsanDiskManagementSystemCapability', 'vmodl.DynamicData', 'vim.version.version10', [('version', 'string', 'vim.version.version10', 0)]) -CreateDataType('vim.host.VsanHostCimProviderInfo', 'VsanHostCimProviderInfo', 'vmodl.DynamicData', 'vim.version.version9', [('cimProviderSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('installedCIMProvider', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('cimProviderOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanObjectInformation', 'VsanObjectInformation', 'vmodl.DynamicData', 'vim.version.version9', [('directoryName', 
'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanObjectUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('policyAttributes', 'vim.KeyValue[]', 'vim.version.version9', 0 | F_OPTIONAL), ('spbmProfileUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('spbmProfileGenerationId', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('spbmComplianceResult', 'vim.cluster.StorageComplianceResult', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanObjectIdentity', 'VsanObjectIdentity', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 'string', 'vim.version.version9', 0), ('type', 'string', 'vim.version.version9', 0), ('vmInstanceUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vmNsObjectUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vm', 'vim.VirtualMachine', 'vim.version.version9', 0 | F_OPTIONAL), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.host.VsanResourceHealth', 'VsanResourceHealth', 'vmodl.DynamicData', 'vim.version.version9', [('resource', 'string', 'vim.version.version9', 0), ('health', 'string', 'vim.version.version9', 0), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanCapability', 'VsanCapability', 'vmodl.DynamicData', 'vim.version.version10', [('target', 'vmodl.ManagedObject', 'vim.version.version10', 0 | F_OPTIONAL), ('capabilities', 'string[]', 'vim.version.version10', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanHostClomdLivenessResult', 'VsanHostClomdLivenessResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('clomdStat', 'string', 'vim.version.version9', 0), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanObjectQuerySpec', 'VsanObjectQuerySpec', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 
'string', 'vim.version.version9', 0), ('spbmProfileGenerationId', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterLimitHealthResult', 'VsanClusterLimitHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('issueFound', 'boolean', 'vim.version.version9', 0), ('componentLimitHealth', 'string', 'vim.version.version9', 0), ('diskFreeSpaceHealth', 'string', 'vim.version.version9', 0), ('rcFreeReservationHealth', 'string', 'vim.version.version9', 0), ('hostResults', 'vim.host.VsanLimitHealthResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('whatifHostFailures', 'vim.cluster.VsanClusterWhatifHostFailuresResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsCommFailure', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanStorageWorkloadType', 'VsanStorageWorkloadType', 'vmodl.DynamicData', 'vim.version.version9', [('specs', 'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0), ('typeId', 'string', 'vim.version.version9', 0), ('name', 'string', 'vim.version.version9', 0), ('description', 'string', 'vim.version.version9', 0)]) -CreateDataType('vim.cluster.VsanClusterAdvCfgSyncHostResult', 'VsanClusterAdvCfgSyncHostResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('value', 'string', 'vim.version.version9', 0)]) -CreateDataType('vim.vsan.upgradesystem.ObjectPolicyIssue', 'VsanObjectPolicyIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('uuids', 'string[]', 'vim.version.version10', 0)]) -CreateDataType('vim.cluster.VsanPerfTopEntities', 'VsanPerfTopEntities', 'vmodl.DynamicData', 'vim.version.version9', [('metricId', 'vim.cluster.VsanPerfMetricId', 'vim.version.version9', 0), ('entities', 'vim.cluster.VsanPerfTopEntity[]', 'vim.version.version9', 0)]) -CreateDataType('vim.host.VsanProactiveRebalanceInfoEx', 'VsanProactiveRebalanceInfoEx', 'vmodl.DynamicData', 'vim.version.version9', 
[('running', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('startTs', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('stopTs', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('varianceThreshold', 'float', 'vim.version.version9', 0 | F_OPTIONAL), ('timeThreshold', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('rateThreshold', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterProactiveTestResult', 'VsanClusterProactiveTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('overallStatus', 'string', 'vim.version.version9', 0), ('overallStatusDescription', 'string', 'vim.version.version9', 0), ('timestamp', 'vmodl.DateTime', 'vim.version.version9', 0), ('healthTest', 'vim.cluster.VsanClusterHealthTest', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.host.VSANCmmdsPreferredFaultDomainInfo', 'VimHostVSANCmmdsPreferredFaultDomainInfo', 'vmodl.DynamicData', 'vim.version.version10', [('preferredFaultDomainId', 'string', 'vim.version.version10', 0), ('preferredFaultDomainName', 'string', 'vim.version.version10', 0)]) -CreateDataType('vim.cluster.VsanFaultDomainsConfigSpec', 'VimClusterVsanFaultDomainsConfigSpec', 'vmodl.DynamicData', 'vim.version.version10', [('faultDomains', 'vim.cluster.VsanFaultDomainSpec[]', 'vim.version.version10', 0), ('witness', 'vim.cluster.VsanWitnessSpec', 'vim.version.version10', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterHostVmknicMapping', 'VsanClusterHostVmknicMapping', 'vmodl.DynamicData', 'vim.version.version9', [('host', 'string', 'vim.version.version9', 0), ('vmknic', 'string', 'vim.version.version9', 0)]) -CreateDataType('vim.cluster.VsanClusterVmdkLoadTestResult', 'VsanClusterVmdkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('task', 'vim.Task', 'vim.version.version9', 0 | 
F_OPTIONAL), ('clusterResult', 'vim.cluster.VsanClusterProactiveTestResult', 'vim.version.version9', 0 | F_OPTIONAL), ('hostResults', 'vim.host.VsanHostVmdkLoadTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterVMsHealthSummaryResult', 'VsanClusterVMsHealthSummaryResult', 'vmodl.DynamicData', 'vim.version.version9', [('numVMs', 'int', 'vim.version.version9', 0), ('state', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('health', 'string', 'vim.version.version9', 0), ('vmInstanceUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.host.VSANStretchedClusterHostCapability', 'VimHostVSANStretchedClusterHostCapability', 'vmodl.DynamicData', 'vim.version.version10', [('featureVersion', 'string', 'vim.version.version10', 0)]) -CreateDataType('vim.host.VsanFailedRepairObjectResult', 'VsanFailedRepairObjectResult', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 'string', 'vim.version.version9', 0), ('errMessage', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterCreateVmHealthTestResult', 'VsanClusterCreateVmHealthTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('clusterResult', 'vim.cluster.VsanClusterProactiveTestResult', 'vim.version.version9', 0), ('hostResults', 'vim.cluster.VsanHostCreateVmHealthTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.host.VsanObjectHealth', 'VsanObjectHealth', 'vmodl.DynamicData', 'vim.version.version9', [('numObjects', 'int', 'vim.version.version9', 0), ('health', 'vim.host.VsanObjectHealthState', 'vim.version.version9', 0), ('objUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterBalanceSummary', 'VsanClusterBalanceSummary', 'vmodl.DynamicData', 'vim.version.version9', [('varianceThreshold', 'long', 'vim.version.version9', 0), ('disks', 'vim.cluster.VsanClusterBalancePerDiskInfo[]', 'vim.version.version9', 0 | F_OPTIONAL)]) 
-CreateDataType('vim.cluster.VsanClusterTelemetryProxyConfig', 'VsanClusterTelemetryProxyConfig', 'vmodl.DynamicData', 'vim.version.version9', [('host', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('port', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('user', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('password', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('autoDiscovered', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.host.VsanVmdkIOLoadSpec', 'VsanVmdkIOLoadSpec', 'vmodl.DynamicData', 'vim.version.version9', [('readPct', 'int', 'vim.version.version9', 0), ('oio', 'int', 'vim.version.version9', 0), ('iosizeB', 'int', 'vim.version.version9', 0), ('dataSizeMb', 'long', 'vim.version.version9', 0), ('random', 'boolean', 'vim.version.version9', 0), ('startOffsetB', 'long', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.host.VsanVsanPcapResult', 'VsanVsanPcapResult', 'vmodl.DynamicData', 'vim.version.version9', [('calltime', 'float', 'vim.version.version9', 0), ('vmknic', 'string', 'vim.version.version9', 0), ('tcpdumpFilter', 'string', 'vim.version.version9', 0), ('snaplen', 'int', 'vim.version.version9', 0), ('pkts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('pcap', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL), ('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.cluster.VsanClusterNetworkLoadTestResult', 'VsanClusterNetworkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('clusterResult', 'vim.cluster.VsanClusterProactiveTestResult', 'vim.version.version9', 0), ('hostResults', 'vim.host.VsanNetworkLoadTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) -CreateDataType('vim.vsan.upgradesystem.HostPropertyRetrieveIssue', 'VsanHostPropertyRetrieveIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('hosts', 'vim.HostSystem[]', 
'vim.version.version10', 0)]) -CreateEnumType('vim.host.VsanObjectHealthState', 'VsanObjectHealthState', 'vim.version.version9', ['inaccessible' ,'reducedavailabilitywithnorebuild' ,'reducedavailabilitywithnorebuilddelaytimer' ,'reducedavailabilitywithactiverebuild' ,'datamove' ,'nonavailabilityrelatedreconfig' ,'nonavailabilityrelatedincompliance' ,'healthy' ,]) -CreateEnumType('vim.cluster.VsanObjectTypeEnum', 'VsanObjectTypeEnum', 'vim.version.version9', ['vmswap' ,'vdisk' ,'namespace' ,'vmem' ,'statsdb' ,'iscsi' ,'other' ,'fileSystemOverhead' ,'dedupOverhead' ,'checksumOverhead' ,]) -CreateEnumType('vim.cluster.VsanCapabilityType', 'VsanCapabilityType', 'vim.version.version10', ['capability' ,'allflash' ,'stretchedcluster' ,'dataefficiency' ,'clusterconfig' ,'upgrade' ,'objectidentities' ,]) -CreateEnumType('vim.cluster.VsanHealthLogLevelEnum', 'VsanHealthLogLevelEnum', 'vim.version.version9', ['INFO' ,'WARNING' ,'ERROR' ,'DEBUG' ,'CRITICAL' ,]) -CreateEnumType('vim.cluster.VsanPerfSummaryType', 'VsanPerfSummaryType', 'vim.version.version9', ['average' ,'maximum' ,'minimum' ,'latest' ,'summation' ,'none' ,]) -CreateEnumType('vim.cluster.StorageComplianceStatus', 'VsanStorageComplianceStatus', 'vim.version.version9', ['compliant' ,'nonCompliant' ,'unknown' ,'notApplicable' ,]) -CreateEnumType('vim.cluster.VsanPerfStatsUnitType', 'VsanPerfStatsUnitType', 'vim.version.version9', ['number' ,'time_ms' ,'percentage' ,'size_bytes' ,'rate_bytes' ,]) -CreateEnumType('vim.cluster.VsanPerfThresholdDirectionType', 'VsanPerfThresholdDirectionType', 'vim.version.version9', ['upper' ,'lower' ,]) -CreateEnumType('vim.cluster.VsanPerfStatsType', 'VsanPerfStatsType', 'vim.version.version9', ['absolute' ,'delta' ,'rate' ,]) -CreateEnumType('vim.vsan.host.DiskMappingCreationType', 'VimVsanHostDiskMappingCreationType', 'vim.version.version10', ['hybrid' ,'allFlash' ,]) -CreateEnumType('vim.cluster.VsanClusterHealthActionIdEnum', 'VsanClusterHealthActionIdEnum', 
'vim.version.version9', ['RepairClusterObjectsAction' ,'UploadHclDb' ,'UpdateHclDbFromInternet' ,'EnableHealthService' ,'DiskBalance' ,'StopDiskBalance' ,'RemediateDedup' ,'UpgradeVsanDiskFormat' ,]) -CreateEnumType('vim.cluster.VsanDiskGroupCreationType', 'VimClusterVsanDiskGroupCreationType', 'vim.version.version10', ['allflash' ,'hybrid' ,]) \ No newline at end of file From 989667a591dda9785cfc6b620effa43b0773734c Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 16:28:56 -0400 Subject: [PATCH 531/639] Removed pylint checks from vsan ext library --- salt/ext/vsan/vsanapiutils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/ext/vsan/vsanapiutils.py b/salt/ext/vsan/vsanapiutils.py index 2e4c79a498..6b9b1b826c 100644 --- a/salt/ext/vsan/vsanapiutils.py +++ b/salt/ext/vsan/vsanapiutils.py @@ -7,6 +7,7 @@ Copyright 2016 VMware, Inc. All rights reserved. This module defines basic helper functions used in the sampe codes """ +# pylint: skip-file __author__ = 'VMware, Inc' from pyVmomi import vim, vmodl, SoapStubAdapter From c70df4adcd4974362163102f1c675defb03b3a36 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Thu, 14 Sep 2017 16:18:45 -0700 Subject: [PATCH 532/639] Following the change in #42103 if Salt is installed via setup.py then the generated _syspaths.py does not contain the HOME_DIR which results in a failure. 
--- setup.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/setup.py b/setup.py index effdc2f230..53892807c3 100755 --- a/setup.py +++ b/setup.py @@ -234,6 +234,7 @@ class GenerateSaltSyspaths(Command): spm_formula_path=self.distribution.salt_spm_formula_dir, spm_pillar_path=self.distribution.salt_spm_pillar_dir, spm_reactor_path=self.distribution.salt_spm_reactor_dir, + home_dir=self.distribution.salt_home_dir, ) ) @@ -724,6 +725,7 @@ PIDFILE_DIR = {pidfile_dir!r} SPM_FORMULA_PATH = {spm_formula_path!r} SPM_PILLAR_PATH = {spm_pillar_path!r} SPM_REACTOR_PATH = {spm_reactor_path!r} +HOME_DIR = {home_dir!r} ''' @@ -892,6 +894,7 @@ class SaltDistribution(distutils.dist.Distribution): self.salt_spm_formula_dir = None self.salt_spm_pillar_dir = None self.salt_spm_reactor_dir = None + self.salt_home_dir = None self.name = 'salt-ssh' if PACKAGED_FOR_SALT_SSH else 'salt' self.salt_version = __version__ # pylint: disable=undefined-variable From fadcc61618fa1b579ac76bd8f41cb34c442f2ac7 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Thu, 14 Sep 2017 18:34:32 -0700 Subject: [PATCH 533/639] Adding home_dir to SaltDistribution.global_options. --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 53892807c3..bcdd058d69 100755 --- a/setup.py +++ b/setup.py @@ -868,8 +868,8 @@ class SaltDistribution(distutils.dist.Distribution): 'Salt\'s pre-configured SPM formulas directory'), ('salt-spm-pillar-dir=', None, 'Salt\'s pre-configured SPM pillar directory'), - ('salt-spm-reactor-dir=', None, - 'Salt\'s pre-configured SPM reactor directory'), + ('salt-home-dir=', None, + 'Salt\'s pre-configured user home directory'), ] def __init__(self, attrs=None): From 2033f3d6d39188a50bb8006e145397a0e0fdd262 Mon Sep 17 00:00:00 2001 From: Joaquin Veira Date: Fri, 15 Sep 2017 14:14:46 +0200 Subject: [PATCH 534/639] Update zabbix_return.py Using salt.utils.fopen... 
correctly --- salt/returners/zabbix_return.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/returners/zabbix_return.py b/salt/returners/zabbix_return.py index a78f784c56..be4f71cf48 100644 --- a/salt/returners/zabbix_return.py +++ b/salt/returners/zabbix_return.py @@ -56,7 +56,7 @@ def zbx(): def zabbix_send(key, host, output): - f = open(zbx()['zabbix_config'],'r') + with salt.utils.fopen(zbx()['zabbix_config'],'r') as file_handle: for line in f: if "ServerActive" in line: flag = "true" From 2cdd775ca8bcc00d7b8e1d9bcaac104e1457c131 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 15 Sep 2017 08:40:38 -0700 Subject: [PATCH 535/639] Adding back in the salt-spm-reactor-dir --- setup.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.py b/setup.py index bcdd058d69..aacebb95a1 100755 --- a/setup.py +++ b/setup.py @@ -868,6 +868,8 @@ class SaltDistribution(distutils.dist.Distribution): 'Salt\'s pre-configured SPM formulas directory'), ('salt-spm-pillar-dir=', None, 'Salt\'s pre-configured SPM pillar directory'), + ('salt-spm-reactor-dir=', None, + 'Salt\'s pre-configured SPM reactor directory'), ('salt-home-dir=', None, 'Salt\'s pre-configured user home directory'), ] From b4966ac56574cb0c5f194c2b651bcfe490f44b1b Mon Sep 17 00:00:00 2001 From: Orlando Richards Date: Fri, 15 Sep 2017 17:40:36 +0100 Subject: [PATCH 536/639] Update yumpkg.py Fix issue #41978 - I've been using this amended regular expression for several months now, with around 800-900 versionlocks (all packages on a typical CentOS 7 instance), without issue. 
--- salt/modules/yumpkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py index e5ddc11e5a..849c5c4f29 100644 --- a/salt/modules/yumpkg.py +++ b/salt/modules/yumpkg.py @@ -61,7 +61,7 @@ from salt.ext import six log = logging.getLogger(__name__) -__HOLD_PATTERN = r'\w+(?:[.-][^-]+)*' +__HOLD_PATTERN = r'[\w+]+(?:[.-][^-]+)*' # Define the module's virtual name __virtualname__ = 'pkg' From b6e5fa170b75ee4e7fc72a34c00d781d1e51bb59 Mon Sep 17 00:00:00 2001 From: Kirill Bespalov Date: Fri, 15 Sep 2017 20:25:25 +0300 Subject: [PATCH 537/639] [network.interface_ip] Fix handling of interfaces without IP If an interface have no any assigned IP address, then the func fails with the following: root@cfg01:/home/ubuntu# salt-call network.interface_ip eth10 --local [ERROR ] An un-handled exception was caught by salt's global exception handler: KeyError: 0 Expected result: - the call must returns empty string if there is no IP of an interface. 
--- salt/utils/network.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/utils/network.py b/salt/utils/network.py index 77289d21b6..119aeed25a 100644 --- a/salt/utils/network.py +++ b/salt/utils/network.py @@ -1039,7 +1039,8 @@ def interface_ip(iface): iface_info, error = _get_iface_info(iface) if error is False: - return iface_info.get(iface, {}).get('inet', {})[0].get('address', '') + inet = iface_info.get(iface, {}).get('inet', None) + return inet[0].get('address', '') if inet else '' else: return error From 9e30c8b3623051dbf9c2d2186290235d5ea747b7 Mon Sep 17 00:00:00 2001 From: Aneesh Agrawal Date: Fri, 15 Sep 2017 18:28:51 +0000 Subject: [PATCH 538/639] Remove unused get_role_arn function from boto3 util --- salt/utils/boto3.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/salt/utils/boto3.py b/salt/utils/boto3.py index 2d0fa12fc1..29d45e0e2d 100644 --- a/salt/utils/boto3.py +++ b/salt/utils/boto3.py @@ -329,16 +329,6 @@ def paged_call(function, *args, **kwargs): kwargs[marker_arg] = marker -def get_role_arn(name, region=None, key=None, keyid=None, profile=None): - if name.startswith('arn:aws:iam:'): - return name - - account_id = __salt__['boto_iam.get_account_id']( - region=region, key=key, keyid=keyid, profile=profile - ) - return 'arn:aws:iam::{0}:role/{1}'.format(account_id, name) - - def ordered(obj): if isinstance(obj, (list, tuple)): return sorted(ordered(x) for x in obj) From 36294f5d14956631c19cac9a074d3b63877a3fde Mon Sep 17 00:00:00 2001 From: Aneesh Agrawal Date: Fri, 15 Sep 2017 18:29:20 +0000 Subject: [PATCH 539/639] Remove bad pack argument from boto_sqs This is only available for the boto utils module, not in boto3. The boto utils module needs this as it uses `__salt__` internally, but boto3 does not and thus does not need a `pack` argument. 
--- salt/modules/boto_sqs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/boto_sqs.py b/salt/modules/boto_sqs.py index eb44f4a949..034ba11802 100644 --- a/salt/modules/boto_sqs.py +++ b/salt/modules/boto_sqs.py @@ -78,7 +78,7 @@ def __virtual__(): ''' if not HAS_BOTO3: return (False, 'The boto_sqs module could not be loaded: boto3 libraries not found') - __utils__['boto3.assign_funcs'](__name__, 'sqs', pack=__salt__) + __utils__['boto3.assign_funcs'](__name__, 'sqs') return True From cd2ff46284a8144755b880c035d0a89938474955 Mon Sep 17 00:00:00 2001 From: Morgan Willcock Date: Fri, 15 Sep 2017 21:08:54 +0100 Subject: [PATCH 540/639] Return COMSPEC as the shell for Windows --- salt/grains/extra.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/salt/grains/extra.py b/salt/grains/extra.py index 8db1b016b8..e89386ba21 100644 --- a/salt/grains/extra.py +++ b/salt/grains/extra.py @@ -11,6 +11,7 @@ import logging # Import salt libs import salt.utils.files +import salt.utils.platform log = logging.getLogger(__name__) @@ -21,7 +22,14 @@ def shell(): ''' # Provides: # shell - return {'shell': os.environ.get('SHELL', '/bin/sh')} + if salt.utils.platform.is_windows(): + env_var = 'COMSPEC' + default = r'C:\Windows\system32\cmd.exe' + else: + env_var = 'SHELL' + default = '/bin/sh' + + return {'shell': os.environ.get(env_var, default)} def config(): From 9afc84091dd5e060aa8b5872286298272890d48e Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 14 Sep 2017 11:35:23 -0400 Subject: [PATCH 541/639] Update which and is_windows calls to use new paths --- salt/grains/core.py | 2 +- salt/spm/__init__.py | 10 +++++----- tests/unit/fileserver/test_gitfs.py | 8 ++++---- tests/unit/modules/test_gem.py | 2 +- tests/unit/modules/test_groupadd.py | 4 ++-- tests/unit/modules/test_parted.py | 27 ++++++++++++++++++--------- tests/unit/modules/test_pw_group.py | 6 +++--- 7 files changed, 34 insertions(+), 25 deletions(-) diff --git 
a/salt/grains/core.py b/salt/grains/core.py index f4c50dd42a..bc3eb5ff0f 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -2446,7 +2446,7 @@ def default_gateway(): ip_gw: True # True if either of the above is True, False otherwise ''' grains = {} - if not salt.utils.which('ip'): + if not salt.utils.path.which('ip'): return {} grains['ip_gw'] = False grains['ip4_gw'] = False diff --git a/salt/spm/__init__.py b/salt/spm/__init__.py index 6696aae286..10da68a9b4 100644 --- a/salt/spm/__init__.py +++ b/salt/spm/__init__.py @@ -25,14 +25,16 @@ import salt.client import salt.config import salt.loader import salt.cache -import salt.utils.files -import salt.utils.http as http import salt.syspaths as syspaths from salt.ext import six from salt.ext.six import string_types from salt.ext.six.moves import input from salt.ext.six.moves import filter from salt.template import compile_template +import salt.utils.files +import salt.utils.http as http +import salt.utils.platform +import salt.utils.win_functions from salt.utils.yamldumper import SafeOrderedDumper # Get logging started @@ -493,9 +495,7 @@ class SPMClient(object): # No defaults for this in config.py; default to the current running # user and group - import salt.utils - if salt.utils.is_windows(): - import salt.utils.win_functions + if salt.utils.platform.is_windows(): uname = gname = salt.utils.win_functions.get_current_user() uname_sid = salt.utils.win_functions.get_sid_from_name(uname) uid = self.opts.get('spm_uid', uname_sid) diff --git a/tests/unit/fileserver/test_gitfs.py b/tests/unit/fileserver/test_gitfs.py index 53fbab6766..64d8ca5284 100644 --- a/tests/unit/fileserver/test_gitfs.py +++ b/tests/unit/fileserver/test_gitfs.py @@ -33,8 +33,10 @@ from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch from tests.support.paths import TMP, FILES # Import salt libs -import salt.utils.gitfs import salt.fileserver.gitfs as gitfs +import salt.utils.gitfs +import salt.utils.platform +import 
salt.utils.win_functions log = logging.getLogger(__name__) @@ -227,9 +229,7 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): if 'USERNAME' not in os.environ: try: - import salt.utils - if salt.utils.is_windows(): - import salt.utils.win_functions + if salt.utils.platform.is_windows(): os.environ['USERNAME'] = salt.utils.win_functions.get_current_user() else: os.environ['USERNAME'] = pwd.getpwuid(os.geteuid()).pw_name diff --git a/tests/unit/modules/test_gem.py b/tests/unit/modules/test_gem.py index b1ff893f7e..23221ba646 100644 --- a/tests/unit/modules/test_gem.py +++ b/tests/unit/modules/test_gem.py @@ -66,7 +66,7 @@ class TestGemModule(TestCase, LoaderModuleMockMixin): {'rvm.is_installed': MagicMock(return_value=False), 'rbenv.is_installed': MagicMock(return_value=True), 'rbenv.do': mock}),\ - patch('salt.utils.is_windows', return_value=False): + patch('salt.utils.platform.is_windows', return_value=False): gem._gem(['install', 'rails']) mock.assert_called_once_with( ['gem', 'install', 'rails'], diff --git a/tests/unit/modules/test_groupadd.py b/tests/unit/modules/test_groupadd.py index 9345cd043e..a0646556ea 100644 --- a/tests/unit/modules/test_groupadd.py +++ b/tests/unit/modules/test_groupadd.py @@ -16,12 +16,12 @@ from tests.support.unit import TestCase, skipIf from tests.support.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON # Import Salt Libs -import salt.utils import salt.modules.groupadd as groupadd +import salt.utils.platform @skipIf(NO_MOCK, NO_MOCK_REASON) -@skipIf(salt.utils.is_windows(), "Module not available on Windows") +@skipIf(salt.utils.platform.is_windows(), "Module not available on Windows") class GroupAddTestCase(TestCase, LoaderModuleMockMixin): ''' TestCase for salt.modules.groupadd diff --git a/tests/unit/modules/test_parted.py b/tests/unit/modules/test_parted.py index a7a920b08a..6accff745a 100644 --- a/tests/unit/modules/test_parted.py +++ b/tests/unit/modules/test_parted.py @@ -41,39 +41,48 @@ class PartedTestCase(TestCase, 
LoaderModuleMockMixin): # Test __virtual__ function for module registration def test_virtual_bails_on_windows(self): - '''If running windows, __virtual__ shouldn't register module''' + ''' + If running windows, __virtual__ shouldn't register module + ''' with patch('salt.utils.platform.is_windows', lambda: True): ret = parted.__virtual__() err = (False, 'The parted execution module failed to load Windows systems are not supported.') self.assertEqual(err, ret) def test_virtual_bails_without_parted(self): - '''If parted not in PATH, __virtual__ shouldn't register module''' + ''' + If parted not in PATH, __virtual__ shouldn't register module + ''' with patch('salt.utils.path.which', lambda exe: not exe == "parted"),\ - patch('salt.utils.is_windows', return_value=False): + patch('salt.utils.platform.is_windows', return_value=False): ret = parted.__virtual__() err = (False, 'The parted execution module failed to load parted binary is not in the path.') self.assertEqual(err, ret) def test_virtual_bails_without_lsblk(self): - '''If lsblk not in PATH, __virtual__ shouldn't register module''' + ''' + If lsblk not in PATH, __virtual__ shouldn't register module + ''' with patch('salt.utils.path.which', lambda exe: not exe == "lsblk"),\ - patch('salt.utils.is_windows', return_value=False): + patch('salt.utils.platform.is_windows', return_value=False): ret = parted.__virtual__() err = (False, 'The parted execution module failed to load lsblk binary is not in the path.') self.assertEqual(err, ret) def test_virtual_bails_without_partprobe(self): - '''If partprobe not in PATH, __virtual__ shouldn't register module''' + ''' + If partprobe not in PATH, __virtual__ shouldn't register module + ''' with patch('salt.utils.path.which', lambda exe: not exe == "partprobe"),\ - patch('salt.utils.is_windows', return_value=False): + patch('salt.utils.platform.is_windows', return_value=False): ret = parted.__virtual__() err = (False, 'The parted execution module failed to load partprobe binary 
is not in the path.') self.assertEqual(err, ret) def test_virtual(self): - '''On expected platform with correct utils in PATH, register - "partition" module''' + ''' + On expected platform with correct utils in PATH, register "partition" module + ''' with patch('salt.utils.platform.is_windows', lambda: False), \ patch('salt.utils.path.which', lambda exe: exe in ('parted', 'lsblk', 'partprobe')): ret = parted.__virtual__() diff --git a/tests/unit/modules/test_pw_group.py b/tests/unit/modules/test_pw_group.py index 07854e0aea..2cfc5f32d2 100644 --- a/tests/unit/modules/test_pw_group.py +++ b/tests/unit/modules/test_pw_group.py @@ -18,7 +18,7 @@ from tests.support.mock import ( # Import Salt Libs import salt.modules.pw_group as pw_group -import salt.utils +import salt.utils.platform @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -45,7 +45,7 @@ class PwGroupTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(pw_group.__salt__, {'cmd.run_all': mock}): self.assertTrue(pw_group.delete('a')) - @skipIf(salt.utils.is_windows(), 'grp not available on Windows') + @skipIf(salt.utils.platform.is_windows(), 'grp not available on Windows') def test_info(self): ''' Tests to return information about a group @@ -59,7 +59,7 @@ class PwGroupTestCase(TestCase, LoaderModuleMockMixin): with patch.dict(pw_group.grinfo, mock): self.assertDictEqual(pw_group.info('name'), {}) - @skipIf(salt.utils.is_windows(), 'grp not available on Windows') + @skipIf(salt.utils.platform.is_windows(), 'grp not available on Windows') def test_getent(self): ''' Tests for return info on all groups From d4236aeeb73224deb9d5758ea9aae21d07f16ff5 Mon Sep 17 00:00:00 2001 From: Joaquin Veira Date: Mon, 18 Sep 2017 08:43:54 +0200 Subject: [PATCH 542/639] Update zabbix_return.py forgot to change variable f for file_handle --- salt/returners/zabbix_return.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/returners/zabbix_return.py b/salt/returners/zabbix_return.py index be4f71cf48..bdf94d9749 100644 
--- a/salt/returners/zabbix_return.py +++ b/salt/returners/zabbix_return.py @@ -57,7 +57,7 @@ def zbx(): def zabbix_send(key, host, output): with salt.utils.fopen(zbx()['zabbix_config'],'r') as file_handle: - for line in f: + for line in file_handle: if "ServerActive" in line: flag = "true" server = line.rsplit('=') From 74d1a56524c01c59ced12a5fe8c70c50b8cb23de Mon Sep 17 00:00:00 2001 From: Mircea Ulinic Date: Mon, 18 Sep 2017 10:28:42 +0000 Subject: [PATCH 543/639] Add validate function Also, correct some docstings and improve logged messages --- salt/beacons/napalm_beacon.py | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/salt/beacons/napalm_beacon.py b/salt/beacons/napalm_beacon.py index 827a80ba39..6650215471 100644 --- a/salt/beacons/napalm_beacon.py +++ b/salt/beacons/napalm_beacon.py @@ -196,14 +196,14 @@ __virtualname__ = 'napalm' def __virtual__(): ''' - This beacon can only work when running under a regular or a proxy minion. + This beacon can only work when running under a regular or a proxy minion, managed through napalm. ''' return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__) def _compare(cur_cmp, cur_struct): ''' - Compares two obejcts and return a boolean value + Compares two objects and return a boolean value when there's a match. ''' if isinstance(cur_cmp, dict) and isinstance(cur_struct, dict): @@ -251,11 +251,11 @@ def _compare(cur_cmp, cur_struct): found |= _compare(cur_cmp, cur_struct_ele) return found elif isinstance(cur_cmp, bool) and isinstance(cur_struct, bool): - log.debug('Comparing booleans') + log.debug('Comparing booleans: %s ? %s', cur_cmp, cur_struct) return cur_cmp == cur_struct elif isinstance(cur_cmp, (six.string_types, six.text_type)) and \ isinstance(cur_struct, (six.string_types, six.text_type)): - log.debug('Comparing strings (and regex?)') + log.debug('Comparing strings (and regex?): %s ? 
%s', cur_cmp, cur_struct) # Trying literal match matched = re.match(cur_cmp, cur_struct, re.I) if matched: @@ -263,7 +263,7 @@ def _compare(cur_cmp, cur_struct): return False elif isinstance(cur_cmp, (six.integer_types, float)) and \ isinstance(cur_struct, (six.integer_types, float)): - log.debug('Comparing numeric values') + log.debug('Comparing numeric values: %d ? %d', cur_cmp, cur_struct) # numeric compare return cur_cmp == cur_struct elif isinstance(cur_struct, (six.integer_types, float)) and \ @@ -279,6 +279,23 @@ def _compare(cur_cmp, cur_struct): return False +def validate(config): + ''' + Validate the beacon configuration. + ''' + # Must be a list of dicts. + if not isinstance(config, list): + return False, 'Configuration for napalm beacon must be a list.' + for mod in config: + fun = mod.keys()[0] + fun_cfg = mod.values()[0] + if not isinstance(fun_cfg, dict): + return False, 'The match structure for the {} execution function output must be a dictionary'.format(fun) + if fun not in __salt__: + return False, 'Execution function {} is not availabe!'.format(fun) + return True, 'Valid configuration for the napal beacon!' + + def beacon(config): ''' Watch napalm function and fire events. From cdcbcdf989ce36dc6f240e6de53840ea5e28fb3a Mon Sep 17 00:00:00 2001 From: Nathan DELHAYE Date: Mon, 18 Sep 2017 16:10:59 +0200 Subject: [PATCH 544/639] Add an option to check & update elasticsearch template --- salt/states/elasticsearch.py | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/salt/states/elasticsearch.py b/salt/states/elasticsearch.py index 2c37a304ce..1965c40512 100644 --- a/salt/states/elasticsearch.py +++ b/salt/states/elasticsearch.py @@ -230,7 +230,7 @@ def index_template_absent(name): return ret -def index_template_present(name, definition): +def index_template_present(name, definition, check_definition=False): ''' Ensure that the named index templat eis present. 
@@ -238,6 +238,8 @@ def index_template_present(name, definition): Name of the index to add definition Required dict for creation parameters as per https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html + check_definition + If the template already exists and the definition is up to date **Example:** @@ -270,7 +272,27 @@ def index_template_present(name, definition): ret['result'] = False ret['comment'] = 'Cannot create index template {0}, {1}'.format(name, output) else: - ret['comment'] = 'Index template {0} is already present'.format(name) + if check_definition: + definition_parsed = json.loads(definition) + current_template = __salt__['elasticsearch.index_template_get'](name=name)[name] + diff = __utils__['dictdiffer.deep_diff'](current_template,definition_parsed) + if len(diff) != 0: + if __opts__['test']: + ret['comment'] = 'Index template {0} exist but need to be updated'.format(name) + ret['changes'] = diff + ret['result'] = None + else: + output = __salt__['elasticsearch.index_template_create'](name=name, body=definition) + if output: + ret['comment'] = 'Successfully updated index template {0}'.format(name) + ret['changes'] = diff + else: + ret['result'] = False + ret['comment'] = 'Cannot update index template {0}, {1}'.format(name, output) + else: + ret['comment'] = 'Index template {0} is already present and up to date'.format(name) + else: + ret['comment'] = 'Index template {0} is already present'.format(name) except Exception as e: ret['result'] = False ret['comment'] = str(e) From deff257fe744009f3a8e669dba37d5f0fde19b5e Mon Sep 17 00:00:00 2001 From: Nathan DELHAYE Date: Mon, 18 Sep 2017 17:08:05 +0200 Subject: [PATCH 545/639] Lint --- salt/states/elasticsearch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/elasticsearch.py b/salt/states/elasticsearch.py index 1965c40512..c5f297db59 100644 --- a/salt/states/elasticsearch.py +++ b/salt/states/elasticsearch.py @@ -275,7 +275,7 @@ def 
index_template_present(name, definition, check_definition=False): if check_definition: definition_parsed = json.loads(definition) current_template = __salt__['elasticsearch.index_template_get'](name=name)[name] - diff = __utils__['dictdiffer.deep_diff'](current_template,definition_parsed) + diff = __utils__['dictdiffer.deep_diff'](current_template, definition_parsed) if len(diff) != 0: if __opts__['test']: ret['comment'] = 'Index template {0} exist but need to be updated'.format(name) From ac7e81b6e3c81aef52c1097c631da5f3bd6d7288 Mon Sep 17 00:00:00 2001 From: spenceation Date: Mon, 18 Sep 2017 11:50:10 -0400 Subject: [PATCH 546/639] - Added support to parse XML ElementTrees with attributes. --- salt/utils/xmlutil.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py index 559d2eeae8..c89f45a34f 100644 --- a/salt/utils/xmlutil.py +++ b/salt/utils/xmlutil.py @@ -14,7 +14,7 @@ def to_dict(xmltree): ''' # If this object has no children, the for..loop below will return nothing # for it, so just return a single dict representing it. - if len(xmltree.getchildren()) < 1: + if len(xmltree.getchildren()) < 1 and len(xmltree.attrib.items()) < 1: name = xmltree.tag if '}' in name: comps = name.split('}') @@ -33,6 +33,8 @@ def to_dict(xmltree): if name not in xmldict: if len(item.getchildren()) > 0: xmldict[name] = to_dict(item) + elif len(item.attrib.items()) > 0: + xmldict[name] = to_dict(item) else: xmldict[name] = item.text else: @@ -42,4 +44,12 @@ def to_dict(xmltree): if not isinstance(xmldict[name], list): xmldict[name] = [xmldict[name]] xmldict[name].append(to_dict(item)) - return xmldict + + for attrName, attrValue in xmltree.attrib.items(): + if attrName not in xmldict: + xmldict[attrName] = attrValue + else: + # Attempt to ensure that items are not overwritten by attributes. 
+ xmldict["attr{0}".format(attrName)] = attrValue + + return xmldict \ No newline at end of file From 35d18cd05a5d6e6d895d875ef3ea3d14183b90e0 Mon Sep 17 00:00:00 2001 From: Simon Dodsley Date: Sat, 16 Sep 2017 09:33:42 -0700 Subject: [PATCH 547/639] Fix documentation to state correct module name --- salt/modules/purefa.py | 50 +++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/salt/modules/purefa.py b/salt/modules/purefa.py index 02f4ea049b..c604839752 100644 --- a/salt/modules/purefa.py +++ b/salt/modules/purefa.py @@ -203,8 +203,8 @@ def snap_create(name, suffix=None): .. code-block:: bash - salt '*' pure.snap_create foo - salt '*' pure.snap_create foo suffix=bar + salt '*' purefa.snap_create foo + salt '*' purefa.snap_create foo suffix=bar ''' array = _get_system() @@ -241,7 +241,7 @@ def snap_delete(name, suffix=None, eradicate=False): .. code-block:: bash - salt '*' pure.snap_delete foo suffix=snap eradicate=True + salt '*' purefa.snap_delete foo suffix=snap eradicate=True ''' array = _get_system() @@ -281,7 +281,7 @@ def snap_eradicate(name, suffix=None): .. code-block:: bash - salt '*' pure.snap_delete foo suffix=snap eradicate=True + salt '*' purefa.snap_delete foo suffix=snap eradicate=True ''' array = _get_system() @@ -315,8 +315,8 @@ def volume_create(name, size=None): .. code-block:: bash - salt '*' pure.volume_create foo - salt '*' pure.volume_create foo size=10T + salt '*' purefa.volume_create foo + salt '*' purefa.volume_create foo size=10T ''' if len(name) > 63: @@ -352,7 +352,7 @@ def volume_delete(name, eradicate=False): .. code-block:: bash - salt '*' pure.volume_delete foo eradicate=True + salt '*' purefa.volume_delete foo eradicate=True ''' array = _get_system() @@ -389,7 +389,7 @@ def volume_eradicate(name): .. code-block:: bash - salt '*' pure.volume_eradicate foo + salt '*' purefa.volume_eradicate foo ''' array = _get_system() @@ -422,7 +422,7 @@ def volume_extend(name, size): .. 
code-block:: bash - salt '*' pure.volume_extend foo 10T + salt '*' purefa.volume_extend foo 10T ''' array = _get_system() @@ -461,7 +461,7 @@ def snap_volume_create(name, target, overwrite=False): .. code-block:: bash - salt '*' pure.snap_volume_create foo.bar clone overwrite=True + salt '*' purefa.snap_volume_create foo.bar clone overwrite=True ''' array = _get_system() @@ -507,7 +507,7 @@ def volume_clone(name, target, overwrite=False): .. code-block:: bash - salt '*' pure.volume_clone foo bar overwrite=True + salt '*' purefa.volume_clone foo bar overwrite=True ''' array = _get_system() @@ -549,7 +549,7 @@ def volume_attach(name, host): .. code-block:: bash - salt '*' pure.volume_attach foo bar + salt '*' purefa.volume_attach foo bar ''' array = _get_system() @@ -582,7 +582,7 @@ def volume_detach(name, host): .. code-block:: bash - salt '*' pure.volume_detach foo bar + salt '*' purefa.volume_detach foo bar ''' array = _get_system() @@ -618,7 +618,7 @@ def host_create(name, iqn=None, wwn=None): .. code-block:: bash - salt '*' pure.host_create foo iqn='' wwn='' + salt '*' purefa.host_create foo iqn='' wwn='' ''' array = _get_system() @@ -669,7 +669,7 @@ def host_update(name, iqn=None, wwn=None): .. code-block:: bash - salt '*' pure.host_update foo iqn='' wwn='' + salt '*' purefa.host_update foo iqn='' wwn='' ''' array = _get_system() @@ -705,7 +705,7 @@ def host_delete(name): .. code-block:: bash - salt '*' pure.host_delete foo + salt '*' purefa.host_delete foo ''' array = _get_system() @@ -745,7 +745,7 @@ def hg_create(name, host=None, volume=None): .. code-block:: bash - salt '*' pure.hg_create foo host=bar volume=vol + salt '*' purefa.hg_create foo host=bar volume=vol ''' array = _get_system() @@ -801,7 +801,7 @@ def hg_update(name, host=None, volume=None): .. code-block:: bash - salt '*' pure.hg_update foo host=bar volume=vol + salt '*' purefa.hg_update foo host=bar volume=vol ''' array = _get_system() @@ -843,7 +843,7 @@ def hg_delete(name): .. 
code-block:: bash - salt '*' pure.hg_delete foo + salt '*' purefa.hg_delete foo ''' array = _get_system() @@ -885,7 +885,7 @@ def hg_remove(name, volume=None, host=None): .. code-block:: bash - salt '*' pure.hg_remove foo volume=test host=bar + salt '*' purefa.hg_remove foo volume=test host=bar ''' array = _get_system() @@ -948,7 +948,7 @@ def pg_create(name, hostgroup=None, host=None, volume=None, enabled=True): .. code-block:: bash - salt '*' pure.pg_create foo [hostgroup=foo | host=bar | volume=vol] enabled=[true | false] + salt '*' purefa.pg_create foo [hostgroup=foo | host=bar | volume=vol] enabled=[true | false] ''' array = _get_system() @@ -1041,7 +1041,7 @@ def pg_update(name, hostgroup=None, host=None, volume=None): .. code-block:: bash - salt '*' pure.pg_update foo [hostgroup=foo | host=bar | volume=vol] + salt '*' purefa.pg_update foo [hostgroup=foo | host=bar | volume=vol] ''' array = _get_system() @@ -1125,7 +1125,7 @@ def pg_delete(name, eradicate=False): .. code-block:: bash - salt '*' pure.pg_delete foo + salt '*' purefa.pg_delete foo ''' array = _get_system() @@ -1162,7 +1162,7 @@ def pg_eradicate(name): .. code-block:: bash - salt '*' pure.pg_eradicate foo + salt '*' purefa.pg_eradicate foo ''' array = _get_system() @@ -1200,7 +1200,7 @@ def pg_remove(name, hostgroup=None, host=None, volume=None): .. 
code-block:: bash - salt '*' pure.pg_remove foo [hostgroup=bar | host=test | volume=bar] + salt '*' purefa.pg_remove foo [hostgroup=bar | host=test | volume=bar] ''' array = _get_system() From c682f86e38bbdd69a4fa02ee12f34b613fdab18d Mon Sep 17 00:00:00 2001 From: Nicole Thomas Date: Mon, 18 Sep 2017 16:54:04 -0400 Subject: [PATCH 548/639] Lint: Add empty line at end of file --- salt/utils/xmlutil.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py index c89f45a34f..bb07921aaa 100644 --- a/salt/utils/xmlutil.py +++ b/salt/utils/xmlutil.py @@ -52,4 +52,4 @@ def to_dict(xmltree): # Attempt to ensure that items are not overwritten by attributes. xmldict["attr{0}".format(attrName)] = attrValue - return xmldict \ No newline at end of file + return xmldict From ad05cc5d984d4627180e0f875f8d06eaf86591b3 Mon Sep 17 00:00:00 2001 From: Petr Michalec Date: Sun, 17 Sep 2017 18:37:11 +0200 Subject: [PATCH 549/639] Fix names of NACL dec. methods --- salt/renderers/nacl.py | 7 +++---- tests/unit/renderers/test_nacl.py | 4 ++-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/salt/renderers/nacl.py b/salt/renderers/nacl.py index 98fa247b5b..91ba558e9d 100644 --- a/salt/renderers/nacl.py +++ b/salt/renderers/nacl.py @@ -22,8 +22,7 @@ To set things up, first generate a keypair. On the master, run the following: .. code-block:: bash - # salt-call --local nacl.keygen keyfile=/root/.nacl - # salt-call --local nacl.keygen_pub keyfile_pub=/root/.nacl.pub + # salt-call --local nacl.keygen sk_file=/root/.nacl Using encrypted pillar @@ -33,7 +32,7 @@ To encrypt secrets, copy the public key to your local machine and run: .. 
code-block:: bash - $ salt-call --local nacl.enc_pub datatoenc keyfile_pub=/root/.nacl.pub + $ salt-call --local nacl.enc datatoenc pk_file=/root/.nacl.pub To apply the renderer on a file-by-file basis add the following line to the @@ -80,7 +79,7 @@ def _decrypt_object(obj, **kwargs): return _decrypt_object(obj.getvalue(), **kwargs) if isinstance(obj, six.string_types): if re.search(NACL_REGEX, obj) is not None: - return __salt__['nacl.dec_pub'](re.search(NACL_REGEX, obj).group(1), **kwargs) + return __salt__['nacl.dec'](re.search(NACL_REGEX, obj).group(1), **kwargs) else: return obj elif isinstance(obj, dict): diff --git a/tests/unit/renderers/test_nacl.py b/tests/unit/renderers/test_nacl.py index bc55dd4e39..04aa31dd88 100644 --- a/tests/unit/renderers/test_nacl.py +++ b/tests/unit/renderers/test_nacl.py @@ -38,7 +38,7 @@ class NaclTestCase(TestCase, LoaderModuleMockMixin): secret_list = [secret] crypted_list = [crypted] - with patch.dict(nacl.__salt__, {'nacl.dec_pub': MagicMock(return_value=secret)}): + with patch.dict(nacl.__salt__, {'nacl.dec': MagicMock(return_value=secret)}): self.assertEqual(nacl._decrypt_object(secret), secret) self.assertEqual(nacl._decrypt_object(crypted), secret) self.assertEqual(nacl._decrypt_object(crypted_map), secret_map) @@ -51,5 +51,5 @@ class NaclTestCase(TestCase, LoaderModuleMockMixin): ''' secret = 'Use more salt.' 
crypted = 'NACL[MRN3cc+fmdxyQbz6WMF+jq1hKdU5X5BBI7OjK+atvHo1ll+w1gZ7XyWtZVfq9gK9rQaMfkDxmidJKwE0Mw==]' - with patch.dict(nacl.__salt__, {'nacl.dec_pub': MagicMock(return_value=secret)}): + with patch.dict(nacl.__salt__, {'nacl.dec': MagicMock(return_value=secret)}): self.assertEqual(nacl.render(crypted), secret) From 91fd112e5c8666c4ce6a110cea4fc66731e751af Mon Sep 17 00:00:00 2001 From: Mike Place Date: Tue, 19 Sep 2017 10:17:05 -0600 Subject: [PATCH 550/639] Remove trailing whitespace --- salt/proxy/junos.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/proxy/junos.py b/salt/proxy/junos.py index d8136e9af0..e3227bb4ae 100644 --- a/salt/proxy/junos.py +++ b/salt/proxy/junos.py @@ -128,7 +128,7 @@ def alive(opts): ''' dev = conn() - + # Check that the underlying netconf connection still exists. if dev._conn is None: return False From 3bae894306b91a02e7c9c649421e8a16e4a6cbe3 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Tue, 19 Sep 2017 12:47:16 -0600 Subject: [PATCH 551/639] digitalocean should be one word. 
Everything on their webpage is a reference to DigitalOcean --- conf/cloud.providers | 2 +- conf/cloud.providers.d/digitalocean.conf | 2 +- doc/ref/cli/salt-cloud.rst | 2 +- doc/ref/clouds/all/index.rst | 2 +- .../all/salt.cloud.clouds.digital_ocean.rst | 4 +- doc/topics/cloud/cloud.rst | 2 +- doc/topics/cloud/config.rst | 2 +- doc/topics/cloud/digitalocean.rst | 10 ++-- doc/topics/development/tests/index.rst | 4 +- doc/topics/development/tests/integration.rst | 6 +-- doc/topics/tutorials/libcloud.rst | 2 +- .../{digital_ocean.py => digitalocean.py} | 21 ++++---- salt/cloud/clouds/gce.py | 2 +- salt/config/__init__.py | 2 +- salt/utils/parsers.py | 2 +- ..._digital_ocean.py => test_digitalocean.py} | 8 +-- tests/integration/cloud/test_cloud.py | 4 +- .../conf/cloud.providers.d/digital_ocean.conf | 2 +- tests/unit/config/test_config.py | 6 +-- tests/unit/utils/test_schema.py | 54 +++++++++---------- 20 files changed, 70 insertions(+), 69 deletions(-) rename salt/cloud/clouds/{digital_ocean.py => digitalocean.py} (98%) rename tests/integration/cloud/providers/{test_digital_ocean.py => test_digitalocean.py} (98%) diff --git a/conf/cloud.providers b/conf/cloud.providers index b4879432c4..8e9cc2ccf7 100644 --- a/conf/cloud.providers +++ b/conf/cloud.providers @@ -3,7 +3,7 @@ # directory is identical. 
#my-digitalocean-config: -# driver: digital_ocean +# driver: digitalocean # client_key: wFGEwgregeqw3435gDger # api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg # location: New York 1 diff --git a/conf/cloud.providers.d/digitalocean.conf b/conf/cloud.providers.d/digitalocean.conf index 989758f184..da3c13b45d 100644 --- a/conf/cloud.providers.d/digitalocean.conf +++ b/conf/cloud.providers.d/digitalocean.conf @@ -1,5 +1,5 @@ #my-digitalocean-config: -# driver: digital_ocean +# driver: digitalocean # client_key: wFGEwgregeqw3435gDger # api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg # location: New York 1 diff --git a/doc/ref/cli/salt-cloud.rst b/doc/ref/cli/salt-cloud.rst index a9f3123756..a64c6ba83b 100644 --- a/doc/ref/cli/salt-cloud.rst +++ b/doc/ref/cli/salt-cloud.rst @@ -136,7 +136,7 @@ Query Options .. versionadded:: 2014.7.0 Display a list of configured profiles. Pass in a cloud provider to view - the provider's associated profiles, such as ``digital_ocean``, or pass in + the provider's associated profiles, such as ``digitalocean``, or pass in ``all`` to list all the configured profiles. diff --git a/doc/ref/clouds/all/index.rst b/doc/ref/clouds/all/index.rst index 5c5a3a9f5c..15fb4b1ae3 100644 --- a/doc/ref/clouds/all/index.rst +++ b/doc/ref/clouds/all/index.rst @@ -13,7 +13,7 @@ Full list of Salt Cloud modules aliyun azurearm cloudstack - digital_ocean + digitalocean dimensiondata ec2 gce diff --git a/doc/ref/clouds/all/salt.cloud.clouds.digital_ocean.rst b/doc/ref/clouds/all/salt.cloud.clouds.digital_ocean.rst index 71917c8765..1eeb2b2a41 100644 --- a/doc/ref/clouds/all/salt.cloud.clouds.digital_ocean.rst +++ b/doc/ref/clouds/all/salt.cloud.clouds.digital_ocean.rst @@ -1,6 +1,6 @@ =============================== -salt.cloud.clouds.digital_ocean +salt.cloud.clouds.digitalocean =============================== -.. automodule:: salt.cloud.clouds.digital_ocean +.. 
automodule:: salt.cloud.clouds.digitalocean :members: \ No newline at end of file diff --git a/doc/topics/cloud/cloud.rst b/doc/topics/cloud/cloud.rst index c88a5eccb7..568866a7e4 100644 --- a/doc/topics/cloud/cloud.rst +++ b/doc/topics/cloud/cloud.rst @@ -183,7 +183,7 @@ imports should be absent from the Salt Cloud module. A good example of a non-libcloud driver is the DigitalOcean driver: -https://github.com/saltstack/salt/tree/develop/salt/cloud/clouds/digital_ocean.py +https://github.com/saltstack/salt/tree/develop/salt/cloud/clouds/digitalocean.py The ``create()`` Function ------------------------- diff --git a/doc/topics/cloud/config.rst b/doc/topics/cloud/config.rst index 8028aa414f..173ea4e692 100644 --- a/doc/topics/cloud/config.rst +++ b/doc/topics/cloud/config.rst @@ -444,7 +444,7 @@ under the API Access tab. .. code-block:: yaml my-digitalocean-config: - driver: digital_ocean + driver: digitalocean personal_access_token: xxx location: New York 1 diff --git a/doc/topics/cloud/digitalocean.rst b/doc/topics/cloud/digitalocean.rst index e89faf1a5c..dd7c76d91f 100644 --- a/doc/topics/cloud/digitalocean.rst +++ b/doc/topics/cloud/digitalocean.rst @@ -19,7 +19,7 @@ under the "SSH Keys" section. # /etc/salt/cloud.providers.d/ directory. 
my-digitalocean-config: - driver: digital_ocean + driver: digitalocean personal_access_token: xxx ssh_key_file: /path/to/ssh/key/file ssh_key_names: my-key-name,my-key-name-2 @@ -63,7 +63,7 @@ command: # salt-cloud --list-locations my-digitalocean-config my-digitalocean-config: ---------- - digital_ocean: + digitalocean: ---------- Amsterdam 1: ---------- @@ -87,7 +87,7 @@ command: # salt-cloud --list-sizes my-digitalocean-config my-digitalocean-config: ---------- - digital_ocean: + digitalocean: ---------- 512MB: ---------- @@ -117,7 +117,7 @@ command: # salt-cloud --list-images my-digitalocean-config my-digitalocean-config: ---------- - digital_ocean: + digitalocean: ---------- 10.1: ---------- @@ -142,7 +142,7 @@ Profile Specifics: ssh_username ------------ -If using a FreeBSD image from Digital Ocean, you'll need to set the ``ssh_username`` +If using a FreeBSD image from DigitalOcean, you'll need to set the ``ssh_username`` setting to ``freebsd`` in your profile configuration. .. code-block:: yaml diff --git a/doc/topics/development/tests/index.rst b/doc/topics/development/tests/index.rst index 2679ebf0fd..f743b1da09 100644 --- a/doc/topics/development/tests/index.rst +++ b/doc/topics/development/tests/index.rst @@ -219,7 +219,7 @@ the default cloud provider configuration file for DigitalOcean looks like this: .. code-block:: yaml digitalocean-config: - driver: digital_ocean + driver: digitalocean client_key: '' api_key: '' location: New York 1 @@ -230,7 +230,7 @@ must be provided: .. 
code-block:: yaml digitalocean-config: - driver: digital_ocean + driver: digitalocean client_key: wFGEwgregeqw3435gDger api_key: GDE43t43REGTrkilg43934t34qT43t4dgegerGEgg location: New York 1 diff --git a/doc/topics/development/tests/integration.rst b/doc/topics/development/tests/integration.rst index c6140abda5..79c1eb3a48 100644 --- a/doc/topics/development/tests/integration.rst +++ b/doc/topics/development/tests/integration.rst @@ -541,7 +541,7 @@ provider configuration file in the integration test file directory located at ``tests/integration/files/conf/cloud.*.d/``. The following is an example of the default profile configuration file for Digital -Ocean, located at: ``tests/integration/files/conf/cloud.profiles.d/digital_ocean.conf``: +Ocean, located at: ``tests/integration/files/conf/cloud.profiles.d/digitalocean.conf``: .. code-block:: yaml @@ -557,12 +557,12 @@ be provided by the user by editing the provider configuration file before runnin tests. The following is an example of the default provider configuration file for Digital -Ocean, located at: ``tests/integration/files/conf/cloud.providers.d/digital_ocean.conf``: +Ocean, located at: ``tests/integration/files/conf/cloud.providers.d/digitalocean.conf``: .. code-block:: yaml digitalocean-config: - driver: digital_ocean + driver: digitalocean client_key: '' api_key: '' location: New York 1 diff --git a/doc/topics/tutorials/libcloud.rst b/doc/topics/tutorials/libcloud.rst index 793c37aaa8..a66c2e4e76 100644 --- a/doc/topics/tutorials/libcloud.rst +++ b/doc/topics/tutorials/libcloud.rst @@ -13,7 +13,7 @@ Using Apache Libcloud for declarative and procedural multi-cloud orchestration Apache Libcloud is a Python library which hides differences between different cloud provider APIs and allows you to manage different cloud resources through a unified and easy to use API. Apache Libcloud supports over -60 cloud platforms, including Amazon, Microsoft Azure, Digital Ocean, Google Cloud Platform and OpenStack. 
+60 cloud platforms, including Amazon, Microsoft Azure, DigitalOcean, Google Cloud Platform and OpenStack. Execution and state modules are available for Compute, DNS, Storage and Load Balancer drivers from Apache Libcloud in SaltStack. diff --git a/salt/cloud/clouds/digital_ocean.py b/salt/cloud/clouds/digitalocean.py similarity index 98% rename from salt/cloud/clouds/digital_ocean.py rename to salt/cloud/clouds/digitalocean.py index daf5b8f75a..38516dee45 100644 --- a/salt/cloud/clouds/digital_ocean.py +++ b/salt/cloud/clouds/digitalocean.py @@ -20,7 +20,7 @@ under the "SSH Keys" section. personal_access_token: xxx ssh_key_file: /path/to/ssh/key/file ssh_key_names: my-key-name,my-key-name-2 - driver: digital_ocean + driver: digitalocean :depends: requests ''' @@ -59,10 +59,11 @@ except ImportError: # Get logging started log = logging.getLogger(__name__) -__virtualname__ = 'digital_ocean' +__virtualname__ = 'digitalocean' +__virtual_aliases__ = ('digital_ocean', 'do') -# Only load in this module if the DIGITAL_OCEAN configurations are in place +# Only load in this module if the DIGITALOCEAN configurations are in place def __virtual__(): ''' Check for DigitalOcean configurations @@ -274,7 +275,7 @@ def create(vm_): try: # Check for required profile parameters before sending any API calls. 
if vm_['profile'] and config.is_profile_configured(__opts__, - __active_provider_name__ or 'digital_ocean', + __active_provider_name__ or 'digitalocean', vm_['profile'], vm_=vm_) is False: return False @@ -441,7 +442,7 @@ def create(vm_): ret = create_node(kwargs) except Exception as exc: log.error( - 'Error creating {0} on DIGITAL_OCEAN\n\n' + 'Error creating {0} on DIGITALOCEAN\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment: {1}'.format( vm_['name'], @@ -716,12 +717,12 @@ def import_keypair(kwargs=None, call=None): with salt.utils.files.fopen(kwargs['file'], 'r') as public_key_filename: public_key_content = public_key_filename.read() - digital_ocean_kwargs = { + digitalocean_kwargs = { 'name': kwargs['keyname'], 'public_key': public_key_content } - created_result = create_key(digital_ocean_kwargs, call=call) + created_result = create_key(digitalocean_kwargs, call=call) return created_result @@ -938,11 +939,11 @@ def show_pricing(kwargs=None, call=None): if not profile: return {'Error': 'The requested profile was not found'} - # Make sure the profile belongs to Digital Ocean + # Make sure the profile belongs to DigitalOcean provider = profile.get('provider', '0:0') comps = provider.split(':') - if len(comps) < 2 or comps[1] != 'digital_ocean': - return {'Error': 'The requested profile does not belong to Digital Ocean'} + if len(comps) < 2 or comps[1] != 'digitalocean': + return {'Error': 'The requested profile does not belong to DigitalOcean'} raw = {} ret = {} diff --git a/salt/cloud/clouds/gce.py b/salt/cloud/clouds/gce.py index 8d2f891c12..6e26cf8b95 100644 --- a/salt/cloud/clouds/gce.py +++ b/salt/cloud/clouds/gce.py @@ -2643,7 +2643,7 @@ def show_pricing(kwargs=None, call=None): if not profile: return {'Error': 'The requested profile was not found'} - # Make sure the profile belongs to Digital Ocean + # Make sure the profile belongs to DigitalOcean provider = profile.get('provider', '0:0') comps = provider.split(':') if 
len(comps) < 2 or comps[1] != 'gce': diff --git a/salt/config/__init__.py b/salt/config/__init__.py index 74dbf1a6c7..6a89e1f485 100644 --- a/salt/config/__init__.py +++ b/salt/config/__init__.py @@ -2721,7 +2721,7 @@ def old_to_new(opts): providers = ( 'AWS', 'CLOUDSTACK', - 'DIGITAL_OCEAN', + 'DIGITALOCEAN', 'EC2', 'GOGRID', 'IBMSCE', diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py index 454fb39bcf..c32bd4b14f 100644 --- a/salt/utils/parsers.py +++ b/salt/utils/parsers.py @@ -1547,7 +1547,7 @@ class CloudQueriesMixIn(six.with_metaclass(MixInMeta, object)): action='store', help='Display a list of configured profiles. Pass in a cloud ' 'provider to view the provider\'s associated profiles, ' - 'such as digital_ocean, or pass in "all" to list all the ' + 'such as digitalocean, or pass in "all" to list all the ' 'configured profiles.' ) self.add_option_group(group) diff --git a/tests/integration/cloud/providers/test_digital_ocean.py b/tests/integration/cloud/providers/test_digitalocean.py similarity index 98% rename from tests/integration/cloud/providers/test_digital_ocean.py rename to tests/integration/cloud/providers/test_digitalocean.py index 950b637988..adbf76f43e 100644 --- a/tests/integration/cloud/providers/test_digital_ocean.py +++ b/tests/integration/cloud/providers/test_digitalocean.py @@ -17,7 +17,7 @@ from salt.config import cloud_providers_config # Create the cloud instance name to be used throughout the tests INSTANCE_NAME = generate_random_name('CLOUD-TEST-') -PROVIDER_NAME = 'digital_ocean' +PROVIDER_NAME = 'digitalocean' class DigitalOceanTest(ShellCase): @@ -66,7 +66,7 @@ class DigitalOceanTest(ShellCase): def test_list_images(self): ''' - Tests the return of running the --list-images command for digital ocean + Tests the return of running the --list-images command for digitalocean ''' image_list = self.run_cloud('--list-images {0}'.format(PROVIDER_NAME)) self.assertIn( @@ -76,7 +76,7 @@ class DigitalOceanTest(ShellCase): def 
test_list_locations(self): ''' - Tests the return of running the --list-locations command for digital ocean + Tests the return of running the --list-locations command for digitalocean ''' _list_locations = self.run_cloud('--list-locations {0}'.format(PROVIDER_NAME)) self.assertIn( @@ -86,7 +86,7 @@ class DigitalOceanTest(ShellCase): def test_list_sizes(self): ''' - Tests the return of running the --list-sizes command for digital ocean + Tests the return of running the --list-sizes command for digitalocean ''' _list_sizes = self.run_cloud('--list-sizes {0}'.format(PROVIDER_NAME)) self.assertIn( diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py index 3eb85125e1..983f34fded 100644 --- a/tests/integration/cloud/test_cloud.py +++ b/tests/integration/cloud/test_cloud.py @@ -40,11 +40,11 @@ class CloudClientTestCase(ShellCase): @expensiveTest def setUp(self): self.config_file = os.path.join(RUNTIME_VARS.TMP_CONF_CLOUD_PROVIDER_INCLUDES, - 'digital_ocean.conf') + 'digitalocean.conf') self.provider_name = 'digitalocean-config' self.image_name = '14.04.5 x64' - # Use a --list-images salt-cloud call to see if the Digital Ocean provider is + # Use a --list-images salt-cloud call to see if the DigitalOcean provider is # configured correctly before running any tests. 
images = self.run_cloud('--list-images {0}'.format(self.provider_name)) diff --git a/tests/integration/files/conf/cloud.providers.d/digital_ocean.conf b/tests/integration/files/conf/cloud.providers.d/digital_ocean.conf index 44f89b558f..b0248442a9 100644 --- a/tests/integration/files/conf/cloud.providers.d/digital_ocean.conf +++ b/tests/integration/files/conf/cloud.providers.d/digital_ocean.conf @@ -1,5 +1,5 @@ digitalocean-config: - driver: digital_ocean + driver: digitalocean personal_access_token: '' ssh_key_file: '' ssh_key_name: '' diff --git a/tests/unit/config/test_config.py b/tests/unit/config/test_config.py index 00914ee15d..948d2ee35e 100644 --- a/tests/unit/config/test_config.py +++ b/tests/unit/config/test_config.py @@ -727,8 +727,8 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin): Tests passing in valid provider and profile config files successfully ''' providers = {'test-provider': - {'digital_ocean': - {'driver': 'digital_ocean', 'profiles': {}}}} + {'digitalocean': + {'driver': 'digitalocean', 'profiles': {}}}} overrides = {'test-profile': {'provider': 'test-provider', 'image': 'Ubuntu 12.10 x64', @@ -736,7 +736,7 @@ class ConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin): 'conf_file': PATH} ret = {'test-profile': {'profile': 'test-profile', - 'provider': 'test-provider:digital_ocean', + 'provider': 'test-provider:digitalocean', 'image': 'Ubuntu 12.10 x64', 'size': '512MB'}} self.assertEqual(sconfig.apply_vm_profiles_config(providers, diff --git a/tests/unit/utils/test_schema.py b/tests/unit/utils/test_schema.py index a8181ffe86..689bd8a1b8 100644 --- a/tests/unit/utils/test_schema.py +++ b/tests/unit/utils/test_schema.py @@ -136,7 +136,7 @@ class ConfigTestCase(TestCase): def test_optional_requirements_config(self): class BaseRequirements(schema.Schema): - driver = schema.StringItem(default='digital_ocean', format='hidden') + driver = schema.StringItem(default='digitalocean', format='hidden') class 
SSHKeyFileSchema(schema.Schema): ssh_key_file = schema.StringItem( @@ -149,13 +149,13 @@ class ConfigTestCase(TestCase): ssh_key_names = schema.StringItem( title='SSH Key Names', description='The names of an SSH key being managed on ' - 'Digital Ocean account which will be used to ' + 'DigitalOcean account which will be used to ' 'authenticate on the deployed VMs', ) class Requirements(BaseRequirements): - title = 'Digital Ocean' - description = 'Digital Ocean Cloud VM configuration requirements.' + title = 'DigitalOcean' + description = 'DigitalOcean Cloud VM configuration requirements.' personal_access_token = schema.StringItem( title='Personal Access Token', @@ -174,12 +174,12 @@ class ConfigTestCase(TestCase): expected = { '$schema': 'http://json-schema.org/draft-04/schema#', - 'title': 'Digital Ocean', - 'description': 'Digital Ocean Cloud VM configuration requirements.', + 'title': 'DigitalOcean', + 'description': 'DigitalOcean Cloud VM configuration requirements.', 'type': 'object', 'properties': { 'driver': { - 'default': 'digital_ocean', + 'default': 'digitalocean', 'format': 'hidden', 'type': 'string', 'title': 'driver' @@ -222,8 +222,8 @@ class ConfigTestCase(TestCase): self.assertDictEqual(expected, Requirements.serialize()) class Requirements2(BaseRequirements): - title = 'Digital Ocean' - description = 'Digital Ocean Cloud VM configuration requirements.' + title = 'DigitalOcean' + description = 'DigitalOcean Cloud VM configuration requirements.' 
personal_access_token = schema.StringItem( title='Personal Access Token', @@ -239,7 +239,7 @@ class ConfigTestCase(TestCase): ssh_key_names = schema.StringItem( title='SSH Key Names', description='The names of an SSH key being managed on ' - 'Digital Ocean account which will be used to ' + 'DigitalOcean account which will be used to ' 'authenticate on the deployed VMs') requirements_definition = schema.AnyOfItem( @@ -251,12 +251,12 @@ class ConfigTestCase(TestCase): expected = { '$schema': 'http://json-schema.org/draft-04/schema#', - 'title': 'Digital Ocean', - 'description': 'Digital Ocean Cloud VM configuration requirements.', + 'title': 'DigitalOcean', + 'description': 'DigitalOcean Cloud VM configuration requirements.', 'type': 'object', 'properties': { 'driver': { - 'default': 'digital_ocean', + 'default': 'digitalocean', 'format': 'hidden', 'type': 'string', 'title': 'driver' @@ -299,19 +299,19 @@ class ConfigTestCase(TestCase): self.assertDictContainsSubset(expected, Requirements2.serialize()) class Requirements3(schema.Schema): - title = 'Digital Ocean' - description = 'Digital Ocean Cloud VM configuration requirements.' + title = 'DigitalOcean' + description = 'DigitalOcean Cloud VM configuration requirements.' merge_reqs = Requirements(flatten=True) expected = { '$schema': 'http://json-schema.org/draft-04/schema#', - 'title': 'Digital Ocean', - 'description': 'Digital Ocean Cloud VM configuration requirements.', + 'title': 'DigitalOcean', + 'description': 'DigitalOcean Cloud VM configuration requirements.', 'type': 'object', 'properties': { 'driver': { - 'default': 'digital_ocean', + 'default': 'digitalocean', 'format': 'hidden', 'type': 'string', 'title': 'driver' @@ -354,8 +354,8 @@ class ConfigTestCase(TestCase): self.assertDictContainsSubset(expected, Requirements3.serialize()) class Requirements4(schema.Schema): - title = 'Digital Ocean' - description = 'Digital Ocean Cloud VM configuration requirements.' 
+ title = 'DigitalOcean' + description = 'DigitalOcean Cloud VM configuration requirements.' merge_reqs = Requirements(flatten=True) @@ -367,7 +367,7 @@ class ConfigTestCase(TestCase): ssh_key_names_2 = schema.StringItem( title='SSH Key Names', description='The names of an SSH key being managed on ' - 'Digital Ocean account which will be used to ' + 'DigitalOcean account which will be used to ' 'authenticate on the deployed VMs') requirements_definition_2 = schema.AnyOfItem( @@ -379,12 +379,12 @@ class ConfigTestCase(TestCase): expected = { '$schema': 'http://json-schema.org/draft-04/schema#', - 'title': 'Digital Ocean', - 'description': 'Digital Ocean Cloud VM configuration requirements.', + 'title': 'DigitalOcean', + 'description': 'DigitalOcean Cloud VM configuration requirements.', 'type': 'object', 'properties': { 'driver': { - 'default': 'digital_ocean', + 'default': 'digitalocean', 'format': 'hidden', 'type': 'string', 'title': 'driver' @@ -446,7 +446,7 @@ class ConfigTestCase(TestCase): @skipIf(HAS_JSONSCHEMA is False, 'The \'jsonschema\' library is missing') def test_optional_requirements_config_validation(self): class BaseRequirements(schema.Schema): - driver = schema.StringItem(default='digital_ocean', format='hidden') + driver = schema.StringItem(default='digitalocean', format='hidden') class SSHKeyFileSchema(schema.Schema): ssh_key_file = schema.StringItem( @@ -462,8 +462,8 @@ class ConfigTestCase(TestCase): 'authenticate on the deployed VMs') class Requirements(BaseRequirements): - title = 'Digital Ocean' - description = 'Digital Ocean Cloud VM configuration requirements.' + title = 'DigitalOcean' + description = 'DigitalOcean Cloud VM configuration requirements.' 
personal_access_token = schema.StringItem( title='Personal Access Token', From afaf76d0ba31dfe88d9e79aac615a4660e50cbac Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Tue, 19 Sep 2017 12:50:50 -0600 Subject: [PATCH 552/639] add release note --- doc/topics/releases/oxygen.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/doc/topics/releases/oxygen.rst b/doc/topics/releases/oxygen.rst index c11a8c97bd..4c651bfce9 100644 --- a/doc/topics/releases/oxygen.rst +++ b/doc/topics/releases/oxygen.rst @@ -116,6 +116,14 @@ Newer PyWinRM Versions Versions of ``pywinrm>=0.2.1`` are finally able to disable validation of self signed certificates. :ref:`Here` for more information. +DigitalOcean +------------ + +The DigitalOcean driver has been renamed to conform to the companies name. The +new driver name is ``digitalocean``. The old name ``digital_ocean`` and a +short one ``do`` will still be supported through virtual aliases, this is mostly +cosmetic. + Solaris Logical Domains In Virtual Grain ---------------------------------------- From fccc4aff068b53a59982b9ecacc4d36cb50fbfbe Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 14 Sep 2017 05:14:36 -0400 Subject: [PATCH 553/639] Added external VMware VSAN libraries --- salt/ext/vsan/vsanapiutils.py | 2 - salt/ext/vsan/vsanmgmtObjects.py | 142 +++++++++++++++++++++++++++++++ 2 files changed, 142 insertions(+), 2 deletions(-) create mode 100644 salt/ext/vsan/vsanmgmtObjects.py diff --git a/salt/ext/vsan/vsanapiutils.py b/salt/ext/vsan/vsanapiutils.py index 6b9b1b826c..fce4945e23 100644 --- a/salt/ext/vsan/vsanapiutils.py +++ b/salt/ext/vsan/vsanapiutils.py @@ -7,7 +7,6 @@ Copyright 2016 VMware, Inc. All rights reserved. 
This module defines basic helper functions used in the sampe codes """ -# pylint: skip-file __author__ = 'VMware, Inc' from pyVmomi import vim, vmodl, SoapStubAdapter @@ -163,4 +162,3 @@ def WaitForTasks(tasks, si): finally: if filter: filter.Destroy() - diff --git a/salt/ext/vsan/vsanmgmtObjects.py b/salt/ext/vsan/vsanmgmtObjects.py new file mode 100644 index 0000000000..ebad265adb --- /dev/null +++ b/salt/ext/vsan/vsanmgmtObjects.py @@ -0,0 +1,142 @@ +from pyVmomi.VmomiSupport import CreateDataType, CreateManagedType, CreateEnumType, AddVersion, AddVersionParent, F_LINK, F_LINKABLE, F_OPTIONAL + +CreateManagedType('vim.cluster.VsanPerformanceManager', 'VsanPerformanceManager', 'vmodl.ManagedObject', 'vim.version.version9', [], [('setStatsObjectPolicy', 'VsanPerfSetStatsObjectPolicy', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('profile', 'vim.vm.ProfileSpec', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('deleteStatsObject', 'VsanPerfDeleteStatsObject', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('createStatsObjectTask', 'VsanPerfCreateStatsObjectTask', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('profile', 'vim.vm.ProfileSpec', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('deleteStatsObjectTask', 'VsanPerfDeleteStatsObjectTask', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('queryClusterHealth', 'VsanPerfQueryClusterHealth', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'vmodl.DynamicData[]', 'vmodl.DynamicData[]'), 'System.Read', None), 
('queryStatsObjectInformation', 'VsanPerfQueryStatsObjectInformation', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanObjectInformation', 'vim.cluster.VsanObjectInformation'), 'System.Read', None), ('queryNodeInformation', 'VsanPerfQueryNodeInformation', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanPerfNodeInformation[]', 'vim.cluster.VsanPerfNodeInformation[]'), 'System.Read', None), ('queryVsanPerf', 'VsanPerfQueryPerf', 'vim.version.version9', (('querySpecs', 'vim.cluster.VsanPerfQuerySpec[]', 'vim.version.version9', 0, None), ('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanPerfEntityMetricCSV[]', 'vim.cluster.VsanPerfEntityMetricCSV[]'), 'System.Read', None), ('getSupportedEntityTypes', 'VsanPerfGetSupportedEntityTypes', 'vim.version.version9', tuple(), (0 | F_OPTIONAL, 'vim.cluster.VsanPerfEntityType[]', 'vim.cluster.VsanPerfEntityType[]'), 'System.Read', None), ('createStatsObject', 'VsanPerfCreateStatsObject', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('profile', 'vim.vm.ProfileSpec', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'string', 'string'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanVcDiskManagementSystem', 'VimClusterVsanVcDiskManagementSystem', 'vmodl.ManagedObject', 'vim.version.version10', [], [('initializeDiskMappings', 'InitializeDiskMappings', 'vim.version.version10', (('spec', 'vim.vsan.host.DiskMappingCreationSpec', 'vim.version.version10', 0, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('retrieveAllFlashCapabilities', 'RetrieveAllFlashCapabilities', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0 | F_OPTIONAL, 
'vim.vsan.host.VsanHostCapability[]', 'vim.vsan.host.VsanHostCapability[]'), 'System.Read', None), ('queryDiskMappings', 'QueryDiskMappings', 'vim.version.version10', (('host', 'vim.HostSystem', 'vim.version.version10', 0, None), ), (0 | F_OPTIONAL, 'vim.vsan.host.DiskMapInfoEx[]', 'vim.vsan.host.DiskMapInfoEx[]'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanObjectSystem', 'VsanObjectSystem', 'vmodl.ManagedObject', 'vim.version.version9', [], [('setVsanObjectPolicy', 'VosSetVsanObjectPolicy', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('vsanObjectUuid', 'string', 'vim.version.version9', 0, None), ('profile', 'vim.vm.ProfileSpec', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('queryObjectIdentities', 'VsanQueryObjectIdentities', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('objUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeHealth', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeObjIdentity', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeSpaceSummary', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanObjectIdentityAndHealth', 'vim.cluster.VsanObjectIdentityAndHealth'), 'System.Read', None), ('queryVsanObjectInformation', 'VosQueryVsanObjectInformation', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('vsanObjectQuerySpecs', 'vim.cluster.VsanObjectQuerySpec[]', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanObjectInformation[]', 'vim.cluster.VsanObjectInformation[]'), 'System.Read', None), ]) +CreateManagedType('vim.host.VsanStretchedClusterSystem', 'VimHostVsanStretchedClusterSystem', 'vmodl.ManagedObject', 'vim.version.version10', [], [('getStretchedClusterInfoFromCmmds', 
'VSANHostGetStretchedClusterInfoFromCmmds', 'vim.version.version10', tuple(), (0 | F_OPTIONAL, 'vim.host.VSANStretchedClusterHostInfo[]', 'vim.host.VSANStretchedClusterHostInfo[]'), 'System.Read', None), ('witnessJoinVsanCluster', 'VSANWitnessJoinVsanCluster', 'vim.version.version10', (('clusterUuid', 'string', 'vim.version.version10', 0, None), ('preferredFd', 'string', 'vim.version.version10', 0, None), ('disableVsanAllowed', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'void', 'void'), 'System.Read', None), ('witnessSetPreferredFaultDomain', 'VSANWitnessSetPreferredFaultDomain', 'vim.version.version10', (('preferredFd', 'string', 'vim.version.version10', 0, None), ), (0, 'void', 'void'), 'System.Read', None), ('addUnicastAgent', 'VSANHostAddUnicastAgent', 'vim.version.version10', (('witnessAddress', 'string', 'vim.version.version10', 0, None), ('witnessPort', 'int', 'vim.version.version10', 0 | F_OPTIONAL, None), ('overwrite', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'void', 'void'), 'System.Read', None), ('clusterGetPreferredFaultDomain', 'VSANClusterGetPreferredFaultDomain', 'vim.version.version10', tuple(), (0 | F_OPTIONAL, 'vim.host.VSANCmmdsPreferredFaultDomainInfo', 'vim.host.VSANCmmdsPreferredFaultDomainInfo'), 'System.Read', None), ('witnessLeaveVsanCluster', 'VSANWitnessLeaveVsanCluster', 'vim.version.version10', tuple(), (0, 'void', 'void'), 'System.Read', None), ('getStretchedClusterCapability', 'VSANHostGetStretchedClusterCapability', 'vim.version.version10', tuple(), (0, 'vim.host.VSANStretchedClusterHostCapability', 'vim.host.VSANStretchedClusterHostCapability'), 'System.Read', None), ('removeUnicastAgent', 'VSANHostRemoveUnicastAgent', 'vim.version.version10', (('witnessAddress', 'string', 'vim.version.version10', 0, None), ('ignoreExistence', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'void', 'void'), 'System.Read', None), ('listUnicastAgent', 'VSANHostListUnicastAgent', 
'vim.version.version10', tuple(), (0, 'string', 'string'), 'System.Read', None), ]) +CreateManagedType('vim.VsanUpgradeSystemEx', 'VsanUpgradeSystemEx', 'vmodl.ManagedObject', 'vim.version.version10', [], [('performUpgrade', 'PerformVsanUpgradeEx', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('performObjectUpgrade', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ('downgradeFormat', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ('allowReducedRedundancy', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ('excludeHosts', 'vim.HostSystem[]', 'vim.version.version10', 0 | F_OPTIONAL, None), ('spec', 'vim.cluster.VsanDiskFormatConversionSpec', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('performUpgradePreflightCheck', 'PerformVsanUpgradePreflightCheckEx', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('downgradeFormat', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ('spec', 'vim.cluster.VsanDiskFormatConversionSpec', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanDiskFormatConversionCheckResult', 'vim.cluster.VsanDiskFormatConversionCheckResult'), 'System.Read', None), ('retrieveSupportedFormatVersion', 'RetrieveSupportedVsanFormatVersion', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0, 'int', 'int'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanCapabilitySystem', 'VsanCapabilitySystem', 'vmodl.ManagedObject', 'vim.version.version10', [], [('getCapabilities', 'VsanGetCapabilities', 'vim.version.version10', (('targets', 'vmodl.ManagedObject[]', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanCapability[]', 'vim.cluster.VsanCapability[]'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanSpaceReportSystem', 
'VsanSpaceReportSystem', 'vmodl.ManagedObject', 'vim.version.version9', [], [('querySpaceUsage', 'VsanQuerySpaceUsage', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanSpaceUsage', 'vim.cluster.VsanSpaceUsage'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanVcClusterConfigSystem', 'VsanVcClusterConfigSystem', 'vmodl.ManagedObject', 'vim.version.version10', [], [('getConfigInfoEx', 'VsanClusterGetConfig', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0, 'vim.vsan.ConfigInfoEx', 'vim.vsan.ConfigInfoEx'), 'System.Read', None), ('reconfigureEx', 'VsanClusterReconfig', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('vsanReconfigSpec', 'vim.vsan.ReconfigSpec', 'vim.version.version10', 0, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ]) +CreateManagedType('vim.host.VsanHealthSystem', 'HostVsanHealthSystem', 'vmodl.ManagedObject', 'vim.version.version9', [], [('queryAdvCfg', 'VsanHostQueryAdvCfg', 'vim.version.version9', (('options', 'string[]', 'vim.version.version9', 0, None), ), (0, 'vim.option.OptionValue[]', 'vim.option.OptionValue[]'), 'System.Read', None), ('queryPhysicalDiskHealthSummary', 'VsanHostQueryPhysicalDiskHealthSummary', 'vim.version.version9', tuple(), (0, 'vim.host.VsanPhysicalDiskHealthSummary', 'vim.host.VsanPhysicalDiskHealthSummary'), 'System.Read', None), ('startProactiveRebalance', 'VsanStartProactiveRebalance', 'vim.version.version9', (('timeSpan', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('varianceThreshold', 'float', 'vim.version.version9', 0 | F_OPTIONAL, None), ('timeThreshold', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('rateThreshold', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('queryHostInfoByUuids', 'VsanHostQueryHostInfoByUuids', 
'vim.version.version9', (('uuids', 'string[]', 'vim.version.version9', 0, None), ), (0, 'vim.host.VsanQueryResultHostInfo[]', 'vim.host.VsanQueryResultHostInfo[]'), 'System.Read', None), ('queryVersion', 'VsanHostQueryHealthSystemVersion', 'vim.version.version9', tuple(), (0, 'string', 'string'), 'System.Read', None), ('queryVerifyNetworkSettings', 'VsanHostQueryVerifyNetworkSettings', 'vim.version.version9', (('peers', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.host.VsanNetworkHealthResult', 'vim.host.VsanNetworkHealthResult'), 'System.Read', None), ('queryRunIperfClient', 'VsanHostQueryRunIperfClient', 'vim.version.version9', (('multicast', 'boolean', 'vim.version.version9', 0, None), ('serverIp', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.host.VsanNetworkLoadTestResult', 'vim.host.VsanNetworkLoadTestResult'), 'System.Read', None), ('runVmdkLoadTest', 'VsanHostRunVmdkLoadTest', 'vim.version.version9', (('runname', 'string', 'vim.version.version9', 0, None), ('durationSec', 'int', 'vim.version.version9', 0, None), ('specs', 'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0, None), ), (0, 'vim.host.VsanVmdkLoadTestResult[]', 'vim.host.VsanVmdkLoadTestResult[]'), 'System.Read', None), ('queryObjectHealthSummary', 'VsanHostQueryObjectHealthSummary', 'vim.version.version9', (('objUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeObjUuids', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('localHostOnly', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.host.VsanObjectOverallHealth', 'vim.host.VsanObjectOverallHealth'), 'System.Read', None), ('getHclInfo', 'VsanGetHclInfo', 'vim.version.version9', tuple(), (0, 'vim.host.VsanHostHclInfo', 'vim.host.VsanHostHclInfo'), 'System.Read', None), ('cleanupVmdkLoadTest', 'VsanHostCleanupVmdkLoadTest', 'vim.version.version9', (('runname', 'string', 'vim.version.version9', 0, None), ('specs', 
'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'string', 'string'), 'System.Read', None), ('waitForVsanHealthGenerationIdChange', 'VsanWaitForVsanHealthGenerationIdChange', 'vim.version.version9', (('timeout', 'int', 'vim.version.version9', 0, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('stopProactiveRebalance', 'VsanStopProactiveRebalance', 'vim.version.version9', tuple(), (0, 'boolean', 'boolean'), 'System.Read', None), ('repairImmediateObjects', 'VsanHostRepairImmediateObjects', 'vim.version.version9', (('uuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('repairType', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.host.VsanRepairObjectsResult', 'vim.host.VsanRepairObjectsResult'), 'System.Read', None), ('prepareVmdkLoadTest', 'VsanHostPrepareVmdkLoadTest', 'vim.version.version9', (('runname', 'string', 'vim.version.version9', 0, None), ('specs', 'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0, None), ), (0, 'string', 'string'), 'System.Read', None), ('queryRunIperfServer', 'VsanHostQueryRunIperfServer', 'vim.version.version9', (('multicast', 'boolean', 'vim.version.version9', 0, None), ('serverIp', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.host.VsanNetworkLoadTestResult', 'vim.host.VsanNetworkLoadTestResult'), 'System.Read', None), ('queryCheckLimits', 'VsanHostQueryCheckLimits', 'vim.version.version9', tuple(), (0, 'vim.host.VsanLimitHealthResult', 'vim.host.VsanLimitHealthResult'), 'System.Read', None), ('getProactiveRebalanceInfo', 'VsanGetProactiveRebalanceInfo', 'vim.version.version9', tuple(), (0, 'vim.host.VsanProactiveRebalanceInfoEx', 'vim.host.VsanProactiveRebalanceInfoEx'), 'System.Read', None), ('checkClomdLiveness', 'VsanHostClomdLiveness', 'vim.version.version9', tuple(), (0, 'boolean', 'boolean'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanVcClusterHealthSystem', 'VsanVcClusterHealthSystem', 
'vmodl.ManagedObject', 'vim.version.version9', [], [('queryClusterCreateVmHealthHistoryTest', 'VsanQueryVcClusterCreateVmHealthHistoryTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('count', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanClusterCreateVmHealthTestResult[]', 'vim.cluster.VsanClusterCreateVmHealthTestResult[]'), 'System.Read', None), ('setLogLevel', 'VsanHealthSetLogLevel', 'vim.version.version9', (('level', 'vim.cluster.VsanHealthLogLevelEnum', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'void', 'void'), 'System.Read', None), ('testVsanClusterTelemetryProxy', 'VsanHealthTestVsanClusterTelemetryProxy', 'vim.version.version9', (('proxyConfig', 'vim.cluster.VsanClusterTelemetryProxyConfig', 'vim.version.version9', 0, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('uploadHclDb', 'VsanVcUploadHclDb', 'vim.version.version9', (('db', 'string', 'vim.version.version9', 0, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('updateHclDbFromWeb', 'VsanVcUpdateHclDbFromWeb', 'vim.version.version9', (('url', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('repairClusterObjectsImmediate', 'VsanHealthRepairClusterObjectsImmediate', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('uuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('queryClusterNetworkPerfTest', 'VsanQueryVcClusterNetworkPerfTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('multicast', 'boolean', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterNetworkLoadTestResult', 'vim.cluster.VsanClusterNetworkLoadTestResult'), 'System.Read', None), ('queryClusterVmdkLoadHistoryTest', 
'VsanQueryVcClusterVmdkLoadHistoryTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('count', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('taskId', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanClusterVmdkLoadTestResult[]', 'vim.cluster.VsanClusterVmdkLoadTestResult[]'), 'System.Read', None), ('queryVsanClusterHealthCheckInterval', 'VsanHealthQueryVsanClusterHealthCheckInterval', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'int', 'int'), 'System.Read', None), ('queryClusterCreateVmHealthTest', 'VsanQueryVcClusterCreateVmHealthTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('timeout', 'int', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterCreateVmHealthTestResult', 'vim.cluster.VsanClusterCreateVmHealthTestResult'), 'System.Read', None), ('getClusterHclInfo', 'VsanVcClusterGetHclInfo', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('includeHostsResult', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanClusterHclInfo', 'vim.cluster.VsanClusterHclInfo'), 'System.Read', None), ('queryAttachToSrHistory', 'VsanQueryAttachToSrHistory', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('count', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('taskId', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanAttachToSrOperation[]', 'vim.cluster.VsanAttachToSrOperation[]'), 'System.Read', None), ('rebalanceCluster', 'VsanRebalanceCluster', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('targetHosts', 'vim.HostSystem[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 
'vim.Task'), 'System.Read', None), ('runVmdkLoadTest', 'VsanVcClusterRunVmdkLoadTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('runname', 'string', 'vim.version.version9', 0, None), ('durationSec', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('specs', 'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('action', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('sendVsanTelemetry', 'VsanHealthSendVsanTelemetry', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'void', 'void'), 'System.Read', None), ('queryClusterNetworkPerfHistoryTest', 'VsanQueryVcClusterNetworkPerfHistoryTest', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('count', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanClusterNetworkLoadTestResult[]', 'vim.cluster.VsanClusterNetworkLoadTestResult[]'), 'System.Read', None), ('queryClusterHealthSummary', 'VsanQueryVcClusterHealthSummary', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('vmCreateTimeout', 'int', 'vim.version.version9', 0 | F_OPTIONAL, None), ('objUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeObjUuids', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('fields', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('fetchFromCache', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanClusterHealthSummary', 'vim.cluster.VsanClusterHealthSummary'), 'System.Read', None), ('stopRebalanceCluster', 'VsanStopRebalanceCluster', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('targetHosts', 'vim.HostSystem[]', 'vim.version.version9', 0 | F_OPTIONAL, 
None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('queryVsanClusterHealthConfig', 'VsanHealthQueryVsanClusterHealthConfig', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterHealthConfigs', 'vim.cluster.VsanClusterHealthConfigs'), 'System.Read', None), ('attachVsanSupportBundleToSr', 'VsanAttachVsanSupportBundleToSr', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('srNumber', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('queryClusterVmdkWorkloadTypes', 'VsanQueryVcClusterVmdkWorkloadTypes', 'vim.version.version9', tuple(), (0, 'vim.cluster.VsanStorageWorkloadType[]', 'vim.cluster.VsanStorageWorkloadType[]'), 'System.Read', None), ('queryVerifyClusterHealthSystemVersions', 'VsanVcClusterQueryVerifyHealthSystemVersions', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterHealthSystemVersionResult', 'vim.cluster.VsanClusterHealthSystemVersionResult'), 'System.Read', None), ('isRebalanceRunning', 'VsanHealthIsRebalanceRunning', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('targetHosts', 'vim.HostSystem[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('setVsanClusterHealthCheckInterval', 'VsanHealthSetVsanClusterHealthCheckInterval', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ('vsanClusterHealthCheckInterval', 'int', 'vim.version.version9', 0, None), ), (0, 'void', 'void'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanVcStretchedClusterSystem', 'VimClusterVsanVcStretchedClusterSystem', 'vmodl.ManagedObject', 'vim.version.version10', [], [('isWitnessHost', 'VSANVcIsWitnessHost', 'vim.version.version10', 
(('host', 'vim.HostSystem', 'vim.version.version10', 0, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('setPreferredFaultDomain', 'VSANVcSetPreferredFaultDomain', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('preferredFd', 'string', 'vim.version.version10', 0, None), ('witnessHost', 'vim.HostSystem', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('getPreferredFaultDomain', 'VSANVcGetPreferredFaultDomain', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0 | F_OPTIONAL, 'vim.cluster.VSANPreferredFaultDomainInfo', 'vim.cluster.VSANPreferredFaultDomainInfo'), 'System.Read', None), ('getWitnessHosts', 'VSANVcGetWitnessHosts', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ), (0 | F_OPTIONAL, 'vim.cluster.VSANWitnessHostInfo[]', 'vim.cluster.VSANWitnessHostInfo[]'), 'System.Read', None), ('retrieveStretchedClusterVcCapability', 'VSANVcRetrieveStretchedClusterVcCapability', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('verifyAllConnected', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VSANStretchedClusterCapability[]', 'vim.cluster.VSANStretchedClusterCapability[]'), 'System.Read', None), ('convertToStretchedCluster', 'VSANVcConvertToStretchedCluster', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('faultDomainConfig', 'vim.cluster.VSANStretchedClusterFaultDomainConfig', 'vim.version.version10', 0, None), ('witnessHost', 'vim.HostSystem', 'vim.version.version10', 0, None), ('preferredFd', 'string', 'vim.version.version10', 0, None), ('diskMapping', 'vim.vsan.host.DiskMapping', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', 
None), ('removeWitnessHost', 'VSANVcRemoveWitnessHost', 'vim.version.version10', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version10', 0, None), ('witnessHost', 'vim.HostSystem', 'vim.version.version10', 0 | F_OPTIONAL, None), ('witnessAddress', 'string', 'vim.version.version10', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ]) +CreateManagedType('vim.cluster.VsanClusterHealthSystem', 'VsanClusterHealthSystem', 'vmodl.ManagedObject', 'vim.version.version9', [], [('queryPhysicalDiskHealthSummary', 'VsanQueryClusterPhysicalDiskHealthSummary', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.host.VsanPhysicalDiskHealthSummary[]', 'vim.host.VsanPhysicalDiskHealthSummary[]'), 'System.Read', None), ('queryClusterNetworkPerfTest', 'VsanQueryClusterNetworkPerfTest', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ('multicast', 'boolean', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterNetworkLoadTestResult', 'vim.cluster.VsanClusterNetworkLoadTestResult'), 'System.Read', None), ('queryAdvCfgSync', 'VsanQueryClusterAdvCfgSync', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterAdvCfgSyncResult[]', 'vim.cluster.VsanClusterAdvCfgSyncResult[]'), 'System.Read', None), ('repairClusterImmediateObjects', 'VsanRepairClusterImmediateObjects', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ('uuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanClusterHealthSystemObjectsRepairResult', 'vim.cluster.VsanClusterHealthSystemObjectsRepairResult'), 'System.Read', None), 
('queryVerifyClusterNetworkSettings', 'VsanQueryVerifyClusterNetworkSettings', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterNetworkHealthResult', 'vim.cluster.VsanClusterNetworkHealthResult'), 'System.Read', None), ('queryClusterCreateVmHealthTest', 'VsanQueryClusterCreateVmHealthTest', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ('timeout', 'int', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterCreateVmHealthTestResult', 'vim.cluster.VsanClusterCreateVmHealthTestResult'), 'System.Read', None), ('queryClusterHealthSystemVersions', 'VsanQueryClusterHealthSystemVersions', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterHealthSystemVersionResult', 'vim.cluster.VsanClusterHealthSystemVersionResult'), 'System.Read', None), ('getClusterHclInfo', 'VsanClusterGetHclInfo', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterHclInfo', 'vim.cluster.VsanClusterHclInfo'), 'System.Read', None), ('queryCheckLimits', 'VsanQueryClusterCheckLimits', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterLimitHealthResult', 'vim.cluster.VsanClusterLimitHealthResult'), 'System.Read', None), ('queryCaptureVsanPcap', 'VsanQueryClusterCaptureVsanPcap', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ('duration', 'int', 'vim.version.version9', 0, None), ('vmknic', 
'vim.cluster.VsanClusterHostVmknicMapping[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeRawPcap', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('includeIgmp', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL, None), ('cmmdsMsgTypeFilter', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('cmmdsPorts', 'int[]', 'vim.version.version9', 0 | F_OPTIONAL, None), ('clusterUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanVsanClusterPcapResult', 'vim.cluster.VsanVsanClusterPcapResult'), 'System.Read', None), ('checkClusterClomdLiveness', 'VsanCheckClusterClomdLiveness', 'vim.version.version9', (('hosts', 'string[]', 'vim.version.version9', 0, None), ('esxRootPassword', 'string', 'vim.version.version9', 0, None), ), (0, 'vim.cluster.VsanClusterClomdLivenessResult', 'vim.cluster.VsanClusterClomdLivenessResult'), 'System.Read', None), ]) +CreateDataType('vim.host.VSANCmmdsNodeInfo', 'VimHostVSANCmmdsNodeInfo', 'vmodl.DynamicData', 'vim.version.version10', [('nodeUuid', 'string', 'vim.version.version10', 0), ('isWitness', 'boolean', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VsanPhysicalDiskHealth', 'VsanPhysicalDiskHealth', 'vmodl.DynamicData', 'vim.version.version9', [('name', 'string', 'vim.version.version9', 0), ('uuid', 'string', 'vim.version.version9', 0), ('inCmmds', 'boolean', 'vim.version.version9', 0), ('inVsi', 'boolean', 'vim.version.version9', 0), ('dedupScope', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('formatVersion', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('isAllFlash', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('congestionValue', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('congestionArea', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('congestionHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('metadataHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('operationalHealthDescription', 'string', 'vim.version.version9', 
0 | F_OPTIONAL), ('operationalHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('dedupUsageHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('capacityHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('summaryHealth', 'string', 'vim.version.version9', 0), ('capacity', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('usedCapacity', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('reservedCapacity', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('totalBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('freeBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('hashedBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('dedupedBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('scsiDisk', 'vim.host.ScsiDisk', 'vim.version.version9', 0 | F_OPTIONAL), ('usedComponents', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('maxComponents', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('compLimitHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.DataEfficiencyConfig', 'VsanDataEfficiencyConfig', 'vmodl.DynamicData', 'vim.version.version10', [('dedupEnabled', 'boolean', 'vim.version.version10', 0), ('compressionEnabled', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.StorageComplianceResult', 'VsanStorageComplianceResult', 'vmodl.DynamicData', 'vim.version.version9', [('checkTime', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('profile', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('objectUUID', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('complianceStatus', 'vim.cluster.StorageComplianceStatus', 'vim.version.version9', 0), ('mismatch', 'boolean', 'vim.version.version9', 0), ('violatedPolicies', 'vim.cluster.StoragePolicyStatus[]', 'vim.version.version9', 0 | F_OPTIONAL), ('operationalStatus', 'vim.cluster.StorageOperationalStatus', 'vim.version.version9', 0 | F_OPTIONAL)]) 
+CreateDataType('vim.cluster.VsanClusterHealthGroup', 'VsanClusterHealthGroup', 'vmodl.DynamicData', 'vim.version.version9', [('groupId', 'string', 'vim.version.version9', 0), ('groupName', 'string', 'vim.version.version9', 0), ('groupHealth', 'string', 'vim.version.version9', 0), ('groupTests', 'vim.cluster.VsanClusterHealthTest[]', 'vim.version.version9', 0 | F_OPTIONAL), ('groupDetails', 'vim.cluster.VsanClusterHealthResultBase[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanSpaceUsageDetailResult', 'VsanSpaceUsageDetailResult', 'vmodl.DynamicData', 'vim.version.version9', [('spaceUsageByObjectType', 'vim.cluster.VsanObjectSpaceSummary[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanAttachToSrOperation', 'VsanAttachToSrOperation', 'vmodl.DynamicData', 'vim.version.version9', [('task', 'vim.Task', 'vim.version.version9', 0 | F_OPTIONAL), ('success', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('timestamp', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('srNumber', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanObjectSpaceSummary', 'VsanObjectSpaceSummary', 'vmodl.DynamicData', 'vim.version.version9', [('objType', 'vim.cluster.VsanObjectTypeEnum', 'vim.version.version9', 0 | F_OPTIONAL), ('overheadB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('temporaryOverheadB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('primaryCapacityB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('provisionCapacityB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('reservedCapacityB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('overReservedB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('physicalUsedB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('usedB', 'long', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHclInfo', 'VsanClusterHclInfo', 'vmodl.DynamicData', 'vim.version.version9', [('hclDbLastUpdate', 
'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('hclDbAgeHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('hostResults', 'vim.host.VsanHostHclInfo[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfGraph', 'VsanPerfGraph', 'vmodl.DynamicData', 'vim.version.version9', [('id', 'string', 'vim.version.version9', 0), ('metrics', 'vim.cluster.VsanPerfMetricId[]', 'vim.version.version9', 0), ('unit', 'vim.cluster.VsanPerfStatsUnitType', 'vim.version.version9', 0), ('threshold', 'vim.cluster.VsanPerfThreshold', 'vim.version.version9', 0 | F_OPTIONAL), ('name', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthResultBase', 'VsanClusterHealthResultBase', 'vmodl.DynamicData', 'vim.version.version9', [('label', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfTopEntity', 'VsanPerfTopEntity', 'vmodl.DynamicData', 'vim.version.version9', [('entityRefId', 'string', 'vim.version.version9', 0), ('value', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanClusterBalancePerDiskInfo', 'VsanClusterBalancePerDiskInfo', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('fullness', 'long', 'vim.version.version9', 0), ('variance', 'long', 'vim.version.version9', 0), ('fullnessAboveThreshold', 'long', 'vim.version.version9', 0), ('dataToMoveB', 'long', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanClusterHealthTest', 'VsanClusterHealthTest', 'vmodl.DynamicData', 'vim.version.version9', [('testId', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('testName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('testDescription', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('testShortDescription', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('testHealth', 'string', 
'vim.version.version9', 0 | F_OPTIONAL), ('testDetails', 'vim.cluster.VsanClusterHealthResultBase[]', 'vim.version.version9', 0 | F_OPTIONAL), ('testActions', 'vim.cluster.VsanClusterHealthAction[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.StoragePolicyStatus', 'VsanStoragePolicyStatus', 'vmodl.DynamicData', 'vim.version.version9', [('id', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('expectedValue', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('currentValue', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfMemberInfo', 'VsanPerfMemberInfo', 'vmodl.DynamicData', 'vim.version.version9', [('thumbprint', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanPerfMetricId', 'VsanPerfMetricId', 'vmodl.DynamicData', 'vim.version.version9', [('label', 'string', 'vim.version.version9', 0), ('group', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('rollupType', 'vim.cluster.VsanPerfSummaryType', 'vim.version.version9', 0 | F_OPTIONAL), ('statsType', 'vim.cluster.VsanPerfStatsType', 'vim.version.version9', 0 | F_OPTIONAL), ('name', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('metricsCollectInterval', 'int', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VSANWitnessHostInfo', 'VimClusterVSANWitnessHostInfo', 'vmodl.DynamicData', 'vim.version.version10', [('nodeUuid', 'string', 'vim.version.version10', 0), ('faultDomainName', 'string', 'vim.version.version10', 0 | F_OPTIONAL), ('preferredFdName', 'string', 'vim.version.version10', 0 | F_OPTIONAL), ('preferredFdUuid', 'string', 'vim.version.version10', 0 | F_OPTIONAL), ('unicastAgentAddr', 'string', 'vim.version.version10', 0 | F_OPTIONAL), ('host', 'vim.HostSystem', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanHealthExtMgmtPreCheckResult', 'VsanHealthExtMgmtPreCheckResult', 'vmodl.DynamicData', 
'vim.version.version9', [('overallResult', 'boolean', 'vim.version.version9', 0), ('esxVersionCheckPassed', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('drsCheckPassed', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('eamConnectionCheckPassed', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('installStateCheckPassed', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('results', 'vim.cluster.VsanClusterHealthTest[]', 'vim.version.version9', 0), ('vumRegistered', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.upgradesystem.HostWithHybridDiskgroupIssue', 'VsanHostWithHybridDiskgroupIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('hosts', 'vim.HostSystem[]', 'vim.version.version10', 0)]) +CreateDataType('vim.cluster.VsanPerfMetricSeriesCSV', 'VsanPerfMetricSeriesCSV', 'vmodl.DynamicData', 'vim.version.version9', [('metricId', 'vim.cluster.VsanPerfMetricId', 'vim.version.version9', 0), ('values', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfQuerySpec', 'VsanPerfQuerySpec', 'vmodl.DynamicData', 'vim.version.version9', [('entityRefId', 'string', 'vim.version.version9', 0), ('startTime', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('endTime', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('group', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('labels', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('interval', 'int', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanRepairObjectsResult', 'VsanRepairObjectsResult', 'vmodl.DynamicData', 'vim.version.version9', [('inQueueObjects', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('failedRepairObjects', 'vim.host.VsanFailedRepairObjectResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('notInQueueObjects', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterNetworkPartitionInfo', 
'VsanClusterNetworkPartitionInfo', 'vmodl.DynamicData', 'vim.version.version9', [('hosts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.upgradesystem.MixedEsxVersionIssue', 'VsanMixedEsxVersionIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', []) +CreateDataType('vim.cluster.VsanClusterClomdLivenessResult', 'VsanClusterClomdLivenessResult', 'vmodl.DynamicData', 'vim.version.version9', [('clomdLivenessResult', 'vim.cluster.VsanHostClomdLivenessResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanVsanClusterPcapResult', 'VsanVsanClusterPcapResult', 'vmodl.DynamicData', 'vim.version.version9', [('pkts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('groups', 'vim.cluster.VsanVsanClusterPcapGroup[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issues', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostResults', 'vim.host.VsanVsanPcapResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfMasterInformation', 'VsanPerfMasterInformation', 'vmodl.DynamicData', 'vim.version.version9', [('secSinceLastStatsWrite', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('secSinceLastStatsCollect', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('statsIntervalSec', 'long', 'vim.version.version9', 0), ('collectionFailureHostUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('renamedStatsDirectories', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('statsDirectoryPercentFree', 'long', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanHostCreateVmHealthTestResult', 'VsanHostCreateVmHealthTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('state', 'string', 'vim.version.version9', 0), ('fault', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL)]) 
+CreateDataType('vim.cluster.VsanDiskFormatConversionCheckResult', 'VsanDiskFormatConversionCheckResult', 'vim.VsanUpgradeSystem.PreflightCheckResult', 'vim.version.version10', [('isSupported', 'boolean', 'vim.version.version10', 0), ('targetVersion', 'int', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthSystemObjectsRepairResult', 'VsanClusterHealthSystemObjectsRepairResult', 'vmodl.DynamicData', 'vim.version.version9', [('inRepairingQueueObjects', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('failedRepairObjects', 'vim.host.VsanFailedRepairObjectResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0)]) +CreateDataType('vim.host.VsanHostHclInfo', 'VsanHostHclInfo', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('hclChecked', 'boolean', 'vim.version.version9', 0), ('releaseName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL), ('controllers', 'vim.host.VsanHclControllerInfo[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VSANStretchedClusterCapability', 'VimClusterVSANStretchedClusterCapability', 'vmodl.DynamicData', 'vim.version.version10', [('hostMoId', 'string', 'vim.version.version10', 0), ('connStatus', 'string', 'vim.version.version10', 0 | F_OPTIONAL), ('isSupported', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL), ('hostCapability', 'vim.host.VSANStretchedClusterHostCapability', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanDiskMappingsConfigSpec', 'VimClusterVsanDiskMappingsConfigSpec', 'vmodl.DynamicData', 'vim.version.version10', [('hostDiskMappings', 'vim.cluster.VsanHostDiskMapping[]', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VsanHostVmdkLoadTestResult', 'VsanHostVmdkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 
'vim.version.version9', 0), ('issueFound', 'boolean', 'vim.version.version9', 0), ('faultMessage', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vmdkResults', 'vim.host.VsanVmdkLoadTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.ReconfigSpec', 'VimVsanReconfigSpec', 'vmodl.DynamicData', 'vim.version.version10', [('vsanClusterConfig', 'vim.vsan.cluster.ConfigInfo', 'vim.version.version10', 0 | F_OPTIONAL), ('dataEfficiencyConfig', 'vim.vsan.DataEfficiencyConfig', 'vim.version.version10', 0 | F_OPTIONAL), ('diskMappingSpec', 'vim.cluster.VsanDiskMappingsConfigSpec', 'vim.version.version10', 0 | F_OPTIONAL), ('faultDomainsSpec', 'vim.cluster.VsanFaultDomainsConfigSpec', 'vim.version.version10', 0 | F_OPTIONAL), ('modify', 'boolean', 'vim.version.version10', 0), ('allowReducedRedundancy', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanNetworkPeerHealthResult', 'VsanNetworkPeerHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('peer', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('peerHostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('peerVmknicName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('smallPingTestSuccessPct', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('largePingTestSuccessPct', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('maxLatencyUs', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('onSameIpSubnet', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('sourceVmknicName', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanWitnessSpec', 'VimClusterVsanWitnessSpec', 'vmodl.DynamicData', 'vim.version.version10', [('host', 'vim.HostSystem', 'vim.version.version10', 0), ('preferredFaultDomainName', 'string', 'vim.version.version10', 0), ('diskMapping', 'vim.vsan.host.DiskMapping', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.host.DiskMappingCreationSpec', 
'VimVsanHostDiskMappingCreationSpec', 'vmodl.DynamicData', 'vim.version.version10', [('host', 'vim.HostSystem', 'vim.version.version10', 0), ('cacheDisks', 'vim.host.ScsiDisk[]', 'vim.version.version10', 0 | F_OPTIONAL), ('capacityDisks', 'vim.host.ScsiDisk[]', 'vim.version.version10', 0), ('creationType', 'vim.vsan.host.DiskMappingCreationType', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VsanLimitHealthResult', 'VsanLimitHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0), ('maxComponents', 'int', 'vim.version.version9', 0), ('freeComponents', 'int', 'vim.version.version9', 0), ('componentLimitHealth', 'string', 'vim.version.version9', 0), ('lowestFreeDiskSpacePct', 'int', 'vim.version.version9', 0), ('usedDiskSpaceB', 'long', 'vim.version.version9', 0), ('totalDiskSpaceB', 'long', 'vim.version.version9', 0), ('diskFreeSpaceHealth', 'string', 'vim.version.version9', 0), ('reservedRcSizeB', 'long', 'vim.version.version9', 0), ('totalRcSizeB', 'long', 'vim.version.version9', 0), ('rcFreeReservationHealth', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VSANPreferredFaultDomainInfo', 'VimClusterVSANPreferredFaultDomainInfo', 'vmodl.DynamicData', 'vim.version.version10', [('preferredFaultDomainName', 'string', 'vim.version.version10', 0), ('preferredFaultDomainId', 'string', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VsanObjectOverallHealth', 'VsanObjectOverallHealth', 'vmodl.DynamicData', 'vim.version.version9', [('objectHealthDetail', 'vim.host.VsanObjectHealth[]', 'vim.version.version9', 0 | F_OPTIONAL), ('objectVersionCompliance', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanVsanClusterPcapGroup', 'VsanVsanClusterPcapGroup', 'vmodl.DynamicData', 'vim.version.version9', [('master', 'string', 'vim.version.version9', 0), ('members', 'string[]', 
'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthResultColumnInfo', 'VsanClusterHealthResultColumnInfo', 'vmodl.DynamicData', 'vim.version.version9', [('label', 'string', 'vim.version.version9', 0), ('type', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanClusterNetworkHealthResult', 'VsanClusterNetworkHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostResults', 'vim.host.VsanNetworkHealthResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanVmknicPresent', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('matchingMulticastConfig', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('matchingIpSubnets', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('pingTestSuccess', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('largePingTestSuccess', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('potentialMulticastIssue', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('otherHostsInVsanCluster', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('partitions', 'vim.cluster.VsanClusterNetworkPartitionInfo[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsWithVsanDisabled', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsDisconnected', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsCommFailure', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsInEsxMaintenanceMode', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsInVsanMaintenanceMode', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('infoAboutUnexpectedHosts', 'vim.host.VsanQueryResultHostInfo[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfNodeInformation', 'VsanPerfNodeInformation', 'vmodl.DynamicData', 'vim.version.version9', [('version', 'string', 'vim.version.version9', 0), ('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 
'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL), ('isCmmdsMaster', 'boolean', 'vim.version.version9', 0), ('isStatsMaster', 'boolean', 'vim.version.version9', 0), ('vsanMasterUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanNodeUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('masterInfo', 'vim.cluster.VsanPerfMasterInformation', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfEntityMetricCSV', 'VsanPerfEntityMetricCSV', 'vmodl.DynamicData', 'vim.version.version9', [('entityRefId', 'string', 'vim.version.version9', 0), ('sampleInfo', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('value', 'vim.cluster.VsanPerfMetricSeriesCSV[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.upgradesystem.DiskUnhealthIssue', 'VsanDiskUnhealthIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('uuids', 'string[]', 'vim.version.version10', 0)]) +CreateDataType('vim.cluster.VsanFaultDomainSpec', 'VimClusterVsanFaultDomainSpec', 'vmodl.DynamicData', 'vim.version.version10', [('hosts', 'vim.HostSystem[]', 'vim.version.version10', 0), ('name', 'string', 'vim.version.version10', 0)]) +CreateDataType('vim.vsan.upgradesystem.ObjectInaccessibleIssue', 'VsanObjectInaccessibleIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('uuids', 'string[]', 'vim.version.version10', 0)]) +CreateDataType('vim.cluster.VsanDiskFormatConversionSpec', 'VsanDiskFormatConversionSpec', 'vmodl.DynamicData', 'vim.version.version10', [('dataEfficiencyConfig', 'vim.vsan.DataEfficiencyConfig', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthAction', 'VsanClusterHealthAction', 'vmodl.DynamicData', 'vim.version.version9', [('actionId', 'vim.cluster.VsanClusterHealthActionIdEnum', 'vim.version.version9', 0), ('actionLabel', 'vmodl.LocalizableMessage', 'vim.version.version9', 0), ('actionDescription', 'vmodl.LocalizableMessage', 
'vim.version.version9', 0), ('enabled', 'boolean', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanClusterHealthSystemVersionResult', 'VsanClusterHealthSystemVersionResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostResults', 'vim.cluster.VsanHostHealthSystemVersionResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('vcVersion', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanClusterHealthResultRow', 'VsanClusterHealthResultRow', 'vmodl.DynamicData', 'vim.version.version9', [('values', 'string[]', 'vim.version.version9', 0), ('nestedRows', 'vim.cluster.VsanClusterHealthResultRow[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthSystemStatusResult', 'VsanClusterHealthSystemStatusResult', 'vmodl.DynamicData', 'vim.version.version9', [('status', 'string', 'vim.version.version9', 0), ('goalState', 'string', 'vim.version.version9', 0), ('untrackedHosts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('trackedHostsStatus', 'vim.host.VsanHostHealthSystemStatusResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanHostDiskMapping', 'VimClusterVsanHostDiskMapping', 'vmodl.DynamicData', 'vim.version.version10', [('host', 'vim.HostSystem', 'vim.version.version10', 0), ('cacheDisks', 'vim.host.ScsiDisk[]', 'vim.version.version10', 0 | F_OPTIONAL), ('capacityDisks', 'vim.host.ScsiDisk[]', 'vim.version.version10', 0), ('type', 'vim.cluster.VsanDiskGroupCreationType', 'vim.version.version10', 0)]) +CreateDataType('vim.cluster.VSANStretchedClusterFaultDomainConfig', 'VimClusterVSANStretchedClusterFaultDomainConfig', 'vmodl.DynamicData', 'vim.version.version10', [('firstFdName', 'string', 'vim.version.version10', 0), ('firstFdHosts', 'vim.HostSystem[]', 'vim.version.version10', 0), ('secondFdName', 'string', 'vim.version.version10', 0), ('secondFdHosts', 'vim.HostSystem[]', 
'vim.version.version10', 0)]) +CreateDataType('vim.host.VSANStretchedClusterHostInfo', 'VimHostVSANStretchedClusterHostInfo', 'vmodl.DynamicData', 'vim.version.version10', [('nodeInfo', 'vim.host.VSANCmmdsNodeInfo', 'vim.version.version10', 0), ('faultDomainInfo', 'vim.host.VSANCmmdsFaultDomainInfo', 'vim.version.version10', 0 | F_OPTIONAL), ('preferredFaultDomainInfo', 'vim.host.VSANCmmdsPreferredFaultDomainInfo', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.upgradesystem.HigherObjectsPresentDuringDowngradeIssue', 'VsanHigherObjectsPresentDuringDowngradeIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('uuids', 'string[]', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VSANCmmdsFaultDomainInfo', 'VimHostVSANCmmdsFaultDomainInfo', 'vmodl.DynamicData', 'vim.version.version10', [('faultDomainId', 'string', 'vim.version.version10', 0), ('faultDomainName', 'string', 'vim.version.version10', 0)]) +CreateDataType('vim.fault.VsanNodeNotMaster', 'VsanNodeNotMaster', 'vim.fault.VimFault', 'vim.version.version9', [('vsanMasterUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('cmmdsMasterButNotStatsMaster', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanHostHealthSystemVersionResult', 'VsanHostHealthSystemVersionResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('version', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthConfigs', 'VsanClusterHealthConfigs', 'vmodl.DynamicData', 'vim.version.version9', [('enableVsanTelemetry', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanTelemetryInterval', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanTelemetryProxy', 'vim.cluster.VsanClusterTelemetryProxyConfig', 'vim.version.version9', 0 | F_OPTIONAL), ('configs', 
'vim.cluster.VsanClusterHealthResultKeyValuePair[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterWhatifHostFailuresResult', 'VsanClusterWhatifHostFailuresResult', 'vmodl.DynamicData', 'vim.version.version9', [('numFailures', 'long', 'vim.version.version9', 0), ('totalUsedCapacityB', 'long', 'vim.version.version9', 0), ('totalCapacityB', 'long', 'vim.version.version9', 0), ('totalRcReservationB', 'long', 'vim.version.version9', 0), ('totalRcSizeB', 'long', 'vim.version.version9', 0), ('usedComponents', 'long', 'vim.version.version9', 0), ('totalComponents', 'long', 'vim.version.version9', 0), ('componentLimitHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('diskFreeSpaceHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('rcFreeReservationHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanObjectIdentityAndHealth', 'VsanObjectIdentityAndHealth', 'vmodl.DynamicData', 'vim.version.version9', [('identities', 'vim.cluster.VsanObjectIdentity[]', 'vim.version.version9', 0 | F_OPTIONAL), ('health', 'vim.host.VsanObjectOverallHealth', 'vim.version.version9', 0 | F_OPTIONAL), ('spaceSummary', 'vim.cluster.VsanObjectSpaceSummary[]', 'vim.version.version9', 0 | F_OPTIONAL), ('rawData', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanHclControllerInfo', 'VsanHclControllerInfo', 'vmodl.DynamicData', 'vim.version.version9', [('deviceName', 'string', 'vim.version.version9', 0), ('deviceDisplayName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('driverName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('driverVersion', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vendorId', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('deviceId', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('subVendorId', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('subDeviceId', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('extraInfo', 
'vim.KeyValue[]', 'vim.version.version9', 0 | F_OPTIONAL), ('deviceOnHcl', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('releaseSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('releasesOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('driverVersionsOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('driverVersionSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('fwVersionSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('fwVersionOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('cacheConfigSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('cacheConfigOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('raidConfigSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('raidConfigOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('fwVersion', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('raidConfig', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('cacheConfig', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('cimProviderInfo', 'vim.host.VsanHostCimProviderInfo', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthResultKeyValuePair', 'VsanClusterHealthResultKeyValuePair', 'vmodl.DynamicData', 'vim.version.version9', [('key', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('value', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.StorageOperationalStatus', 'VsanStorageOperationalStatus', 'vmodl.DynamicData', 'vim.version.version9', [('healthy', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('operationETA', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('operationProgress', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('transitional', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanSpaceUsage', 'VsanSpaceUsage', 'vmodl.DynamicData', 'vim.version.version9', [('totalCapacityB', 'long', 
'vim.version.version9', 0), ('freeCapacityB', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('spaceOverview', 'vim.cluster.VsanObjectSpaceSummary', 'vim.version.version9', 0 | F_OPTIONAL), ('spaceDetail', 'vim.cluster.VsanSpaceUsageDetailResult', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthResultTable', 'VsanClusterHealthResultTable', 'vim.cluster.VsanClusterHealthResultBase', 'vim.version.version9', [('columns', 'vim.cluster.VsanClusterHealthResultColumnInfo[]', 'vim.version.version9', 0 | F_OPTIONAL), ('rows', 'vim.cluster.VsanClusterHealthResultRow[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterConfig', 'VsanClusterConfig', 'vmodl.DynamicData', 'vim.version.version9', [('config', 'vim.vsan.cluster.ConfigInfo', 'vim.version.version9', 0), ('name', 'string', 'vim.version.version9', 0), ('hosts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.host.VsanHostCapability', 'VimVsanHostVsanHostCapability', 'vmodl.DynamicData', 'vim.version.version10', [('host', 'vim.HostSystem', 'vim.version.version10', 0), ('isSupported', 'boolean', 'vim.version.version10', 0), ('isLicensed', 'boolean', 'vim.version.version10', 0)]) +CreateDataType('vim.cluster.VsanPerfThreshold', 'VsanPerfThreshold', 'vmodl.DynamicData', 'vim.version.version9', [('direction', 'vim.cluster.VsanPerfThresholdDirectionType', 'vim.version.version9', 0), ('yellow', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('red', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanNetworkHealthResult', 'VsanNetworkHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('host', 'vim.HostSystem', 'vim.version.version9', 0 | F_OPTIONAL), ('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanVmknicPresent', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('ipSubnets', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('issueFound', 'boolean', 
'vim.version.version9', 0 | F_OPTIONAL), ('peerHealth', 'vim.host.VsanNetworkPeerHealthResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('multicastConfig', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.ConfigInfoEx', 'VsanConfigInfoEx', 'vim.vsan.cluster.ConfigInfo', 'vim.version.version10', [('dataEfficiencyConfig', 'vim.vsan.DataEfficiencyConfig', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanVmdkLoadTestResult', 'VsanVmdkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('success', 'boolean', 'vim.version.version9', 0), ('faultMessage', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('spec', 'vim.host.VsanVmdkLoadTestSpec', 'vim.version.version9', 0), ('actualDurationSec', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('totalBytes', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('iops', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('tputBps', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('avgLatencyUs', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('maxLatencyUs', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('numIoAboveLatencyThreshold', 'long', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterVMsHealthOverallResult', 'VsanClusterVMsHealthOverAllResult', 'vmodl.DynamicData', 'vim.version.version9', [('healthStateList', 'vim.cluster.VsanClusterVMsHealthSummaryResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('overallHealthState', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanHostHealthSystemStatusResult', 'VsanHostHealthSystemStatusResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('status', 'string', 'vim.version.version9', 0), ('issues', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterAdvCfgSyncResult', 'VsanClusterAdvCfgSyncResult', 'vmodl.DynamicData', 'vim.version.version9', [('inSync', 
'boolean', 'vim.version.version9', 0), ('name', 'string', 'vim.version.version9', 0), ('hostValues', 'vim.cluster.VsanClusterAdvCfgSyncHostResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanQueryResultHostInfo', 'VsanQueryResultHostInfo', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('hostnameInCmmds', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanIpv4Addresses', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.host.DiskMapInfoEx', 'VimVsanHostDiskMapInfoEx', 'vmodl.DynamicData', 'vim.version.version10', [('mapping', 'vim.vsan.host.DiskMapping', 'vim.version.version10', 0), ('isMounted', 'boolean', 'vim.version.version10', 0), ('isAllFlash', 'boolean', 'vim.version.version10', 0), ('isDataEfficiency', 'boolean', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanVmdkLoadTestSpec', 'VsanVmdkLoadTestSpec', 'vmodl.DynamicData', 'vim.version.version9', [('vmdkCreateSpec', 'vim.VirtualDiskManager.FileBackedVirtualDiskSpec', 'vim.version.version9', 0 | F_OPTIONAL), ('vmdkIOSpec', 'vim.host.VsanVmdkIOLoadSpec', 'vim.version.version9', 0 | F_OPTIONAL), ('vmdkIOSpecSequence', 'vim.host.VsanVmdkIOLoadSpec[]', 'vim.version.version9', 0 | F_OPTIONAL), ('stepDurationSec', 'long', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHealthSummary', 'VsanClusterHealthSummary', 'vmodl.DynamicData', 'vim.version.version9', [('clusterStatus', 'vim.cluster.VsanClusterHealthSystemStatusResult', 'vim.version.version9', 0 | F_OPTIONAL), ('timestamp', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('clusterVersions', 'vim.cluster.VsanClusterHealthSystemVersionResult', 'vim.version.version9', 0 | F_OPTIONAL), ('objectHealth', 'vim.host.VsanObjectOverallHealth', 'vim.version.version9', 0 | F_OPTIONAL), ('vmHealth', 'vim.cluster.VsanClusterVMsHealthOverallResult', 'vim.version.version9', 0 | 
F_OPTIONAL), ('networkHealth', 'vim.cluster.VsanClusterNetworkHealthResult', 'vim.version.version9', 0 | F_OPTIONAL), ('limitHealth', 'vim.cluster.VsanClusterLimitHealthResult', 'vim.version.version9', 0 | F_OPTIONAL), ('advCfgSync', 'vim.cluster.VsanClusterAdvCfgSyncResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('createVmHealth', 'vim.cluster.VsanHostCreateVmHealthTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('physicalDisksHealth', 'vim.host.VsanPhysicalDiskHealthSummary[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hclInfo', 'vim.cluster.VsanClusterHclInfo', 'vim.version.version9', 0 | F_OPTIONAL), ('groups', 'vim.cluster.VsanClusterHealthGroup[]', 'vim.version.version9', 0 | F_OPTIONAL), ('overallHealth', 'string', 'vim.version.version9', 0), ('overallHealthDescription', 'string', 'vim.version.version9', 0), ('clomdLiveness', 'vim.cluster.VsanClusterClomdLivenessResult', 'vim.version.version9', 0 | F_OPTIONAL), ('diskBalance', 'vim.cluster.VsanClusterBalanceSummary', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanPerfEntityType', 'VsanPerfEntityType', 'vmodl.DynamicData', 'vim.version.version9', [('name', 'string', 'vim.version.version9', 0), ('id', 'string', 'vim.version.version9', 0), ('graphs', 'vim.cluster.VsanPerfGraph[]', 'vim.version.version9', 0), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanNetworkLoadTestResult', 'VsanNetworkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('status', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('client', 'boolean', 'vim.version.version9', 0), ('bandwidthBps', 'long', 'vim.version.version9', 0), ('totalBytes', 'long', 'vim.version.version9', 0), ('lostDatagrams', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('lossPct', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('sentDatagrams', 'long', 'vim.version.version9', 0 | F_OPTIONAL), ('jitterMs', 
'float', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanPhysicalDiskHealthSummary', 'VsanPhysicalDiskHealthSummary', 'vmodl.DynamicData', 'vim.version.version9', [('overallHealth', 'string', 'vim.version.version9', 0), ('heapsWithIssues', 'vim.host.VsanResourceHealth[]', 'vim.version.version9', 0 | F_OPTIONAL), ('slabsWithIssues', 'vim.host.VsanResourceHealth[]', 'vim.version.version9', 0 | F_OPTIONAL), ('disks', 'vim.host.VsanPhysicalDiskHealth[]', 'vim.version.version9', 0 | F_OPTIONAL), ('componentsWithIssues', 'vim.host.VsanResourceHealth[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('hostDedupScope', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.host.VsanDiskManagementSystemCapability', 'VimVsanHostVsanDiskManagementSystemCapability', 'vmodl.DynamicData', 'vim.version.version10', [('version', 'string', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VsanHostCimProviderInfo', 'VsanHostCimProviderInfo', 'vmodl.DynamicData', 'vim.version.version9', [('cimProviderSupported', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('installedCIMProvider', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('cimProviderOnHcl', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanObjectInformation', 'VsanObjectInformation', 'vmodl.DynamicData', 'vim.version.version9', [('directoryName', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanObjectUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vsanHealth', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('policyAttributes', 'vim.KeyValue[]', 'vim.version.version9', 0 | F_OPTIONAL), ('spbmProfileUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('spbmProfileGenerationId', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('spbmComplianceResult', 
'vim.cluster.StorageComplianceResult', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanObjectIdentity', 'VsanObjectIdentity', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 'string', 'vim.version.version9', 0), ('type', 'string', 'vim.version.version9', 0), ('vmInstanceUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vmNsObjectUuid', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('vm', 'vim.VirtualMachine', 'vim.version.version9', 0 | F_OPTIONAL), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanResourceHealth', 'VsanResourceHealth', 'vmodl.DynamicData', 'vim.version.version9', [('resource', 'string', 'vim.version.version9', 0), ('health', 'string', 'vim.version.version9', 0), ('description', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanCapability', 'VsanCapability', 'vmodl.DynamicData', 'vim.version.version10', [('target', 'vmodl.ManagedObject', 'vim.version.version10', 0 | F_OPTIONAL), ('capabilities', 'string[]', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanHostClomdLivenessResult', 'VsanHostClomdLivenessResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('clomdStat', 'string', 'vim.version.version9', 0), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanObjectQuerySpec', 'VsanObjectQuerySpec', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 'string', 'vim.version.version9', 0), ('spbmProfileGenerationId', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterLimitHealthResult', 'VsanClusterLimitHealthResult', 'vmodl.DynamicData', 'vim.version.version9', [('issueFound', 'boolean', 'vim.version.version9', 0), ('componentLimitHealth', 'string', 'vim.version.version9', 0), ('diskFreeSpaceHealth', 'string', 'vim.version.version9', 0), 
('rcFreeReservationHealth', 'string', 'vim.version.version9', 0), ('hostResults', 'vim.host.VsanLimitHealthResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('whatifHostFailures', 'vim.cluster.VsanClusterWhatifHostFailuresResult[]', 'vim.version.version9', 0 | F_OPTIONAL), ('hostsCommFailure', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanStorageWorkloadType', 'VsanStorageWorkloadType', 'vmodl.DynamicData', 'vim.version.version9', [('specs', 'vim.host.VsanVmdkLoadTestSpec[]', 'vim.version.version9', 0), ('typeId', 'string', 'vim.version.version9', 0), ('name', 'string', 'vim.version.version9', 0), ('description', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanClusterAdvCfgSyncHostResult', 'VsanClusterAdvCfgSyncHostResult', 'vmodl.DynamicData', 'vim.version.version9', [('hostname', 'string', 'vim.version.version9', 0), ('value', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.vsan.upgradesystem.ObjectPolicyIssue', 'VsanObjectPolicyIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('uuids', 'string[]', 'vim.version.version10', 0)]) +CreateDataType('vim.cluster.VsanPerfTopEntities', 'VsanPerfTopEntities', 'vmodl.DynamicData', 'vim.version.version9', [('metricId', 'vim.cluster.VsanPerfMetricId', 'vim.version.version9', 0), ('entities', 'vim.cluster.VsanPerfTopEntity[]', 'vim.version.version9', 0)]) +CreateDataType('vim.host.VsanProactiveRebalanceInfoEx', 'VsanProactiveRebalanceInfoEx', 'vmodl.DynamicData', 'vim.version.version9', [('running', 'boolean', 'vim.version.version9', 0 | F_OPTIONAL), ('startTs', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('stopTs', 'vmodl.DateTime', 'vim.version.version9', 0 | F_OPTIONAL), ('varianceThreshold', 'float', 'vim.version.version9', 0 | F_OPTIONAL), ('timeThreshold', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('rateThreshold', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('hostname', 'string', 
'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterProactiveTestResult', 'VsanClusterProactiveTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('overallStatus', 'string', 'vim.version.version9', 0), ('overallStatusDescription', 'string', 'vim.version.version9', 0), ('timestamp', 'vmodl.DateTime', 'vim.version.version9', 0), ('healthTest', 'vim.cluster.VsanClusterHealthTest', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VSANCmmdsPreferredFaultDomainInfo', 'VimHostVSANCmmdsPreferredFaultDomainInfo', 'vmodl.DynamicData', 'vim.version.version10', [('preferredFaultDomainId', 'string', 'vim.version.version10', 0), ('preferredFaultDomainName', 'string', 'vim.version.version10', 0)]) +CreateDataType('vim.cluster.VsanFaultDomainsConfigSpec', 'VimClusterVsanFaultDomainsConfigSpec', 'vmodl.DynamicData', 'vim.version.version10', [('faultDomains', 'vim.cluster.VsanFaultDomainSpec[]', 'vim.version.version10', 0), ('witness', 'vim.cluster.VsanWitnessSpec', 'vim.version.version10', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterHostVmknicMapping', 'VsanClusterHostVmknicMapping', 'vmodl.DynamicData', 'vim.version.version9', [('host', 'string', 'vim.version.version9', 0), ('vmknic', 'string', 'vim.version.version9', 0)]) +CreateDataType('vim.cluster.VsanClusterVmdkLoadTestResult', 'VsanClusterVmdkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('task', 'vim.Task', 'vim.version.version9', 0 | F_OPTIONAL), ('clusterResult', 'vim.cluster.VsanClusterProactiveTestResult', 'vim.version.version9', 0 | F_OPTIONAL), ('hostResults', 'vim.host.VsanHostVmdkLoadTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterVMsHealthSummaryResult', 'VsanClusterVMsHealthSummaryResult', 'vmodl.DynamicData', 'vim.version.version9', [('numVMs', 'int', 'vim.version.version9', 0), ('state', 'string', 
'vim.version.version9', 0 | F_OPTIONAL), ('health', 'string', 'vim.version.version9', 0), ('vmInstanceUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VSANStretchedClusterHostCapability', 'VimHostVSANStretchedClusterHostCapability', 'vmodl.DynamicData', 'vim.version.version10', [('featureVersion', 'string', 'vim.version.version10', 0)]) +CreateDataType('vim.host.VsanFailedRepairObjectResult', 'VsanFailedRepairObjectResult', 'vmodl.DynamicData', 'vim.version.version9', [('uuid', 'string', 'vim.version.version9', 0), ('errMessage', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterCreateVmHealthTestResult', 'VsanClusterCreateVmHealthTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('clusterResult', 'vim.cluster.VsanClusterProactiveTestResult', 'vim.version.version9', 0), ('hostResults', 'vim.cluster.VsanHostCreateVmHealthTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanObjectHealth', 'VsanObjectHealth', 'vmodl.DynamicData', 'vim.version.version9', [('numObjects', 'int', 'vim.version.version9', 0), ('health', 'vim.host.VsanObjectHealthState', 'vim.version.version9', 0), ('objUuids', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterBalanceSummary', 'VsanClusterBalanceSummary', 'vmodl.DynamicData', 'vim.version.version9', [('varianceThreshold', 'long', 'vim.version.version9', 0), ('disks', 'vim.cluster.VsanClusterBalancePerDiskInfo[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterTelemetryProxyConfig', 'VsanClusterTelemetryProxyConfig', 'vmodl.DynamicData', 'vim.version.version9', [('host', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('port', 'int', 'vim.version.version9', 0 | F_OPTIONAL), ('user', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('password', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('autoDiscovered', 'boolean', 
'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanVmdkIOLoadSpec', 'VsanVmdkIOLoadSpec', 'vmodl.DynamicData', 'vim.version.version9', [('readPct', 'int', 'vim.version.version9', 0), ('oio', 'int', 'vim.version.version9', 0), ('iosizeB', 'int', 'vim.version.version9', 0), ('dataSizeMb', 'long', 'vim.version.version9', 0), ('random', 'boolean', 'vim.version.version9', 0), ('startOffsetB', 'long', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.host.VsanVsanPcapResult', 'VsanVsanPcapResult', 'vmodl.DynamicData', 'vim.version.version9', [('calltime', 'float', 'vim.version.version9', 0), ('vmknic', 'string', 'vim.version.version9', 0), ('tcpdumpFilter', 'string', 'vim.version.version9', 0), ('snaplen', 'int', 'vim.version.version9', 0), ('pkts', 'string[]', 'vim.version.version9', 0 | F_OPTIONAL), ('pcap', 'string', 'vim.version.version9', 0 | F_OPTIONAL), ('error', 'vmodl.MethodFault', 'vim.version.version9', 0 | F_OPTIONAL), ('hostname', 'string', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.cluster.VsanClusterNetworkLoadTestResult', 'VsanClusterNetworkLoadTestResult', 'vmodl.DynamicData', 'vim.version.version9', [('clusterResult', 'vim.cluster.VsanClusterProactiveTestResult', 'vim.version.version9', 0), ('hostResults', 'vim.host.VsanNetworkLoadTestResult[]', 'vim.version.version9', 0 | F_OPTIONAL)]) +CreateDataType('vim.vsan.upgradesystem.HostPropertyRetrieveIssue', 'VsanHostPropertyRetrieveIssue', 'vim.VsanUpgradeSystem.PreflightCheckIssue', 'vim.version.version10', [('hosts', 'vim.HostSystem[]', 'vim.version.version10', 0)]) +CreateEnumType('vim.host.VsanObjectHealthState', 'VsanObjectHealthState', 'vim.version.version9', ['inaccessible' ,'reducedavailabilitywithnorebuild' ,'reducedavailabilitywithnorebuilddelaytimer' ,'reducedavailabilitywithactiverebuild' ,'datamove' ,'nonavailabilityrelatedreconfig' ,'nonavailabilityrelatedincompliance' ,'healthy' ,]) +CreateEnumType('vim.cluster.VsanObjectTypeEnum', 
'VsanObjectTypeEnum', 'vim.version.version9', ['vmswap' ,'vdisk' ,'namespace' ,'vmem' ,'statsdb' ,'iscsi' ,'other' ,'fileSystemOverhead' ,'dedupOverhead' ,'checksumOverhead' ,]) +CreateEnumType('vim.cluster.VsanCapabilityType', 'VsanCapabilityType', 'vim.version.version10', ['capability' ,'allflash' ,'stretchedcluster' ,'dataefficiency' ,'clusterconfig' ,'upgrade' ,'objectidentities' ,]) +CreateEnumType('vim.cluster.VsanHealthLogLevelEnum', 'VsanHealthLogLevelEnum', 'vim.version.version9', ['INFO' ,'WARNING' ,'ERROR' ,'DEBUG' ,'CRITICAL' ,]) +CreateEnumType('vim.cluster.VsanPerfSummaryType', 'VsanPerfSummaryType', 'vim.version.version9', ['average' ,'maximum' ,'minimum' ,'latest' ,'summation' ,'none' ,]) +CreateEnumType('vim.cluster.StorageComplianceStatus', 'VsanStorageComplianceStatus', 'vim.version.version9', ['compliant' ,'nonCompliant' ,'unknown' ,'notApplicable' ,]) +CreateEnumType('vim.cluster.VsanPerfStatsUnitType', 'VsanPerfStatsUnitType', 'vim.version.version9', ['number' ,'time_ms' ,'percentage' ,'size_bytes' ,'rate_bytes' ,]) +CreateEnumType('vim.cluster.VsanPerfThresholdDirectionType', 'VsanPerfThresholdDirectionType', 'vim.version.version9', ['upper' ,'lower' ,]) +CreateEnumType('vim.cluster.VsanPerfStatsType', 'VsanPerfStatsType', 'vim.version.version9', ['absolute' ,'delta' ,'rate' ,]) +CreateEnumType('vim.vsan.host.DiskMappingCreationType', 'VimVsanHostDiskMappingCreationType', 'vim.version.version10', ['hybrid' ,'allFlash' ,]) +CreateEnumType('vim.cluster.VsanClusterHealthActionIdEnum', 'VsanClusterHealthActionIdEnum', 'vim.version.version9', ['RepairClusterObjectsAction' ,'UploadHclDb' ,'UpdateHclDbFromInternet' ,'EnableHealthService' ,'DiskBalance' ,'StopDiskBalance' ,'RemediateDedup' ,'UpgradeVsanDiskFormat' ,]) +CreateEnumType('vim.cluster.VsanDiskGroupCreationType', 'VimClusterVsanDiskGroupCreationType', 'vim.version.version10', ['allflash' ,'hybrid' ,]) \ No newline at end of file From 6dbf78470211f9ef6008d7445be63996714e8ffd Mon Sep 17 
00:00:00 2001 From: Alexandru Bleotu Date: Fri, 15 Sep 2017 06:46:32 -0400 Subject: [PATCH 554/639] Added utils.vsan.reconfigure_cluster_vsan that reconfigures VSAN on a cluster using the new endpoint --- salt/utils/vsan.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/salt/utils/vsan.py b/salt/utils/vsan.py index 1b9f796efe..f56ae3ceac 100644 --- a/salt/utils/vsan.py +++ b/salt/utils/vsan.py @@ -156,3 +156,36 @@ def get_cluster_vsan_info(cluster_ref): except vmodl.RuntimeFault as exc: log.exception(exc) raise VMwareRuntimeError(exc.msg) + + +def reconfigure_cluster_vsan(cluster_ref, cluster_vsan_spec): + ''' + Reconfigures the VSAN system of a cluster. + + cluster_ref + Reference to the cluster + + cluster_vsan_spec + Cluster VSAN reconfigure spec (vim.vsan.ReconfigSpec). + ''' + cluster_name = salt.utils.vmware.get_managed_object_name(cluster_ref) + log.trace('Reconfiguring vsan on cluster \'{0}\': {1}' + ''.format(cluster_name, cluster_vsan_spec)) + si = salt.utils.vmware.get_service_instance_from_managed_object( + cluster_ref) + print salt.utils.vsan.get_vsan_cluster_config_system + vsan_cl_conf_sys = salt.utils.vsan.get_vsan_cluster_config_system(si) + try: + task = vsan_cl_conf_sys.VsanClusterReconfig(cluster_ref, + cluster_vsan_spec) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + _wait_for_tasks([task], si) From 6a86104ec8461ba232b1372754acbf35d0830ef3 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 15 Sep 2017 06:51:20 -0400 Subject: [PATCH 555/639] Added tests for salt.utils.vsan.reconfigure_cluster_vsan --- tests/unit/utils/test_vsan.py | 106 ++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) diff --git a/tests/unit/utils/test_vsan.py b/tests/unit/utils/test_vsan.py index 197ba517de..3593391fd7 100644 --- a/tests/unit/utils/test_vsan.py +++ b/tests/unit/utils/test_vsan.py @@ -216,3 +216,109 @@ class GetClusterVsanInfoTestCase(TestCase, LoaderModuleMockMixin): with self.assertRaises(VMwareRuntimeError) as excinfo: vsan.get_cluster_vsan_info(self.mock_cl_ref) self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +@skipIf(not HAS_PYVSAN, 'The \'vsan\' ext library is missing') +class ReconfigureClusterVsanTestCase(TestCase): + '''Tests for salt.utils.vsan.reconfigure_cluster_vsan''' + def setUp(self): + self.mock_si = MagicMock() + self.mock_task = MagicMock() + self.mock_cl_reconf = MagicMock(return_value=self.mock_task) + self.mock_get_vsan_conf_sys = MagicMock( + return_value=MagicMock(VsanClusterReconfig=self.mock_cl_reconf)) + self.mock_cl_ref = MagicMock() + self.mock_cl_vsan_spec = MagicMock() + patches = ( + ('salt.utils.vmware.get_managed_object_name', MagicMock()), + ('salt.utils.vmware.get_service_instance_from_managed_object', + MagicMock(return_value=self.mock_si)), + ('salt.utils.vsan.get_vsan_cluster_config_system', + self.mock_get_vsan_conf_sys), + ('salt.utils.vsan._wait_for_tasks', MagicMock())) + for mod, mock in patches: + patcher = patch(mod, 
mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_cl_reconf', 'mock_get_vsan_conf_sys', + 'mock_cl_ref', 'mock_cl_vsan_spec', 'mock_task'): + delattr(self, attr) + + def test_get_cluster_name_call(self): + get_managed_object_name_mock = MagicMock() + with patch('salt.utils.vmware.get_managed_object_name', + get_managed_object_name_mock): + + vsan.reconfigure_cluster_vsan(self.mock_cl_ref, + self.mock_cl_vsan_spec) + get_managed_object_name_mock.assert_called_once_with( + self.mock_cl_ref) + + def test_get_service_instance_call(self): + get_service_instance_from_managed_object_mock= MagicMock() + with patch( + 'salt.utils.vmware.get_service_instance_from_managed_object', + get_service_instance_from_managed_object_mock): + + vsan.reconfigure_cluster_vsan(self.mock_cl_ref, + self.mock_cl_vsan_spec) + get_service_instance_from_managed_object_mock.assert_called_once_with( + self.mock_cl_ref) + + def test_get_vsan_cluster_config_system_call(self): + vsan.reconfigure_cluster_vsan(self.mock_cl_ref, + self.mock_cl_vsan_spec) + self.mock_get_vsan_conf_sys.assert_called_once_with(self.mock_si) + + def test_cluster_reconfig_call(self): + vsan.reconfigure_cluster_vsan(self.mock_cl_ref, + self.mock_cl_vsan_spec) + self.mock_cl_reconf.assert_called_once_with( + self.mock_cl_ref, self.mock_cl_vsan_spec) + + def test_cluster_reconfig_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + with patch('salt.utils.vsan.get_vsan_cluster_config_system', + MagicMock(return_value = MagicMock( + VsanClusterReconfig=MagicMock(side_effect=exc)))): + with self.assertRaises(VMwareApiError) as excinfo: + vsan.reconfigure_cluster_vsan(self.mock_cl_ref, + self.mock_cl_vsan_spec) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_cluster_reconfig_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + with patch('salt.utils.vsan.get_vsan_cluster_config_system', + MagicMock(return_value = MagicMock( + VsanClusterReconfig=MagicMock(side_effect=exc)))): + with self.assertRaises(VMwareApiError) as excinfo: + vsan.reconfigure_cluster_vsan(self.mock_cl_ref, + self.mock_cl_vsan_spec) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_cluster_reconfig_raises_vmodl_runtime_error(self): + exc = vmodl.RuntimeFault() + exc.msg = 'VimRuntime msg' + with patch('salt.utils.vsan.get_vsan_cluster_config_system', + MagicMock(return_value = MagicMock( + VsanClusterReconfig=MagicMock(side_effect=exc)))): + with self.assertRaises(VMwareRuntimeError) as excinfo: + vsan.reconfigure_cluster_vsan(self.mock_cl_ref, + self.mock_cl_vsan_spec) + self.assertEqual(excinfo.exception.strerror, 'VimRuntime msg') + + def test__wait_for_tasks_call(self): + mock_wait_for_tasks = MagicMock() + with patch('salt.utils.vsan._wait_for_tasks', mock_wait_for_tasks): + vsan.reconfigure_cluster_vsan(self.mock_cl_ref, + self.mock_cl_vsan_spec) + mock_wait_for_tasks.assert_called_once_with([self.mock_task], + self.mock_si) From f796885364b6ac5f774be36869a7ab97b3848378 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 15 Sep 2017 06:52:12 -0400 Subject: [PATCH 556/639] Added utils.vsan._wait_for_tasks that waits for VSAN tasks on the vCenter using the new endpoint --- salt/utils/vsan.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/salt/utils/vsan.py b/salt/utils/vsan.py index f56ae3ceac..82f4e2563b 100644 --- a/salt/utils/vsan.py +++ b/salt/utils/vsan.py @@ -189,3 +189,25 @@ def reconfigure_cluster_vsan(cluster_ref, cluster_vsan_spec): log.exception(exc) raise VMwareRuntimeError(exc.msg) _wait_for_tasks([task], si) + + +def _wait_for_tasks(tasks, service_instance): + ''' + Wait for tasks created 
via the VSAN API + ''' + log.trace('Waiting for vsan tasks: {0}' + ''.format(', '.join([str(t) for t in tasks]))) + try: + vsanapiutils.WaitForTasks(tasks, service_instance) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise VMwareApiError('Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise VMwareRuntimeError(exc.msg) + log.trace('Tasks {0} finished successfully' + ''.format(', '.join([str(t) for t in tasks]))) From 0db8cbaa258f298b3bf44f1a92ed7bb157ce4a22 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 15 Sep 2017 06:52:42 -0400 Subject: [PATCH 557/639] Added tests for utils.vsan._wait_for_tasks --- tests/unit/utils/test_vsan.py | 60 +++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/tests/unit/utils/test_vsan.py b/tests/unit/utils/test_vsan.py index 3593391fd7..1c84198bb0 100644 --- a/tests/unit/utils/test_vsan.py +++ b/tests/unit/utils/test_vsan.py @@ -322,3 +322,63 @@ class ReconfigureClusterVsanTestCase(TestCase): self.mock_cl_vsan_spec) mock_wait_for_tasks.assert_called_once_with([self.mock_task], self.mock_si) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +@skipIf(not HAS_PYVSAN, 'The \'vsan\' ext library is missing') +class _WaitForTasks(TestCase, LoaderModuleMockMixin): + '''Tests for salt.utils.vsan._wait_for_tasks''' + def setup_loader_modules(self): + return {vsan: { + '__virtual__': MagicMock(return_value='vsan')}} + + def setUp(self): + self.mock_si = MagicMock() + self.mock_tasks = MagicMock() + patches = (('salt.utils.vsan.vsanapiutils.WaitForTasks', MagicMock()),) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_tasks'): + delattr(self, attr) + 
+ def test_wait_for_tasks_call(self): + mock_wait_for_tasks = MagicMock() + with patch('salt.utils.vsan.vsanapiutils.WaitForTasks', + mock_wait_for_tasks): + vsan._wait_for_tasks(self.mock_tasks, self.mock_si) + mock_wait_for_tasks.assert_called_once_with(self.mock_tasks, + self.mock_si) + + def test_wait_for_tasks_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + with patch('salt.utils.vsan.vsanapiutils.WaitForTasks', + MagicMock(side_effect=exc)): + with self.assertRaises(VMwareApiError) as excinfo: + vsan._wait_for_tasks(self.mock_tasks, self.mock_si) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_wait_for_tasks_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + with patch('salt.utils.vsan.vsanapiutils.WaitForTasks', + MagicMock(side_effect=exc)): + with self.assertRaises(VMwareApiError) as excinfo: + vsan._wait_for_tasks(self.mock_tasks, self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_wait_for_tasks_raises_vmodl_runtime_error(self): + exc = vmodl.RuntimeFault() + exc.msg = 'VimRuntime msg' + with patch('salt.utils.vsan.vsanapiutils.WaitForTasks', + MagicMock(side_effect=exc)): + with self.assertRaises(VMwareRuntimeError) as excinfo: + vsan._wait_for_tasks(self.mock_tasks, self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'VimRuntime msg') From d69382012d3f5fae83ac3b13989f1d3fcb44d9c4 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 15 Sep 2017 07:55:26 -0400 Subject: [PATCH 558/639] Added salt.modules.vsphere._apply_cluster_dict that applies the values in a configuration dictionary to a cluster spec --- salt/modules/vsphere.py | 131 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 131 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index c0745a4a59..ed0adc13d6 100644 --- a/salt/modules/vsphere.py +++ 
b/salt/modules/vsphere.py @@ -3791,6 +3791,137 @@ def list_cluster(datacenter=None, cluster=None, service_instance=None): return _get_cluster_dict(cluster, cluster_ref) +def _apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec=None, + vsan_61=True): + ''' + Applies the values of cluster_dict dictionary to a cluster spec + (vim.ClusterConfigSpecEx). + + All vsan values (cluster_dict['vsan']) will be applied to + vsan_spec (vim.vsan.cluster.ConfigInfoEx). Can be not omitted + if not required. + + VSAN 6.1 config needs to be applied differently than the post VSAN 6.1 way. + The type of configuration desired is dictated by the flag vsan_61. + ''' + log.trace('Applying cluster dict {0}'.format(cluster_dict)) + if cluster_dict.get('ha'): + ha_dict = cluster_dict['ha'] + if not cluster_spec.dasConfig: + cluster_spec.dasConfig = vim.ClusterDasConfigInfo() + das_config = cluster_spec.dasConfig + if 'enabled' in ha_dict: + das_config.enabled = ha_dict['enabled'] + if ha_dict['enabled']: + # Default values when ha is enabled + das_config.failoverLevel = 1 + if 'admission_control_enabled' in ha_dict: + das_config.admissionControlEnabled = \ + ha_dict['admission_control_enabled'] + if 'admission_control_policy' in ha_dict: + adm_pol_dict = ha_dict['admission_control_policy'] + if not das_config.admissionControlPolicy or \ + not isinstance( + das_config.admissionControlPolicy, + vim.ClusterFailoverResourcesAdmissionControlPolicy): + + das_config.admissionControlPolicy = \ + vim.ClusterFailoverResourcesAdmissionControlPolicy( + cpuFailoverResourcesPercent= + adm_pol_dict['cpu_failover_percent'], + memoryFailoverResourcesPercent= + adm_pol_dict['memory_failover_percent']) + if 'default_vm_settings' in ha_dict: + vm_set_dict = ha_dict['default_vm_settings'] + if not das_config.defaultVmSettings: + das_config.defaultVmSettings = vim.ClusterDasVmSettings() + if 'isolation_response' in vm_set_dict: + das_config.defaultVmSettings.isolationResponse = \ + 
vm_set_dict['isolation_response'] + if 'restart_priority' in vm_set_dict: + das_config.defaultVmSettings.restartPriority= \ + vm_set_dict['restart_priority'] + if 'hb_ds_candidate_policy' in ha_dict: + das_config.hBDatastoreCandidatePolicy = \ + ha_dict['hb_ds_candidate_policy'] + if 'host_monitoring' in ha_dict: + das_config.hostMonitoring = ha_dict['host_monitoring'] + if 'options' in ha_dict: + das_config.option = [] + for opt_dict in ha_dict['options']: + das_config.option.append( + vim.OptionValue(key=opt_dict['key'])) + if 'value' in opt_dict: + das_config.option[-1].value = opt_dict['value'] + if 'vm_monitoring' in ha_dict: + das_config.vmMonitoring = ha_dict['vm_monitoring'] + cluster_spec.dasConfig = das_config + if cluster_dict.get('drs'): + drs_dict = cluster_dict['drs'] + drs_config = vim.ClusterDrsConfigInfo() + if 'enabled' in drs_dict: + drs_config.enabled = drs_dict['enabled'] + if 'vmotion_rate' in drs_dict: + drs_config.vmotionRate = 6 - drs_dict['vmotion_rate'] + if 'default_vm_behavior' in drs_dict: + drs_config.defaultVmBehavior = \ + vim.DrsBehavior(drs_dict['default_vm_behavior']) + cluster_spec.drsConfig = drs_config + if cluster_dict.get('vm_swap_placement'): + cluster_spec.vmSwapPlacement = cluster_dict['vm_swap_placement'] + if cluster_dict.get('vsan'): + vsan_dict = cluster_dict['vsan'] + if not vsan_61: # VSAN is 6.2 and above + if 'enabled' in vsan_dict: + if not vsan_spec.vsanClusterConfig: + vsan_spec.vsanClusterConfig = \ + vim.vsan.cluster.ConfigInfo() + vsan_spec.vsanClusterConfig.enabled = vsan_dict['enabled'] + if 'auto_claim_storage' in vsan_dict: + if not vsan_spec.vsanClusterConfig: + vsan_spec.vsanClusterConfig = \ + vim.vsan.cluster.ConfigInfo() + if not vsan_spec.vsanClusterConfig.defaultConfig: + vsan_spec.vsanClusterConfig.defaultConfig = \ + vim.VsanClusterConfigInfoHostDefaultInfo() + elif vsan_spec.vsanClusterConfig.defaultConfig.uuid: + # If this remains set it caused an error + 
vsan_spec.vsanClusterConfig.defaultConfig.uuid = None + vsan_spec.vsanClusterConfig.defaultConfig.autoClaimStorage = \ + vsan_dict['auto_claim_storage'] + if 'compression_enabled' in vsan_dict: + if not vsan_spec.dataEfficiencyConfig: + vsan_spec.dataEfficiencyConfig = \ + vim.vsan.DataEfficiencyConfig() + vsan_spec.dataEfficiencyConfig.compressionEnabled = \ + vsan_dict['compression_enabled'] + if 'dedup_enabled' in vsan_dict: + if not vsan_spec.dataEfficiencyConfig: + vsan_spec.dataEfficiencyConfig = \ + vim.vsan.DataEfficiencyConfig() + vsan_spec.dataEfficiencyConfig.dedupEnabled = \ + vsan_dict['dedup_enabled'] + # In all cases we need to configure the vsan on the cluster + # directly so not to have a missmatch between vsan_spec and + # cluster_spec + if not cluster_spec.vsanConfig: + cluster_spec.vsanConfig = \ + vim.VsanClusterConfigInfo() + vsan_config = cluster_spec.vsanConfig + if 'enabled' in vsan_dict: + vsan_config.enabled = vsan_dict['enabled'] + if 'auto_claim_storage' in vsan_dict: + if not vsan_config.defaultConfig: + vsan_config.defaultConfig = \ + vim.VsanClusterConfigInfoHostDefaultInfo() + elif vsan_config.defaultConfig.uuid: + # If this remains set it caused an error + vsan_config.defaultConfig.uuid = None + vsan_config.defaultConfig.autoClaimStorage = \ + vsan_dict['auto_claim_storage'] + log.trace('cluster_spec = {0}'.format(cluster_spec)) + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From 345b783ad6e9b3d3cc4defb3ae16b95dc4965f19 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 15 Sep 2017 06:55:39 -0400 Subject: [PATCH 559/639] Added salt.modules.vsphere.create_cluster that creates a cluster according to spec, including configuring VSAN --- salt/modules/vsphere.py | 105 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 104 insertions(+), 1 deletion(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 
ed0adc13d6..b448c8885d 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -178,10 +178,17 @@ import salt.utils.path import salt.utils.vmware import salt.utils.vsan from salt.exceptions import CommandExecutionError, VMwareSaltError, \ - ArgumentValueError + ArgumentValueError, InvalidConfigError from salt.utils.decorators import depends, ignores_kwargs +from salt.config.schemas.esxcluster import ESXClusterConfigSchema # Import Third Party Libs +try: + import jsonschema + HAS_JSONSCHEMA = True +except ImportError: + HAS_JSONSCHEMA = False + try: from pyVmomi import vim, vmodl HAS_PYVMOMI = True @@ -3922,6 +3929,102 @@ def _apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec=None, log.trace('cluster_spec = {0}'.format(cluster_spec)) +@depends(HAS_PYVMOMI) +@depends(HAS_JSONSCHEMA) +@supports_proxies('esxcluster', 'esxdatacenter') +@gets_service_instance_via_proxy +def create_cluster(cluster_dict, datacenter=None, cluster=None, + service_instance=None): + ''' + Creates a cluster. + + Note: cluster_dict['name'] will be overridden by the cluster param value + + config_dict + Dictionary with the config values of the new cluster. + + datacenter + Name of datacenter containing the cluster. + Ignored if already contained by proxy details. + Default value is None. + + cluster + Name of cluster. + Ignored if already contained by proxy details. + Default value is None. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. 
code-block:: bash + + # esxdatacenter proxy + salt '*' vsphere.create_cluster cluster_dict=$cluster_dict cluster=cl1 + + # esxcluster proxy + salt '*' vsphere.create_cluster cluster_dict=$cluster_dict + ''' + # Validate cluster dictionary + schema = ESXClusterConfigSchema.serialize() + try: + jsonschema.validate(cluster_dict, schema) + except jsonschema.exceptions.ValidationError as exc: + raise InvalidConfigError(exc) + # Get required details from the proxy + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + if not cluster: + raise ArgumentValueError('\'cluster\' needs to be specified') + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + cluster = __salt__['esxcluster.get_details']()['cluster'] + + if cluster_dict.get('vsan') and not \ + salt.utils.vsan.vsan_supported(service_instance): + + raise excs.VMwareApiError('VSAN operations are not supported') + si = service_instance + cluster_spec = vim.ClusterConfigSpecEx() + vsan_spec = None + ha_config = None + vsan_61 = None + if cluster_dict.get('vsan'): + # XXX The correct way of retrieving the VSAN data (on the if branch) + # is not supported before 60u2 vcenter + vcenter_info = salt.utils.vmware.get_service_info(si) + if float(vcenter_info.apiVersion) >= 6.0 and \ + int(vcenter_info.build) >= 3634794: # 60u2 + vsan_spec = vim.vsan.ReconfigSpec(modify=True) + vsan_61 = False + # We need to keep HA disabled and enable it afterwards + if cluster_dict.get('ha',{}).get('enabled'): + enable_ha = True + ha_config = cluster_dict['ha'] + del cluster_dict['ha'] + else: + vsan_61 = True + # If VSAN is 6.1 the configuration of VSAN happens when configuring the + # cluster via the regular endpoint + _apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec, vsan_61) + 
salt.utils.vmware.create_cluster(dc_ref, cluster, cluster_spec) + if not vsan_61: + # Only available after VSAN 61 + if vsan_spec: + cluster_ref = salt.utils.vmware.get_cluster(dc_ref, cluster) + salt.utils.vsan.reconfigure_cluster_vsan(cluster_ref, vsan_spec) + if enable_ha: + # Set HA after VSAN has been configured + _apply_cluster_dict(cluster_spec, {'ha': ha_config}) + salt.utils.vmware.update_cluster(cluster_ref, cluster_spec) + # Set HA back on the object + cluster_dict['ha'] = ha_config + return {'create_cluster': True} + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From 01f241028aef56abe7f641e315834a411873dcdd Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 15 Sep 2017 07:44:10 -0400 Subject: [PATCH 560/639] Added salt.modules.vsphere.update_cluster that updates a cluster according to spec, including VSAN configuration --- salt/modules/vsphere.py | 112 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index b448c8885d..c81b71bdc0 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4025,6 +4025,118 @@ def create_cluster(cluster_dict, datacenter=None, cluster=None, return {'create_cluster': True} +@depends(HAS_PYVMOMI) +@depends(HAS_JSONSCHEMA) +@supports_proxies('esxcluster', 'esxdatacenter') +@gets_service_instance_via_proxy +def update_cluster(cluster_dict, datacenter=None, cluster=None, + service_instance=None): + ''' + Updates a cluster. + + config_dict + Dictionary with the config values of the new cluster. + + datacenter + Name of datacenter containing the cluster. + Ignored if already contained by proxy details. + Default value is None. + + cluster + Name of cluster. + Ignored if already contained by proxy details. + Default value is None. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. + Default is None. + + .. 
code-block:: bash + + # esxdatacenter proxy + salt '*' vsphere.update_cluster cluster_dict=$cluster_dict cluster=cl1 + + # esxcluster proxy + salt '*' vsphere.update_cluster cluster_dict=$cluster_dict + + ''' + # Validate cluster dictionary + schema = ESXClusterConfigSchema.serialize() + try: + jsonschema.validate(cluster_dict, schema) + except jsonschema.exceptions.ValidationError as exc: + raise InvalidConfigError(exc) + # Get required details from the proxy + proxy_type = get_proxy_type() + if proxy_type == 'esxdatacenter': + datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] + dc_ref = _get_proxy_target(service_instance) + if not cluster: + raise ArgumentValueError('\'cluster\' needs to be specified') + elif proxy_type == 'esxcluster': + datacenter = __salt__['esxcluster.get_details']()['datacenter'] + dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) + cluster = __salt__['esxcluster.get_details']()['cluster'] + + if cluster_dict.get('vsan') and not \ + salt.utils.vsan.vsan_supported(service_instance): + + raise VMwareApiError('VSAN operations are not supported') + + cluster_ref = salt.utils.vmware.get_cluster(dc_ref, cluster) + cluster_spec = vim.ClusterConfigSpecEx() + props = salt.utils.vmware.get_properties_of_managed_object( + cluster_ref, properties=['configurationEx']) + # Copy elements we want to update to spec + for p in ['dasConfig', 'drsConfig']: + setattr(cluster_spec, p, getattr(props['configurationEx'], p)) + if props['configurationEx'].vsanConfigInfo: + cluster_spec.vsanConfig = props['configurationEx'].vsanConfigInfo + vsan_spec = None + vsan_61 = None + if cluster_dict.get('vsan'): + # XXX The correct way of retrieving the VSAN data (on the if branch) + # is not supported before 60u2 vcenter + vcenter_info = salt.utils.vmware.get_service_info(service_instance) + if float(vcenter_info.apiVersion) >= 6.0 and \ + int(vcenter_info.build) >= 3634794: # 60u2 + vsan_61 = False + vsan_info = 
salt.utils.vsan.get_cluster_vsan_info(cluster_ref) + vsan_spec = vim.vsan.ReconfigSpec(modify=True) + # Only interested in the vsanClusterConfig and the + # dataEfficiencyConfig + # vsan_spec.vsanClusterConfig = vsan_info + vsan_spec.dataEfficiencyConfig = vsan_info.dataEfficiencyConfig + vsan_info.dataEfficiencyConfig = None + else: + vsan_61 = True + + _apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec, vsan_61) + # We try to reconfigure vsan first as it fails if HA is enabled so the + # command will abort not having any side-effects + # also if HA was previously disabled it can be enabled automatically if + # desired + if vsan_spec: + log.trace('vsan_spec = {0}'.format(vsan_spec)) + salt.utils.vsan.reconfigure_cluster_vsan(cluster_ref, vsan_spec) + + # We need to retrieve again the properties and reapply them + # As the VSAN configuration has changed + cluster_spec = vim.ClusterConfigSpecEx() + props = salt.utils.vmware.get_properties_of_managed_object( + cluster_ref, properties=['configurationEx']) + # Copy elements we want to update to spec + for p in ['dasConfig', 'drsConfig']: + setattr(cluster_spec, p, getattr(props['configurationEx'], p)) + if props['configurationEx'].vsanConfigInfo: + cluster_spec.vsanConfig = props['configurationEx'].vsanConfigInfo + # We only need to configure the cluster_spec, as if it were a vsan_61 + # cluster + _apply_cluster_dict(cluster_spec, cluster_dict) + salt.utils.vmware.update_cluster(cluster_ref, cluster_spec) + return {'update_cluster': True} + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From ca9235ff949d80317682f16f6deaaccb2faf4538 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 13 Sep 2017 08:42:15 -0400 Subject: [PATCH 561/639] Added RecursiveDictDiffer in salt.utils.dictdiffer that recursively compares dictionaries --- salt/utils/dictdiffer.py | 305 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 305 
insertions(+) diff --git a/salt/utils/dictdiffer.py b/salt/utils/dictdiffer.py index 2438a75ebc..7a7fb76a18 100644 --- a/salt/utils/dictdiffer.py +++ b/salt/utils/dictdiffer.py @@ -8,6 +8,8 @@ Originally posted at http://stackoverflow.com/questions/1165352/fast-comparison-between-two-python-dictionary/1165552#1165552 Available at repository: https://github.com/hughdbrown/dictdiffer + + Added the ability to recursively compare dictionaries ''' from __future__ import absolute_import from copy import deepcopy @@ -77,3 +79,306 @@ def deep_diff(old, new, ignore=None): if new: res['new'] = new return res + + +def recursive_diff(past_dict, current_dict, ignore_missing_keys=True): + ''' + Returns a RecursiveDictDiffer object that computes the recursive diffs + between two dictionaries + + past_dict + Past dictionary + + current_dict + Current dictionary + + ignore_missing_keys + Flag specifying whether to ignore keys that no longer exist in the + current_dict, but exist in the past_dict. If true, the diff will + not contain the missing keys. + Default is True. + ''' + return RecursiveDictDiffer(past_dict, current_dict, ignore_missing_keys) + + +class RecursiveDictDiffer(DictDiffer): + ''' + Calculates a recursive diff between the current_dict and the past_dict + creating a diff in the format + + {'new': new_value, 'old': old_value} + + It recursively searches differences in common keys whose values are + dictionaries creating a diff dict in the format + + {'common_key' : {'new': new_value, 'old': old_value} + + The class overrides all DictDiffer methods, returning lists of keys and + subkeys using the . notation (i.e 'common_key1.common_key2.changed_key') + + The class provides access to: + (1) the added, removed, changes keys and subkeys (using the . 
notation) + ``added``, ``removed``, ``changed`` methods + (2) the diffs in the format aboce (diff property) + ``diffs`` property + (3) a dict with the new changed values only (new_values property) + ``new_values`` property + (4) a dict with the old changed values only (old_values property) + ``old_values`` property + (5) a string representation of the changes in the format: + ``changes_str`` property + + Note: + The <_null_> value is a reserved value + +.. code-block:: text + + common_key1: + common_key2: + changed_key1 from '' to '' + changed_key2 from '[, ..]' to '[, ..]' + common_key3: + changed_key3 from to + + ''' + NONE_VALUE = '<_null_>' + + def __init__(self, past_dict, current_dict, ignore_missing_keys): + ''' + past_dict + Past dictionary. + + current_dict + Current dictionary. + + ignore_missing_keys + Flag specifying whether to ignore keys that no longer exist in the + current_dict, but exist in the past_dict. If true, the diff will + not contain the missing keys. + ''' + super(RecursiveDictDiffer, self).__init__(current_dict, past_dict) + self._diffs = \ + self._get_diffs(self.current_dict, self.past_dict, + ignore_missing_keys) + + @classmethod + def _get_diffs(cls, dict1, dict2, ignore_missing_keys): + ''' + Returns a dict with the differences between dict1 and dict2 + + Notes: + Keys that only exist in dict2 are not included in the diff if + ignore_missing_keys is True, otherwise they are + Simple compares are done on lists + ''' + ret_dict = {} + for p in dict1.keys(): + if p not in dict2: + ret_dict.update({p: {'new': dict1[p], 'old': cls.NONE_VALUE}}) + elif dict1[p] != dict2[p]: + if isinstance(dict1[p], dict) and isinstance(dict2[p], dict): + sub_diff_dict = cls._get_diffs(dict1[p], dict2[p], + ignore_missing_keys) + if sub_diff_dict: + ret_dict.update({p: sub_diff_dict}) + else: + ret_dict.update({p: {'new': dict1[p], 'old': dict2[p]}}) + if not ignore_missing_keys: + for p in dict2.keys(): + if p not in dict1.keys(): + ret_dict.update({p: 
{'new': cls.NONE_VALUE, + 'old': dict2[p]}}) + return ret_dict + + @classmethod + def _get_values(cls, diff_dict, type='new'): + ''' + Returns a dictionaries with the 'new' values in a diff dict. + + type + Which values to return, 'new' or 'old' + ''' + ret_dict = {} + for p in diff_dict.keys(): + if type in diff_dict[p].keys(): + ret_dict.update({p: diff_dict[p][type]}) + else: + ret_dict.update( + {p: cls._get_values(diff_dict[p], type=type)}) + return ret_dict + + @classmethod + def _get_changes(cls, diff_dict): + ''' + Returns a list of string message with the differences in a diff dict. + + Each inner difference is tabulated two space deeper + ''' + changes_strings = [] + for p in diff_dict.keys(): + if sorted(diff_dict[p].keys()) == ['new', 'old']: + # Some string formatting + old_value = diff_dict[p]['old'] + if diff_dict[p]['old'] == cls.NONE_VALUE: + old_value = 'nothing' + elif isinstance(diff_dict[p]['old'], str): + old_value = '\'{0}\''.format(diff_dict[p]['old']) + elif isinstance(diff_dict[p]['old'], list): + old_value = '\'{0}\''.format( + ', '.join(diff_dict[p]['old'])) + new_value = diff_dict[p]['new'] + if diff_dict[p]['new'] == cls.NONE_VALUE: + new_value = 'nothing' + elif isinstance(diff_dict[p]['new'], str): + new_value = '\'{0}\''.format(diff_dict[p]['new']) + elif isinstance(diff_dict[p]['new'], list): + new_value = '\'{0}\''.format(', '.join(diff_dict[p]['new'])) + changes_strings.append('{0} from {1} to {2}'.format( + p, old_value, new_value)) + else: + sub_changes = cls._get_changes(diff_dict[p]) + if sub_changes: + changes_strings.append('{0}:'.format(p)) + changes_strings.extend([' {0}'.format(c) + for c in sub_changes]) + return changes_strings + + def added(self): + ''' + Returns all keys that have been added. + + If the keys are in child dictionaries they will be represented with + . 
notation + ''' + def _added(diffs, prefix): + keys = [] + for key in diffs.keys(): + if isinstance(diffs[key], dict) and 'old' not in diffs[key]: + keys.extend(_added(diffs[key], + prefix='{0}{1}.'.format(prefix, key))) + elif diffs[key]['old'] == self.NONE_VALUE: + if isinstance(diffs[key]['new'], dict): + keys.extend( + _added(diffs[key]['new'], + prefix='{0}{1}.'.format(prefix, key))) + else: + keys.append('{0}{1}'.format(prefix, key)) + return keys + + return _added(self._diffs, prefix='') + + def removed(self): + ''' + Returns all keys that have been removed. + + If the keys are in child dictionaries they will be represented with + . notation + ''' + def _removed(diffs, prefix): + keys = [] + for key in diffs.keys(): + if isinstance(diffs[key], dict) and 'old' not in diffs[key]: + keys.extend(_removed(diffs[key], + prefix='{0}{1}.'.format(prefix, key))) + elif diffs[key]['new'] == self.NONE_VALUE: + keys.append('{0}{1}'.format(prefix, key)) + elif isinstance(diffs[key]['new'], dict): + keys.extend( + _removed(diffs[key]['new'], + prefix='{0}{1}.'.format(prefix, key))) + return keys + + return _removed(self._diffs, prefix='') + + def changed(self, ignore_unset_values=True): + ''' + Returns all keys that have been changed. + + If the keys are in child dictionaries they will be represented with + . 
notation + ''' + def _changed(diffs, prefix): + keys = [] + for key in diffs.keys(): + if not isinstance(diffs[key], dict): + continue + + if isinstance(diffs[key], dict) and 'old' not in diffs[key]: + keys.extend(_changed(diffs[key], + prefix='{0}{1}.'.format(prefix, key))) + continue + if ignore_unset_values: + if 'old' in diffs[key] and 'new' in diffs[key] and \ + diffs[key]['old'] != self.NONE_VALUE and \ + diffs[key]['new'] != self.NONE_VALUE: + if isinstance(diffs[key]['new'], dict): + keys.extend( + _changed(diffs[key]['new'], + prefix='{0}{1}.'.format(prefix, key))) + else: + keys.append('{0}{1}'.format(prefix, key)) + elif isinstance(diffs[key], dict): + keys.extend( + _changed(diffs[key], + prefix='{0}{1}.'.format(prefix, key))) + else: + if 'old' in diffs[key] and 'new' in diffs[key]: + if isinstance(diffs[key]['new'], dict): + keys.extend( + _changed(diffs[key]['new'], + prefix='{0}{1}.'.format(prefix, key))) + else: + keys.append('{0}{1}'.format(prefix, key)) + elif isinstance(diffs[key], dict): + keys.extend( + _changed(diffs[key], + prefix='{0}{1}.'.format(prefix, key))) + + return keys + + return _changed(self._diffs, prefix='') + + def unchanged(self): + ''' + Returns all keys that have been unchanged. + + If the keys are in child dictionaries they will be represented with + . 
notation + ''' + def _unchanged(current_dict, diffs, prefix): + keys = [] + for key in current_dict.keys(): + if not key in diffs: + keys.append('{0}{1}'.format(prefix, key)) + elif isinstance(current_dict[key], dict): + if 'new' in diffs[key]: + # There is a diff + continue + else: + keys.extend( + _unchanged(current_dict[key], + diffs[key], + prefix='{0}{1}.'.format(prefix, key))) + + return keys + return _unchanged(self.current_dict, self._diffs, prefix='') + + @property + def diffs(self): + '''Returns a dict with the recursive diffs current_dict - past_dict''' + return self._diffs + + @property + def new_values(self): + '''Returns a dictionary with the new values''' + return self._get_values(self._diffs, type='new') + + @property + def old_values(self): + '''Returns a dictionary with the old values''' + return self._get_values(self._diffs, type='old') + + @property + def changes_str(self): + '''Returns a string describing the changes''' + return '\n'.join(self._get_changes(self._diffs)) From 81a02f49e839e7de0cadd04e8ccd60b057ca3ca7 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 13 Sep 2017 08:43:09 -0400 Subject: [PATCH 562/639] Added tests for RecursiveDictDiffer --- tests/unit/utils/test_dictdiffer.py | 93 +++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 tests/unit/utils/test_dictdiffer.py diff --git a/tests/unit/utils/test_dictdiffer.py b/tests/unit/utils/test_dictdiffer.py new file mode 100644 index 0000000000..fb1acfee53 --- /dev/null +++ b/tests/unit/utils/test_dictdiffer.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- + +# Import python libs +from __future__ import absolute_import +import copy + +# Import Salt Testing libs +from tests.support.unit import TestCase + +# Import Salt libs +import salt.utils.dictdiffer as dictdiffer + + +NONE = dictdiffer.RecursiveDictDiffer.NONE_VALUE + + +class RecursiveDictDifferTestCase(TestCase): + + def setUp(self): + old_dict = {'a': {'b': 1, 'c': 2, 'e': 'old_value', + 'f': 
'old_key'}, + 'j': 'value'} + new_dict = {'a': {'b': 1, 'c': 4, 'e': 'new_value', + 'g': 'new_key'}, + 'h': 'new_key', 'i': None, + 'j': 'value'} + self.recursive_diff = \ + dictdiffer.recursive_diff(old_dict, new_dict, + ignore_missing_keys=False) + self.recursive_diff_ign = dictdiffer.recursive_diff(old_dict, new_dict) + + def tearDown(self): + for attrname in ('recursive_diff', 'recursive_diff_missing_keys'): + try: + delattr(self, attrname) + except AttributeError: + continue + + def test_added(self): + self.assertEqual(self.recursive_diff.added(), ['a.g', 'h', 'i']) + + def test_removed(self): + self.assertEqual(self.recursive_diff.removed(), ['a.f']) + + def test_changed_with_ignore_unset_values(self): + self.assertEqual(self.recursive_diff.changed(ignore_unset_values=True), + ['a.c', 'a.e']) + + def test_changed_without_ignore_unset_values(self): + self.assertEqual(self.recursive_diff.changed(ignore_unset_values=False), + ['a.c', 'a.e', 'a.g', 'a.f', 'h', 'i']) + + def test_unchanged(self): + self.assertEqual(self.recursive_diff.unchanged(), + ['a.b', 'j']) + + def test_diffs(self): + self.assertDictEqual(self.recursive_diff.diffs, + {'a': {'c': {'old': 2, 'new': 4}, + 'e': {'old': 'old_value', + 'new': 'new_value'}, + 'f': {'old': 'old_key', 'new': NONE}, + 'g': {'old': NONE, 'new': 'new_key'}}, + 'h': {'old': NONE, 'new': 'new_key'}, + 'i': {'old': NONE, 'new': None}}) + self.assertDictEqual(self.recursive_diff_ign.diffs, + {'a': {'c': {'old': 2, 'new': 4}, + 'e': {'old': 'old_value', + 'new': 'new_value'}, + 'g': {'old': NONE, 'new': 'new_key'}}, + 'h': {'old': NONE, 'new': 'new_key'}, + 'i': {'old': NONE, 'new': None}}) + + def test_new_values(self): + self.assertDictEqual(self.recursive_diff.new_values, + {'a': {'c': 4, 'e': 'new_value', + 'f': NONE, 'g': 'new_key'}, + 'h': 'new_key', 'i': None}) + + def test_old_values(self): + self.assertDictEqual(self.recursive_diff.old_values, + {'a': {'c': 2, 'e': 'old_value', + 'f': 'old_key', 'g': NONE}, + 'h': 
NONE, 'i': NONE}) + def test_changes_str(self): + self.assertEqual(self.recursive_diff.changes_str, + 'a:\n' + ' c from 2 to 4\n' + ' e from \'old_value\' to \'new_value\'\n' + ' g from nothing to \'new_key\'\n' + ' f from \'old_key\' to nothing\n' + 'h from nothing to \'new_key\'\n' + 'i from nothing to None') From e1822b4a96df1e232aeb63d63e1af2cf9f2423dc Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 13 Sep 2017 08:44:18 -0400 Subject: [PATCH 563/639] Added ListDictDiffer that recursively compares lists of dictionaries --- salt/utils/listdiffer.py | 252 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 252 insertions(+) create mode 100644 salt/utils/listdiffer.py diff --git a/salt/utils/listdiffer.py b/salt/utils/listdiffer.py new file mode 100644 index 0000000000..7f549cce1d --- /dev/null +++ b/salt/utils/listdiffer.py @@ -0,0 +1,252 @@ +# -*- coding: utf-8 -*- +''' +Compare lists of dictionaries by a specified key. + +The following can be retrieved: + (1) List of added, removed, intersect elements + (2) List of diffs having the following format: + : {, 'new': }} + A recursive diff is done between the the values (dicts) with the same + key + (3) List with the new values for each key + (4) List with the old values for each key + (5) List of changed items in the format + ('.', {'old': , 'new': }) + (5) String representations of the list diff + +Note: All dictionaries keys are expected to be strings +''' + +from salt.utils.dictdiffer import recursive_diff + + +def list_diff(list_a, list_b, key): + return ListDictDiffer(list_a, list_b, key) + + +class ListDictDiffer(object): + ''' + Calculates the differences between two lists of dictionaries. + + It matches the items based on a given key and uses the recursive_diff to + diff the two values. 
+ ''' + def __init__(self, current_list, next_list, key): + self._intersect = [] + self._removed = [] + self._added = [] + self._new = next_list + self._current = current_list + self._key = key + for current_item in current_list: + if key not in current_item: + raise ValueError('The supplied key \'{0}\' does not ' + 'exist in item, the available keys are: {1}' + ''.format(key, current_item.keys())) + for next_item in next_list: + if key not in next_item: + raise ValueError('The supplied key \'{0}\' does not ' + 'exist in item, the available keys are: ' + '{1}'.format(key, next_item.keys())) + if next_item[key] == current_item[key]: + item = {key: next_item[key], + 'old': current_item, + 'new': next_item} + self._intersect.append(item) + break + else: + self._removed.append(current_item) + + for next_item in next_list: + for current_item in current_list: + if next_item[key] == current_item[key]: + break + else: + self._added.append(next_item) + + def _get_recursive_difference(self, type): + '''Returns the recursive diff between dict values''' + if type == 'intersect': + return [recursive_diff(item['old'], item['new']) for item in self._intersect] + elif type == 'added': + return [recursive_diff({}, item) for item in self._added] + elif type == 'removed': + return [recursive_diff(item, {}, ignore_missing_keys=False) + for item in self._removed] + elif type == 'all': + recursive_list = [] + recursive_list.extend([recursive_diff(item['old'], item['new']) for item in self._intersect]) + recursive_list.extend([recursive_diff({}, item) for item in self._added]) + recursive_list.extend([recursive_diff(item, {}, + ignore_missing_keys=False) + for item in self._removed]) + return recursive_list + else: + raise ValueError('The given type for recursive list matching ' + 'is not supported.') + + @property + def removed(self): + '''Returns the objects which are removed from the list''' + return self._removed + + @property + def added(self): + '''Returns the objects which are 
added to the list''' + return self._added + + @property + def intersect(self): + '''Returns the intersect objects''' + return self._intersect + + def remove_diff(self, diff_key=None, diff_list='intersect'): + '''Deletes an attribute from all of the intersect objects''' + if diff_list == 'intersect': + for item in self._intersect: + item['old'].pop(diff_key, None) + item['new'].pop(diff_key, None) + if diff_list == 'removed': + for item in self._removed: + item.pop(diff_key, None) + + @property + def diffs(self): + ''' + Returns a list of dictionaries with key value pairs. + The values are the differences between the items identified by the key. + ''' + differences = [] + for item in self._get_recursive_difference(type='all'): + if item.diffs: + if item.past_dict: + differences.append({item.past_dict[self._key]: item.diffs}) + elif item.current_dict: + differences.append({item.current_dict[self._key]: item.diffs}) + return differences + + @property + def changes_str(self): + '''Returns a string describing the changes''' + changes = '' + for item in self._get_recursive_difference(type='intersect'): + if item.diffs: + changes = ''.join([changes, + # Tabulate comment deeper, show the key attribute and the value + # Next line should be tabulated even deeper, + # every change should be tabulated 1 deeper + '\tidentified by {0} {1}:\n\t{2}\n'.format( + self._key, + item.past_dict[self._key], + item.changes_str.replace('\n', '\n\t'))]) + for item in self._get_recursive_difference(type='removed'): + if item.past_dict: + changes = ''.join([changes, + # Tabulate comment deeper, show the key attribute and the value + '\tidentified by {0} {1}:' + '\n\twill be removed\n'.format(self._key, + item.past_dict[self._key])]) + for item in self._get_recursive_difference(type='added'): + if item.current_dict: + changes = ''.join([changes, + # Tabulate comment deeper, show the key attribute and the value + '\tidentified by {0} {1}:' + '\n\twill be added\n'.format(self._key, + 
item.current_dict[self._key])]) + return changes + + @property + def changes_str2(self, tab_string=' '): + ''' + Returns a string in a more compact format describing the changes. + + The output better alligns with the one in recursive_diff. + ''' + changes = [] + for item in self._get_recursive_difference(type='intersect'): + if item.diffs: + changes.append('{tab}{0}={1} (updated):\n{tab}{tab}{2}' + ''.format(self._key, item.past_dict[self._key], + item.changes_str.replace( + '\n', + '\n{0}{0}'.format(tab_string)), + tab=tab_string)) + for item in self._get_recursive_difference(type='removed'): + if item.past_dict: + changes.append('{tab}{0}={1} (removed)'.format( + self._key, item.past_dict[self._key], tab=tab_string)) + for item in self._get_recursive_difference(type='added'): + if item.current_dict: + changes.append('{tab}{0}={1} (added): {2}'.format( + self._key, item.current_dict[self._key], + dict(item.current_dict), tab=tab_string)) + return '\n'.join(changes) + + @property + def new_values(self): + '''Returns the new values from the diff''' + def get_new_values_and_key(item): + values = item.new_values + if item.past_dict: + values.update({self._key: item.past_dict[self._key]}) + else: + # This is a new item as it has no past_dict + values.update({self._key: item.current_dict[self._key]}) + return values + + return [get_new_values_and_key(el) + for el in self._get_recursive_difference('all') + if el.diffs and el.current_dict] + + @property + def old_values(self): + '''Returns the old values from the diff''' + def get_old_values_and_key(item): + values = item.old_values + values.update({self._key: item.past_dict[self._key]}) + return values + + return [get_old_values_and_key(el) + for el in self._get_recursive_difference('all') + if el.diffs and el.past_dict] + + def changed(self, selection='all'): + ''' + Returns the list of changed values. + The key is added to each item. + + selection + Specifies the desired changes. 
+ Supported values are + ``all`` - all changed items are included in the output + ``intersect`` - changed items present in both lists are included + ''' + changed = [] + if selection == 'all': + for recursive_item in self._get_recursive_difference(type='all'): + key_val = str(recursive_item.past_dict[self._key]) \ + if self._key in recursive_item.past_dict \ + else str(recursive_item.current_dict[self._key]) + + for change in recursive_item.changed(ignore_unset_values=False): + if change != self._key: + changed.append('.'.join([self._key, key_val, change])) + return changed + elif selection == 'intersect': + for recursive_item in self._get_recursive_difference(type='intersect'): + key_val = str(recursive_item.past_dict[self._key]) \ + if self._key in recursive_item.past_dict \ + else str(recursive_item.current_dict[self._key]) + + for change in recursive_item.changed(ignore_unset_values=False): + if change != self._key: + changed.append('.'.join([self._key, key_val, change])) + return changed + + @property + def current_list(self): + return self._current + + @property + def new_list(self): + return self._new From f0423345845e31fddede5dddd4fad03c57da0958 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 13 Sep 2017 08:45:02 -0400 Subject: [PATCH 564/639] Added tests for list_diff --- tests/unit/utils/test_listdiffer.py | 90 +++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 tests/unit/utils/test_listdiffer.py diff --git a/tests/unit/utils/test_listdiffer.py b/tests/unit/utils/test_listdiffer.py new file mode 100644 index 0000000000..cd5e626f4e --- /dev/null +++ b/tests/unit/utils/test_listdiffer.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- + +# Import python libs +from __future__ import absolute_import +import copy + +# Import Salt Testing libs +from tests.support.unit import TestCase + +# Import Salt libs +from salt.utils.listdiffer import list_diff + +from salt.utils import dictdiffer +NONE = 
dictdiffer.RecursiveDictDiffer.NONE_VALUE + + +class ListDictDifferTestCase(TestCase): + + def setUp(self): + old_list = [{'key': 1, 'value': 'foo1', 'int_value': 101}, + {'key': 2, 'value': 'foo2', 'int_value': 102}, + {'key': 3, 'value': 'foo3', 'int_value': 103}] + new_list = [{'key': 1, 'value': 'foo1', 'int_value': 101}, + {'key': 2, 'value': 'foo2', 'int_value': 112}, + {'key': 5, 'value': 'foo5', 'int_value': 105}] + self.list_diff = list_diff(old_list, new_list, key='key') + + def tearDown(self): + for attrname in ('list_diff',): + try: + delattr(self, attrname) + except AttributeError: + continue + + def test_added(self): + self.assertEqual(self.list_diff.added, + [{'key': 5, 'value': 'foo5', 'int_value': 105}]) + + def test_removed(self): + self.assertEqual(self.list_diff.removed, + [{'key': 3, 'value': 'foo3', 'int_value': 103}]) + + def test_diffs(self): + self.assertEqual(self.list_diff.diffs, + [{2: {'int_value': {'new': 112, 'old': 102}}}, + # Added items + {5: {'int_value': {'new': 105, 'old': NONE}, + 'key': {'new': 5, 'old': NONE}, + 'value': {'new': 'foo5', 'old': NONE}}}, + # Removed items + {3: {'int_value': {'new': NONE, 'old': 103}, + 'key': {'new': NONE, 'old': 3}, + 'value': {'new': NONE, 'old': 'foo3'}}}]) + + def test_new_values(self): + self.assertEqual(self.list_diff.new_values, + [{'key': 2, 'int_value': 112}, + {'key': 5, 'value': 'foo5', 'int_value': 105}]) + + def test_old_values(self): + self.assertEqual(self.list_diff.old_values, + [{'key': 2, 'int_value': 102}, + {'key': 3, 'value': 'foo3', 'int_value': 103}]) + + def test_changed_all(self): + self.assertEqual(self.list_diff.changed(selection='all'), + ['key.2.int_value', 'key.5.int_value', 'key.5.value', + 'key.3.int_value', 'key.3.value']) + + def test_changed_intersect(self): + self.assertEqual(self.list_diff.changed(selection='intersect'), + ['key.2.int_value']) + + def test_changes_str(self): + self.assertEqual(self.list_diff.changes_str, + '\tidentified by key 2:\n' + 
'\tint_value from 102 to 112\n' + '\tidentified by key 3:\n' + '\twill be removed\n' + '\tidentified by key 5:\n' + '\twill be added\n') + + + def test_changes_str2(self): + self.assertEqual(self.list_diff.changes_str2, + ' key=2 (updated):\n' + ' int_value from 102 to 112\n' + ' key=3 (removed)\n' + ' key=5 (added): {\'int_value\': 105, \'key\': 5, ' + '\'value\': \'foo5\'}') From 359069904b9da3529e87329e1f1f366478fa7401 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 13 Sep 2017 08:48:37 -0400 Subject: [PATCH 565/639] Added JSON schema defintions ESX cluster config and all dependencies --- salt/config/schemas/esxcluster.py | 134 +++++++++++++++++++++++++++++- 1 file changed, 132 insertions(+), 2 deletions(-) diff --git a/salt/config/schemas/esxcluster.py b/salt/config/schemas/esxcluster.py index f9eb70fb67..5e96c5ef01 100644 --- a/salt/config/schemas/esxcluster.py +++ b/salt/config/schemas/esxcluster.py @@ -4,7 +4,7 @@ salt.config.schemas.esxcluster - ~~~~~~~~~~~~~~~~~~~~~~~ + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ESX Cluster configuration schemas ''' @@ -14,9 +14,139 @@ from __future__ import absolute_import # Import Salt libs from salt.utils.schema import (Schema, + DefinitionsSchema, + ComplexSchemaItem, ArrayItem, IntegerItem, - StringItem) + BooleanItem, + StringItem, + AnyOfItem) + + +class OptionValueItem(ComplexSchemaItem): + '''Sechma item of the OptionValue''' + + title = 'OptionValue' + key=StringItem(title='Key', required=True) + value=AnyOfItem(items=[StringItem(), BooleanItem(), IntegerItem()]) + + +class AdmissionControlPolicyItem(ComplexSchemaItem): + ''' + Schema item of the HA admission control policy + ''' + + title = 'Admission Control Policy' + + cpu_failover_percent = IntegerItem( + title='CPU Failover Percent', + minimum=0, maximum=100) + memory_failover_percent = IntegerItem( + title='Memory Failover Percent', + minimum=0, maximum=100) + + +class DefaultVmSettingsItem(ComplexSchemaItem): + ''' + Schema item of the HA default vm 
settings + ''' + + title = 'Default VM Settings' + + isolation_response = StringItem( + title='Isolation Response', + enum=['clusterIsolationResponse', 'none', 'powerOff', 'shutdown']) + restart_priority = StringItem( + title='Restart Priority', + enum=['clusterRestartPriority', 'disabled', 'high', 'low', 'medium']) + + +class HAConfigItem(ComplexSchemaItem): + ''' + Schema item of ESX cluster high availability + ''' + + title = 'HA Configuration' + description = 'ESX cluster HA configuration json schema item' + + enabled = BooleanItem( + title='Enabled', + description='Specifies if HA should be enabled') + admission_control_enabled = BooleanItem( + title='Admission Control Enabled') + admission_control_policy = AdmissionControlPolicyItem() + default_vm_settings = DefaultVmSettingsItem() + hb_ds_candidate_policy = StringItem( + title='Hartbeat Datastore Candidate Policy', + enum=['allFeasibleDs', 'allFeasibleDsWithUserPreference', + 'userSelectedDs']) + host_monitoring = StringItem(title='Host Monitoring', + choices=['enabled', 'disabled']) + options = ArrayItem(min_items=1, items=OptionValueItem()) + vm_monitoring = StringItem( + title='Vm Monitoring', + choices=['vmMonitoringDisabled', 'vmAndAppMonitoring', + 'vmMonitoringOnly']) + + +class vSANClusterConfigItem(ComplexSchemaItem): + ''' + Schema item of the ESX cluster vSAN configuration + ''' + + title = 'vSAN Configuration' + description = 'ESX cluster vSAN configurationi item' + + enabled = BooleanItem( + title='Enabled', + description='Specifies if vSAN should be enabled') + auto_claim_storage = BooleanItem( + title='Auto Claim Storage', + description='Specifies whether the storage of member ESXi hosts should ' + 'be automatically claimed for vSAN') + dedup_enabled = BooleanItem( + title='Enabled', + description='Specifies dedup should be enabled') + compression_enabled = BooleanItem( + title='Enabled', + description='Specifies if compression should be enabled') + + +class DRSConfigItem(ComplexSchemaItem): + 
''' + Schema item of the ESX cluster DRS configuration + ''' + + title = 'DRS Configuration' + description = 'ESX cluster DRS configuration item' + + enabled = BooleanItem( + title='Enabled', + description='Specifies if DRS should be enabled') + vmotion_rate = IntegerItem( + title='vMotion rate', + description='Aggressiveness to do automatic vMotions: ' + '1 (least aggressive) - 5 (most aggressive)', + minimum=1, + maximum=5) + default_vm_behavior= StringItem( + title='Default VM DRS Behavior', + description='Specifies the default VM DRS behavior', + enum=['fullyAutomated', 'partiallyAutomated', 'manual']) + + +class ESXClusterConfigSchema(DefinitionsSchema): + ''' + Schema of the ESX cluster config + ''' + + title = 'ESX Cluster Configuration Schema' + description = 'ESX cluster configuration schema' + + ha = HAConfigItem() + vsan = vSANClusterConfigItem() + drs = DRSConfigItem() + vm_swap_placement = StringItem(title='VM Swap Placement') class EsxclusterProxySchema(Schema): From 08100d719d8fecc4b6ce15440767421291af2251 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 13 Sep 2017 08:47:23 -0400 Subject: [PATCH 566/639] Added cluster_configured state to create/manage ESX clusters --- salt/states/esxcluster.py | 250 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 250 insertions(+) create mode 100644 salt/states/esxcluster.py diff --git a/salt/states/esxcluster.py b/salt/states/esxcluster.py new file mode 100644 index 0000000000..ecbb1d4c5c --- /dev/null +++ b/salt/states/esxcluster.py @@ -0,0 +1,250 @@ +# -*- coding: utf-8 -*- +''' +Manage VMware ESXi Clusters. + +Dependencies +============ + +- pyVmomi Python Module + + +pyVmomi +------- + +PyVmomi can be installed via pip: + +.. code-block:: bash + + pip install pyVmomi + +.. note:: + + Version 6.0 of pyVmomi has some problems with SSL error handling on certain + versions of Python. If using version 6.0 of pyVmomi, Python 2.6, + Python 2.7.9, or newer must be present. 
This is due to an upstream dependency + in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the + version of Python is not in the supported range, you will need to install an + earlier version of pyVmomi. See `Issue #29537`_ for more information. + +.. _Issue #29537: https://github.com/saltstack/salt/issues/29537 + +Based on the note above, to install an earlier version of pyVmomi than the +version currently listed in PyPi, run the following: + +.. code-block:: bash + + pip install pyVmomi==5.5.0.2014.1.1 + +The 5.5.0.2014.1.1 is a known stable version that this original ESXi State +Module was developed against. +''' + +# Import Python Libs +from __future__ import absolute_import +import logging +import traceback + +# Import Salt Libs +import salt.exceptions +from salt.utils.dictdiffer import recursive_diff +from salt.utils.listdiffer import list_diff +from salt.config.schemas.esxcluster import ESXClusterConfigSchema +from salt.utils import dictupdate + +# External libraries +try: + import jsonschema + HAS_JSONSCHEMA = True +except ImportError: + HAS_JSONSCHEMA = False + +# Get Logging Started +log = logging.getLogger(__name__) + + +def __virtual__(): + return HAS_JSONSCHEMA + + +def mod_init(low): + ''' + Retrieves and adapt the login credentials from the proxy connection module + ''' + return True + + +def cluster_configured(name, cluster_config): + ''' + Configures a cluster. Creates a new cluster, if it doesn't exist on the + vCenter or reconfigures it if configured differently + + Supported proxies: esxdatacenter, esxcluster + + name + Name of the state. If the state is run in by an ``esxdatacenter`` + proxy, it will be the name of the cluster. + + cluster_config + Configuration applied to the cluster. + Complex datastructure following the ESXClusterConfigSchema. + Valid example is: + +.. 
code-block::yaml + + drs: + default_vm_behavior: fullyAutomated + enabled: true + vmotion_rate: 3 + ha: + admission_control + _enabled: false + default_vm_settings: + isolation_response: powerOff + restart_priority: medium + enabled: true + hb_ds_candidate_policy: userSelectedDs + host_monitoring: enabled + options: + - key: das.ignoreinsufficienthbdatastore + value: 'true' + vm_monitoring: vmMonitoringDisabled + vm_swap_placement: vmDirectory + vsan: + auto_claim_storage: false + compression_enabled: true + dedup_enabled: true + enabled: true + + ''' + proxy_type = __salt__['vsphere.get_proxy_type']() + if proxy_type == 'esxdatacenter': + cluster_name, datacenter_name = \ + name, __salt__['esxdatacenter.get_details']()['datacenter'] + elif proxy_type == 'esxcluster': + cluster_name, datacenter_name = \ + __salt__['esxcluster.get_details']()['cluster'], \ + __salt__['esxcluster.get_details']()['datacenter'] + else: + raise salt.exceptions.CommandExecutionError('Unsupported proxy {0}' + ''.format(proxy_type)) + log.info('Running {0} for cluster \'{1}\' in datacenter ' + '\'{2}\''.format(name, cluster_name, datacenter_name)) + cluster_dict = cluster_config + log.trace('cluster_dict = {0}'.format(cluster_dict)) + changes_required = False + ret = {'name': name, + 'changes': {}, 'result': None, 'comment': 'Default'} + comments = [] + changes = {} + changes_required = False + + try: + log.debug('Validating cluster_configured state input') + schema = ESXClusterConfigSchema.serialize() + log.trace('schema = {0}'.format(schema)) + try: + jsonschema.validate(cluster_dict, schema) + except jsonschema.exceptions.ValidationError as exc: + raise salt.exceptions.InvalidESXClusterPayloadError(exc) + current = None + si = __salt__['vsphere.get_service_instance_via_proxy']() + try: + current = __salt__['vsphere.list_cluster'](datacenter_name, + cluster_name, + service_instance=si) + except salt.exceptions.VMwareObjectRetrievalError: + changes_required = True + if __opts__['test']: + 
comments.append('State {0} will create cluster ' + '\'{1}\' in datacenter \'{2}\'.' + ''.format(name, cluster_name, datacenter_name)) + log.info(comments[-1]) + __salt__['vsphere.disconnect'](si) + ret.update({'result': None, + 'comment': '\n'.join(comments)}) + return ret + log.debug ('Creating cluster \'{0}\' in datacenter \'{1}\'. ' + ''.format(cluster_name, datacenter_name)) + __salt__['vsphere.create_cluster'](cluster_dict, + datacenter_name, + cluster_name, + service_instance=si) + comments.append('Created cluster \'{0}\' in datacenter \'{1}\'' + ''.format(cluster_name, datacenter_name)) + log.info(comments[-1]) + changes.update({'new': cluster_dict}) + if current: + # Cluster already exists + # We need to handle lists sepparately + ldiff = None + if 'ha' in cluster_dict and 'options' in cluster_dict['ha']: + ldiff = list_diff(current.get('ha', {}).get('options', []), + cluster_dict.get('ha', {}).get('options', []), + 'key') + log.trace('options diffs = {0}'.format(ldiff.diffs)) + # Remove options if exist + del cluster_dict['ha']['options'] + if 'ha' in current and 'options' in current['ha']: + del current['ha']['options'] + diff = recursive_diff(current, cluster_dict) + log.trace('diffs = {0}'.format(diff.diffs)) + if not (diff.diffs or (ldiff and ldiff.diffs)): + # No differences + comments.append('Cluster \'{0}\' in datacenter \'{1}\' is up ' + 'to date. Nothing to be done.' 
+ ''.format(cluster_name, datacenter_name)) + log.info(comments[-1]) + else: + changes_required = True + changes_str = '' + if diff.diffs: + changes_str = '{0}{1}'.format(changes_str, + diff.changes_str) + if ldiff and ldiff.diffs: + changes_str = '{0}\nha:\n options:\n{1}'.format( + changes_str, + '\n'.join([' {0}'.format(l) for l in + ldiff.changes_str2.split('\n')])) + # Apply the changes + if __opts__['test']: + comments.append( + 'State {0} will update cluster \'{1}\' ' + 'in datacenter \'{2}\':\n{3}' + ''.format(name, cluster_name, + datacenter_name, changes_str)) + else: + new_values = diff.new_values + old_values = diff.old_values + if ldiff and ldiff.new_values: + dictupdate.update( + new_values, {'ha': {'options': ldiff.new_values}}) + if ldiff and ldiff.old_values: + dictupdate.update( + old_values, {'ha': {'options': ldiff.old_values}}) + log.trace('new_values = {0}'.format(new_values)) + __salt__['vsphere.update_cluster'](new_values, + datacenter_name, + cluster_name, + service_instance=si) + comments.append('Updated cluster \'{0}\' in datacenter ' + '\'{1}\''.format(cluster_name, + datacenter_name)) + log.info(comments[-1]) + changes.update({'new': new_values, + 'old': old_values}) + __salt__['vsphere.disconnect'](si) + ret_status = True + if __opts__['test'] and changes_required: + ret_status = None + ret.update({'result': ret_status, + 'comment': '\n'.join(comments), + 'changes': changes}) + return ret + except salt.exceptions.CommandExecutionError as exc: + log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc())) + if si: + __salt__['vsphere.disconnect'](si) + ret.update({ + 'result': False, + 'comment': str(exc)}) + return ret From 26207e2accfd53ed913e6df9654b6b87e8cea44b Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 15 Sep 2017 11:00:38 -0400 Subject: [PATCH 567/639] Added salt.states.esxcluster._get_vsan_datastore --- salt/states/esxcluster.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git 
a/salt/states/esxcluster.py b/salt/states/esxcluster.py index ecbb1d4c5c..7a8a91cc86 100644 --- a/salt/states/esxcluster.py +++ b/salt/states/esxcluster.py @@ -73,6 +73,22 @@ def mod_init(low): return True +def _get_vsan_datastore(si, cluster_name): + '''Retrieves the vsan_datastore''' + + log.trace('Retrieving vsan datastore') + vsan_datastores = [ds for ds in + __salt__['vsphere.list_datastores_via_proxy']( + service_instance=si) + if ds['type'] == 'vsan'] + + if not vsan_datastores: + raise salt.exceptions.VMwareObjectRetrievalError( + 'No vSAN datastores where retrieved for cluster ' + '\'{0}\''.format(cluster_name)) + return vsan_datastores[0] + + def cluster_configured(name, cluster_config): ''' Configures a cluster. Creates a new cluster, if it doesn't exist on the From 85f328f49c091856bb6672f41dd1bb8d2d336323 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 15 Sep 2017 11:12:28 -0400 Subject: [PATCH 568/639] Added vsan_datastore_configured esxcluster state that renames the vSan datastore on a cluster --- salt/states/esxcluster.py | 69 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 68 insertions(+), 1 deletion(-) diff --git a/salt/states/esxcluster.py b/salt/states/esxcluster.py index 7a8a91cc86..6db8072544 100644 --- a/salt/states/esxcluster.py +++ b/salt/states/esxcluster.py @@ -256,7 +256,7 @@ def cluster_configured(name, cluster_config): 'comment': '\n'.join(comments), 'changes': changes}) return ret - except salt.exceptions.CommandExecutionError as exc: + except excs.CommandExecutionError as exc: log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc())) if si: __salt__['vsphere.disconnect'](si) @@ -264,3 +264,70 @@ def cluster_configured(name, cluster_config): 'result': False, 'comment': str(exc)}) return ret + + +def vsan_datastore_configured(name, datastore_name): + ''' + Configures the cluster's vsan_datastore + + WARNING: vsan datastores will not exist until there is at least on host in + the cluster; the state assumes that 
the datastore exists and errors out if + it doesn't; it's up to the user to accept the error or enable the state run + when the datastore does exist (grain: vsan_datastore_exists) + ''' + + cluster_name, datacenter_name = \ + __salt__['esxcluster.get_details']()['cluster'], \ + __salt__['esxcluster.get_details']()['datacenter'] + display_name = '{0}/{1}'.format(datacenter_name, cluster_name) + log.info('Running vsan_datastore_configured for ' + '\'{0}\''.format(display_name)) + ret = {'name': name, + 'changes': {}, 'result': None, + 'comment': 'Default'} + comments = [] + changes = {} + changes_required = False + + try: + si = __salt__['vsphere.get_service_instance_via_proxy']() + # Checking if we need to rename the vsan datastore + vsan_ds = _get_vsan_datastore(si, cluster_name) + if vsan_ds['name'] == datastore_name: + comments.append('vSAN datastore is correctly named \'{0}\'. ' + 'Nothing to be done.'.format(vsan_ds['name'])) + log.info(comments[-1]) + else: + # vsan_ds needs to be updated + changes_required = True + if __opts__['test']: + comments.append('State {0} will rename the vSAN datastore to ' + '\'{1}\'.'.format(name, datastore_name)) + log.info(comments[-1]) + else: + log.debug('Renaming vSAN datastore \'{0}\' to \'{1}\'' + ''.format(vsan_ds['name'], datastore_name)) + __salt__['vsphere.rename_datastore']( + datastore_name=vsan_ds['name'], + new_datastore_name=datastore_name, + service_instance=si) + comments.append('Renamed vSAN datastore to \'{0}\'.'
+ ''.format(datastore_name)) + changes = {'vsan_datastore': {'new': {'name': datastore_name}, + 'old': {'name': vsan_ds['name']}}} + log.info(comments[-1]) + __salt__['vsphere.disconnect'](si) + + ret.update({'result': True if (not changes_required) else None if \ + __opts__['test'] else True, + 'comment': '\n'.join(comments), + 'changes': changes}) + return ret + except salt.exceptions.CommandExecutionError as exc: + log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc())) + if si: + __salt__['vsphere.disconnect'](si) + ret.update({ + 'result': False, + 'comment': exc.strerror}) + return ret From 0a5e5066cdff9f663647a671afd307772d95303d Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 15 Sep 2017 21:41:00 -0400 Subject: [PATCH 569/639] Added salt.utils.vmware.get_storage_system that retrieves an ESXi host's storage system --- salt/utils/vmware.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 6d2ff92e81..41532236a2 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1215,6 +1215,31 @@ def list_datastores(service_instance): return list_objects(service_instance, vim.Datastore) +def get_storage_system(service_instance, host_ref, hostname=None): + ''' + Returns a host's storage system + ''' + + if not hostname: + hostname = get_managed_object_name(host_ref) + + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='configManager.storageSystem', + type=vim.HostSystem, + skip=False) + objs = get_mors_with_properties(service_instance, + vim.HostStorageSystem, + property_list=['systemFile'], + container_ref=host_ref, + traversal_spec=traversal_spec) + if not objs: + raise salt.exceptions.VMwareObjectRetrievalError( + 'Host\'s \'{0}\' storage system was not retrieved' + ''.format(hostname)) + log.trace('[{0}] Retrieved storage system'.format(hostname)) + return objs[0]['object'] + + def get_hosts(service_instance, datacenter_name=None, 
host_names=None, cluster_name=None, get_all_hosts=False): ''' From 457246a87143c03f85e7650bc74b22815a57ac71 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 15 Sep 2017 21:42:58 -0400 Subject: [PATCH 570/639] Added tests for salt.utils.vmware.get_storage_system --- tests/unit/utils/vmware/test_storage.py | 111 ++++++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 tests/unit/utils/vmware/test_storage.py diff --git a/tests/unit/utils/vmware/test_storage.py b/tests/unit/utils/vmware/test_storage.py new file mode 100644 index 0000000000..6943247cec --- /dev/null +++ b/tests/unit/utils/vmware/test_storage.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Alexandru Bleotu ` + + Tests for storage related functions in salt.utils.vmware +''' + +# Import python libraries +from __future__ import absolute_import +import logging + +# Import Salt testing libraries +from tests.support.unit import TestCase, skipIf +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call, \ + PropertyMock +from salt.exceptions import VMwareObjectRetrievalError, VMwareApiError, \ + ArgumentValueError, VMwareRuntimeError + +#i Import Salt libraries +import salt.utils.vmware +# Import Third Party Libs +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + +# Get Logging Started +log = logging.getLogger(__name__) + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetStorageSystemTestCase(TestCase): + '''Tests for salt.utils.vmware.get_storage_system''' + def setUp(self): + self.mock_si = MagicMock(content=MagicMock()) + self.mock_host_ref = MagicMock() + self.mock_get_managed_object_name = MagicMock(return_value='fake_host') + self.mock_traversal_spec = MagicMock() + self.mock_obj = MagicMock() + self.mock_get_mors = \ + MagicMock(return_value=[{'object': self.mock_obj}]) + + patches = ( + 
('salt.utils.vmware.get_managed_object_name', + self.mock_get_managed_object_name), + ('salt.utils.vmware.get_mors_with_properties', + self.mock_get_mors), + ('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + MagicMock(return_value=self.mock_traversal_spec))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_host_ref', + 'mock_get_managed_object_name', + 'mock_traversal_spec', 'mock_obj'): + delattr(self, attr) + + def test_no_hostname_argument(self): + salt.utils.vmware.get_storage_system(self.mock_si, + self.mock_host_ref) + self.mock_get_managed_object_name.assert_called_once_with( + self.mock_host_ref) + + def test_hostname_argument(self): + salt.utils.vmware.get_storage_system(self.mock_si, + self.mock_host_ref, + hostname='fake_host') + self.assertEqual(self.mock_get_managed_object_name.call_count, 0) + + def test_traversal_spec(self): + mock_traversal_spec = MagicMock(return_value=[{'object': + self.mock_obj}]) + with patch( + 'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + mock_traversal_spec): + + salt.utils.vmware.get_storage_system(self.mock_si, + self.mock_host_ref) + mock_traversal_spec.assert_called_once_with( + path='configManager.storageSystem', + type=vim.HostSystem, + skip=False) + + def test_get_mors_with_properties(self): + salt.utils.vmware.get_storage_system(self.mock_si, + self.mock_host_ref) + self.mock_get_mors.assert_called_once_with( + self.mock_si, + vim.HostStorageSystem, + property_list=['systemFile'], + container_ref=self.mock_host_ref, + traversal_spec=self.mock_traversal_spec) + + def test_empty_mors_result(self): + with patch('salt.utils.vmware.get_mors_with_properties', + MagicMock(return_value=[])): + with self.assertRaises(VMwareObjectRetrievalError) as excinfo: + salt.utils.vmware.get_storage_system(self.mock_si, + self.mock_host_ref) + self.assertEqual(excinfo.exception.strerror, 
+ 'Host\'s \'fake_host\' storage system was ' + 'not retrieved') + + def test_valid_mors_result(self): + res = salt.utils.vmware.get_storage_system(self.mock_si, + self.mock_host_ref) + self.assertEqual(res, self.mock_obj) From da7fdae103c00b7d5b9a092bf2a451688a57709b Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 15 Sep 2017 23:16:06 -0400 Subject: [PATCH 571/639] Added salt.utils.vmware.get_datastores which returns a list of vim.Datastore objects - can retrieve the datastores visible to the root folder, a datacenter, a cluster and an ESXi host - can filter on datastore name and backing disk id (the latter, only if it is a VMFS datastore and the reference is an ESXi host) --- salt/utils/vmware.py | 126 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 41532236a2..981d99efc8 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1215,6 +1215,132 @@ def list_datastores(service_instance): return list_objects(service_instance, vim.Datastore) +def get_datastores(service_instance, reference, datastore_names=None, + backing_disk_ids=None, get_all_datastores=False): + ''' + Returns a list of vim.Datastore objects representing the datastores visible + from a VMware object, filtered by their names, or the backing disk + cannonical name or scsi_addresses + + service_instance + The Service Instance Object from which to obtain datastores. + + reference + The VMware object from which the datastores are visible. + + datastore_names + The list of datastore names to be retrieved. Default value is None. + + backing_disk_ids + The list of canonical names of the disks backing the datastores + to be retrieved. Only supported if reference is a vim.HostSystem. + Default value is None + + get_all_datastores + Specifies whether to retrieve all disks in the host. + Default value is False. 
+ ''' + obj_name = get_managed_object_name(reference) + if get_all_datastores: + log.trace('Retrieving all datastores visible to ' + '\'{0}\''.format(obj_name)) + else: + log.trace('Retrieving datastores visible to \'{0}\': names = ({1}); ' + 'backing disk ids = ({2})'.format(obj_name, datastore_names, + backing_disk_ids)) + if backing_disk_ids and not isinstance(reference, vim.HostSystem): + + raise salt.exceptions.ArgumentValueError( + 'Unsupported reference type \'{0}\' when backing disk filter ' + 'is set'.format(reference.__class__.__name__)) + if (not get_all_datastores) and backing_disk_ids: + # At this point we know the reference is a vim.HostSystem + log.debug('Filtering datastores with backing disk ids: {}' + ''.format(backing_disk_ids)) + storage_system = get_storage_system(service_instance, reference, + obj_name) + props = salt.utils.vmware.get_properties_of_managed_object( + storage_system, ['fileSystemVolumeInfo.mountInfo']) + mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) + disk_datastores = [] + # Non vmfs volumes aren't backed by a disk + for vol in [i.volume for i in mount_infos if \ + isinstance(i.volume, vim.HostVmfsVolume)]: + + if not [e for e in vol.extent if e.diskName in backing_disk_ids]: + # Skip volume if it doesn't contain an extent with a + # canonical name of interest + continue + log.debug('Found datastore \'{0}\' for disk id(s) \'{1}\'' + ''.format(vol.name, + [e.diskName for e in vol.extent])) + disk_datastores.append(vol.name) + log.debug('Datastore found for disk filter: {}' + ''.format(disk_datastores)) + if datastore_names: + datastore_names.extend(disk_datastores) + else: + datastore_names = disk_datastores + + if (not get_all_datastores) and (not datastore_names): + log.trace('No datastore to be filtered after retrieving the datastores ' + 'backed by the disk id(s) \'{0}\''.format(backing_disk_ids)) + return [] + + log.trace('datastore_names = {0}'.format(datastore_names)) + + # Use the default traversal spec + 
if isinstance(reference, vim.HostSystem): + # Create a different traversal spec for hosts because it looks like the + # default doesn't retrieve the datastores + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + name='host_datastore_traversal', + path='datastore', + skip=False, + type=vim.HostSystem) + elif isinstance(reference, vim.ClusterComputeResource): + # Traversal spec for clusters + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + name='cluster_datastore_traversal', + path='datastore', + skip=False, + type=vim.ClusterComputeResource) + elif isinstance(reference, vim.Datacenter): + # Traversal spec for clusters + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + name='datacenter_datastore_traversal', + path='datastore', + skip=False, + type=vim.Datacenter) + elif isinstance(reference, vim.Folder) and \ + get_managed_object_name(reference) == 'Datacenters': + # Traversal of root folder (doesn't support multiple levels of Folders) + traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( + path='childEntity', + selectSet = [ + vmodl.query.PropertyCollector.TraversalSpec( + path='datastore', + skip=False, + type=vim.Datacenter)], + skip=False, + type=vim.Folder) + else: + raise salt.exceptions.ArgumentValueError( + 'Unsupported reference type \'{0}\'' + ''.format(reference.__class__.__name__)) + + items = get_mors_with_properties(service_instance, + object_type=vim.Datastore, + property_list=['name'], + container_ref=reference, + traversal_spec=traversal_spec) + log.trace('Retrieved {0} datastores'.format(len(items))) + items = [i for i in items if get_all_datastores or i['name'] in + datastore_names] + log.trace('Filtered datastores: {0}'.format([i['name'] for i in items])) + return [i['object'] for i in items] + + def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system From 51e881e5cfa24d5a56e7c0a383683f68c78355c3 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu 
Date: Fri, 15 Sep 2017 23:19:42 -0400 Subject: [PATCH 572/639] Added tests for salt.utils.vmware.get_datastores --- tests/unit/utils/vmware/test_storage.py | 224 ++++++++++++++++++++++++ 1 file changed, 224 insertions(+) diff --git a/tests/unit/utils/vmware/test_storage.py b/tests/unit/utils/vmware/test_storage.py index 6943247cec..0f405aee75 100644 --- a/tests/unit/utils/vmware/test_storage.py +++ b/tests/unit/utils/vmware/test_storage.py @@ -109,3 +109,227 @@ class GetStorageSystemTestCase(TestCase): res = salt.utils.vmware.get_storage_system(self.mock_si, self.mock_host_ref) self.assertEqual(res, self.mock_obj) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetDatastoresTestCase(TestCase): + '''Tests for salt.utils.vmware.get_datastores''' + + def setUp(self): + self.mock_si = MagicMock() + self.mock_reference = MagicMock(spec=vim.HostSystem) + self.mock_mount_infos = [ + MagicMock(volume=MagicMock(spec=vim.HostVmfsVolume, + extent=[MagicMock( + diskName='fake_disk2')])), + MagicMock(volume=MagicMock(spec=vim.HostVmfsVolume, + extent=[MagicMock( + diskName='fake_disk3')]))] + self.mock_mount_infos[0].volume.name = 'fake_ds2' + self.mock_mount_infos[1].volume.name = 'fake_ds3' + self.mock_entries = [{'name': 'fake_ds1', 'object': MagicMock()}, + {'name': 'fake_ds2', 'object': MagicMock()}, + {'name': 'fake_ds3', 'object': MagicMock()}] + self.mock_storage_system = MagicMock() + self.mock_get_storage_system = MagicMock( + return_value=self.mock_storage_system) + self.mock_get_managed_object_name = MagicMock(return_value='fake_host') + self.mock_traversal_spec = MagicMock() + + patches = ( + ('salt.utils.vmware.get_managed_object_name', + self.mock_get_managed_object_name), + ('salt.utils.vmware.get_storage_system', + self.mock_get_storage_system), + ('salt.utils.vmware.get_properties_of_managed_object', + MagicMock(return_value={'fileSystemVolumeInfo.mountInfo': + self.mock_mount_infos})), + 
('salt.utils.vmware.get_mors_with_properties', + MagicMock(return_value=self.mock_entries)), + ('salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + MagicMock(return_value=self.mock_traversal_spec))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_reference', 'mock_storage_system', + 'mock_get_storage_system', 'mock_mount_infos', + 'mock_entries', 'mock_get_managed_object_name', + 'mock_traversal_spec'): + delattr(self, attr) + + def test_get_reference_name_call(self): + salt.utils.vmware.get_datastores(self.mock_si, + self.mock_reference) + self.mock_get_managed_object_name.assert_called_once_with( + self.mock_reference) + + def test_get_no_datastores(self): + res = salt.utils.vmware.get_datastores(self.mock_si, + self.mock_reference) + self.assertEqual(res, []) + + def test_get_storage_system_call(self): + salt.utils.vmware.get_datastores(self.mock_si, + self.mock_reference, + backing_disk_ids=['fake_disk1']) + self.mock_get_storage_system.assert_called_once_with( + self.mock_si, self.mock_reference, 'fake_host') + + + def test_get_mount_info_call(self): + mock_get_properties_of_managed_object = MagicMock() + with patch('salt.utils.vmware.get_properties_of_managed_object', + mock_get_properties_of_managed_object): + salt.utils.vmware.get_datastores(self.mock_si, + self.mock_reference, + backing_disk_ids=['fake_disk1']) + mock_get_properties_of_managed_object.assert_called_once_with( + self.mock_storage_system, ['fileSystemVolumeInfo.mountInfo']) + + def test_backing_disks_no_mount_info(self): + with patch('salt.utils.vmware.get_properties_of_managed_object', + MagicMock(return_value={})): + res = salt.utils.vmware.get_datastores( + self.mock_si, self.mock_reference, + backing_disk_ids=['fake_disk_id']) + self.assertEqual(res, []) + + def test_host_traversal_spec(self): + # Reference is of type vim.HostSystem + mock_traversal_spec_init = 
MagicMock() + with patch( + 'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + mock_traversal_spec_init): + + salt.utils.vmware.get_datastores( + self.mock_si, + self.mock_reference, + get_all_datastores=True) + mock_traversal_spec_init.assert_called_once_with( + name='host_datastore_traversal', + path='datastore', + skip=False, + type=vim.HostSystem) + + def test_cluster_traversal_spec(self): + mock_traversal_spec_init = MagicMock() + # Reference is of type vim.ClusterComputeResource + mock_reference = MagicMock(spec=vim.ClusterComputeResource) + with patch( + 'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + mock_traversal_spec_init): + + salt.utils.vmware.get_datastores( + self.mock_si, + mock_reference, + get_all_datastores=True) + mock_traversal_spec_init.assert_called_once_with( + name='cluster_datastore_traversal', + path='datastore', + skip=False, + type=vim.ClusterComputeResource) + + def test_datacenter_traversal_spec(self): + mock_traversal_spec_init = MagicMock() + # Reference is of type vim.ClusterComputeResource + mock_reference = MagicMock(spec=vim.Datacenter) + with patch( + 'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + mock_traversal_spec_init): + + salt.utils.vmware.get_datastores( + self.mock_si, + mock_reference, + get_all_datastores=True) + mock_traversal_spec_init.assert_called_once_with( + name='datacenter_datastore_traversal', + path='datastore', + skip=False, + type=vim.Datacenter) + + def test_root_folder_traversal_spec(self): + mock_traversal_spec_init = MagicMock(return_value='traversal') + mock_reference = MagicMock(spec=vim.Folder) + with patch('salt.utils.vmware.get_managed_object_name', + MagicMock(side_effect=['fake_host', 'Datacenters'])): + with patch( + 'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', + mock_traversal_spec_init): + + salt.utils.vmware.get_datastores( + self.mock_si, + mock_reference, + get_all_datastores=True) + + 
mock_traversal_spec_init.assert_called([ + call(path='childEntity', + selectSet=['traversal'], + skip=False, + type=vim.Folder), + call(path='datastore', + skip=False, + type=vim.Datacenter)]) + + def test_unsupported_reference_type(self): + class FakeClass(object): + pass + + mock_reference = MagicMock(spec=FakeClass) + with self.assertRaises(ArgumentValueError) as excinfo: + salt.utils.vmware.get_datastores( + self.mock_si, + mock_reference, + get_all_datastores=True) + self.assertEqual(excinfo.exception.strerror, + 'Unsupported reference type \'FakeClass\'') + + def test_get_mors_with_properties(self): + mock_get_mors_with_properties = MagicMock() + with patch('salt.utils.vmware.get_mors_with_properties', + mock_get_mors_with_properties): + salt.utils.vmware.get_datastores( + self.mock_si, + self.mock_reference, + get_all_datastores=True) + mock_get_mors_with_properties.assert_called_once_with( + self.mock_si, + object_type=vim.Datastore, + property_list=['name'], + container_ref=self.mock_reference, + traversal_spec=self.mock_traversal_spec) + + def test_get_all_datastores(self): + res = salt.utils.vmware.get_datastores(self.mock_si, + self.mock_reference, + get_all_datastores=True) + self.assertEqual(res, [self.mock_entries[0]['object'], + self.mock_entries[1]['object'], + self.mock_entries[2]['object']]) + + def test_get_datastores_filtered_by_name(self): + res = salt.utils.vmware.get_datastores(self.mock_si, + self.mock_reference, + datastore_names=['fake_ds1', + 'fake_ds2']) + self.assertEqual(res, [self.mock_entries[0]['object'], + self.mock_entries[1]['object']]) + + def test_get_datastores_filtered_by_backing_disk(self): + res = salt.utils.vmware.get_datastores( + self.mock_si, self.mock_reference, + backing_disk_ids=['fake_disk2', 'fake_disk3']) + self.assertEqual(res, [self.mock_entries[1]['object'], + self.mock_entries[2]['object']]) + + def test_get_datastores_filtered_by_both_name_and_backing_disk(self): + # Simulate VMware data model for volumes 
fake_ds2, fake_ds3 + res = salt.utils.vmware.get_datastores( + self.mock_si, self.mock_reference, + datastore_names=['fake_ds1'], + backing_disk_ids= ['fake_disk3']) + self.assertEqual(res, [self.mock_entries[0]['object'], + self.mock_entries[2]['object']]) From 792f4e985a2d183d8ddf73a6f84f5f766573fbd9 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 15 Sep 2017 23:20:45 -0400 Subject: [PATCH 573/639] Added salt.utils.vmware.rename_datastore --- salt/utils/vmware.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 981d99efc8..f921c28ca4 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1341,6 +1341,34 @@ def get_datastores(service_instance, reference, datastore_names=None, return [i['object'] for i in items] +def rename_datastore(datastore_ref, new_datastore_name): + ''' + Renames a datastore + + datastore_ref + vim.Datastore reference to the datastore object to be changed + + new_datastore_name + New datastore name + ''' + ds_name = get_managed_object_name(datastore_ref) + log.debug('Renaming datastore \'{0}\' to ' + '\'{1}\''.format(ds_name, new_datastore_name)) + try: + datastore_ref.RenameDatastore(new_datastore_name) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. 
Required privilege: ' + '{}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + + def get_storage_system(service_instance, host_ref, hostname=None): ''' Returns a host's storage system From 643f38c98c60c140e4709a24cc32e3c03c9901ad Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Fri, 15 Sep 2017 23:21:25 -0400 Subject: [PATCH 574/639] Added tests for salt.utils.vmware.rename_datastore --- tests/unit/utils/vmware/test_storage.py | 63 +++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/tests/unit/utils/vmware/test_storage.py b/tests/unit/utils/vmware/test_storage.py index 0f405aee75..0319bb1411 100644 --- a/tests/unit/utils/vmware/test_storage.py +++ b/tests/unit/utils/vmware/test_storage.py @@ -333,3 +333,66 @@ class GetDatastoresTestCase(TestCase): backing_disk_ids= ['fake_disk3']) self.assertEqual(res, [self.mock_entries[0]['object'], self.mock_entries[2]['object']]) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class RenameDatastoreTestCase(TestCase): + '''Tests for salt.utils.vmware.rename_datastore''' + + def setUp(self): + self.mock_ds_ref = MagicMock() + self.mock_get_managed_object_name = MagicMock(return_value='fake_ds') + + patches = ( + ('salt.utils.vmware.get_managed_object_name', + self.mock_get_managed_object_name),) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_ds_ref', 'mock_get_managed_object_name'): + delattr(self, attr) + + def test_datastore_name_call(self): + salt.utils.vmware.rename_datastore(self.mock_ds_ref, + 'fake_new_name') + self.mock_get_managed_object_name.assert_called_once_with( + self.mock_ds_ref) + + def test_rename_datastore_raise_no_permission(self): + exc = 
vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + type(self.mock_ds_ref).RenameDatastore = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.rename_datastore(self.mock_ds_ref, + 'fake_new_name') + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_rename_datastore_raise_vim_fault(self): + exc = vim.VimFault() + exc.msg = 'vim_fault' + type(self.mock_ds_ref).RenameDatastore = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.rename_datastore(self.mock_ds_ref, + 'fake_new_name') + self.assertEqual(excinfo.exception.message, 'vim_fault') + + def test_rename_datastore_raise_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'runtime_fault' + type(self.mock_ds_ref).RenameDatastore = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.vmware.rename_datastore(self.mock_ds_ref, + 'fake_new_name') + self.assertEqual(excinfo.exception.message, 'runtime_fault') + + def test_rename_datastore(self): + ret = salt.utils.vmware.rename_datastore(self.mock_ds_ref, + 'fake_new_name') + self.mock_ds_ref.RenameDatastore.assert_called_once_with( + 'fake_new_name') From 0949e57211eba8c75d0fec58bfd217f171c600eb Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sat, 16 Sep 2017 07:32:21 -0400 Subject: [PATCH 575/639] Added salt.modules.vspere.list_datastores_via_proxy that returns a list of datastore representations - can filter on datastore name, backing disk id, backing_disk_scsi_addresses (the last two only apply to VMFS datastore and if the proxy ``esxi`` - pointing to an an ESXi host) --- salt/modules/vsphere.py | 85 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index c81b71bdc0..1055aceebf 100644 --- a/salt/modules/vsphere.py +++ 
b/salt/modules/vsphere.py @@ -4137,6 +4137,91 @@ def update_cluster(cluster_dict, datacenter=None, cluster=None, return {'update_cluster': True} +@depends(HAS_PYVMOMI) +@supports_proxies('esxi', 'esxcluster', 'esxdatacenter') +@gets_service_instance_via_proxy +def list_datastores_via_proxy(datastore_names=None, backing_disk_ids=None, + backing_disk_scsi_addresses=None, + service_instance=None): + ''' + Returns a list of dict representations of the datastores visible to the + proxy object. The list of datastores can be filtered by datastore names, + backing disk ids (canonical names) or backing disk scsi addresses. + + Supported proxy types: esxi, esxcluster, esxdatacenter + + datastore_names + List of the names of datastores to filter on + + backing_disk_ids + List of canonical names of the backing disks of the datastores to filer. + Default is None. + + backing_disk_scsi_addresses + List of scsi addresses of the backing disks of the datastores to filter. + Default is None. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. 
code-block:: bash + + salt '*' vsphere.list_datastores_via_proxy + + salt '*' vsphere.list_datastores_via_proxy datastore_names=[ds1, ds2] + ''' + target = _get_proxy_target(service_instance) + target_name = salt.utils.vmware.get_managed_object_name(target) + log.trace('target name = {}'.format(target_name)) + + # Default to getting all disks if no filtering is done + get_all_datastores = True if \ + not (datastore_names or backing_disk_ids or + backing_disk_scsi_addresses) else False + # Get the ids of the disks with the scsi addresses + if backing_disk_scsi_addresses: + log.debug('Retrieving disk ids for scsi addresses ' + '\'{0}\''.format(backing_disk_scsi_addresses)) + disk_ids = [d.canonicalName for d in + salt.utils.vmware.get_disks( + target, scsi_addresses=backing_disk_scsi_addresses)] + log.debug('Found disk ids \'{}\''.format(disk_ids)) + backing_disk_ids = backing_disk_ids.extend(disk_ids) if \ + backing_disk_ids else disk_ids + datastores = salt.utils.vmware.get_datastores(service_instance, + target, + datastore_names, + backing_disk_ids, + get_all_datastores) + + # Search for disk backed datastores if target is host + # to be able to add the backing_disk_ids + mount_infos = [] + if isinstance(target, vim.HostSystem): + storage_system = salt.utils.vmware.get_storage_system( + service_instance, target, target_name) + props = salt.utils.vmware.get_properties_of_managed_object( + storage_system, ['fileSystemVolumeInfo.mountInfo']) + mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) + ret_dict = [] + for ds in datastores: + ds_dict = {'name': ds.name, + 'type': ds.summary.type, + 'free_space': ds.summary.freeSpace, + 'capacity': ds.summary.capacity} + backing_disk_ids = [] + for vol in [i.volume for i in mount_infos if \ + i.volume.name == ds.name and \ + isinstance(i.volume, vim.HostVmfsVolume)]: + + backing_disk_ids.extend([e.diskName for e in vol.extent]) + if backing_disk_ids: + ds_dict['backing_disk_ids'] = backing_disk_ids + 
ret_dict.append(ds_dict) + return ret_dict + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From b027579e06269645446e9c9a28103f08dc95e353 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sat, 16 Sep 2017 08:10:42 -0400 Subject: [PATCH 576/639] Added salt.modules.vspere.rename_datastore that renames a datastore --- salt/modules/vsphere.py | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 1055aceebf..3f103c35c2 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -178,7 +178,7 @@ import salt.utils.path import salt.utils.vmware import salt.utils.vsan from salt.exceptions import CommandExecutionError, VMwareSaltError, \ - ArgumentValueError, InvalidConfigError + ArgumentValueError, InvalidConfigError, VMwareObjectRetrievalError from salt.utils.decorators import depends, ignores_kwargs from salt.config.schemas.esxcluster import ESXClusterConfigSchema @@ -4222,6 +4222,44 @@ def list_datastores_via_proxy(datastore_names=None, backing_disk_ids=None, return ret_dict +@depends(HAS_PYVMOMI) +@supports_proxies('esxi', 'esxcluster', 'esxdatacenter') +@gets_service_instance_via_proxy +def rename_datastore(datastore_name, new_datastore_name, + service_instance=None): + ''' + Renames a datastore. The datastore needs to be visible to the proxy. + + datastore_name + Current datastore name. + + new_datastore_name + New datastore name. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. 
code-block:: bash + + salt '*' vsphere.rename_datastore old_name new_name + ''' + # Argument validation + log.trace('Renaming datastore {0} to {1}' + ''.format(datastore_name, new_datastore_name)) + target = _get_proxy_target(service_instance) + datastores = salt.utils.vmware.get_datastores( + service_instance, + target, + datastore_names=[datastore_name]) + if not datastores: + raise VMwareObjectRetrievalError('Datastore \'{0}\' was not found' + ''.format(datastore_name)) + ds = datastores[0] + salt.utils.vmware.rename_datastore(ds, new_datastore_name) + return True + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From 71b10b81f9779eb151a7519265c0f516d8d55164 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sat, 16 Sep 2017 09:15:00 -0400 Subject: [PATCH 577/639] Added tests for salt.modules.vspere.rename_datastore --- tests/unit/modules/test_vsphere.py | 83 +++++++++++++++++++++++++++++- 1 file changed, 82 insertions(+), 1 deletion(-) diff --git a/tests/unit/modules/test_vsphere.py b/tests/unit/modules/test_vsphere.py index ce9f813094..56669b900e 100644 --- a/tests/unit/modules/test_vsphere.py +++ b/tests/unit/modules/test_vsphere.py @@ -12,7 +12,7 @@ from __future__ import absolute_import # Import Salt Libs import salt.modules.vsphere as vsphere from salt.exceptions import CommandExecutionError, VMwareSaltError, \ - ArgumentValueError + ArgumentValueError, VMwareObjectRetrievalError # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin @@ -1243,6 +1243,87 @@ class ListClusterTestCase(TestCase, LoaderModuleMockMixin): self.mock__get_cluster_dict.assert_called_once_with('cl', self.mock_cl) +@skipIf(NO_MOCK, NO_MOCK_REASON) +class RenameDatastoreTestCase(TestCase, LoaderModuleMockMixin): + '''Tests for salt.modules.vsphere.rename_datastore''' + def setup_loader_modules(self): + return { + vsphere: { + '__virtual__': 
MagicMock(return_value='vsphere'), + '_get_proxy_connection_details': MagicMock(), + 'get_proxy_type': MagicMock(return_value='esxdatacenter') + } + } + + def setUp(self): + self.mock_si = MagicMock() + self.mock_target = MagicMock() + self.mock_ds_ref = MagicMock() + self.mock_get_datastores = MagicMock(return_value=[self.mock_ds_ref]) + self.mock_rename_datastore = MagicMock() + patches = ( + ('salt.utils.vmware.get_service_instance', + MagicMock(return_value=self.mock_si)), + ('salt.modules.vsphere._get_proxy_target', + MagicMock(return_value=self.mock_target)), + ('salt.utils.vmware.get_datastores', + self.mock_get_datastores), + ('salt.utils.vmware.rename_datastore', + self.mock_rename_datastore)) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_target', 'mock_ds_ref', + 'mock_get_datastores', 'mock_rename_datastore'): + delattr(self, attr) + + def test_supported_proxes(self): + supported_proxies = ['esxi', 'esxcluster', 'esxdatacenter'] + for proxy_type in supported_proxies: + with patch('salt.modules.vsphere.get_proxy_type', + MagicMock(return_value=proxy_type)): + vsphere.rename_datastore('current_ds_name', 'new_ds_name') + + def test_default_service_instance(self): + mock__get_proxy_target = MagicMock() + with patch('salt.modules.vsphere._get_proxy_target', + mock__get_proxy_target): + vsphere.rename_datastore('current_ds_name', 'new_ds_name') + mock__get_proxy_target.assert_called_once_with(self.mock_si) + + def test_defined_service_instance(self): + mock_si = MagicMock() + mock__get_proxy_target = MagicMock() + with patch('salt.modules.vsphere._get_proxy_target', + mock__get_proxy_target): + vsphere.rename_datastore('current_ds_name', 'new_ds_name', + service_instance=mock_si) + + mock__get_proxy_target.assert_called_once_with(mock_si) + + def test_get_datastore_call(self): + vsphere.rename_datastore('current_ds_name', 'new_ds_name') + 
self.mock_get_datastores.assert_called_once_with( + self.mock_si, self.mock_target, + datastore_names=['current_ds_name']) + + def test_get_no_datastores(self): + with patch('salt.utils.vmware.get_datastores', + MagicMock(return_value=[])): + with self.assertRaises(VMwareObjectRetrievalError) as excinfo: + vsphere.rename_datastore('current_ds_name', 'new_ds_name') + self.assertEqual(excinfo.exception.strerror, + 'Datastore \'current_ds_name\' was not found') + + def test_rename_datastore_call(self): + vsphere.rename_datastore('current_ds_name', 'new_ds_name') + self.mock_rename_datastore.assert_called_once_with( + self.mock_ds_ref, 'new_ds_name') + + @skipIf(NO_MOCK, NO_MOCK_REASON) class _GetProxyTargetTestCase(TestCase, LoaderModuleMockMixin): '''Tests for salt.modules.vsphere._get_proxy_target''' From 693d23421dd2c1d3c22856ca96ef2fccc960054e Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sat, 16 Sep 2017 12:12:26 -0400 Subject: [PATCH 578/639] Added ESXClusterEntitySchema and LicenseSchema JSON schemas --- salt/config/schemas/esxcluster.py | 41 +++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/salt/config/schemas/esxcluster.py b/salt/config/schemas/esxcluster.py index 5e96c5ef01..bb69547c8b 100644 --- a/salt/config/schemas/esxcluster.py +++ b/salt/config/schemas/esxcluster.py @@ -16,6 +16,7 @@ from __future__ import absolute_import from salt.utils.schema import (Schema, DefinitionsSchema, ComplexSchemaItem, + DictItem, ArrayItem, IntegerItem, BooleanItem, @@ -149,6 +150,46 @@ class ESXClusterConfigSchema(DefinitionsSchema): vm_swap_placement = StringItem(title='VM Swap Placement') +class ESXClusterEntitySchema(Schema): + '''Schema of the ESX cluster entity''' + + title = 'ESX Cluster Entity Schema' + description = 'ESX cluster entity schema' + + type = StringItem(title='Type', + description='Specifies the entity type', + required=True, + enum=['cluster']) + + datacenter = StringItem(title='Datacenter', + description='Specifies 
the cluster datacenter', + required=True, + pattern='\w+') + + cluster = StringItem(title='Cluster', + description='Specifies the cluster name', + required=True, + pattern='\w+') + + +class LicenseSchema(Schema): + ''' + Schema item of the ESX cluster vSAN configuration + ''' + + title = 'Licenses schema' + description = 'License configuration schema' + + licenses = DictItem( + title='Licenses', + description='Dictionary containing the license name to key mapping', + required=True, + additional_properties=StringItem( + title='License Key', + description='Specifies the license key', + pattern='^(\w{5}-\w{5}-\w{5}-\w{5}-\w{5})$')) + + class EsxclusterProxySchema(Schema): ''' Schema of the esxcluster proxy input From 4394601ca1e7075d4a5ef62694093bedb10afc42 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sat, 16 Sep 2017 12:22:07 -0400 Subject: [PATCH 579/639] Added VCenterEntitySchema JSON schema --- salt/config/schemas/vcenter.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 salt/config/schemas/vcenter.py diff --git a/salt/config/schemas/vcenter.py b/salt/config/schemas/vcenter.py new file mode 100644 index 0000000000..4867923f27 --- /dev/null +++ b/salt/config/schemas/vcenter.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Rod McKenzie (roderick.mckenzie@morganstanley.com)` + :codeauthor: :email:`Alexandru Bleotu (alexandru.bleotu@morganstanley.com)` + + salt.config.schemas.vcenter + ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + VCenter configuration schemas +''' + +# Import Python libs +from __future__ import absolute_import + +# Import Salt libs +from salt.utils.schema import (Schema, + StringItem) + + +class VCenterEntitySchema(Schema): + ''' + Entity Schema for a VCenter. 
+ ''' + title = 'VCenter Entity Schema' + description = 'VCenter entity schema' + type = StringItem(title='Type', + description='Specifies the entity type', + required=True, + enum=['vcenter']) + + vcenter = StringItem(title='vCenter', + description='Specifies the vcenter hostname', + required=True) From ee47cd0c00a442dce28c0eb52f12259696d6f87c Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 19:43:20 -0400 Subject: [PATCH 580/639] Added salt.utils.vmware.get_license_manager --- salt/utils/vmware.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index f921c28ca4..2774915be1 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1005,6 +1005,31 @@ def list_objects(service_instance, vim_object, properties=None): return items +def get_license_manager(service_instance): + ''' + Returns the license manager. + + service_instance + The Service Instance Object from which to obrain the license manager. + ''' + + log.debug('Retrieving license manager') + try: + lic_manager = service_instance.content.licenseManager + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + return lic_manager + + def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. 
From 9f566fbe9c5d4fcb6832f6fcdb9073be16ae9086 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 19:44:33 -0400 Subject: [PATCH 581/639] Added tests for salt.utils.vmware.get_license_manager --- tests/unit/utils/vmware/test_license.py | 80 +++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 tests/unit/utils/vmware/test_license.py diff --git a/tests/unit/utils/vmware/test_license.py b/tests/unit/utils/vmware/test_license.py new file mode 100644 index 0000000000..5d6c963813 --- /dev/null +++ b/tests/unit/utils/vmware/test_license.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- +''' + :codeauthor: :email:`Alexandru Bleotu ` + + Tests for license related functions in salt.utils.vmware +''' + +# Import python libraries +from __future__ import absolute_import +import logging + +# Import Salt testing libraries +from tests.support.unit import TestCase, skipIf +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call, \ + PropertyMock + + +# Import Salt libraries +import salt.utils.vmware +from salt.exceptions import VMwareObjectRetrievalError, VMwareApiError, \ + VMwareRuntimeError + +# Import Third Party Libs +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + +# Get Logging Started +log = logging.getLogger(__name__) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetLicenseManagerTestCase(TestCase): + '''Tests for salt.utils.vmware.get_license_manager''' + + def setUp(self): + self.mock_si = MagicMock() + self.mock_lic_mgr = MagicMock() + type(self.mock_si.content).licenseManager = PropertyMock( + return_value=self.mock_lic_mgr) + + def tearDown(self): + for attr in ('mock_si', 'mock_lic_mgr'): + delattr(self, attr) + + def test_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + type(self.mock_si.content).licenseManager = PropertyMock( + 
side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.get_license_manager(self.mock_si) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_raise_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + type(self.mock_si.content).licenseManager = PropertyMock( + side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.get_license_manager(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_raise_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + type(self.mock_si.content).licenseManager = PropertyMock( + side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.vmware.get_license_manager(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_valid_assignment_manager(self): + ret = salt.utils.vmware.get_license_manager(self.mock_si) + self.assertEqual(ret, self.mock_lic_mgr) From 1d57000f18cc58e27c93f70321ad0b98573e0bf7 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 19:46:03 -0400 Subject: [PATCH 582/639] Added salt.utils.vmware.get_license_assignment_manager --- salt/utils/vmware.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 2774915be1..9695bbfe65 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1030,6 +1030,35 @@ def get_license_manager(service_instance): return lic_manager +def get_license_assignment_manager(service_instance): + ''' + Returns the license assignment manager. + + service_instance + The Service Instance Object from which to obrain the license manager. 
+ ''' + + log.debug('Retrieving license assignment manager') + try: + lic_assignment_manager = \ + service_instance.content.licenseManager.licenseAssignmentManager + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + if not lic_assignment_manager: + raise salt.exceptions.VMwareObjectRetrievalError( + 'License assignment manager was not retrieved') + return lic_assignment_manager + + def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. From e844dbceeb889399fdad773407a6a70615042eb9 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 19:47:18 -0400 Subject: [PATCH 583/639] Added tests for salt.utils.vmware.get_license_assignment_manager --- tests/unit/utils/vmware/test_license.py | 57 +++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/tests/unit/utils/vmware/test_license.py b/tests/unit/utils/vmware/test_license.py index 5d6c963813..da5691892c 100644 --- a/tests/unit/utils/vmware/test_license.py +++ b/tests/unit/utils/vmware/test_license.py @@ -78,3 +78,60 @@ class GetLicenseManagerTestCase(TestCase): def test_valid_assignment_manager(self): ret = salt.utils.vmware.get_license_manager(self.mock_si) self.assertEqual(ret, self.mock_lic_mgr) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetLicenseAssignmentManagerTestCase(TestCase): + '''Tests for salt.utils.vmware.get_license_assignment_manager''' + + def setUp(self): + self.mock_si = MagicMock() + self.mock_lic_assign_mgr = MagicMock() + type(self.mock_si.content.licenseManager).licenseAssignmentManager = \ + 
PropertyMock(return_value=self.mock_lic_assign_mgr) + + def tearDown(self): + for attr in ('mock_si', 'mock_lic_assign_mgr'): + delattr(self, attr) + + def test_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + type(self.mock_si.content.licenseManager).licenseAssignmentManager = \ + PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.get_license_assignment_manager(self.mock_si) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_raise_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + type(self.mock_si.content.licenseManager).licenseAssignmentManager = \ + PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.get_license_assignment_manager(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_raise_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + type(self.mock_si.content.licenseManager).licenseAssignmentManager = \ + PropertyMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.vmware.get_license_assignment_manager(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_empty_license_assignment_manager(self): + type(self.mock_si.content.licenseManager).licenseAssignmentManager = \ + PropertyMock(return_value=None) + with self.assertRaises(VMwareObjectRetrievalError) as excinfo: + salt.utils.vmware.get_license_assignment_manager(self.mock_si) + self.assertEqual(excinfo.exception.strerror, + 'License assignment manager was not retrieved') + + def test_valid_assignment_manager(self): + ret = salt.utils.vmware.get_license_assignment_manager(self.mock_si) + self.assertEqual(ret, self.mock_lic_assign_mgr) From 67ef6adcb98791537c9be420ed3891a7f0232611 Mon Sep 17 00:00:00 2001 From: 
Alexandru Bleotu Date: Sun, 17 Sep 2017 19:48:40 -0400 Subject: [PATCH 584/639] Added salt.utils.vmware.get_licenses --- salt/utils/vmware.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 9695bbfe65..0e3ef6e868 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1059,6 +1059,36 @@ def get_license_assignment_manager(service_instance): return lic_assignment_manager +def get_licenses(service_instance, license_manager=None): + ''' + Returns the licenses on a specific instance. + + service_instance + The Service Instance Object from which to obrain the licenses. + + license_manager + The License Manager object of the service instance. If not provided it + will be retrieved. + ''' + + if not license_manager: + license_manager = get_license_manager(service_instance) + log.debug('Retrieving licenses') + try: + return license_manager.licenses + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + + def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. 
From e660af495bcabf9222be443cb6afff34c03818a9 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 19:50:07 -0400 Subject: [PATCH 585/639] Added tests for salt.utils.vmware.get_licenses --- tests/unit/utils/vmware/test_license.py | 73 +++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/tests/unit/utils/vmware/test_license.py b/tests/unit/utils/vmware/test_license.py index da5691892c..91158bb547 100644 --- a/tests/unit/utils/vmware/test_license.py +++ b/tests/unit/utils/vmware/test_license.py @@ -135,3 +135,76 @@ class GetLicenseAssignmentManagerTestCase(TestCase): def test_valid_assignment_manager(self): ret = salt.utils.vmware.get_license_assignment_manager(self.mock_si) self.assertEqual(ret, self.mock_lic_assign_mgr) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetLicensesTestCase(TestCase): + '''Tests for salt.utils.vmware.get_licenses''' + + def setUp(self): + self.mock_si = MagicMock() + self.mock_licenses = [MagicMock(), MagicMock()] + self.mock_lic_mgr = MagicMock() + type(self.mock_lic_mgr).licenses = \ + PropertyMock(return_value=self.mock_licenses) + patches = ( + ('salt.utils.vmware.get_license_manager', + MagicMock(return_value=self.mock_lic_mgr)),) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_lic_mgr', 'mock_licenses'): + delattr(self, attr) + + def test_no_license_manager_passed_in(self): + mock_get_license_manager = MagicMock() + with patch('salt.utils.vmware.get_license_manager', + mock_get_license_manager): + salt.utils.vmware.get_licenses(self.mock_si) + mock_get_license_manager.assert_called_once_with(self.mock_si) + + def test_license_manager_passed_in(self): + mock_licenses = PropertyMock() + mock_lic_mgr = MagicMock() + type(mock_lic_mgr).licenses = mock_licenses + mock_get_license_manager = MagicMock() + with 
patch('salt.utils.vmware.get_license_manager', + mock_get_license_manager): + salt.utils.vmware.get_licenses(self.mock_si, + license_manager=mock_lic_mgr) + self.assertEqual(mock_get_license_manager.call_count, 0) + self.assertEqual(mock_licenses.call_count, 1) + + def test_raise_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + type(self.mock_lic_mgr).licenses = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.get_licenses(self.mock_si) + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_raise_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + type(self.mock_lic_mgr).licenses = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.get_licenses(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_raise_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + type(self.mock_lic_mgr).licenses = PropertyMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.vmware.get_licenses(self.mock_si) + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_valid_licenses(self): + ret = salt.utils.vmware.get_licenses(self.mock_si) + self.assertEqual(ret, self.mock_licenses) From 3e2ff2ad86ae8ddb5f130a2b862dda2394fad3a2 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 19:51:38 -0400 Subject: [PATCH 586/639] Added salt.utils.vmware.add_license --- salt/utils/vmware.py | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 0e3ef6e868..8449dc9731 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1089,6 +1089,45 @@ def get_licenses(service_instance, license_manager=None): raise 
salt.exceptions.VMwareRuntimeError(exc.msg) +def add_license(service_instance, key, description, license_manager=None): + ''' + Adds a license. + + service_instance + The Service Instance Object. + + key + The key of the license to add. + + description + The description of the license to add. + + license_manager + The License Manager object of the service instance. If not provided it + will be retrieved. + ''' + if not license_manager: + license_manager = get_license_manager(service_instance) + label = vim.KeyValue() + label.key='VpxClientLicenseLabel' + label.value=description + log.debug('Adding license \'{}\''.format(description)) + try: + license = license_manager.AddLicense(key, [label]) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + return license + + def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. 
From ca33adcee7a39c90a1fbe9217d57e3567b1917a5 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 19:53:03 -0400 Subject: [PATCH 587/639] Added tests for salt.utils.vmware.add_license --- tests/unit/utils/vmware/test_license.py | 99 +++++++++++++++++++++++++ 1 file changed, 99 insertions(+) diff --git a/tests/unit/utils/vmware/test_license.py b/tests/unit/utils/vmware/test_license.py index 91158bb547..118cc53f8e 100644 --- a/tests/unit/utils/vmware/test_license.py +++ b/tests/unit/utils/vmware/test_license.py @@ -208,3 +208,102 @@ class GetLicensesTestCase(TestCase): def test_valid_licenses(self): ret = salt.utils.vmware.get_licenses(self.mock_si) self.assertEqual(ret, self.mock_licenses) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class AddLicenseTestCase(TestCase): + '''Tests for salt.utils.vmware.add_license''' + + def setUp(self): + self.mock_si = MagicMock() + self.mock_license = MagicMock() + self.mock_add_license = MagicMock(return_value=self.mock_license) + self.mock_lic_mgr = MagicMock(AddLicense=self.mock_add_license) + self.mock_label = MagicMock() + patches = ( + ('salt.utils.vmware.get_license_manager', + MagicMock(return_value=self.mock_lic_mgr)), + ('salt.utils.vmware.vim.KeyValue', + MagicMock(return_value=self.mock_label))) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_si', 'mock_lic_mgr', 'mock_license', + 'mock_add_license', 'mock_label'): + delattr(self, attr) + + def test_no_license_manager_passed_in(self): + mock_get_license_manager = MagicMock() + with patch('salt.utils.vmware.get_license_manager', + mock_get_license_manager): + salt.utils.vmware.add_license(self.mock_si, + 'fake_license_key', + 'fake_license_description') + mock_get_license_manager.assert_called_once_with(self.mock_si) + + def test_license_manager_passed_in(self): + mock_get_license_manager = 
MagicMock() + with patch('salt.utils.vmware.get_license_manager', + mock_get_license_manager): + salt.utils.vmware.add_license(self.mock_si, + 'fake_license_key', + 'fake_license_description', + license_manager=self.mock_lic_mgr) + self.assertEqual(mock_get_license_manager.call_count, 0) + self.assertEqual(self.mock_add_license.call_count, 1) + + def test_label_settings(self): + salt.utils.vmware.add_license(self.mock_si, + 'fake_license_key', + 'fake_license_description') + self.assertEqual(self.mock_label.key, 'VpxClientLicenseLabel') + self.assertEqual(self.mock_label.value, 'fake_license_description') + + def test_add_license_arguments(self): + salt.utils.vmware.add_license(self.mock_si, + 'fake_license_key', + 'fake_license_description') + self.mock_add_license.assert_called_once_with('fake_license_key', + [self.mock_label]) + + def test_add_license_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_lic_mgr.AddLicense = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.add_license(self.mock_si, + 'fake_license_key', + 'fake_license_description') + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_add_license_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_lic_mgr.AddLicense = MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.add_license(self.mock_si, + 'fake_license_key', + 'fake_license_description') + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_add_license_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_lic_mgr.AddLicense = MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.vmware.add_license(self.mock_si, + 'fake_license_key', + 'fake_license_description') + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_valid_license_added(self): + ret = salt.utils.vmware.add_license(self.mock_si, + 'fake_license_key', + 'fake_license_description') + self.assertEqual(ret, self.mock_license) From 5f268fc5d9530c43327d00984dc17e7ac092222e Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 19:54:19 -0400 Subject: [PATCH 588/639] Added salt.utils.vmware.get_assigned_licenses --- salt/utils/vmware.py | 86 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 8449dc9731..8597d68172 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1128,6 +1128,92 @@ def add_license(service_instance, key, description, license_manager=None): return license +def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None, + license_assignment_manager=None): + ''' + Returns the licenses assigned to an entity. If entity ref is not provided, + then entity_name is assumed to be the vcenter. This is later checked if + the entity name is provided. + + service_instance + The Service Instance Object from which to obtain the licenses. 
+ + entity_ref + VMware entity to get the assigned licenses for. + If None, the entity is the vCenter itself. + Default is None. + + entity_name + Entity name used in logging. + Default is None. + + license_assignment_manager + The LicenseAssignmentManager object of the service instance. + If not provided it will be retrieved. + Default is None. + ''' + if not license_assignment_manager: + license_assignment_manager = \ + get_license_assignment_manager(service_instance) + if not entity_name: + raise salt.exceptions.ArgumentValueError('No entity_name passed') + # If entity_ref is not defined, then interested in the vcenter + entity_id = None + entity_type = 'moid' + check_name = False + if not entity_ref: + if entity_name: + check_name = True + entity_type = 'uuid' + try: + entity_id = service_instance.content.about.instanceUuid + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + else: + entity_id = entity_ref._moId + + log.trace('Retrieving licenses assigned to \'{0}\''.format(entity_name)) + try: + assignments = \ + license_assignment_manager.QueryAssignedLicenses(entity_id) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. 
Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + + if entity_type == 'uuid' and len(assignments) > 1: + log.trace('Unexpectectedly retrieved more than one' + ' VCenter license assignment.') + raise salt.exceptions.VMwareObjectRetrievalError( + 'Unexpected return. Expect only a single assignment') + + if check_name == True: + if entity_name != assignments[0].entityDisplayName: + log.trace('Getting license info for wrong vcenter: ' + '{0} != {1}'.format(entity_name, + assignments[0].entityDisplayName)) + raise salt.exceptions.VMwareObjectRetrievalError( + 'Got license assignment info for a different vcenter') + + return [a.assignedLicense for a in assignments] + + def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. 
From e0ae5ecd83b984ac099af4c521ec48270f944b75 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 19:55:21 -0400 Subject: [PATCH 589/639] Added tests for salt.utils.vmware.get_assigned_licenses --- tests/unit/utils/vmware/test_license.py | 177 ++++++++++++++++++++++++ 1 file changed, 177 insertions(+) diff --git a/tests/unit/utils/vmware/test_license.py b/tests/unit/utils/vmware/test_license.py index 118cc53f8e..4722a88335 100644 --- a/tests/unit/utils/vmware/test_license.py +++ b/tests/unit/utils/vmware/test_license.py @@ -307,3 +307,180 @@ class AddLicenseTestCase(TestCase): 'fake_license_key', 'fake_license_description') self.assertEqual(ret, self.mock_license) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class GetAssignedLicensesTestCase(TestCase): + '''Tests for salt.utils.vmware.get_assigned_licenses''' + + def setUp(self): + self.mock_ent_id = MagicMock() + self.mock_si = MagicMock() + type(self.mock_si.content.about).instanceUuid = \ + PropertyMock(return_value=self.mock_ent_id) + self.mock_moid = MagicMock() + self.prop_mock_moid = PropertyMock(return_value=self.mock_moid) + self.mock_entity_ref = MagicMock() + type(self.mock_entity_ref)._moId = self.prop_mock_moid + self.mock_assignments = [MagicMock(entityDisplayName='fake_ent1'), + MagicMock(entityDisplayName='fake_ent2')] + self.mock_query_assigned_licenses = MagicMock( + return_value=[MagicMock(assignedLicense=self.mock_assignments[0]), + MagicMock(assignedLicense=self.mock_assignments[1])]) + self.mock_lic_assign_mgr = MagicMock( + QueryAssignedLicenses=self.mock_query_assigned_licenses) + patches = ( + ('salt.utils.vmware.get_license_assignment_manager', + MagicMock(return_value=self.mock_lic_assign_mgr)),) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def tearDown(self): + for attr in ('mock_ent_id', 'mock_si', 'mock_moid', 'prop_mock_moid', + 
'mock_entity_ref', 'mock_assignments', + 'mock_query_assigned_licenses', 'mock_lic_assign_mgr'): + delattr(self, attr) + + def test_no_license_assignment_manager_passed_in(self): + mock_get_license_assign_manager = MagicMock() + with patch('salt.utils.vmware.get_license_assignment_manager', + mock_get_license_assign_manager): + salt.utils.vmware.get_assigned_licenses(self.mock_si, + self.mock_entity_ref, + 'fake_entity_name') + mock_get_license_assign_manager.assert_called_once_with(self.mock_si) + + def test_license_assignment_manager_passed_in(self): + mock_get_license_assign_manager = MagicMock() + with patch('salt.utils.vmware.get_license_assignment_manager', + mock_get_license_assign_manager): + salt.utils.vmware.get_assigned_licenses( + self.mock_si, self.mock_entity_ref, 'fake_entity_name', + license_assignment_manager=self.mock_lic_assign_mgr) + self.assertEqual(mock_get_license_assign_manager.call_count, 0) + + def test_entity_name(self): + mock_trace = MagicMock() + with patch('salt.log.setup.SaltLoggingClass.trace', mock_trace): + salt.utils.vmware.get_assigned_licenses(self.mock_si, + self.mock_entity_ref, + 'fake_entity_name') + mock_trace.assert_called_once_with('Retrieving licenses assigned to ' + '\'fake_entity_name\'') + + def test_instance_uuid(self): + mock_instance_uuid_prop = PropertyMock() + type(self.mock_si.content.about).instanceUuid = mock_instance_uuid_prop + self.mock_lic_assign_mgr.QueryAssignedLicenses = MagicMock( + return_value=[MagicMock(entityDisplayName='fake_vcenter')]) + salt.utils.vmware.get_assigned_licenses(self.mock_si, + entity_name='fake_vcenter') + self.assertEqual(mock_instance_uuid_prop.call_count, 1) + + def test_instance_uuid_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + type(self.mock_si.content.about).instanceUuid = \ + PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.get_assigned_licenses(self.mock_si, + 
entity_name='fake_vcenter') + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. Required privilege: ' + 'Fake privilege') + + def test_instance_uuid_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + type(self.mock_si.content.about).instanceUuid = \ + PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.get_assigned_licenses(self.mock_si, + entity_name='fake_vcenter') + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_instance_uuid_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + type(self.mock_si.content.about).instanceUuid = \ + PropertyMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.vmware.get_assigned_licenses(self.mock_si, + entity_name='fake_vcenter') + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_vcenter_entity_too_many_assignements(self): + self.mock_lic_assign_mgr.QueryAssignedLicenses = MagicMock( + return_value=[MagicMock(), MagicMock()]) + with self.assertRaises(VMwareObjectRetrievalError) as excinfo: + salt.utils.vmware.get_assigned_licenses(self.mock_si, + entity_name='fake_vcenter') + self.assertEqual(excinfo.exception.strerror, + 'Unexpected return. 
Expect only a single assignment') + + def test_wrong_vcenter_name(self): + self.mock_lic_assign_mgr.QueryAssignedLicenses = MagicMock( + return_value=[MagicMock(entityDisplayName='bad_vcenter')]) + with self.assertRaises(VMwareObjectRetrievalError) as excinfo: + salt.utils.vmware.get_assigned_licenses(self.mock_si, + entity_name='fake_vcenter') + self.assertEqual(excinfo.exception.strerror, + 'Got license assignment info for a different vcenter') + + def test_query_assigned_licenses_vcenter(self): + with self.assertRaises(VMwareObjectRetrievalError) as excinfo: + salt.utils.vmware.get_assigned_licenses(self.mock_si, + entity_name='fake_vcenter') + self.mock_query_assigned_licenses.assert_called_once_with( + self.mock_ent_id) + + def test_query_assigned_licenses_with_entity(self): + salt.utils.vmware.get_assigned_licenses(self.mock_si, + self.mock_entity_ref, + 'fake_entity_name') + self.mock_query_assigned_licenses.assert_called_once_with( + self.mock_moid) + + def test_query_assigned_licenses_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_lic_assign_mgr.QueryAssignedLicenses = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.get_assigned_licenses(self.mock_si, + self.mock_entity_ref, + 'fake_entity_name') + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_query_assigned_licenses_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_lic_assign_mgr.QueryAssignedLicenses = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.get_assigned_licenses(self.mock_si, + self.mock_entity_ref, + 'fake_entity_name') + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_query_assigned_licenses_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_lic_assign_mgr.QueryAssignedLicenses = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.vmware.get_assigned_licenses(self.mock_si, + self.mock_entity_ref, + 'fake_entity_name') + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_valid_assignments(self): + ret = salt.utils.vmware.get_assigned_licenses(self.mock_si, + self.mock_entity_ref, + 'fake_entity_name') + self.assertEqual(ret, self.mock_assignments) From 5c953af15b5d5cfde9aa1179a7b5a0724928e4ee Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 19:56:17 -0400 Subject: [PATCH 590/639] Added salt.utils.vmware.assign_license --- salt/utils/vmware.py | 72 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index 8597d68172..fc09c4478d 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1214,6 +1214,78 @@ def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None, return [a.assignedLicense for a in assignments] +def assign_license(service_instance, license_key, license_name, + entity_ref=None, entity_name=None, + license_assignment_manager=None): + ''' + Assigns a license to an entity. + + service_instance + The Service Instance Object from which to obrain the licenses. + + license_key + The key of the license to add. 
+ + license_name + The description of the license to add. + + entity_ref + VMware entity to assign the license to. + If None, the entity is the vCenter itself. + Default is None. + + entity_name + Entity name used in logging. + Default is None. + + license_assignment_manager + The LicenseAssignmentManager object of the service instance. + If not provided it will be retrieved + Default is None. + ''' + if not license_assignment_manager: + license_assignment_manager = \ + get_license_assignment_manager(service_instance) + entity_id = None + + if not entity_ref: + # vcenter + try: + entity_id = service_instance.content.about.instanceUuid + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + raise salt.exceptions.VMwareRuntimeError(exc.msg) + if not entity_name: + entity_name = 'vCenter' + else: + # e.g. vsan cluster or host + entity_id = entity_ref._moId + + log.trace('Assigning license to \'{0}\''.format(entity_name)) + try: + license = license_assignment_manager.UpdateAssignedLicense( + entity_id, + license_key) + except vim.fault.NoPermission as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError( + 'Not enough permissions. Required privilege: ' + '{0}'.format(exc.privilegeId)) + except vim.fault.VimFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareApiError(exc.msg) + except vmodl.RuntimeFault as exc: + log.exception(exc) + raise salt.exceptions.VMwareRuntimeError(exc.msg) + return license + + def list_datacenters(service_instance): ''' Returns a list of datacenters associated with a given service instance. 
From 14deb253791ae0143fb49581bef843f5226cd83a Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 19:57:21 -0400 Subject: [PATCH 591/639] Added tests for salt.utils.vmware.assign_license --- tests/unit/utils/vmware/test_license.py | 177 ++++++++++++++++++++++++ 1 file changed, 177 insertions(+) diff --git a/tests/unit/utils/vmware/test_license.py b/tests/unit/utils/vmware/test_license.py index 4722a88335..5ca92a705a 100644 --- a/tests/unit/utils/vmware/test_license.py +++ b/tests/unit/utils/vmware/test_license.py @@ -484,3 +484,180 @@ class GetAssignedLicensesTestCase(TestCase): self.mock_entity_ref, 'fake_entity_name') self.assertEqual(ret, self.mock_assignments) + + +@skipIf(NO_MOCK, NO_MOCK_REASON) +@skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library is missing') +class AssignLicenseTestCase(TestCase): + '''Tests for salt.utils.vmware.assign_license''' + + def setUp(self): + self.mock_ent_id = MagicMock() + self.mock_si = MagicMock() + type(self.mock_si.content.about).instanceUuid = \ + PropertyMock(return_value=self.mock_ent_id) + self.mock_lic_key = MagicMock() + self.mock_moid = MagicMock() + self.prop_mock_moid = PropertyMock(return_value=self.mock_moid) + self.mock_entity_ref = MagicMock() + type(self.mock_entity_ref)._moId = self.prop_mock_moid + self.mock_license = MagicMock() + self.mock_update_assigned_license = MagicMock( + return_value=self.mock_license) + self.mock_lic_assign_mgr = MagicMock( + UpdateAssignedLicense=self.mock_update_assigned_license) + patches = ( + ('salt.utils.vmware.get_license_assignment_manager', + MagicMock(return_value=self.mock_lic_assign_mgr)),) + for mod, mock in patches: + patcher = patch(mod, mock) + patcher.start() + self.addCleanup(patcher.stop) + + def test_no_license_assignment_manager_passed_in(self): + mock_get_license_assign_manager = MagicMock() + with patch('salt.utils.vmware.get_license_assignment_manager', + mock_get_license_assign_manager): + salt.utils.vmware.assign_license(self.mock_si, + 
self.mock_lic_key, + 'fake_license_name', + self.mock_entity_ref, + 'fake_entity_name') + mock_get_license_assign_manager.assert_called_once_with(self.mock_si) + + def test_license_assignment_manager_passed_in(self): + mock_get_license_assign_manager = MagicMock() + with patch('salt.utils.vmware.get_license_assignment_manager', + mock_get_license_assign_manager): + salt.utils.vmware.assign_license( + self.mock_si, self.mock_lic_key, 'fake_license_name', + self.mock_entity_ref, 'fake_entity_name', + license_assignment_manager=self.mock_lic_assign_mgr) + self.assertEqual(mock_get_license_assign_manager.call_count, 0) + self.assertEqual(self.mock_update_assigned_license.call_count, 1) + + def test_entity_name(self): + mock_trace = MagicMock() + with patch('salt.log.setup.SaltLoggingClass.trace', mock_trace): + salt.utils.vmware.assign_license(self.mock_si, + self.mock_lic_key, + 'fake_license_name', + self.mock_entity_ref, + 'fake_entity_name') + mock_trace.assert_called_once_with('Assigning license to ' + '\'fake_entity_name\'') + + def test_instance_uuid(self): + mock_instance_uuid_prop = PropertyMock() + type(self.mock_si.content.about).instanceUuid = mock_instance_uuid_prop + self.mock_lic_assign_mgr.UpdateAssignedLicense= MagicMock( + return_value=[MagicMock(entityDisplayName='fake_vcenter')]) + salt.utils.vmware.assign_license(self.mock_si, + self.mock_lic_key, + 'fake_license_name', + entity_name='fake_entity_name') + self.assertEqual(mock_instance_uuid_prop.call_count, 1) + + def test_instance_uuid_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + type(self.mock_si.content.about).instanceUuid = \ + PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.assign_license(self.mock_si, + self.mock_lic_key, + 'fake_license_name', + entity_name='fake_entity_name') + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_instance_uuid_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + type(self.mock_si.content.about).instanceUuid = \ + PropertyMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.assign_license(self.mock_si, + self.mock_lic_key, + 'fake_license_name', + entity_name='fake_entity_name') + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_instance_uuid_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + type(self.mock_si.content.about).instanceUuid = \ + PropertyMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.vmware.assign_license(self.mock_si, + self.mock_lic_key, + 'fake_license_name', + entity_name='fake_entity_name') + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_update_assigned_licenses_vcenter(self): + salt.utils.vmware.assign_license(self.mock_si, + self.mock_lic_key, + 'fake_license_name', + entity_name='fake_entity_name') + self.mock_update_assigned_license.assert_called_once_with( + self.mock_ent_id, self.mock_lic_key) + + def test_update_assigned_licenses_call_with_entity(self): + salt.utils.vmware.assign_license(self.mock_si, + self.mock_lic_key, + 'fake_license_name', + self.mock_entity_ref, + 'fake_entity_name') + self.mock_update_assigned_license.assert_called_once_with( + self.mock_moid, self.mock_lic_key) + + def test_update_assigned_licenses_raises_no_permission(self): + exc = vim.fault.NoPermission() + exc.privilegeId = 'Fake privilege' + self.mock_lic_assign_mgr.UpdateAssignedLicense = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.assign_license(self.mock_si, + self.mock_lic_key, + 'fake_license_name', + self.mock_entity_ref, + 'fake_entity_name') + self.assertEqual(excinfo.exception.strerror, + 'Not enough permissions. 
Required privilege: ' + 'Fake privilege') + + def test_update_assigned_licenses_raises_vim_fault(self): + exc = vim.fault.VimFault() + exc.msg = 'VimFault msg' + self.mock_lic_assign_mgr.UpdateAssignedLicense = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareApiError) as excinfo: + salt.utils.vmware.assign_license(self.mock_si, + self.mock_lic_key, + 'fake_license_name', + self.mock_entity_ref, + 'fake_entity_name') + self.assertEqual(excinfo.exception.strerror, 'VimFault msg') + + def test_update_assigned_licenses_raises_runtime_fault(self): + exc = vmodl.RuntimeFault() + exc.msg = 'RuntimeFault msg' + self.mock_lic_assign_mgr.UpdateAssignedLicense = \ + MagicMock(side_effect=exc) + with self.assertRaises(VMwareRuntimeError) as excinfo: + salt.utils.vmware.assign_license(self.mock_si, + self.mock_lic_key, + 'fake_license_name', + self.mock_entity_ref, + 'fake_entity_name') + self.assertEqual(excinfo.exception.strerror, 'RuntimeFault msg') + + def test_valid_assignments(self): + ret = salt.utils.vmware.assign_license(self.mock_si, + self.mock_lic_key, + 'fake_license_name', + self.mock_entity_ref, + 'fake_entity_name') + self.assertEqual(ret, self.mock_license) From 6f7b1dde12318659bcc463d6c085fa4626c1cdf1 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 20:04:56 -0400 Subject: [PATCH 592/639] Imported additional JSON schema definitions in salt.modules.vsphere --- salt/modules/vsphere.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 3f103c35c2..96ef174021 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -180,7 +180,9 @@ import salt.utils.vsan from salt.exceptions import CommandExecutionError, VMwareSaltError, \ ArgumentValueError, InvalidConfigError, VMwareObjectRetrievalError from salt.utils.decorators import depends, ignores_kwargs -from salt.config.schemas.esxcluster import ESXClusterConfigSchema +from 
salt.config.schemas.esxcluster import ESXClusterConfigSchema, \ + ESXClusterEntitySchema +from salt.config.schemas.vcenter import VCenterEntitySchema # Import Third Party Libs try: From 02f9aca827a2d567bd49a1f322f978f07a500758 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 20:06:37 -0400 Subject: [PATCH 593/639] Added salt.modules.vsphere.list_licenses that lists all licenses on a vCenter --- salt/modules/vsphere.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 96ef174021..c3afd2fe03 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4262,6 +4262,33 @@ def rename_datastore(datastore_name, new_datastore_name, return True +@depends(HAS_PYVMOMI) +@supports_proxies('esxcluster', 'esxdatacenter') +@gets_service_instance_via_proxy +def list_licenses(service_instance=None): + ''' + Lists all licenses on a vCenter. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. 
code-block:: bash + + salt '*' vsphere.list_licenses + ''' + log.trace('Retrieving all licenses') + licenses = salt.utils.vmware.get_licenses(service_instance) + ret_dict = [{'key': l.licenseKey, + 'name': l.name, + 'description': l.labels[0].value if l.labels else None, + # VMware handles unlimited capacity as 0 + 'capacity': l.total if l.total > 0 else sys.maxsize, + 'used': l.used if l.used else 0} + for l in licenses] + return ret_dict + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From 31894c23c3e9482e01a97b5011968cc53ec33012 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 20:08:37 -0400 Subject: [PATCH 594/639] Added salt.modules.vsphere.add_license --- salt/modules/vsphere.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index c3afd2fe03..55c61e4c8f 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4289,6 +4289,37 @@ def list_licenses(service_instance=None): return ret_dict +@depends(HAS_PYVMOMI) +@supports_proxies('esxcluster', 'esxdatacenter') +@gets_service_instance_via_proxy +def add_license(key, description, safety_checks=True, + service_instance=None): + ''' + Adds a license to the vCenter or ESXi host + + key + License key. + + description + License description added in as a label. + + safety_checks + Specify whether to perform safety check or to skip the checks and try + performing the required task + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. 
code-block:: bash + + salt '*' vsphere.add_license key= desc='License desc' + ''' + log.trace('Adding license \'{0}\''.format(key)) + salt.utils.vmware.add_license(service_instance, key, description) + return True + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From 906babd358a13c2d4b9e24996f836401a781de25 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 20:10:15 -0400 Subject: [PATCH 595/639] Added salt.modules.vsphere._get_entity that describes a cluster and a vcenter --- salt/modules/vsphere.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 55c61e4c8f..334e003c44 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4320,6 +4320,41 @@ def add_license(key, description, safety_checks=True, return True +def _get_entity(service_instance, entity): + ''' + Returns the entity associated with the entity dict representation + + Supported entities: cluster, vcenter + + Expected entity format: + + .. code-block:: python + + cluster: + {'type': 'cluster', + 'datacenter': , + 'cluster': } + vcenter: + {'type': 'vcenter'} + + service_instance + Service instance (vim.ServiceInstance) of the vCenter. 
+ + entity + Entity dict in the format above + ''' + + log.trace('Retrieving entity: {0}'.format(entity)) + if entity['type'] == 'cluster': + dc_ref = salt.utils.vmware.get_datacenter(service_instance, + entity['datacenter']) + return salt.utils.vmware.get_cluster(dc_ref, entity['cluster']) + elif entity['type'] == 'vcenter': + return None + raise ArgumentValueError('Unsupported entity type \'{0}\'' + ''.format(entity['type'])) + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From 25ac4400db2e0e5acf62b998b97629fb60853dd4 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 20:12:16 -0400 Subject: [PATCH 596/639] Added salt.modules.vsphere._validate_entity that validates a cluster and a vcenter description --- salt/modules/vsphere.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 334e003c44..983d150c28 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4355,6 +4355,29 @@ def _get_entity(service_instance, entity): ''.format(entity['type'])) +def _validate_entity(entity): + ''' + Validates the entity dict representation + + entity + Dictionary representation of an entity. + See ``_get_entity`` docstrings for format. 
+ ''' + + #Validate entity: + if entity['type'] == 'cluster': + schema = ESXClusterEntitySchema.serialize() + elif entity['type'] == 'vcenter': + schema = VCenterEntitySchema.serialize() + else: + raise ArgumentValueError('Unsupported entity type \'{0}\'' + ''.format(entity['type'])) + try: + jsonschema.validate(entity, schema) + except jsonschema.exceptions.ValidationError as exc: + raise excs.InvalidEntityError(exc) + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From 4656f26264f75e369d5b77e727cbff8f34072359 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 20:14:12 -0400 Subject: [PATCH 597/639] Added salt.modules.vsphere.list_assigned_licenses that list the licenses assigned to an entity --- salt/modules/vsphere.py | 47 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 983d150c28..af3b6c0dd0 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4378,6 +4378,53 @@ def _validate_entity(entity): raise excs.InvalidEntityError(exc) +@depends(HAS_PYVMOMI) +@depends(HAS_JSONSCHEMA) +@supports_proxies('esxcluster', 'esxdatacenter') +@gets_service_instance_via_proxy +def list_assigned_licenses(entity, entity_display_name, license_keys=None, + service_instance=None): + ''' + Lists the licenses assigned to an entity + + entity + Dictionary representation of an entity. + See ``_get_entity`` docstrings for format. + + entity_display_name + Entity name used in logging + + license_keys: + List of license keys to be retrieved. Default is None. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. 
code-block:: bash + + salt '*' vsphere.list_assigned_licenses + entity={type:cluster,datacenter:dc,cluster:cl} + entiy_display_name=cl + ''' + log.trace('Listing assigned licenses of entity {0}' + ''.format(entity)) + _validate_entity(entity) + + assigned_licenses = salt.utils.vmware.get_assigned_licenses( + service_instance, + entity_ref=_get_entity(service_instance, entity), + entity_name=entity_display_name) + + return [{'key': l.licenseKey, + 'name': l.name, + 'description': l.labels[0].value if l.labels else None, + # VMware handles unlimited capacity as 0 + 'capacity': l.total if l.total > 0 else sys.maxsize} + for l in assigned_licenses if (license_keys is None) or + (l.licenseKey in license_keys)] + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From d0a794663ebe8a2d6d7d0fc4c7acc3740f58442d Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 20:15:03 -0400 Subject: [PATCH 598/639] Added salt.modules.vsphere.assign_license that assigns a license to an entity --- salt/modules/vsphere.py | 51 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index af3b6c0dd0..dce2df9831 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -4425,6 +4425,57 @@ def list_assigned_licenses(entity, entity_display_name, license_keys=None, (l.licenseKey in license_keys)] +@depends(HAS_PYVMOMI) +@depends(HAS_JSONSCHEMA) +@supports_proxies('esxcluster', 'esxdatacenter') +@gets_service_instance_via_proxy +def assign_license(license_key, license_name, entity, entity_display_name, + safety_checks=True, service_instance=None): + ''' + Assigns a license to an entity + + license_key + Key of the license to assign + See ``_get_entity`` docstrings for format. 
+ + license_name + Display name of license + + entity + Dictionary representation of an entity + + entity_display_name + Entity name used in logging + + safety_checks + Specify whether to perform safety check or to skip the checks and try + performing the required task. Default is False. + + service_instance + Service instance (vim.ServiceInstance) of the vCenter/ESXi host. + Default is None. + + .. code-block:: bash + + salt '*' vsphere.assign_license license_key=00000:00000 + license name=test entity={type:cluster,datacenter:dc,cluster:cl} + ''' + log.trace('Assigning license {0} to entity {1}' + ''.format(license_key, entity)) + _validate_entity(entity) + if safety_checks: + licenses = salt.utils.vmware.get_licenses(service_instance) + if not [l for l in licenses if l.licenseKey == license_key]: + raise VMwareObjectRetrievalError('License \'{0}\' wasn\'t found' + ''.format(license_name)) + salt.utils.vmware.assign_license( + service_instance, + license_key, + license_name, + entity_ref=_get_entity(service_instance, entity), + entity_name=entity_display_name) + + def _check_hosts(service_instance, host, host_names): ''' Helper function that checks to see if the host provided is a vCenter Server or From 7bb54430ab6038dc13e4a9c0983eae40585b8be8 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Sun, 17 Sep 2017 20:17:41 -0400 Subject: [PATCH 599/639] Added licenses_configured state that checks the the VSAN licenses are added to the vCenter and assigned to the cluster --- salt/states/esxcluster.py | 192 +++++++++++++++++++++++++++++++++++++- 1 file changed, 191 insertions(+), 1 deletion(-) diff --git a/salt/states/esxcluster.py b/salt/states/esxcluster.py index 6db8072544..d26191504d 100644 --- a/salt/states/esxcluster.py +++ b/salt/states/esxcluster.py @@ -43,12 +43,14 @@ Module was developed against. 
from __future__ import absolute_import import logging import traceback +import sys # Import Salt Libs import salt.exceptions from salt.utils.dictdiffer import recursive_diff from salt.utils.listdiffer import list_diff -from salt.config.schemas.esxcluster import ESXClusterConfigSchema +from salt.config.schemas.esxcluster import ESXClusterConfigSchema, \ + LicenseSchema from salt.utils import dictupdate # External libraries @@ -331,3 +333,191 @@ def vsan_datastore_configured(name, datastore_name): 'result': False, 'comment': exc.strerror}) return ret + + +def licenses_configured(name, licenses=None): + ''' + Configures licenses on the cluster entity + + Checks if each license exists on the server: + - if it doesn't, it creates it + Check if license is assigned to the cluster: + - if it's not assigned to the cluster: + - assign it to the cluster if there is space + - error if there's no space + - if it's assigned to the cluster nothing needs to be done + ''' + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': 'Default'} + if not licenses: + raise salt.exceptions.ArgumentValueError('No licenses provided') + cluster_name, datacenter_name = \ + __salt__['esxcluster.get_details']()['cluster'], \ + __salt__['esxcluster.get_details']()['datacenter'] + display_name = '{0}/{1}'.format(datacenter_name, cluster_name) + log.info('Running licenses configured for \'{0}\''.format(display_name)) + log.trace('licenses = {0}'.format(licenses)) + entity = {'type': 'cluster', + 'datacenter': datacenter_name, + 'cluster': cluster_name} + log.trace('entity = {0}'.format(entity)) + + comments = [] + changes = {} + old_licenses = [] + new_licenses = [] + has_errors = False + needs_changes = False + try: + # Validate licenses + log.debug('Validating licenses') + schema = LicenseSchema.serialize() + try: + jsonschema.validate({'licenses': licenses}, schema) + except jsonschema.exceptions.ValidationError as exc: + raise salt.exceptions.InvalidLicenseError(exc) + + si = 
__salt__['vsphere.get_service_instance_via_proxy']() + # Retrieve licenses + existing_licenses = __salt__['vsphere.list_licenses']( + service_instance=si) + remaining_licenses = existing_licenses[:] + # Cycle through licenses + for license_name, license in licenses.items(): + # Check if license already exists + filtered_licenses = [l for l in existing_licenses + if l['key'] == license] + # TODO Update license description - not of interest right now + if not filtered_licenses: + # License doesn't exist - add and assign to cluster + needs_changes = True + if __opts__['test']: + # If it doesn't exist it clearly needs to be assigned as + # well so we can stop the check here + comments.append('State {0} will add license \'{1}\', ' + 'and assign it to cluster \'{2}\'.' + ''.format(name, license_name, display_name)) + log.info(comments[-1]) + continue + else: + try: + existing_license = __salt__['vsphere.add_license']( + key=license, description=license_name, + service_instance=si) + except salt.exceptions.VMwareApiError as ex: + comments.append(ex.err_msg) + log.error(comments[-1]) + has_errors = True + continue + comments.append('Added license \'{0}\'.' + ''.format(license_name)) + log.info(comments[-1]) + else: + # License exists let's check if it's assigned to the cluster + comments.append('License \'{0}\' already exists. 
' + 'Nothing to be done.'.format(license_name)) + log.info(comments[-1]) + existing_license = filtered_licenses[0] + + log.debug('Checking licensed entities...'.format(license_name)) + assigned_licenses = __salt__['vsphere.list_assigned_licenses']( + entity=entity, + entity_display_name=display_name, + service_instance=si) + + # Checking if any of the licenses already assigned have the same + # name as the new license; the already assigned license would be + # replaced by the new license + # + # Licenses with different names but matching features would be + # replaced as well, but searching for those would be very complex + # + # the name check if good enough for now + already_assigned_license = assigned_licenses[0] if \ + assigned_licenses else None + + if already_assigned_license and \ + already_assigned_license['key'] == license: + + # License is already assigned to entity + comments.append('License \'{0}\' already assigned to ' + 'cluster \'{1}\'. Nothing to be done.' + ''.format(license_name, display_name)) + log.info(comments[-1]) + continue + + needs_changes = True + # License needs to be assigned to entity + + if existing_license['capacity'] <= existing_license['used']: + # License is already fully used + comments.append('Cannot assign license \'{0}\' to cluster ' + '\'{1}\'. No free capacity available.' 
+ ''.format(license_name, display_name)) + log.error(comments[-1]) + has_errors = True + continue + + # Assign license + if __opts__['test']: + comments.append('State {0} will assign license \'{1}\' ' + 'to cluster \'{2}\'.'.format( + name, license_name, display_name)) + log.info(comments[-1]) + else: + try: + __salt__['vsphere.assign_license']( + license_key=license, + license_name=license_name, + entity=entity, + entity_display_name=display_name, + service_instance=si) + except salt.exceptions.VMwareApiError as ex: + comments.append(ex.err_msg) + log.error(comments[-1]) + has_errors = True + continue + comments.append('Assigned license \'{0}\' to cluster \'{1}\'.' + ''.format(license_name, display_name)) + log.info(comments[-1]) + # Note: Because the already_assigned_license was retrieved + # from the assignment license manager it doesn't have a used + # value - that's a limitation from VMware. The license would + # need to be retrieved again from the license manager to get + # the value + + # Hide license keys + assigned_license = __salt__['vsphere.list_assigned_licenses']( + entity=entity, + entity_display_name=display_name, + service_instance=si)[0] + assigned_license['key'] = '' + if already_assigned_license: + already_assigned_license['key'] = '' + if already_assigned_license and \ + already_assigned_license['capacity'] == sys.maxsize: + + already_assigned_license['capacity'] = 'Unlimited' + + changes[license_name] = {'new': assigned_license, + 'old': already_assigned_license} + continue + __salt__['vsphere.disconnect'](si) + + ret.update({'result': True if (not needs_changes) else None if \ + __opts__['test'] else False if has_errors else \ + True, + 'comment': '\n'.join(comments), + 'changes': changes if not __opts__['test'] else {}}) + + return ret + except salt.exceptions.CommandExecutionError as exc: + log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc())) + if si: + __salt__['vsphere.disconnect'](si) + ret.update({ + 'result': False, + 
'comment': exc.strerror}) + return ret From 9f0d639358db98c31c382ccf545f63912b6abb34 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 08:57:04 -0400 Subject: [PATCH 600/639] Skip ext vsan libraries pylint checks --- salt/ext/vsan/vsanapiutils.py | 1 + salt/ext/vsan/vsanmgmtObjects.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/ext/vsan/vsanapiutils.py b/salt/ext/vsan/vsanapiutils.py index fce4945e23..bb7ef05556 100644 --- a/salt/ext/vsan/vsanapiutils.py +++ b/salt/ext/vsan/vsanapiutils.py @@ -7,6 +7,7 @@ Copyright 2016 VMware, Inc. All rights reserved. This module defines basic helper functions used in the sampe codes """ +# pylint: skip-file __author__ = 'VMware, Inc' from pyVmomi import vim, vmodl, SoapStubAdapter diff --git a/salt/ext/vsan/vsanmgmtObjects.py b/salt/ext/vsan/vsanmgmtObjects.py index ebad265adb..15165afbaf 100644 --- a/salt/ext/vsan/vsanmgmtObjects.py +++ b/salt/ext/vsan/vsanmgmtObjects.py @@ -1,3 +1,4 @@ +# pylint: skip-file from pyVmomi.VmomiSupport import CreateDataType, CreateManagedType, CreateEnumType, AddVersion, AddVersionParent, F_LINK, F_LINKABLE, F_OPTIONAL CreateManagedType('vim.cluster.VsanPerformanceManager', 'VsanPerformanceManager', 'vmodl.ManagedObject', 'vim.version.version9', [], [('setStatsObjectPolicy', 'VsanPerfSetStatsObjectPolicy', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('profile', 'vim.vm.ProfileSpec', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('deleteStatsObject', 'VsanPerfDeleteStatsObject', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'boolean', 'boolean'), 'System.Read', None), ('createStatsObjectTask', 'VsanPerfCreateStatsObjectTask', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('profile', 'vim.vm.ProfileSpec', 
'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('deleteStatsObjectTask', 'VsanPerfDeleteStatsObjectTask', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.Task', 'vim.Task'), 'System.Read', None), ('queryClusterHealth', 'VsanPerfQueryClusterHealth', 'vim.version.version9', (('cluster', 'vim.ClusterComputeResource', 'vim.version.version9', 0, None), ), (0, 'vmodl.DynamicData[]', 'vmodl.DynamicData[]'), 'System.Read', None), ('queryStatsObjectInformation', 'VsanPerfQueryStatsObjectInformation', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanObjectInformation', 'vim.cluster.VsanObjectInformation'), 'System.Read', None), ('queryNodeInformation', 'VsanPerfQueryNodeInformation', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0 | F_OPTIONAL, 'vim.cluster.VsanPerfNodeInformation[]', 'vim.cluster.VsanPerfNodeInformation[]'), 'System.Read', None), ('queryVsanPerf', 'VsanPerfQueryPerf', 'vim.version.version9', (('querySpecs', 'vim.cluster.VsanPerfQuerySpec[]', 'vim.version.version9', 0, None), ('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'vim.cluster.VsanPerfEntityMetricCSV[]', 'vim.cluster.VsanPerfEntityMetricCSV[]'), 'System.Read', None), ('getSupportedEntityTypes', 'VsanPerfGetSupportedEntityTypes', 'vim.version.version9', tuple(), (0 | F_OPTIONAL, 'vim.cluster.VsanPerfEntityType[]', 'vim.cluster.VsanPerfEntityType[]'), 'System.Read', None), ('createStatsObject', 'VsanPerfCreateStatsObject', 'vim.version.version9', (('cluster', 'vim.ComputeResource', 'vim.version.version9', 0 | F_OPTIONAL, None), ('profile', 'vim.vm.ProfileSpec', 'vim.version.version9', 0 | F_OPTIONAL, None), ), (0, 'string', 'string'), 'System.Read', None), ]) @@ -139,4 +140,4 @@ 
CreateEnumType('vim.cluster.VsanPerfThresholdDirectionType', 'VsanPerfThresholdD CreateEnumType('vim.cluster.VsanPerfStatsType', 'VsanPerfStatsType', 'vim.version.version9', ['absolute' ,'delta' ,'rate' ,]) CreateEnumType('vim.vsan.host.DiskMappingCreationType', 'VimVsanHostDiskMappingCreationType', 'vim.version.version10', ['hybrid' ,'allFlash' ,]) CreateEnumType('vim.cluster.VsanClusterHealthActionIdEnum', 'VsanClusterHealthActionIdEnum', 'vim.version.version9', ['RepairClusterObjectsAction' ,'UploadHclDb' ,'UpdateHclDbFromInternet' ,'EnableHealthService' ,'DiskBalance' ,'StopDiskBalance' ,'RemediateDedup' ,'UpgradeVsanDiskFormat' ,]) -CreateEnumType('vim.cluster.VsanDiskGroupCreationType', 'VimClusterVsanDiskGroupCreationType', 'vim.version.version10', ['allflash' ,'hybrid' ,]) \ No newline at end of file +CreateEnumType('vim.cluster.VsanDiskGroupCreationType', 'VimClusterVsanDiskGroupCreationType', 'vim.version.version10', ['allflash' ,'hybrid' ,]) From f7f4f34c3ad6b20d2a4a25d717663a6b70afa7b5 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 09:21:36 -0400 Subject: [PATCH 601/639] Moved ignore_unset_values parameter as a RecursiveDictDiffer attribute --- salt/utils/dictdiffer.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/utils/dictdiffer.py b/salt/utils/dictdiffer.py index 7a7fb76a18..ac7b9b0e51 100644 --- a/salt/utils/dictdiffer.py +++ b/salt/utils/dictdiffer.py @@ -160,6 +160,8 @@ class RecursiveDictDiffer(DictDiffer): self._diffs = \ self._get_diffs(self.current_dict, self.past_dict, ignore_missing_keys) + # Ignores unet values when assessing the changes + self.ignore_unset_values = True @classmethod def _get_diffs(cls, dict1, dict2, ignore_missing_keys): From 4f4db431fa953cc2d06892b162c1b574d52d9580 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 09:23:11 -0400 Subject: [PATCH 602/639] Fixed tests for the ignore_unset_values change --- tests/unit/utils/test_dictdiffer.py | 6 ++++-- 1 file changed, 4 
insertions(+), 2 deletions(-) diff --git a/tests/unit/utils/test_dictdiffer.py b/tests/unit/utils/test_dictdiffer.py index fb1acfee53..f2eb73d485 100644 --- a/tests/unit/utils/test_dictdiffer.py +++ b/tests/unit/utils/test_dictdiffer.py @@ -43,11 +43,13 @@ class RecursiveDictDifferTestCase(TestCase): self.assertEqual(self.recursive_diff.removed(), ['a.f']) def test_changed_with_ignore_unset_values(self): - self.assertEqual(self.recursive_diff.changed(ignore_unset_values=True), + self.recursive_diff.ignore_unset_values = True + self.assertEqual(self.recursive_diff.changed(), ['a.c', 'a.e']) def test_changed_without_ignore_unset_values(self): - self.assertEqual(self.recursive_diff.changed(ignore_unset_values=False), + self.recursive_diff.ignore_unset_values = False + self.assertEqual(self.recursive_diff.changed(), ['a.c', 'a.e', 'a.g', 'a.f', 'h', 'i']) def test_unchanged(self): From 8cdaf01642c0133407cad47ceb9b76045053cdb8 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 09:26:50 -0400 Subject: [PATCH 603/639] pylint --- salt/states/esxcluster.py | 8 ++++---- salt/utils/dictdiffer.py | 26 ++++++++++++------------- salt/utils/vmware.py | 12 ++++++------ tests/unit/utils/vmware/test_storage.py | 11 +++++------ 4 files changed, 28 insertions(+), 29 deletions(-) diff --git a/salt/states/esxcluster.py b/salt/states/esxcluster.py index d26191504d..3209611f39 100644 --- a/salt/states/esxcluster.py +++ b/salt/states/esxcluster.py @@ -181,8 +181,8 @@ def cluster_configured(name, cluster_config): ret.update({'result': None, 'comment': '\n'.join(comments)}) return ret - log.debug ('Creating cluster \'{0}\' in datacenter \'{1}\'. ' - ''.format(cluster_name, datacenter_name)) + log.debug('Creating cluster \'{0}\' in datacenter \'{1}\'. 
' + ''.format(cluster_name, datacenter_name)) __salt__['vsphere.create_cluster'](cluster_dict, datacenter_name, cluster_name, @@ -258,7 +258,7 @@ def cluster_configured(name, cluster_config): 'comment': '\n'.join(comments), 'changes': changes}) return ret - except excs.CommandExecutionError as exc: + except CommandExecutionError as exc: log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc())) if si: __salt__['vsphere.disconnect'](si) @@ -320,7 +320,7 @@ def vsan_datastore_configured(name, datastore_name): log.info(comments[-1]) __salt__['vsphere.disconnect'](si) - ret.update({'result': True if (not changes_required) else None if \ + ret.update({'result': True if (not changes_required) else None if __opts__['test'] else True, 'comment': '\n'.join(comments), 'changes': changes}) diff --git a/salt/utils/dictdiffer.py b/salt/utils/dictdiffer.py index ac7b9b0e51..b007742083 100644 --- a/salt/utils/dictdiffer.py +++ b/salt/utils/dictdiffer.py @@ -283,16 +283,16 @@ class RecursiveDictDiffer(DictDiffer): keys.extend(_removed(diffs[key], prefix='{0}{1}.'.format(prefix, key))) elif diffs[key]['new'] == self.NONE_VALUE: - keys.append('{0}{1}'.format(prefix, key)) + keys.append('{0}{1}'.format(prefix, key)) elif isinstance(diffs[key]['new'], dict): - keys.extend( - _removed(diffs[key]['new'], - prefix='{0}{1}.'.format(prefix, key))) + keys.extend( + _removed(diffs[key]['new'], + prefix='{0}{1}.'.format(prefix, key))) return keys return _removed(self._diffs, prefix='') - def changed(self, ignore_unset_values=True): + def changed(self): ''' Returns all keys that have been changed. 
@@ -309,7 +309,7 @@ class RecursiveDictDiffer(DictDiffer): keys.extend(_changed(diffs[key], prefix='{0}{1}.'.format(prefix, key))) continue - if ignore_unset_values: + if self.ignore_unset_values: if 'old' in diffs[key] and 'new' in diffs[key] and \ diffs[key]['old'] != self.NONE_VALUE and \ diffs[key]['new'] != self.NONE_VALUE: @@ -320,9 +320,9 @@ class RecursiveDictDiffer(DictDiffer): else: keys.append('{0}{1}'.format(prefix, key)) elif isinstance(diffs[key], dict): - keys.extend( - _changed(diffs[key], - prefix='{0}{1}.'.format(prefix, key))) + keys.extend( + _changed(diffs[key], + prefix='{0}{1}.'.format(prefix, key))) else: if 'old' in diffs[key] and 'new' in diffs[key]: if isinstance(diffs[key]['new'], dict): @@ -332,9 +332,9 @@ class RecursiveDictDiffer(DictDiffer): else: keys.append('{0}{1}'.format(prefix, key)) elif isinstance(diffs[key], dict): - keys.extend( - _changed(diffs[key], - prefix='{0}{1}.'.format(prefix, key))) + keys.extend( + _changed(diffs[key], + prefix='{0}{1}.'.format(prefix, key))) return keys @@ -350,7 +350,7 @@ class RecursiveDictDiffer(DictDiffer): def _unchanged(current_dict, diffs, prefix): keys = [] for key in current_dict.keys(): - if not key in diffs: + if key not in diffs: keys.append('{0}{1}'.format(prefix, key)) elif isinstance(current_dict[key], dict): if 'new' in diffs[key]: diff --git a/salt/utils/vmware.py b/salt/utils/vmware.py index fc09c4478d..b239b269b0 100644 --- a/salt/utils/vmware.py +++ b/salt/utils/vmware.py @@ -1109,8 +1109,8 @@ def add_license(service_instance, key, description, license_manager=None): if not license_manager: license_manager = get_license_manager(service_instance) label = vim.KeyValue() - label.key='VpxClientLicenseLabel' - label.value=description + label.key = 'VpxClientLicenseLabel' + label.value = description log.debug('Adding license \'{}\''.format(description)) try: license = license_manager.AddLicense(key, [label]) @@ -1203,7 +1203,7 @@ def get_assigned_licenses(service_instance, 
entity_ref=None, entity_name=None, raise salt.exceptions.VMwareObjectRetrievalError( 'Unexpected return. Expect only a single assignment') - if check_name == True: + if check_name: if entity_name != assignments[0].entityDisplayName: log.trace('Getting license info for wrong vcenter: ' '{0} != {1}'.format(entity_name, @@ -1545,8 +1545,8 @@ def get_datastores(service_instance, reference, datastore_names=None, mount_infos = props.get('fileSystemVolumeInfo.mountInfo', []) disk_datastores = [] # Non vmfs volumes aren't backed by a disk - for vol in [i.volume for i in mount_infos if \ - isinstance(i.volume, vim.HostVmfsVolume)]: + for vol in [i.volume for i in mount_infos if + isinstance(i.volume, vim.HostVmfsVolume)]: if not [e for e in vol.extent if e.diskName in backing_disk_ids]: # Skip volume if it doesn't contain an extent with a @@ -1598,7 +1598,7 @@ def get_datastores(service_instance, reference, datastore_names=None, # Traversal of root folder (doesn't support multiple levels of Folders) traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='childEntity', - selectSet = [ + selectSet=[ vmodl.query.PropertyCollector.TraversalSpec( path='datastore', skip=False, diff --git a/tests/unit/utils/vmware/test_storage.py b/tests/unit/utils/vmware/test_storage.py index 0319bb1411..0b31abe3b2 100644 --- a/tests/unit/utils/vmware/test_storage.py +++ b/tests/unit/utils/vmware/test_storage.py @@ -11,8 +11,7 @@ import logging # Import Salt testing libraries from tests.support.unit import TestCase, skipIf -from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call, \ - PropertyMock +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call from salt.exceptions import VMwareObjectRetrievalError, VMwareApiError, \ ArgumentValueError, VMwareRuntimeError @@ -28,6 +27,7 @@ except ImportError: # Get Logging Started log = logging.getLogger(__name__) + @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(not HAS_PYVMOMI, 'The \'pyvmomi\' library 
is missing') class GetStorageSystemTestCase(TestCase): @@ -179,7 +179,6 @@ class GetDatastoresTestCase(TestCase): self.mock_get_storage_system.assert_called_once_with( self.mock_si, self.mock_reference, 'fake_host') - def test_get_mount_info_call(self): mock_get_properties_of_managed_object = MagicMock() with patch('salt.utils.vmware.get_properties_of_managed_object', @@ -200,7 +199,7 @@ class GetDatastoresTestCase(TestCase): def test_host_traversal_spec(self): # Reference is of type vim.HostSystem - mock_traversal_spec_init = MagicMock() + mock_traversal_spec_init = MagicMock() with patch( 'salt.utils.vmware.vmodl.query.PropertyCollector.TraversalSpec', mock_traversal_spec_init): @@ -330,7 +329,7 @@ class GetDatastoresTestCase(TestCase): res = salt.utils.vmware.get_datastores( self.mock_si, self.mock_reference, datastore_names=['fake_ds1'], - backing_disk_ids= ['fake_disk3']) + backing_disk_ids=['fake_disk3']) self.assertEqual(res, [self.mock_entries[0]['object'], self.mock_entries[2]['object']]) @@ -393,6 +392,6 @@ class RenameDatastoreTestCase(TestCase): def test_rename_datastore(self): ret = salt.utils.vmware.rename_datastore(self.mock_ds_ref, - 'fake_new_name') + 'fake_new_name') self.mock_ds_ref.RenameDatastore.assert_called_once_with( 'fake_new_name') From f54ddae6bba689bca8a07a6cd49796436c1336d8 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Wed, 20 Sep 2017 09:57:57 -0600 Subject: [PATCH 604/639] fix do test where space was separated by a newline --- tests/unit/utils/test_schema.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/unit/utils/test_schema.py b/tests/unit/utils/test_schema.py index 689bd8a1b8..108afcf458 100644 --- a/tests/unit/utils/test_schema.py +++ b/tests/unit/utils/test_schema.py @@ -198,9 +198,8 @@ class ConfigTestCase(TestCase): }, 'ssh_key_names': { 'type': 'string', - 'description': 'The names of an SSH key being managed on Digital ' - 'Ocean account which will be used to authenticate ' - 'on the 
deployed VMs', + 'description': 'The names of an SSH key being managed on DigitalOcean ' + 'account which will be used to authenticate on the deployed VMs', 'title': 'SSH Key Names' } }, From a41b5072daa0a15ac87ae5cff91577df504e57d3 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 13:40:36 -0400 Subject: [PATCH 605/639] more pylint --- salt/config/schemas/esxcluster.py | 6 +++--- salt/modules/vsphere.py | 4 ++-- salt/states/esxcluster.py | 17 ++++++++--------- salt/utils/listdiffer.py | 2 +- salt/utils/vsan.py | 1 - tests/unit/utils/test_dictdiffer.py | 4 ++-- tests/unit/utils/test_vsan.py | 8 ++++---- tests/unit/utils/vmware/test_license.py | 4 ++-- tests/unit/utils/vmware/test_storage.py | 3 +-- 9 files changed, 23 insertions(+), 26 deletions(-) diff --git a/salt/config/schemas/esxcluster.py b/salt/config/schemas/esxcluster.py index bb69547c8b..a9a8e53b2f 100644 --- a/salt/config/schemas/esxcluster.py +++ b/salt/config/schemas/esxcluster.py @@ -28,8 +28,8 @@ class OptionValueItem(ComplexSchemaItem): '''Sechma item of the OptionValue''' title = 'OptionValue' - key=StringItem(title='Key', required=True) - value=AnyOfItem(items=[StringItem(), BooleanItem(), IntegerItem()]) + key = StringItem(title='Key', required=True) + value = AnyOfItem(items=[StringItem(), BooleanItem(), IntegerItem()]) class AdmissionControlPolicyItem(ComplexSchemaItem): @@ -130,7 +130,7 @@ class DRSConfigItem(ComplexSchemaItem): '1 (least aggressive) - 5 (most aggressive)', minimum=1, maximum=5) - default_vm_behavior= StringItem( + default_vm_behavior = StringItem( title='Default VM DRS Behavior', description='Specifies the default VM DRS behavior', enum=['fullyAutomated', 'partiallyAutomated', 'manual']) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index dce2df9831..fccea9ebb1 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -3848,7 +3848,7 @@ def _apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec=None, 
das_config.defaultVmSettings.isolationResponse = \ vm_set_dict['isolation_response'] if 'restart_priority' in vm_set_dict: - das_config.defaultVmSettings.restartPriority= \ + das_config.defaultVmSettings.restartPriority = \ vm_set_dict['restart_priority'] if 'hb_ds_candidate_policy' in ha_dict: das_config.hBDatastoreCandidatePolicy = \ @@ -3880,7 +3880,7 @@ def _apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec=None, cluster_spec.vmSwapPlacement = cluster_dict['vm_swap_placement'] if cluster_dict.get('vsan'): vsan_dict = cluster_dict['vsan'] - if not vsan_61: # VSAN is 6.2 and above + if not vsan_61: # VSAN is 6.2 and above if 'enabled' in vsan_dict: if not vsan_spec.vsanClusterConfig: vsan_spec.vsanClusterConfig = \ diff --git a/salt/states/esxcluster.py b/salt/states/esxcluster.py index 3209611f39..0417ac3281 100644 --- a/salt/states/esxcluster.py +++ b/salt/states/esxcluster.py @@ -46,7 +46,7 @@ import traceback import sys # Import Salt Libs -import salt.exceptions +from salt.exceptions import CommandExecutionError from salt.utils.dictdiffer import recursive_diff from salt.utils.listdiffer import list_diff from salt.config.schemas.esxcluster import ESXClusterConfigSchema, \ @@ -157,7 +157,7 @@ def cluster_configured(name, cluster_config): changes_required = False try: - log.debug('Validating cluster_configured state input') + log.trace('Validating cluster_configured state input') schema = ESXClusterConfigSchema.serialize() log.trace('schema = {0}'.format(schema)) try: @@ -181,7 +181,7 @@ def cluster_configured(name, cluster_config): ret.update({'result': None, 'comment': '\n'.join(comments)}) return ret - log.debug('Creating cluster \'{0}\' in datacenter \'{1}\'. ' + log.trace('Creating cluster \'{0}\' in datacenter \'{1}\'. 
' ''.format(cluster_name, datacenter_name)) __salt__['vsphere.create_cluster'](cluster_dict, datacenter_name, @@ -307,7 +307,7 @@ def vsan_datastore_configured(name, datastore_name): '\'{1}\'.'.format(name, datastore_name)) log.info(comments[-1]) else: - log.debug('Renaming vSAN datastore \'{0}\' to \'{1}\'' + log.trace('Renaming vSAN datastore \'{0}\' to \'{1}\'' ''.format(vsan_ds['name'], datastore_name)) __salt__['vsphere.rename_datastore']( datastore_name=vsan_ds['name'], @@ -372,7 +372,7 @@ def licenses_configured(name, licenses=None): needs_changes = False try: # Validate licenses - log.debug('Validating licenses') + log.trace('Validating licenses') schema = LicenseSchema.serialize() try: jsonschema.validate({'licenses': licenses}, schema) @@ -421,7 +421,7 @@ def licenses_configured(name, licenses=None): log.info(comments[-1]) existing_license = filtered_licenses[0] - log.debug('Checking licensed entities...'.format(license_name)) + log.trace('Checking licensed entities...') assigned_licenses = __salt__['vsphere.list_assigned_licenses']( entity=entity, entity_display_name=display_name, @@ -506,9 +506,8 @@ def licenses_configured(name, licenses=None): continue __salt__['vsphere.disconnect'](si) - ret.update({'result': True if (not needs_changes) else None if \ - __opts__['test'] else False if has_errors else \ - True, + ret.update({'result': True if (not needs_changes) else None if + __opts__['test'] else False if has_errors else True, 'comment': '\n'.join(comments), 'changes': changes if not __opts__['test'] else {}}) diff --git a/salt/utils/listdiffer.py b/salt/utils/listdiffer.py index 7f549cce1d..d1cf3a8bd9 100644 --- a/salt/utils/listdiffer.py +++ b/salt/utils/listdiffer.py @@ -16,7 +16,7 @@ The following can be retrieved: Note: All dictionaries keys are expected to be strings ''' - +from __future__ import absolute_import from salt.utils.dictdiffer import recursive_diff diff --git a/salt/utils/vsan.py b/salt/utils/vsan.py index 82f4e2563b..8ad713cd3e 
100644 --- a/salt/utils/vsan.py +++ b/salt/utils/vsan.py @@ -173,7 +173,6 @@ def reconfigure_cluster_vsan(cluster_ref, cluster_vsan_spec): ''.format(cluster_name, cluster_vsan_spec)) si = salt.utils.vmware.get_service_instance_from_managed_object( cluster_ref) - print salt.utils.vsan.get_vsan_cluster_config_system vsan_cl_conf_sys = salt.utils.vsan.get_vsan_cluster_config_system(si) try: task = vsan_cl_conf_sys.VsanClusterReconfig(cluster_ref, diff --git a/tests/unit/utils/test_dictdiffer.py b/tests/unit/utils/test_dictdiffer.py index f2eb73d485..2c6243bbd8 100644 --- a/tests/unit/utils/test_dictdiffer.py +++ b/tests/unit/utils/test_dictdiffer.py @@ -2,7 +2,6 @@ # Import python libs from __future__ import absolute_import -import copy # Import Salt Testing libs from tests.support.unit import TestCase @@ -43,7 +42,7 @@ class RecursiveDictDifferTestCase(TestCase): self.assertEqual(self.recursive_diff.removed(), ['a.f']) def test_changed_with_ignore_unset_values(self): - self.recursive_diff.ignore_unset_values = True + self.recursive_diff.ignore_unset_values = True self.assertEqual(self.recursive_diff.changed(), ['a.c', 'a.e']) @@ -84,6 +83,7 @@ class RecursiveDictDifferTestCase(TestCase): {'a': {'c': 2, 'e': 'old_value', 'f': 'old_key', 'g': NONE}, 'h': NONE, 'i': NONE}) + def test_changes_str(self): self.assertEqual(self.recursive_diff.changes_str, 'a:\n' diff --git a/tests/unit/utils/test_vsan.py b/tests/unit/utils/test_vsan.py index 1c84198bb0..9d76d6dcae 100644 --- a/tests/unit/utils/test_vsan.py +++ b/tests/unit/utils/test_vsan.py @@ -259,7 +259,7 @@ class ReconfigureClusterVsanTestCase(TestCase): self.mock_cl_ref) def test_get_service_instance_call(self): - get_service_instance_from_managed_object_mock= MagicMock() + get_service_instance_from_managed_object_mock = MagicMock() with patch( 'salt.utils.vmware.get_service_instance_from_managed_object', get_service_instance_from_managed_object_mock): @@ -284,7 +284,7 @@ class ReconfigureClusterVsanTestCase(TestCase): 
exc = vim.fault.NoPermission() exc.privilegeId = 'Fake privilege' with patch('salt.utils.vsan.get_vsan_cluster_config_system', - MagicMock(return_value = MagicMock( + MagicMock(return_value=MagicMock( VsanClusterReconfig=MagicMock(side_effect=exc)))): with self.assertRaises(VMwareApiError) as excinfo: vsan.reconfigure_cluster_vsan(self.mock_cl_ref, @@ -297,7 +297,7 @@ class ReconfigureClusterVsanTestCase(TestCase): exc = vim.fault.VimFault() exc.msg = 'VimFault msg' with patch('salt.utils.vsan.get_vsan_cluster_config_system', - MagicMock(return_value = MagicMock( + MagicMock(return_value=MagicMock( VsanClusterReconfig=MagicMock(side_effect=exc)))): with self.assertRaises(VMwareApiError) as excinfo: vsan.reconfigure_cluster_vsan(self.mock_cl_ref, @@ -308,7 +308,7 @@ class ReconfigureClusterVsanTestCase(TestCase): exc = vmodl.RuntimeFault() exc.msg = 'VimRuntime msg' with patch('salt.utils.vsan.get_vsan_cluster_config_system', - MagicMock(return_value = MagicMock( + MagicMock(return_value=MagicMock( VsanClusterReconfig=MagicMock(side_effect=exc)))): with self.assertRaises(VMwareRuntimeError) as excinfo: vsan.reconfigure_cluster_vsan(self.mock_cl_ref, diff --git a/tests/unit/utils/vmware/test_license.py b/tests/unit/utils/vmware/test_license.py index 5ca92a705a..471cb0b4c1 100644 --- a/tests/unit/utils/vmware/test_license.py +++ b/tests/unit/utils/vmware/test_license.py @@ -11,7 +11,7 @@ import logging # Import Salt testing libraries from tests.support.unit import TestCase, skipIf -from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, call, \ +from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, \ PropertyMock @@ -550,7 +550,7 @@ class AssignLicenseTestCase(TestCase): def test_instance_uuid(self): mock_instance_uuid_prop = PropertyMock() type(self.mock_si.content.about).instanceUuid = mock_instance_uuid_prop - self.mock_lic_assign_mgr.UpdateAssignedLicense= MagicMock( + self.mock_lic_assign_mgr.UpdateAssignedLicense = 
MagicMock( return_value=[MagicMock(entityDisplayName='fake_vcenter')]) salt.utils.vmware.assign_license(self.mock_si, self.mock_lic_key, diff --git a/tests/unit/utils/vmware/test_storage.py b/tests/unit/utils/vmware/test_storage.py index 0b31abe3b2..43434225ae 100644 --- a/tests/unit/utils/vmware/test_storage.py +++ b/tests/unit/utils/vmware/test_storage.py @@ -391,7 +391,6 @@ class RenameDatastoreTestCase(TestCase): self.assertEqual(excinfo.exception.message, 'runtime_fault') def test_rename_datastore(self): - ret = salt.utils.vmware.rename_datastore(self.mock_ds_ref, - 'fake_new_name') + salt.utils.vmware.rename_datastore(self.mock_ds_ref, 'fake_new_name') self.mock_ds_ref.RenameDatastore.assert_called_once_with( 'fake_new_name') From aa26f0bb837a993c73880ca8ea8b069f0527479b Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 13:47:24 -0400 Subject: [PATCH 606/639] Fixed list_differ because of ignore_unset_values change in dictdiffer --- salt/utils/listdiffer.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/salt/utils/listdiffer.py b/salt/utils/listdiffer.py index d1cf3a8bd9..d0451766c9 100644 --- a/salt/utils/listdiffer.py +++ b/salt/utils/listdiffer.py @@ -224,21 +224,25 @@ class ListDictDiffer(object): changed = [] if selection == 'all': for recursive_item in self._get_recursive_difference(type='all'): + # We want the unset values as well + recursive_item.ignore_unset_values = False key_val = str(recursive_item.past_dict[self._key]) \ if self._key in recursive_item.past_dict \ else str(recursive_item.current_dict[self._key]) - for change in recursive_item.changed(ignore_unset_values=False): + for change in recursive_item.changed(): if change != self._key: changed.append('.'.join([self._key, key_val, change])) return changed elif selection == 'intersect': + # We want the unset values as well for recursive_item in self._get_recursive_difference(type='intersect'): + recursive_item.ignore_unset_values = False key_val 
= str(recursive_item.past_dict[self._key]) \ if self._key in recursive_item.past_dict \ else str(recursive_item.current_dict[self._key]) - for change in recursive_item.changed(ignore_unset_values=False): + for change in recursive_item.changed(): if change != self._key: changed.append('.'.join([self._key, key_val, change])) return changed From 819e015d333dd458eb451a36fccc542b61e16e2e Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 17:02:17 -0400 Subject: [PATCH 607/639] Added InvalidEntityError in salt.exceptions --- salt/exceptions.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/exceptions.py b/salt/exceptions.py index 6283ddf729..2ddbae3682 100644 --- a/salt/exceptions.py +++ b/salt/exceptions.py @@ -403,6 +403,12 @@ class ArgumentValueError(CommandExecutionError): ''' +class InvalidEntityError(CommandExecutionError): + ''' + Used when an entity fails validation + ''' + + # VMware related exceptions class VMwareSaltError(CommandExecutionError): ''' From 23f58c0ff36c28e0c981e41c894cbdaf016a2c72 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 17:07:17 -0400 Subject: [PATCH 608/639] Even more pylint --- salt/config/schemas/esxcluster.py | 6 +++--- salt/modules/vsphere.py | 17 +++++++++-------- salt/states/esxcluster.py | 4 ++-- tests/unit/utils/test_listdiffer.py | 2 -- 4 files changed, 14 insertions(+), 15 deletions(-) diff --git a/salt/config/schemas/esxcluster.py b/salt/config/schemas/esxcluster.py index a9a8e53b2f..c9fe62fd84 100644 --- a/salt/config/schemas/esxcluster.py +++ b/salt/config/schemas/esxcluster.py @@ -164,12 +164,12 @@ class ESXClusterEntitySchema(Schema): datacenter = StringItem(title='Datacenter', description='Specifies the cluster datacenter', required=True, - pattern='\w+') + pattern=r'\w+') cluster = StringItem(title='Cluster', description='Specifies the cluster name', required=True, - pattern='\w+') + pattern=r'\w+') class LicenseSchema(Schema): @@ -187,7 +187,7 @@ class 
LicenseSchema(Schema): additional_properties=StringItem( title='License Key', description='Specifies the license key', - pattern='^(\w{5}-\w{5}-\w{5}-\w{5}-\w{5})$')) + pattern=r'^(\w{5}-\w{5}-\w{5}-\w{5}-\w{5})$')) class EsxclusterProxySchema(Schema): diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index fccea9ebb1..ca3bc13b77 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -178,7 +178,8 @@ import salt.utils.path import salt.utils.vmware import salt.utils.vsan from salt.exceptions import CommandExecutionError, VMwareSaltError, \ - ArgumentValueError, InvalidConfigError, VMwareObjectRetrievalError + ArgumentValueError, InvalidConfigError, VMwareObjectRetrievalError, \ + VMwareApiError, InvalidEntityError from salt.utils.decorators import depends, ignores_kwargs from salt.config.schemas.esxcluster import ESXClusterConfigSchema, \ ESXClusterEntitySchema @@ -3988,7 +3989,7 @@ def create_cluster(cluster_dict, datacenter=None, cluster=None, if cluster_dict.get('vsan') and not \ salt.utils.vsan.vsan_supported(service_instance): - raise excs.VMwareApiError('VSAN operations are not supported') + raise VMwareApiError('VSAN operations are not supported') si = service_instance cluster_spec = vim.ClusterConfigSpecEx() vsan_spec = None @@ -3999,11 +4000,11 @@ def create_cluster(cluster_dict, datacenter=None, cluster=None, # is not supported before 60u2 vcenter vcenter_info = salt.utils.vmware.get_service_info(si) if float(vcenter_info.apiVersion) >= 6.0 and \ - int(vcenter_info.build) >= 3634794: # 60u2 + int(vcenter_info.build) >= 3634794: # 60u2 vsan_spec = vim.vsan.ReconfigSpec(modify=True) vsan_61 = False # We need to keep HA disabled and enable it afterwards - if cluster_dict.get('ha',{}).get('enabled'): + if cluster_dict.get('ha', {}).get('enabled'): enable_ha = True ha_config = cluster_dict['ha'] del cluster_dict['ha'] @@ -4101,7 +4102,7 @@ def update_cluster(cluster_dict, datacenter=None, cluster=None, # is not supported before 60u2 
vcenter vcenter_info = salt.utils.vmware.get_service_info(service_instance) if float(vcenter_info.apiVersion) >= 6.0 and \ - int(vcenter_info.build) >= 3634794: # 60u2 + int(vcenter_info.build) >= 3634794: # 60u2 vsan_61 = False vsan_info = salt.utils.vsan.get_cluster_vsan_info(cluster_ref) vsan_spec = vim.vsan.ReconfigSpec(modify=True) @@ -4213,8 +4214,8 @@ def list_datastores_via_proxy(datastore_names=None, backing_disk_ids=None, 'free_space': ds.summary.freeSpace, 'capacity': ds.summary.capacity} backing_disk_ids = [] - for vol in [i.volume for i in mount_infos if \ - i.volume.name == ds.name and \ + for vol in [i.volume for i in mount_infos if + i.volume.name == ds.name and isinstance(i.volume, vim.HostVmfsVolume)]: backing_disk_ids.extend([e.diskName for e in vol.extent]) @@ -4375,7 +4376,7 @@ def _validate_entity(entity): try: jsonschema.validate(entity, schema) except jsonschema.exceptions.ValidationError as exc: - raise excs.InvalidEntityError(exc) + raise InvalidEntityError(exc) @depends(HAS_PYVMOMI) diff --git a/salt/states/esxcluster.py b/salt/states/esxcluster.py index 0417ac3281..ed4cd1af6b 100644 --- a/salt/states/esxcluster.py +++ b/salt/states/esxcluster.py @@ -46,7 +46,7 @@ import traceback import sys # Import Salt Libs -from salt.exceptions CommandExecutionError +from salt.exceptions import CommandExecutionError from salt.utils.dictdiffer import recursive_diff from salt.utils.listdiffer import list_diff from salt.config.schemas.esxcluster import ESXClusterConfigSchema, \ @@ -107,7 +107,7 @@ def cluster_configured(name, cluster_config): Complex datastructure following the ESXClusterConfigSchema. Valid example is: -.. code-block::yaml + .. 
code-block::yaml drs: default_vm_behavior: fullyAutomated diff --git a/tests/unit/utils/test_listdiffer.py b/tests/unit/utils/test_listdiffer.py index cd5e626f4e..ae8288c81c 100644 --- a/tests/unit/utils/test_listdiffer.py +++ b/tests/unit/utils/test_listdiffer.py @@ -2,7 +2,6 @@ # Import python libs from __future__ import absolute_import -import copy # Import Salt Testing libs from tests.support.unit import TestCase @@ -80,7 +79,6 @@ class ListDictDifferTestCase(TestCase): '\tidentified by key 5:\n' '\twill be added\n') - def test_changes_str2(self): self.assertEqual(self.list_diff.changes_str2, ' key=2 (updated):\n' From 491faaa2abab66cbcbd9374cb6cd0349c2183b0d Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Wed, 20 Sep 2017 17:20:39 -0400 Subject: [PATCH 609/639] Review changes --- salt/config/schemas/esxcluster.py | 2 +- salt/states/esxcluster.py | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/salt/config/schemas/esxcluster.py b/salt/config/schemas/esxcluster.py index c9fe62fd84..ba88357cf7 100644 --- a/salt/config/schemas/esxcluster.py +++ b/salt/config/schemas/esxcluster.py @@ -78,7 +78,7 @@ class HAConfigItem(ComplexSchemaItem): admission_control_policy = AdmissionControlPolicyItem() default_vm_settings = DefaultVmSettingsItem() hb_ds_candidate_policy = StringItem( - title='Hartbeat Datastore Candidate Policy', + title='Heartbeat Datastore Candidate Policy', enum=['allFeasibleDs', 'allFeasibleDsWithUserPreference', 'userSelectedDs']) host_monitoring = StringItem(title='Host Monitoring', diff --git a/salt/states/esxcluster.py b/salt/states/esxcluster.py index ed4cd1af6b..2e1da41846 100644 --- a/salt/states/esxcluster.py +++ b/salt/states/esxcluster.py @@ -270,12 +270,11 @@ def cluster_configured(name, cluster_config): def vsan_datastore_configured(name, datastore_name): ''' - Configures the cluster's vsan_datastore + Configures the cluster's VSAN datastore - WARNING: vsan datastores will not exist until there is at least on 
host in - the cluster; the state assumes that the datastore exists and errors out if i - it doesn't; it's up to the user to accept the error or enable the state run - when de datastore does exist (grain: vsan_datastore_exists) + WARNING: The VSAN datastore is created automatically after the first + ESXi host is added to the cluster; the state assumes that the datastore + exists and errors if it doesn't. ''' cluster_name, datacenter_name = \ From 2f3be5785b85160fa130714441bf69fb19bd18f9 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Wed, 20 Sep 2017 15:55:37 -0600 Subject: [PATCH 610/639] there were more --- tests/unit/utils/test_schema.py | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/tests/unit/utils/test_schema.py b/tests/unit/utils/test_schema.py index 108afcf458..edf8841b6b 100644 --- a/tests/unit/utils/test_schema.py +++ b/tests/unit/utils/test_schema.py @@ -274,9 +274,8 @@ class ConfigTestCase(TestCase): }, 'ssh_key_names': { 'type': 'string', - 'description': 'The names of an SSH key being managed on Digital ' - 'Ocean account which will be used to authenticate ' - 'on the deployed VMs', + 'description': 'The names of an SSH key being managed on DigitalOcean ' + 'account which will be used to authenticate on the deployed VMs', 'title': 'SSH Key Names' } }, @@ -329,9 +328,8 @@ class ConfigTestCase(TestCase): }, 'ssh_key_names': { 'type': 'string', - 'description': 'The names of an SSH key being managed on Digital ' - 'Ocean account which will be used to authenticate ' - 'on the deployed VMs', + 'description': 'The names of an SSH key being managed on DigitalOcean ' + 'account which will be used to authenticate on the deployed VMs', 'title': 'SSH Key Names' } }, @@ -402,9 +400,8 @@ class ConfigTestCase(TestCase): }, 'ssh_key_names': { 'type': 'string', - 'description': 'The names of an SSH key being managed on Digital ' - 'Ocean account which will be used to authenticate ' - 'on the deployed VMs', + 'description': 
'The names of an SSH key being managed on DigitalOcean ' + 'account which will be used to authenticate on the deployed VMs', 'title': 'SSH Key Names' }, 'ssh_key_file_2': { @@ -415,9 +412,8 @@ class ConfigTestCase(TestCase): }, 'ssh_key_names_2': { 'type': 'string', - 'description': 'The names of an SSH key being managed on Digital ' - 'Ocean account which will be used to authenticate ' - 'on the deployed VMs', + 'description': 'The names of an SSH key being managed on DigitalOcean ' + 'account which will be used to authenticate on the deployed VMs', 'title': 'SSH Key Names' } }, From 69049c1da63ca11732c7cd8d33017713d6af44dc Mon Sep 17 00:00:00 2001 From: Tom Williams Date: Tue, 19 Sep 2017 17:10:57 -0400 Subject: [PATCH 611/639] INFRA-5461 - more boto_elb AWS rate limiting fixes --- salt/modules/boto_elb.py | 74 +++++++++++++++++++++------------------- 1 file changed, 39 insertions(+), 35 deletions(-) diff --git a/salt/modules/boto_elb.py b/salt/modules/boto_elb.py index 9b300d368f..73d6ebf16b 100644 --- a/salt/modules/boto_elb.py +++ b/salt/modules/boto_elb.py @@ -161,10 +161,9 @@ def get_elb_config(name, region=None, key=None, keyid=None, profile=None): salt myminion boto_elb.exists myelb region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) - wait = 60 - orig_wait = wait + retries = 30 - while True: + while retries: try: lb = conn.get_all_load_balancers(load_balancer_names=[name]) lb = lb[0] @@ -205,16 +204,15 @@ def get_elb_config(name, region=None, key=None, keyid=None, profile=None): ret['policies'] = policies return ret except boto.exception.BotoServerError as error: - if getattr(error, 'error_code', '') == 'Throttling': - if wait > 0: - sleep = wait if wait % 5 == wait else 5 - log.info('Throttled by AWS API, will retry in 5 seconds.') - time.sleep(sleep) - wait -= sleep - continue - log.error('API still throttling us after {0} seconds!'.format(orig_wait)) + if error.error_code == 'Throttling': + log.debug('Throttled 
by AWS API, will retry in 5 seconds.') + time.sleep(5) + retries -= 1 + continue + log.error('Error fetching config for ELB {0}: {1}'.format(name, error.message)) log.error(error) return {} + return {} def listener_dict_to_tuple(listener): @@ -515,31 +513,37 @@ def get_attributes(name, region=None, key=None, keyid=None, profile=None): salt myminion boto_elb.get_attributes myelb ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) + retries = 30 - try: - lbattrs = conn.get_all_lb_attributes(name) - ret = odict.OrderedDict() - ret['access_log'] = odict.OrderedDict() - ret['cross_zone_load_balancing'] = odict.OrderedDict() - ret['connection_draining'] = odict.OrderedDict() - ret['connecting_settings'] = odict.OrderedDict() - al = lbattrs.access_log - czlb = lbattrs.cross_zone_load_balancing - cd = lbattrs.connection_draining - cs = lbattrs.connecting_settings - ret['access_log']['enabled'] = al.enabled - ret['access_log']['s3_bucket_name'] = al.s3_bucket_name - ret['access_log']['s3_bucket_prefix'] = al.s3_bucket_prefix - ret['access_log']['emit_interval'] = al.emit_interval - ret['cross_zone_load_balancing']['enabled'] = czlb.enabled - ret['connection_draining']['enabled'] = cd.enabled - ret['connection_draining']['timeout'] = cd.timeout - ret['connecting_settings']['idle_timeout'] = cs.idle_timeout - return ret - except boto.exception.BotoServerError as error: - log.debug(error) - log.error('ELB {0} does not exist: {1}'.format(name, error)) - return {} + while retries: + try: + lbattrs = conn.get_all_lb_attributes(name) + ret = odict.OrderedDict() + ret['access_log'] = odict.OrderedDict() + ret['cross_zone_load_balancing'] = odict.OrderedDict() + ret['connection_draining'] = odict.OrderedDict() + ret['connecting_settings'] = odict.OrderedDict() + al = lbattrs.access_log + czlb = lbattrs.cross_zone_load_balancing + cd = lbattrs.connection_draining + cs = lbattrs.connecting_settings + ret['access_log']['enabled'] = al.enabled + 
ret['access_log']['s3_bucket_name'] = al.s3_bucket_name + ret['access_log']['s3_bucket_prefix'] = al.s3_bucket_prefix + ret['access_log']['emit_interval'] = al.emit_interval + ret['cross_zone_load_balancing']['enabled'] = czlb.enabled + ret['connection_draining']['enabled'] = cd.enabled + ret['connection_draining']['timeout'] = cd.timeout + ret['connecting_settings']['idle_timeout'] = cs.idle_timeout + return ret + except boto.exception.BotoServerError as error: + if e.error_code == 'Throttling': + log.debug("Throttled by AWS API, will retry in 5 seconds...") + time.sleep(5) + retries -= 1 + continue + log.error('ELB {0} does not exist: {1}'.format(name, error.message)) + return {} def set_attributes(name, attributes, region=None, key=None, keyid=None, From 45096a5d5635a33b67b1f8204b377ab16c0785e7 Mon Sep 17 00:00:00 2001 From: Tom Williams Date: Tue, 19 Sep 2017 17:14:25 -0400 Subject: [PATCH 612/639] INFRA-5461 - sanity check return --- salt/modules/boto_elb.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/modules/boto_elb.py b/salt/modules/boto_elb.py index 73d6ebf16b..e026622e32 100644 --- a/salt/modules/boto_elb.py +++ b/salt/modules/boto_elb.py @@ -544,6 +544,7 @@ def get_attributes(name, region=None, key=None, keyid=None, profile=None): continue log.error('ELB {0} does not exist: {1}'.format(name, error.message)) return {} + return {} def set_attributes(name, attributes, region=None, key=None, keyid=None, From d2106f79bbd3f4747d01bd3bf7a307bfeb1178e4 Mon Sep 17 00:00:00 2001 From: Tom Williams Date: Wed, 20 Sep 2017 21:10:44 -0400 Subject: [PATCH 613/639] INFRA-5461 - Use the same var name in both places, Tom :) --- salt/modules/boto_elb.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/modules/boto_elb.py b/salt/modules/boto_elb.py index e026622e32..6f6bb4c6e9 100644 --- a/salt/modules/boto_elb.py +++ b/salt/modules/boto_elb.py @@ -536,13 +536,13 @@ def get_attributes(name, region=None, key=None, keyid=None, 
profile=None): ret['connection_draining']['timeout'] = cd.timeout ret['connecting_settings']['idle_timeout'] = cs.idle_timeout return ret - except boto.exception.BotoServerError as error: + except boto.exception.BotoServerError as e: if e.error_code == 'Throttling': log.debug("Throttled by AWS API, will retry in 5 seconds...") time.sleep(5) retries -= 1 continue - log.error('ELB {0} does not exist: {1}'.format(name, error.message)) + log.error('ELB {0} does not exist: {1}'.format(name, e.message)) return {} return {} From 5f47ef721c9c9f11559115694ee7e2acac5f849a Mon Sep 17 00:00:00 2001 From: Raymond Piller Date: Wed, 20 Sep 2017 21:41:08 -0500 Subject: [PATCH 614/639] Salt Repo has Deb 9 and 8 --- doc/topics/installation/debian.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/topics/installation/debian.rst b/doc/topics/installation/debian.rst index 36a47fa8ff..369991ebaa 100644 --- a/doc/topics/installation/debian.rst +++ b/doc/topics/installation/debian.rst @@ -18,7 +18,7 @@ Installation from official Debian and Raspbian repositories is described Installation from the Official SaltStack Repository =================================================== -Packages for Debian 8 (Jessie) and Debian 7 (Wheezy) are available in the +Packages for Debian 9 (Stretch) and Debian 8 (Jessie) are available in the Official SaltStack repository. Instructions are at https://repo.saltstack.com/#debian. From 5d45329111e12ea11c1868a33fdc3d24c10f5597 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 21 Sep 2017 00:05:10 -0500 Subject: [PATCH 615/639] salt.utils.gitfs: Enforce fetch URL along with refspecs and http.sslVerify This fixes a corner case in which someone is using the `name` config param for a given gitfs/git_pillar remote, and then changes the URL for that remote (for instance, between https and ssh). 
We've simply never enforced the fetch URL in the git config for a given remote's cachedir, since the cachedir is typically determined by hashing the URL (or branch + URL for git_pillar). In those cases, changing the URL changes the cachedir path, and results in a new repo being init'ed and the correct URL being added to the git config as part of the initialization. But, when using the `name` param, the path to the cachedir would remain constant no matter what the URL is. This means that when the URL is changed in the gitfs/git_pillar config, it isn't actually updated in the git config file for that cachedir. With this change, the new GitConfigParser is used to examine the fetch URL and update it if necessary. --- salt/utils/gitfs.py | 39 ++++++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py index 53482c4ccb..471ce74707 100644 --- a/salt/utils/gitfs.py +++ b/salt/utils/gitfs.py @@ -618,20 +618,41 @@ class GitProvider(object): log.error('Failed to read from git config file %s', git_config) else: # We are currently enforcing the following git config items: - # 1. refspecs used in fetch - # 2. http.sslVerify + # 1. Fetch URL + # 2. refspecs used in fetch + # 3. http.sslVerify conf_changed = False + remote_section = 'remote "origin"' - # 1. refspecs + # 1. 
URL try: - refspecs = sorted( - conf.get('remote "origin"', 'fetch', as_list=True)) + url = conf.get(remote_section, 'url') except salt.utils.configparser.NoSectionError: # First time we've init'ed this repo, we need to add the # section for the remote to the git config - conf.add_section('remote "origin"') - conf.set('remote "origin"', 'url', self.url) + conf.add_section(remote_section) conf_changed = True + url = None + log.debug( + 'Current fetch URL for %s remote \'%s\': %s (desired: %s)', + self.role, self.id, url, self.url + ) + if url != self.url: + conf.set(remote_section, 'url', self.url) + log.debug( + 'Fetch URL for %s remote \'%s\' set to %s', + self.role, self.id, self.url + ) + conf_changed = True + + # 2. refspecs + try: + refspecs = sorted( + conf.get(remote_section, 'fetch', as_list=True)) + except salt.utils.configparser.NoOptionError: + # No 'fetch' option present in the remote section. Should never + # happen, but if it does for some reason, don't let it cause a + # traceback. refspecs = [] desired_refspecs = sorted(self.refspecs) log.debug( @@ -639,14 +660,14 @@ class GitProvider(object): self.role, self.id, refspecs, desired_refspecs ) if refspecs != desired_refspecs: - conf.set_multivar('remote "origin"', 'fetch', self.refspecs) + conf.set_multivar(remote_section, 'fetch', self.refspecs) log.debug( 'Refspecs for %s remote \'%s\' set to %s', self.role, self.id, desired_refspecs ) conf_changed = True - # 2. http.sslVerify + # 3. 
http.sslVerify try: ssl_verify = conf.get('http', 'sslVerify') except salt.utils.configparser.NoSectionError: From 1706c24add21a35728353c3155e33f86ac9ede98 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 21 Sep 2017 04:14:59 -0400 Subject: [PATCH 616/639] Imported salt.exceptions in salt.states.esxcluster --- salt/states/esxcluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/esxcluster.py b/salt/states/esxcluster.py index 2e1da41846..4a752326e3 100644 --- a/salt/states/esxcluster.py +++ b/salt/states/esxcluster.py @@ -46,7 +46,7 @@ import traceback import sys # Import Salt Libs -from salt.exceptions import CommandExecutionError +from salt.exceptions from salt.utils.dictdiffer import recursive_diff from salt.utils.listdiffer import list_diff from salt.config.schemas.esxcluster import ESXClusterConfigSchema, \ From ee0dcd7b6741a32244ad6b671e93e9b28a971298 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 21 Sep 2017 05:01:19 -0400 Subject: [PATCH 617/639] More pylint --- salt/states/esxcluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/esxcluster.py b/salt/states/esxcluster.py index 4a752326e3..4152c6f6df 100644 --- a/salt/states/esxcluster.py +++ b/salt/states/esxcluster.py @@ -46,7 +46,7 @@ import traceback import sys # Import Salt Libs -from salt.exceptions +import salt.exceptions from salt.utils.dictdiffer import recursive_diff from salt.utils.listdiffer import list_diff from salt.config.schemas.esxcluster import ESXClusterConfigSchema, \ From 4a35fe5019da5d2fc440438af35c6b97c9f198fa Mon Sep 17 00:00:00 2001 From: Silvio Moioli Date: Wed, 20 Sep 2017 14:32:47 +0200 Subject: [PATCH 618/639] multiprocessing minion option: documentation fixes --- doc/man/salt.7 | 1 + doc/ref/configuration/minion.rst | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/doc/man/salt.7 b/doc/man/salt.7 index d6cfe937a1..86c463b771 100644 --- a/doc/man/salt.7 +++ 
b/doc/man/salt.7 @@ -10795,6 +10795,7 @@ cmd_whitelist_glob: .UNINDENT .UNINDENT .SS Thread Settings +.SS \fBmultiprocessing\fP .sp Default: \fBTrue\fP .sp diff --git a/doc/ref/configuration/minion.rst b/doc/ref/configuration/minion.rst index e5779d7584..3438bfca03 100644 --- a/doc/ref/configuration/minion.rst +++ b/doc/ref/configuration/minion.rst @@ -2404,11 +2404,14 @@ Thread Settings .. conf_minion:: multiprocessing +``multiprocessing`` +------- + Default: ``True`` -If `multiprocessing` is enabled when a minion receives a +If ``multiprocessing`` is enabled when a minion receives a publication a new process is spawned and the command is executed therein. -Conversely, if `multiprocessing` is disabled the new publication will be run +Conversely, if ``multiprocessing`` is disabled the new publication will be run executed in a thread. From cbefa4d3fdd5119af47fa2523a72d5c576d23f1c Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 21 Sep 2017 06:42:52 -0400 Subject: [PATCH 619/639] Fixed except statement --- salt/states/esxcluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/esxcluster.py b/salt/states/esxcluster.py index 4152c6f6df..aa1ad881cd 100644 --- a/salt/states/esxcluster.py +++ b/salt/states/esxcluster.py @@ -258,7 +258,7 @@ def cluster_configured(name, cluster_config): 'comment': '\n'.join(comments), 'changes': changes}) return ret - except CommandExecutionError as exc: + except salt.exceptions.CommandExecutionError as exc: log.error('Error: {0}\n{1}'.format(exc, traceback.format_exc())) if si: __salt__['vsphere.disconnect'](si) From fda66f3e9265146fa11a706b68bfaadd11451095 Mon Sep 17 00:00:00 2001 From: Joaquin Veira Date: Thu, 21 Sep 2017 13:38:27 +0200 Subject: [PATCH 620/639] corrected indentation --- salt/returners/zabbix_return.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/salt/returners/zabbix_return.py b/salt/returners/zabbix_return.py index
bdf94d9749..a5e79ca8e0 100644 --- a/salt/returners/zabbix_return.py +++ b/salt/returners/zabbix_return.py @@ -57,20 +57,20 @@ def zbx(): def zabbix_send(key, host, output): with salt.utils.fopen(zbx()['zabbix_config'],'r') as file_handle: - for line in file_handle: - if "ServerActive" in line: - flag = "true" - server = line.rsplit('=') - server = server[1].rsplit(',') - for s in server: - cmd = zbx()['sender'] + " -z " + s.replace('\n','') + " -s " + host + " -k " + key + " -o \"" + output +"\"" - __salt__['cmd.shell'](cmd) - break - else: - flag = "false" - if flag == 'false': - cmd = zbx()['sender'] + " -c " + zbx()['config'] + " -s " + host + " -k " + key + " -o \"" + output +"\"" - f.close() + for line in file_handle: + if "ServerActive" in line: + flag = "true" + server = line.rsplit('=') + server = server[1].rsplit(',') + for s in server: + cmd = zbx()['sender'] + " -z " + s.replace('\n','') + " -s " + host + " -k " + key + " -o \"" + output +"\"" + __salt__['cmd.shell'](cmd) + break + else: + flag = "false" + if flag == 'false': + cmd = zbx()['sender'] + " -c " + zbx()['config'] + " -s " + host + " -k " + key + " -o \"" + output +"\"" + file_handle.close() def returner(ret): From bd88c375d876d8d329e0011277f9a9de3e4713d5 Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 21 Sep 2017 11:51:15 -0400 Subject: [PATCH 621/639] Add autodoc file for new purefa execution module --- doc/ref/modules/all/index.rst | 1 + doc/ref/modules/all/salt.modules.purefa.rst | 6 ++++++ salt/modules/purefa.py | 3 +++ 3 files changed, 10 insertions(+) create mode 100644 doc/ref/modules/all/salt.modules.purefa.rst diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst index c878eeaa35..b9f4b3f35c 100644 --- a/doc/ref/modules/all/index.rst +++ b/doc/ref/modules/all/index.rst @@ -327,6 +327,7 @@ execution modules ps publish puppet + purefa pushbullet pushover_notify pw_group diff --git a/doc/ref/modules/all/salt.modules.purefa.rst 
b/doc/ref/modules/all/salt.modules.purefa.rst new file mode 100644 index 0000000000..1a1b80ce1e --- /dev/null +++ b/doc/ref/modules/all/salt.modules.purefa.rst @@ -0,0 +1,6 @@ +=================== +salt.modules.purefa +=================== + +.. automodule:: salt.modules.purefa + :members: diff --git a/salt/modules/purefa.py b/salt/modules/purefa.py index c604839752..14beb37bef 100644 --- a/salt/modules/purefa.py +++ b/salt/modules/purefa.py @@ -31,6 +31,9 @@ Installation Prerequisites :maturity: new :requires: purestorage :platform: all + +.. versionadded:: Oxygen + ''' # Import Python libs From 5f30477a6abe3c36cb07fd44e977f21fe1b6b116 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 21 Sep 2017 11:54:55 -0400 Subject: [PATCH 622/639] Added python/pyvmomi compatibility check to salt.states.esxcluster --- salt/states/esxcluster.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/salt/states/esxcluster.py b/salt/states/esxcluster.py index aa1ad881cd..508bb800ea 100644 --- a/salt/states/esxcluster.py +++ b/salt/states/esxcluster.py @@ -60,12 +60,29 @@ try: except ImportError: HAS_JSONSCHEMA = False +try: + from pyVmomi import VmomiSupport + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + # Get Logging Started log = logging.getLogger(__name__) def __virtual__(): - return HAS_JSONSCHEMA + if not HAS_JSONSCHEMA: + return False, 'State module did not load: jsonschema not found' + if not HAS_PYVMOMI: + return False, 'State module did not load: pyVmomi not found' + + # We check the supported vim versions to infer the pyVmomi version + if 'vim25/6.0' in VmomiSupport.versionMap and \ + sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): + + return False, ('State module did not load: Incompatible versions ' + 'of Python and pyVmomi present. 
See Issue #29537.') + return True def mod_init(low): From 2f55841037c6632f6e49193ad2d052b4288d92f9 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 21 Sep 2017 12:13:08 -0400 Subject: [PATCH 623/639] Added python/pyvmomi compatibility check to salt.modules.vsphere --- salt/modules/vsphere.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index ca3bc13b77..0d02de55fd 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -193,7 +193,7 @@ except ImportError: HAS_JSONSCHEMA = False try: - from pyVmomi import vim, vmodl + from pyVmomi import vim, vmodl, VmomiSupport HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False @@ -211,6 +211,17 @@ __proxyenabled__ = ['esxi', 'esxcluster', 'esxdatacenter'] def __virtual__(): + if not HAS_JSONSCHEMA: + return False, 'Execution module did not load: jsonschema not found' + if not HAS_PYVMOMI: + return False, 'Execution module did not load: pyVmomi not found' + + # We check the supported vim versions to infer the pyVmomi version + if 'vim25/6.0' in VmomiSupport.versionMap and \ + sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): + + return False, ('Execution module did not load: Incompatible versions ' + 'of Python and pyVmomi present. See Issue #29537.') return __virtualname__ From f46afa0fa93aa7333bb389540e8219e2393d46e8 Mon Sep 17 00:00:00 2001 From: Alexandru Bleotu Date: Thu, 21 Sep 2017 12:15:40 -0400 Subject: [PATCH 624/639] Removed references to Pthon 2.6 as it's no longer supported in Salt --- salt/modules/vsphere.py | 4 ++-- salt/states/esxcluster.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 0d02de55fd..d6aabb74e4 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -24,8 +24,8 @@ PyVmomi can be installed via pip: .. 
note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain - versions of Python. If using version 6.0 of pyVmomi, Python 2.6, - Python 2.7.9, or newer must be present. This is due to an upstream dependency + versions of Python. If using version 6.0 of pyVmomi, Python 2.7.9, + or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. diff --git a/salt/states/esxcluster.py b/salt/states/esxcluster.py index 508bb800ea..77b6eb0ec6 100644 --- a/salt/states/esxcluster.py +++ b/salt/states/esxcluster.py @@ -20,8 +20,8 @@ PyVmomi can be installed via pip: .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain - versions of Python. If using version 6.0 of pyVmomi, Python 2.6, - Python 2.7.9, or newer must be present. This is due to an upstream dependency + versions of Python. If using version 6.0 of pyVmomi, Python 2.7.9, + or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. From fd1d89d3852a71571ec87c9cc52f6a1156c1db6a Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 14 Sep 2017 18:51:40 -0500 Subject: [PATCH 625/639] Reduce unnecessary file downloading in archive/file states The file.managed state, which is used by the archive.extracted state to download the source archive, at some point recently was modified to clear the file from the minion cache. This caused unnecessary re-downloading on subsequent runs, which slows down states considerably when dealing with larger archives. This commit makes the following changes to improve this: 1. 
The fileclient now accepts a `source_hash` argument, which will cause the client's get_url function to skip downloading http(s) and ftp files if the file is already cached, and its hash matches the passed hash. This argument has also been added to the `cp.get_url` and `cp.cache_file` function. 2. We no longer try to download the file when it's an http(s) or ftp URL when running `file.source_list`. 3. Where `cp.cache_file` is used, we pass the `source_hash` if it is available. 4. A `cache_source` argument has been added to the `file.managed` state, defaulting to `True`. This is now used to control whether or not the source file is cleared from the minion cache when the state completes. 5. Two new states (`file.cached` and `file.not_cached`) have been added to managed files in the minion cache. In addition, the `archive.extracted` state has been modified in the following ways: 1. For consistency with `file.managed`, a `cache_source` argument has been added. This also deprecates `keep`. If `keep` is used, `cache_source` assumes its value, and a warning is added to the state return to let the user know to update their SLS. 2. The variable name `cached_source` (used internally in the `archive.extracted` state) has been renamed to `cached` to reduce confusion with the new `cache_source` argument. 3. The new `file.cached` and `file.not_cached` states are now used to manage the source tarball instead of `file.managed`. This improves disk usage and reduces unnecessary complexity in the state as we no longer keep a copy of the archive in a separate location within the cachedir. We now only use the copy downloaded using `cp.cache_file` within the `file.cached` state. This change has also necessitated a new home for hash files tracked by the `source_hash_update` argument, in a subdirectory of the minion cachedir called `archive_hash`. 
--- salt/fileclient.py | 25 +++- salt/modules/archive.py | 31 +++- salt/modules/cp.py | 28 +++- salt/modules/file.py | 69 +++++---- salt/states/archive.py | 204 +++++++++++-------------- salt/states/file.py | 323 ++++++++++++++++++++++++++++++++++++++-- salt/utils/files.py | 22 +++ 7 files changed, 531 insertions(+), 171 deletions(-) diff --git a/salt/fileclient.py b/salt/fileclient.py index cb3b210a03..35c63b2cb1 100644 --- a/salt/fileclient.py +++ b/salt/fileclient.py @@ -185,12 +185,13 @@ class Client(object): ''' raise NotImplementedError - def cache_file(self, path, saltenv=u'base', cachedir=None): + def cache_file(self, path, saltenv=u'base', cachedir=None, source_hash=None): ''' Pull a file down from the file server and store it in the minion file cache ''' - return self.get_url(path, u'', True, saltenv, cachedir=cachedir) + return self.get_url( + path, u'', True, saltenv, cachedir=cachedir, source_hash=source_hash) def cache_files(self, paths, saltenv=u'base', cachedir=None): ''' @@ -470,7 +471,7 @@ class Client(object): return ret def get_url(self, url, dest, makedirs=False, saltenv=u'base', - no_cache=False, cachedir=None): + no_cache=False, cachedir=None, source_hash=None): ''' Get a single file from a URL. ''' @@ -525,6 +526,18 @@ class Client(object): return u'' elif not no_cache: dest = self._extrn_path(url, saltenv, cachedir=cachedir) + if source_hash is not None: + try: + source_hash = source_hash.split('=')[-1] + form = salt.utils.files.HASHES_REVMAP[len(source_hash)] + if salt.utils.get_hash(dest, form) == source_hash: + log.debug( + 'Cached copy of %s (%s) matches source_hash %s, ' + 'skipping download', url, dest, source_hash + ) + return dest + except (AttributeError, KeyError, IOError, OSError): + pass destdir = os.path.dirname(dest) if not os.path.isdir(destdir): os.makedirs(destdir) @@ -532,7 +545,9 @@ class Client(object): if url_data.scheme == u's3': try: def s3_opt(key, default=None): - u'''Get value of s3. 
from Minion config or from Pillar''' + ''' + Get value of s3. from Minion config or from Pillar + ''' if u's3.' + key in self.opts: return self.opts[u's3.' + key] try: @@ -785,7 +800,7 @@ class Client(object): def _extrn_path(self, url, saltenv, cachedir=None): ''' - Return the extn_filepath for a given url + Return the extrn_filepath for a given url ''' url_data = urlparse(url) if salt.utils.platform.is_windows(): diff --git a/salt/modules/archive.py b/salt/modules/archive.py index 70ef0bdecc..7d627f7fdb 100644 --- a/salt/modules/archive.py +++ b/salt/modules/archive.py @@ -60,7 +60,8 @@ def list_(name, strip_components=None, clean=False, verbose=False, - saltenv='base'): + saltenv='base', + source_hash=None): ''' .. versionadded:: 2016.11.0 .. versionchanged:: 2016.11.2 @@ -149,6 +150,14 @@ def list_(name, ``archive``. This is only applicable when ``archive`` is a file from the ``salt://`` fileserver. + source_hash + If ``name`` is an http(s)/ftp URL and the file exists in the minion's + file cache, this option can be passed to keep the minion from + re-downloading the archive if the cached copy matches the specified + hash. + + .. versionadded:: Oxygen + .. _tarfile: https://docs.python.org/2/library/tarfile.html .. 
_xz: http://tukaani.org/xz/ @@ -160,6 +169,7 @@ def list_(name, salt '*' archive.list /path/to/myfile.tar.gz strip_components=1 salt '*' archive.list salt://foo.tar.gz salt '*' archive.list https://domain.tld/myfile.zip + salt '*' archive.list https://domain.tld/myfile.zip source_hash=f1d2d2f924e986ac86fdf7b36c94bcdf32beec15 salt '*' archive.list ftp://10.1.2.3/foo.rar ''' def _list_tar(name, cached, decompress_cmd, failhard=False): @@ -309,7 +319,7 @@ def list_(name, ) return dirs, files, [] - cached = __salt__['cp.cache_file'](name, saltenv) + cached = __salt__['cp.cache_file'](name, saltenv, source_hash=source_hash) if not cached: raise CommandExecutionError('Failed to cache {0}'.format(name)) @@ -1094,7 +1104,7 @@ def unzip(zip_file, return _trim_files(cleaned_files, trim_output) -def is_encrypted(name, clean=False, saltenv='base'): +def is_encrypted(name, clean=False, saltenv='base', source_hash=None): ''' .. versionadded:: 2016.11.0 @@ -1113,6 +1123,18 @@ def is_encrypted(name, clean=False, saltenv='base'): If there is an error listing the archive's contents, the cached file will not be removed, to allow for troubleshooting. + saltenv : base + Specifies the fileserver environment from which to retrieve + ``archive``. This is only applicable when ``archive`` is a file from + the ``salt://`` fileserver. + + source_hash + If ``name`` is an http(s)/ftp URL and the file exists in the minion's + file cache, this option can be passed to keep the minion from + re-downloading the archive if the cached copy matches the specified + hash. + + .. 
versionadded:: Oxygen CLI Examples: @@ -1122,9 +1144,10 @@ def is_encrypted(name, clean=False, saltenv='base'): salt '*' archive.is_encrypted salt://foo.zip salt '*' archive.is_encrypted salt://foo.zip saltenv=dev salt '*' archive.is_encrypted https://domain.tld/myfile.zip clean=True + salt '*' archive.is_encrypted https://domain.tld/myfile.zip source_hash=f1d2d2f924e986ac86fdf7b36c94bcdf32beec15 salt '*' archive.is_encrypted ftp://10.1.2.3/foo.zip ''' - cached = __salt__['cp.cache_file'](name, saltenv) + cached = __salt__['cp.cache_file'](name, saltenv, source_hash=source_hash) if not cached: raise CommandExecutionError('Failed to cache {0}'.format(name)) diff --git a/salt/modules/cp.py b/salt/modules/cp.py index 86634d559c..cdbeb4434e 100644 --- a/salt/modules/cp.py +++ b/salt/modules/cp.py @@ -352,7 +352,7 @@ def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): return _client().get_dir(path, dest, saltenv, gzip) -def get_url(path, dest='', saltenv='base', makedirs=False): +def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: Oxygen ``dest`` can now be a directory @@ -386,6 +386,13 @@ def get_url(path, dest='', saltenv='base', makedirs=False): Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. + source_hash + If ``path`` is an http(s) or ftp URL and the file exists in the + minion's file cache, this option can be passed to keep the minion from + re-downloading the file if the cached copy matches the specified hash. + + .. versionadded:: Oxygen + CLI Example: .. 
code-block:: bash @@ -394,9 +401,11 @@ def get_url(path, dest='', saltenv='base', makedirs=False): salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): - result = _client().get_url(path, dest, makedirs, saltenv) + result = _client().get_url( + path, dest, makedirs, saltenv, source_hash=source_hash) else: - result = _client().get_url(path, None, makedirs, saltenv, no_cache=True) + result = _client().get_url( + path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error( 'Unable to fetch file {0} from saltenv {1}.'.format( @@ -429,11 +438,18 @@ def get_file_str(path, saltenv='base'): return fn_ -def cache_file(path, saltenv='base'): +def cache_file(path, saltenv='base', source_hash=None): ''' Used to cache a single file on the Minion - Returns the location of the new cached file on the Minion. + Returns the location of the new cached file on the Minion + + source_hash + If ``name`` is an http(s) or ftp URL and the file exists in the + minion's file cache, this option can be passed to keep the minion from + re-downloading the file if the cached copy matches the specified hash. + + .. 
versionadded:: Oxygen CLI Example: @@ -485,7 +501,7 @@ def cache_file(path, saltenv='base'): if senv: saltenv = senv - result = _client().cache_file(path, saltenv) + result = _client().cache_file(path, saltenv, source_hash=source_hash) if not result: log.error( u'Unable to cache file \'%s\' from saltenv \'%s\'.', diff --git a/salt/modules/file.py b/salt/modules/file.py index 944736f740..7dfd5ced01 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -60,6 +60,7 @@ import salt.utils.stringutils import salt.utils.templates import salt.utils.url from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError, get_error_message as _get_error_message +from salt.utils.files import HASHES, HASHES_REVMAP log = logging.getLogger(__name__) @@ -67,16 +68,6 @@ __func_alias__ = { 'makedirs_': 'makedirs' } -HASHES = { - 'sha512': 128, - 'sha384': 96, - 'sha256': 64, - 'sha224': 56, - 'sha1': 40, - 'md5': 32, -} -HASHES_REVMAP = dict([(y, x) for x, y in six.iteritems(HASHES)]) - def __virtual__(): ''' @@ -3767,14 +3758,8 @@ def source_list(source, source_hash, saltenv): ret = (single_src, single_hash) break elif proto.startswith('http') or proto == 'ftp': - try: - if __salt__['cp.cache_file'](single_src): - ret = (single_src, single_hash) - break - except MinionError as exc: - # Error downloading file. Log the caught exception and - # continue on to the next source. 
- log.exception(exc) + ret = (single_src, single_hash) + break elif proto == 'file' and os.path.exists(urlparsed_single_src.path): ret = (single_src, single_hash) break @@ -3794,9 +3779,8 @@ def source_list(source, source_hash, saltenv): ret = (single, source_hash) break elif proto.startswith('http') or proto == 'ftp': - if __salt__['cp.cache_file'](single): - ret = (single, source_hash) - break + ret = (single, source_hash) + break elif single.startswith('/') and os.path.exists(single): ret = (single, source_hash) break @@ -4007,11 +3991,14 @@ def get_managed( else: sfn = cached_dest - # If we didn't have the template or remote file, let's get it - # Similarly when the file has been updated and the cache has to be refreshed + # If we didn't have the template or remote file, or the file has been + # updated and the cache has to be refreshed, download the file. if not sfn or cache_refetch: try: - sfn = __salt__['cp.cache_file'](source, saltenv) + sfn = __salt__['cp.cache_file']( + source, + saltenv, + source_hash=source_sum.get('hsum')) except Exception as exc: # A 404 or other error code may raise an exception, catch it # and return a comment that will fail the calling state. 
@@ -4675,7 +4662,7 @@ def check_file_meta( ''' changes = {} if not source_sum: - source_sum = dict() + source_sum = {} lstats = stats(name, hash_type=source_sum.get('hash_type', None), follow_symlinks=False) if not lstats: changes['newfile'] = name @@ -4683,7 +4670,10 @@ def check_file_meta( if 'hsum' in source_sum: if source_sum['hsum'] != lstats['sum']: if not sfn and source: - sfn = __salt__['cp.cache_file'](source, saltenv) + sfn = __salt__['cp.cache_file']( + source, + saltenv, + source_hash=source_sum['hsum']) if sfn: try: changes['diff'] = get_diff( @@ -4750,7 +4740,9 @@ def get_diff(file1, saltenv='base', show_filenames=True, show_changes=True, - template=False): + template=False, + source_hash_file1=None, + source_hash_file2=None): ''' Return unified diff of two files @@ -4785,6 +4777,22 @@ def get_diff(file1, .. versionadded:: Oxygen + source_hash_file1 + If ``file1`` is an http(s)/ftp URL and the file exists in the minion's + file cache, this option can be passed to keep the minion from + re-downloading the archive if the cached copy matches the specified + hash. + + .. versionadded:: Oxygen + + source_hash_file2 + If ``file2`` is an http(s)/ftp URL and the file exists in the minion's + file cache, this option can be passed to keep the minion from + re-downloading the archive if the cached copy matches the specified + hash. + + .. versionadded:: Oxygen + CLI Examples: .. code-block:: bash @@ -4793,14 +4801,17 @@ def get_diff(file1, salt '*' file.get_diff /tmp/foo.txt /tmp/bar.txt ''' files = (file1, file2) + source_hashes = (source_hash_file1, source_hash_file2) paths = [] errors = [] - for filename in files: + for filename, source_hash in zip(files, source_hashes): try: # Local file paths will just return the same path back when passed # to cp.cache_file. 
- cached_path = __salt__['cp.cache_file'](filename, saltenv) + cached_path = __salt__['cp.cache_file'](filename, + saltenv, + source_hash=source_hash) if cached_path is False: errors.append( u'File {0} not found'.format( diff --git a/salt/states/archive.py b/salt/states/archive.py index c2308cbbd0..d6e46e595e 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -64,16 +64,30 @@ def _gen_checksum(path): 'hash_type': __opts__['hash_type']} -def _update_checksum(cached_source): - cached_source_sum = '.'.join((cached_source, 'hash')) - source_sum = _gen_checksum(cached_source) +def _checksum_file_path(path): + relpath = '.'.join((os.path.relpath(path, __opts__['cachedir']), 'hash')) + if re.match(r'..[/\\]', relpath): + # path is a local file + relpath = salt.utils.path.join( + 'local', + os.path.splitdrive(path)[-1].lstrip('/\\'), + ) + return salt.utils.path.join(__opts__['cachedir'], 'archive_hash', relpath) + + +def _update_checksum(path): + checksum_file = _checksum_file_path(path) + checksum_dir = os.path.dirname(checksum_file) + if not os.path.isdir(checksum_dir): + os.makedirs(checksum_dir) + source_sum = _gen_checksum(path) hash_type = source_sum.get('hash_type') hsum = source_sum.get('hsum') if hash_type and hsum: lines = [] try: try: - with salt.utils.files.fopen(cached_source_sum, 'r') as fp_: + with salt.utils.files.fopen(checksum_file, 'r') as fp_: for line in fp_: try: lines.append(line.rstrip('\n').split(':', 1)) @@ -83,7 +97,7 @@ def _update_checksum(cached_source): if exc.errno != errno.ENOENT: raise - with salt.utils.files.fopen(cached_source_sum, 'w') as fp_: + with salt.utils.files.fopen(checksum_file, 'w') as fp_: for line in lines: if line[0] == hash_type: line[1] = hsum @@ -93,16 +107,16 @@ def _update_checksum(cached_source): except (IOError, OSError) as exc: log.warning( 'Failed to update checksum for %s: %s', - cached_source, exc.__str__() + path, exc.__str__(), exc_info=True ) -def _read_cached_checksum(cached_source, 
form=None): +def _read_cached_checksum(path, form=None): if form is None: form = __opts__['hash_type'] - path = '.'.join((cached_source, 'hash')) + checksum_file = _checksum_file_path(path) try: - with salt.utils.files.fopen(path, 'r') as fp_: + with salt.utils.files.fopen(checksum_file, 'r') as fp_: for line in fp_: # Should only be one line in this file but just in case it # isn't, read only a single line to avoid overuse of memory. @@ -117,9 +131,9 @@ def _read_cached_checksum(cached_source, form=None): return {'hash_type': hash_type, 'hsum': hsum} -def _compare_checksum(cached_source, source_sum): +def _compare_checksum(cached, source_sum): cached_sum = _read_cached_checksum( - cached_source, + cached, form=source_sum.get('hash_type', __opts__['hash_type']) ) return source_sum == cached_sum @@ -146,6 +160,7 @@ def extracted(name, source_hash_name=None, source_hash_update=False, skip_verify=False, + cache_source=True, password=None, options=None, list_options=None, @@ -155,7 +170,6 @@ def extracted(name, user=None, group=None, if_missing=None, - keep=False, trim_output=False, use_cmd_unzip=None, extract_perms=True, @@ -391,6 +405,19 @@ def extracted(name, .. versionadded:: 2016.3.4 + cache_source : True + For ``source`` archives not local to the minion (i.e. from the Salt + fileserver or a remote source such as ``http(s)`` or ``ftp``), Salt + will need to download the archive to the minion cache before they can + be extracted. To remove the downloaded archive after extraction, set + this argument to ``False``. + + .. versionadded:: 2017.7.3 + + keep : True + .. deprecated:: 2017.7.3 + Use ``cache_source`` instead + password **For ZIP archives only.** Password used for extraction. @@ -518,13 +545,6 @@ def extracted(name, simply checked for existence and extraction will be skipped if if is present. - keep : False - For ``source`` archives not local to the minion (i.e. 
from the Salt - fileserver or a remote source such as ``http(s)`` or ``ftp``), Salt - will need to download the archive to the minion cache before they can - be extracted. After extraction, these source archives will be removed - unless this argument is set to ``True``. - trim_output : False Useful for archives with many files in them. This can either be set to ``True`` (in which case only the first 100 files extracted will be @@ -626,6 +646,14 @@ def extracted(name, # Remove pub kwargs as they're irrelevant here. kwargs = salt.utils.args.clean_kwargs(**kwargs) + if 'keep' in kwargs: + cache_source = bool(kwargs.pop('keep')) + ret.setdefault('warnings', []).append( + 'The \'keep\' argument has been renamed to \'cache_source\'. ' + 'Assumed cache_source={0}. Please update your SLS to get rid of ' + 'this warning.'.format(cache_source) + ) + if not _path_is_abs(name): ret['comment'] = '{0} is not an absolute path'.format(name) return ret @@ -721,10 +749,10 @@ def extracted(name, urlparsed_source = _urlparse(source_match) source_hash_basename = urlparsed_source.path or urlparsed_source.netloc - source_is_local = urlparsed_source.scheme in ('', 'file') + source_is_local = urlparsed_source.scheme in salt.utils.files.LOCAL_PROTOS if source_is_local: # Get rid of "file://" from start of source_match - source_match = urlparsed_source.path + source_match = os.path.realpath(os.path.expanduser(urlparsed_source.path)) if not os.path.isfile(source_match): ret['comment'] = 'Source file \'{0}\' does not exist'.format(source_match) return ret @@ -858,95 +886,49 @@ def extracted(name, source_sum = {} if source_is_local: - cached_source = source_match + cached = source_match else: - cached_source = os.path.join( - __opts__['cachedir'], - 'files', - __env__, - re.sub(r'[:/\\]', '_', source_hash_basename), - ) - - if os.path.isdir(cached_source): - # Prevent a traceback from attempting to read from a directory path - salt.utils.files.rm_rf(cached_source) - - existing_cached_source_sum 
= _read_cached_checksum(cached_source) - - if source_is_local: - # No need to download archive, it's local to the minion - update_source = False - else: - if not os.path.isfile(cached_source): - # Archive not cached, we need to download it - update_source = True - else: - # Archive is cached, keep=True likely used in prior run. If we need - # to verify the hash, then we *have* to update the source archive - # to know whether or not the hash changed. Hence the below - # statement. bool(source_hash) will be True if source_hash was - # passed, and otherwise False. - update_source = bool(source_hash) - - if update_source: if __opts__['test']: ret['result'] = None ret['comment'] = ( - 'Archive {0} would be downloaded to cache and checked to ' - 'discover if extraction is necessary'.format( + 'Archive {0} would be cached (if necessary) and checked to ' + 'discover if extraction is needed'.format( salt.utils.url.redact_http_basic_auth(source_match) ) ) return ret - # NOTE: This will result in more than one copy of the source archive on - # the minion. The reason this is necessary is because if we are - # tracking the checksum using source_hash_update, we need a location - # where we can place the checksum file alongside the cached source - # file, where it won't be overwritten by caching a file with the same - # name in the same parent dir as the source file. Long term, we should - # come up with a better solution for this. - file_result = __states__['file.managed'](cached_source, - source=source_match, - source_hash=source_hash, - source_hash_name=source_hash_name, - makedirs=True, - skip_verify=skip_verify) - log.debug('file.managed: {0}'.format(file_result)) + result = __states__['file.cached'](source_match, + source_hash=source_hash, + source_hash_name=source_hash_name, + skip_verify=skip_verify, + saltenv=__env__) + log.debug('file.cached: {0}'.format(result)) # Prevent a traceback if errors prevented the above state from getting # off the ground. 
- if isinstance(file_result, list): + if isinstance(result, list): try: - ret['comment'] = '\n'.join(file_result) + ret['comment'] = '\n'.join(result) except TypeError: - ret['comment'] = '\n'.join([str(x) for x in file_result]) + ret['comment'] = '\n'.join([str(x) for x in result]) return ret - try: - if not file_result['result']: - log.debug( - 'failed to download %s', - salt.utils.url.redact_http_basic_auth(source_match) - ) - return file_result - except TypeError: - if not file_result: - log.debug( - 'failed to download %s', - salt.utils.url.redact_http_basic_auth(source_match) - ) - return file_result + if result['result']: + # Get the path of the file in the minion cache + cached = __salt__['cp.is_cached'](source_match) + else: + log.debug( + 'failed to download %s', + salt.utils.url.redact_http_basic_auth(source_match) + ) + return result - else: - log.debug( - 'Archive %s is already in cache', - salt.utils.url.redact_http_basic_auth(source_match) - ) + existing_cached_source_sum = _read_cached_checksum(cached) if source_hash and source_hash_update and not skip_verify: # Create local hash sum file if we're going to track sum update - _update_checksum(cached_source) + _update_checksum(cached) if archive_format == 'zip' and not password: log.debug('Checking %s to see if it is password-protected', @@ -955,7 +937,7 @@ def extracted(name, # implicitly enabled by setting the "options" argument. 
try: encrypted_zip = __salt__['archive.is_encrypted']( - cached_source, + cached, clean=False, saltenv=__env__) except CommandExecutionError: @@ -973,7 +955,7 @@ def extracted(name, return ret try: - contents = __salt__['archive.list'](cached_source, + contents = __salt__['archive.list'](cached, archive_format=archive_format, options=list_options, strip_components=strip_components, @@ -1142,7 +1124,7 @@ def extracted(name, if not extraction_needed \ and source_hash_update \ and existing_cached_source_sum is not None \ - and not _compare_checksum(cached_source, existing_cached_source_sum): + and not _compare_checksum(cached, existing_cached_source_sum): extraction_needed = True source_hash_trigger = True else: @@ -1200,13 +1182,13 @@ def extracted(name, __states__['file.directory'](name, user=user, makedirs=True) created_destdir = True - log.debug('Extracting {0} to {1}'.format(cached_source, name)) + log.debug('Extracting {0} to {1}'.format(cached, name)) try: if archive_format == 'zip': if use_cmd_unzip: try: files = __salt__['archive.cmd_unzip']( - cached_source, + cached, name, options=options, trim_output=trim_output, @@ -1216,7 +1198,7 @@ def extracted(name, ret['comment'] = exc.strerror return ret else: - files = __salt__['archive.unzip'](cached_source, + files = __salt__['archive.unzip'](cached, name, options=options, trim_output=trim_output, @@ -1225,7 +1207,7 @@ def extracted(name, **kwargs) elif archive_format == 'rar': try: - files = __salt__['archive.unrar'](cached_source, + files = __salt__['archive.unrar'](cached, name, trim_output=trim_output, **kwargs) @@ -1235,7 +1217,7 @@ def extracted(name, else: if options is None: try: - with closing(tarfile.open(cached_source, 'r')) as tar: + with closing(tarfile.open(cached, 'r')) as tar: tar.extractall(name) files = tar.getnames() if trim_output: @@ -1243,7 +1225,7 @@ def extracted(name, except tarfile.ReadError: if salt.utils.path.which('xz'): if __salt__['cmd.retcode']( - ['xz', '-t', cached_source], + 
['xz', '-t', cached], python_shell=False, ignore_retcode=True) == 0: # XZ-compressed data @@ -1259,7 +1241,7 @@ def extracted(name, # pipe it to tar for extraction. cmd = 'xz --decompress --stdout {0} | tar xvf -' results = __salt__['cmd.run_all']( - cmd.format(_cmd_quote(cached_source)), + cmd.format(_cmd_quote(cached)), cwd=name, python_shell=True) if results['retcode'] != 0: @@ -1329,7 +1311,7 @@ def extracted(name, tar_cmd.append(tar_shortopts) tar_cmd.extend(tar_longopts) - tar_cmd.extend(['-f', cached_source]) + tar_cmd.extend(['-f', cached]) results = __salt__['cmd.run_all'](tar_cmd, cwd=name, @@ -1500,18 +1482,12 @@ def extracted(name, for item in enforce_failed: ret['comment'] += '\n- {0}'.format(item) - if not source_is_local and not keep: - for path in (cached_source, __salt__['cp.is_cached'](source_match)): - if not path: - continue - log.debug('Cleaning cached source file %s', path) - try: - os.remove(path) - except OSError as exc: - if exc.errno != errno.ENOENT: - log.error( - 'Failed to clean cached source file %s: %s', - cached_source, exc.__str__() - ) + if not cache_source and not source_is_local: + log.debug('Cleaning cached source file %s', cached) + result = __states__['file.not_cached'](source_match, saltenv=__env__) + if not result['result']: + # Don't let failure to delete cached file cause the state itself ot + # fail, just drop it in the warnings. 
+ ret.setdefault('warnings', []).append(result['comment']) return ret diff --git a/salt/states/file.py b/salt/states/file.py index 05801ff544..cedd99e624 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -299,6 +299,7 @@ if salt.utils.platform.is_windows(): # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip_longest +from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module if salt.utils.platform.is_windows(): import pywintypes import win32com.client @@ -1530,6 +1531,7 @@ def managed(name, source=None, source_hash='', source_hash_name=None, + cache_source=True, user=None, group=None, mode=None, @@ -1729,6 +1731,15 @@ def managed(name, .. versionadded:: 2016.3.5 + cache_source : True + Set to ``False`` to discard the cached copy of the source file once the + state completes. This can be useful for larger files to keep them from + taking up space in minion cache. However, keep in mind that discarding + the source file will result in the state needing to re-download the + source file if the state is run again. + + .. 
versionadded:: 2017.7.3 + user The user to own the file, this defaults to the user salt is running as on the minion @@ -2440,8 +2451,9 @@ def managed(name, except Exception as exc: ret['changes'] = {} log.debug(traceback.format_exc()) - if os.path.isfile(tmp_filename): - os.remove(tmp_filename) + salt.utils.files.remove(tmp_filename) + if not cache_source and sfn: + salt.utils.files.remove(sfn) return _error(ret, 'Unable to check_cmd file: {0}'.format(exc)) # file being updated to verify using check_cmd @@ -2459,15 +2471,9 @@ def managed(name, cret = mod_run_check_cmd(check_cmd, tmp_filename, **check_cmd_opts) if isinstance(cret, dict): ret.update(cret) - if os.path.isfile(tmp_filename): - os.remove(tmp_filename) - if sfn and os.path.isfile(sfn): - os.remove(sfn) + salt.utils.files.remove(tmp_filename) return ret - if sfn and os.path.isfile(sfn): - os.remove(sfn) - # Since we generated a new tempfile and we are not returning here # lets change the original sfn to the new tempfile or else we will # get file not found @@ -2516,10 +2522,10 @@ def managed(name, log.debug(traceback.format_exc()) return _error(ret, 'Unable to manage file: {0}'.format(exc)) finally: - if tmp_filename and os.path.isfile(tmp_filename): - os.remove(tmp_filename) - if sfn and os.path.isfile(sfn): - os.remove(sfn) + if tmp_filename: + salt.utils.files.remove(tmp_filename) + if not cache_source and sfn: + salt.utils.files.remove(sfn) _RECURSE_TYPES = ['user', 'group', 'mode', 'ignore_files', 'ignore_dirs'] @@ -3048,6 +3054,7 @@ def directory(name, def recurse(name, source, + cache_source=True, clean=False, require=None, user=None, @@ -3080,6 +3087,15 @@ def recurse(name, located on the master in the directory named spam, and is called eggs, the source string is salt://spam/eggs + cache_source : True + Set to ``False`` to discard the cached copy of the source file once the + state completes. This can be useful for larger files to keep them from + taking up space in minion cache. 
However, keep in mind that discarding + the source file will result in the state needing to re-download the + source file if the state is run again. + + .. versionadded:: 2017.7.3 + clean Make sure that only files that are set up by salt and required by this function are kept. If this option is set then everything in this @@ -3360,6 +3376,7 @@ def recurse(name, _ret = managed( path, source=source, + cache_source=cache_source, user=user, group=group, mode='keep' if keep_mode else file_mode, @@ -6426,3 +6443,283 @@ def shortcut( ret['comment'] += (', but was unable to set ownership to ' '{0}'.format(user)) return ret + + +def cached(name, + source_hash='', + source_hash_name=None, + skip_verify=False, + saltenv='base'): + ''' + .. versionadded:: 2017.7.3 + + Ensures that a file is saved to the minion's cache. This state is primarily + invoked by other states to ensure that we do not re-download a source file + if we do not need to. + + name + The URL of the file to be cached. To cache a file from an environment + other than ``base``, either use the ``saltenv`` argument or include the + saltenv in the URL (e.g. ``salt://path/to/file.conf?saltenv=dev``). + + .. note:: + A list of URLs is not supported, this must be a single URL. If a + local file is passed here, then the state will obviously not try to + download anything, but it will compare a hash if one is specified. + + source_hash + See the documentation for this same argument in the + :py:func:`file.managed ` state. + + .. note:: + For remote files not originating from the ``salt://`` fileserver, + such as http(s) or ftp servers, this state will not re-download the + file if the locally-cached copy matches this hash. This is done to + prevent unnecessary downloading on repeated runs of this state. To + update the cached copy of a file, it is necessary to update this + hash. + + source_hash_name + See the documentation for this same argument in the + :py:func:`file.managed ` state. 
+ + skip_verify + See the documentation for this same argument in the + :py:func:`file.managed ` state. + + .. note:: + Setting this to ``True`` will result in a copy of the file being + downloaded from a remote (http(s), ftp, etc.) source each time the + state is run. + + saltenv + Used to specify the environment from which to download a file from the + Salt fileserver (i.e. those with ``salt://`` URL). + ''' + ret = {'changes': {}, + 'comment': '', + 'name': name, + 'result': False} + + try: + parsed = _urlparse(name) + except Exception: + ret['comment'] = 'Only URLs or local file paths are valid input' + return ret + + # This if statement will keep the state from proceeding if a remote source + # is specified and no source_hash is presented (unless we're skipping hash + # verification). + if not skip_verify \ + and not source_hash \ + and parsed.scheme in salt.utils.files.REMOTE_PROTOS: + ret['comment'] = ( + 'Unable to verify upstream hash of source file {0}, please set ' + 'source_hash or set skip_verify to True'.format(name) + ) + return ret + + if source_hash: + # Get the hash and hash type from the input. This takes care of parsing + # the hash out of a file containing checksums, if that is how the + # source_hash was specified. + try: + source_sum = __salt__['file.get_source_sum']( + source=name, + source_hash=source_hash, + source_hash_name=source_hash_name, + saltenv=saltenv) + except CommandExecutionError as exc: + ret['comment'] = exc.strerror + return ret + else: + if not source_sum: + # We shouldn't get here, problems in retrieving the hash in + # file.get_source_sum should result in a CommandExecutionError + # being raised, which we catch above. Nevertheless, we should + # provide useful information in the event that + # file.get_source_sum regresses. + ret['comment'] = ( + 'Failed to get source hash from {0}. This may be a bug. 
' + 'If this error persists, please report it and set ' + 'skip_verify to True to work around it.'.format(source_hash) + ) + return ret + else: + source_sum = {} + + if parsed.scheme in salt.utils.files.LOCAL_PROTOS: + # Source is a local file path + full_path = os.path.realpath(os.path.expanduser(parsed.path)) + if os.path.exists(full_path): + if not skip_verify and source_sum: + # Enforce the hash + local_hash = __salt__['file.get_hash']( + full_path, + source_sum.get('hash_type', __opts__['hash_type'])) + if local_hash == source_sum['hsum']: + ret['result'] = True + ret['comment'] = ( + 'File {0} is present on the minion and has hash ' + '{1}'.format(full_path, local_hash) + ) + else: + ret['comment'] = ( + 'File {0} is present on the minion, but the hash ({1}) ' + 'does not match the specified hash ({2})'.format( + full_path, local_hash, source_sum['hsum'] + ) + ) + return ret + else: + ret['result'] = True + ret['comment'] = 'File {0} is present on the minion'.format( + full_path + ) + return ret + else: + ret['comment'] = 'File {0} is not present on the minion'.format( + full_path + ) + return ret + + local_copy = __salt__['cp.is_cached'](name, saltenv=saltenv) + + if local_copy: + # File is already cached + pre_hash = __salt__['file.get_hash']( + local_copy, + source_sum.get('hash_type', __opts__['hash_type'])) + + if not skip_verify and source_sum: + # Get the local copy's hash to compare with the hash that was + # specified via source_hash. If it matches, we can exit early from + # the state without going any further, because the file is cached + # with the correct hash. + if pre_hash == source_sum['hsum']: + ret['result'] = True + ret['comment'] = ( + 'File is already cached to {0} with hash {1}'.format( + local_copy, pre_hash + ) + ) + else: + pre_hash = None + + # Cache the file. Note that this will not actually download the file if + # either of the following is true: + # 1. 
source is a salt:// URL and the fileserver determines that the hash + # of the minion's copy matches that of the fileserver. + # 2. File is remote (http(s), ftp, etc.) and the specified source_hash + # matches the cached copy. + # Remote, non salt:// sources _will_ download if a copy of the file was + # not already present in the minion cache. + try: + local_copy = __salt__['cp.cache_file']( + name, + saltenv=saltenv, + source_hash=source_sum.get('hsum')) + except Exception as exc: + ret['comment'] = exc.__str__() + return ret + + if not local_copy: + ret['comment'] = ( + 'Failed to cache {0}, check minion log for more ' + 'information'.format(name) + ) + return ret + + post_hash = __salt__['file.get_hash']( + local_copy, + source_sum.get('hash_type', __opts__['hash_type'])) + + if pre_hash != post_hash: + ret['changes']['hash'] = {'old': pre_hash, 'new': post_hash} + + # Check the hash, if we're enforcing one. Note that this will be the first + # hash check if the file was not previously cached, and the 2nd hash check + # if it was cached and the + if not skip_verify and source_sum: + if post_hash == source_sum['hsum']: + ret['result'] = True + ret['comment'] = ( + 'File is already cached to {0} with hash {1}'.format( + local_copy, post_hash + ) + ) + else: + ret['comment'] = ( + 'File is cached to {0}, but the hash ({1}) does not match ' + 'the specified hash ({2})'.format( + local_copy, post_hash, source_sum['hsum'] + ) + ) + return ret + + # We're not enforcing a hash, and we already know that the file was + # successfully cached, so we know the state was successful. + ret['result'] = True + ret['comment'] = 'File is cached to {0}'.format(local_copy) + return ret + + +def not_cached(name, saltenv='base'): + ''' + .. versionadded:: 2017.7.3 + + Ensures that a file is saved to the minion's cache. This state is primarily + invoked by other states to ensure that we do not re-download a source file + if we do not need to. + + name + The URL of the file to be cached. 
To cache a file from an environment + other than ``base``, either use the ``saltenv`` argument or include the + saltenv in the URL (e.g. ``salt://path/to/file.conf?saltenv=dev``). + + .. note:: + A list of URLs is not supported, this must be a single URL. If a + local file is passed here, the state will take no action. + + saltenv + Used to specify the environment from which to download a file from the + Salt fileserver (i.e. those with ``salt://`` URL). + ''' + ret = {'changes': {}, + 'comment': '', + 'name': name, + 'result': False} + + try: + parsed = _urlparse(name) + except Exception: + ret['comment'] = 'Only URLs or local file paths are valid input' + return ret + else: + if parsed.scheme in salt.utils.files.LOCAL_PROTOS: + full_path = os.path.realpath(os.path.expanduser(parsed.path)) + ret['result'] = True + ret['comment'] = ( + 'File {0} is a local path, no action taken'.format( + full_path + ) + ) + return ret + + local_copy = __salt__['cp.is_cached'](name, saltenv=saltenv) + + if local_copy: + try: + os.remove(local_copy) + except Exception as exc: + ret['comment'] = 'Failed to delete {0}: {1}'.format( + local_copy, exc.__str__() + ) + else: + ret['result'] = True + ret['changes']['deleted'] = True + ret['comment'] = '{0} was deleted'.format(local_copy) + else: + ret['result'] = True + ret['comment'] = '{0} is not cached'.format(name) + return ret diff --git a/salt/utils/files.py b/salt/utils/files.py index 1d7068987a..c55ac86324 100644 --- a/salt/utils/files.py +++ b/salt/utils/files.py @@ -35,10 +35,21 @@ except ImportError: log = logging.getLogger(__name__) +LOCAL_PROTOS = ('', 'file') REMOTE_PROTOS = ('http', 'https', 'ftp', 'swift', 's3') VALID_PROTOS = ('salt', 'file') + REMOTE_PROTOS TEMPFILE_PREFIX = '__salt.tmp.' 
+HASHES = { + 'sha512': 128, + 'sha384': 96, + 'sha256': 64, + 'sha224': 56, + 'sha1': 40, + 'md5': 32, +} +HASHES_REVMAP = dict([(y, x) for x, y in six.iteritems(HASHES)]) + def guess_archive_type(name): ''' @@ -538,3 +549,14 @@ def is_text_file(fp_, blocksize=512): nontext = block.translate(None, text_characters) return float(len(nontext)) / len(block) <= 0.30 + + +def remove(path): + ''' + Runs os.remove(path) and suppresses the OSError if the file doesn't exist + ''' + try: + os.remove(path) + except OSError as exc: + if exc.errno != errno.ENOENT: + raise From 68ae5a2ec6c4f65a005b17be22013999d5f7ccbe Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 20 Sep 2017 12:02:22 -0500 Subject: [PATCH 626/639] Add documentation for using file.cached state within other states --- salt/states/file.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/salt/states/file.py b/salt/states/file.py index cedd99e624..389d5896de 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -6495,6 +6495,37 @@ def cached(name, saltenv Used to specify the environment from which to download a file from the Salt fileserver (i.e. those with ``salt://`` URL). + + + This state will in most cases not be useful in SLS files, but it is useful + when writing a state or remote-execution module that needs to make sure + that a file at a given URL has been downloaded to the cachedir. One example + of this is in the :py:func:`archive.extracted ` + state: + + .. code-block:: python + + result = __states__['file.cached'](source_match, + source_hash=source_hash, + source_hash_name=source_hash_name, + skip_verify=skip_verify, + saltenv=__env__) + + This will return a dictionary containing the state's return data, including + a ``result`` key which will state whether or not the state was successful. + Note that this will not catch exceptions, so it is best used within a + try/except. 
+ + Once this state has been run from within another state or remote-execution + module, the actual location of the cached file can be obtained using + :py:func:`cp.is_cached `: + + .. code-block:: python + + cached = __salt__['cp.is_cached'](source_match) + + This function will return the cached path of the file, or an empty string + if the file is not present in the minion cache. ''' ret = {'changes': {}, 'comment': '', From bb1d40e425f2fa86b7ce124d01e2ea0826f650de Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Wed, 20 Sep 2017 12:27:46 -0500 Subject: [PATCH 627/639] Guard against exceptions when running file.cached state within archive.extracted The code used to have a salt.state.HighState instance call the state. That method pre-dated availability of __states__ to use for executing the state function. The HighState instance handles exception catching and produces a list as output if there were errors which arose before the state was executed. Running the state function using __states__ does not give you any such protection. This commit removes the type check on the return data, as it will never be a list when run via __states__, and wraps the state function in a try/except to catch any exceptions that may be raised by invoking the file.cached state. --- salt/states/archive.py | 38 ++++++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/salt/states/archive.py b/salt/states/archive.py index d6e46e595e..24e61a3d73 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -898,22 +898,32 @@ def extracted(name, ) return ret - result = __states__['file.cached'](source_match, - source_hash=source_hash, - source_hash_name=source_hash_name, - skip_verify=skip_verify, - saltenv=__env__) - log.debug('file.cached: {0}'.format(result)) - - # Prevent a traceback if errors prevented the above state from getting - # off the ground. 
- if isinstance(result, list): - try: - ret['comment'] = '\n'.join(result) - except TypeError: - ret['comment'] = '\n'.join([str(x) for x in result]) + if 'file.cached' not in __states__: + # Shouldn't happen unless there is a traceback keeping + # salt/states/file.py from being processed through the loader. If + # that is the case, we have much more important problems as _all_ + # file states would be unavailable. + ret['comment'] = ( + 'Unable to cache {0}, file.cached state not available'.format( + source_match + ) + ) return ret + try: + result = __states__['file.cached'](source_match, + source_hash=source_hash, + source_hash_name=source_hash_name, + skip_verify=skip_verify, + saltenv=__env__) + except Exception as exc: + msg = 'Failed to cache {0}: {1}'.format(source_match, exc.__str__()) + log.exception(msg) + ret['comment'] = msg + return ret + else: + log.debug('file.cached: {0}'.format(result)) + if result['result']: # Get the path of the file in the minion cache cached = __salt__['cp.is_cached'](source_match) From 4d7bcff58f3de4271292de8947c497b0a92497f7 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 21 Sep 2017 00:59:57 -0500 Subject: [PATCH 628/639] Rename cache_source arg to keep_source This new argument name is ambiguous as "cache" can either be interpreted as a noun or a verb. --- salt/states/archive.py | 29 +++++++++++++++++++---------- salt/states/file.py | 14 +++++++------- 2 files changed, 26 insertions(+), 17 deletions(-) diff --git a/salt/states/archive.py b/salt/states/archive.py index 24e61a3d73..a33bc15899 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -160,7 +160,6 @@ def extracted(name, source_hash_name=None, source_hash_update=False, skip_verify=False, - cache_source=True, password=None, options=None, list_options=None, @@ -405,7 +404,7 @@ def extracted(name, .. versionadded:: 2016.3.4 - cache_source : True + keep_source : True For ``source`` archives not local to the minion (i.e. 
from the Salt fileserver or a remote source such as ``http(s)`` or ``ftp``), Salt will need to download the archive to the minion cache before they can @@ -415,8 +414,11 @@ def extracted(name, .. versionadded:: 2017.7.3 keep : True - .. deprecated:: 2017.7.3 - Use ``cache_source`` instead + Same as ``keep_source``. + + .. note:: + If both ``keep_source`` and ``keep`` are used, ``keep`` will be + ignored. password **For ZIP archives only.** Password used for extraction. @@ -646,13 +648,20 @@ def extracted(name, # Remove pub kwargs as they're irrelevant here. kwargs = salt.utils.args.clean_kwargs(**kwargs) - if 'keep' in kwargs: - cache_source = bool(kwargs.pop('keep')) + if 'keep_source' in kwargs and 'keep' in kwargs: ret.setdefault('warnings', []).append( - 'The \'keep\' argument has been renamed to \'cache_source\'. ' - 'Assumed cache_source={0}. Please update your SLS to get rid of ' - 'this warning.'.format(cache_source) + 'Both \'keep_source\' and \'keep\' were used. Since these both ' + 'do the same thing, \'keep\' was ignored.' 
) + keep_source = bool(kwargs.pop('keep_source')) + kwargs.pop('keep') + elif 'keep_source' in kwargs: + keep_source = bool(kwargs.pop('keep_source')) + elif 'keep' in kwargs: + keep_source = bool(kwargs.pop('keep')) + else: + # Neither was passed, default is True + keep_source = True if not _path_is_abs(name): ret['comment'] = '{0} is not an absolute path'.format(name) @@ -1492,7 +1501,7 @@ def extracted(name, for item in enforce_failed: ret['comment'] += '\n- {0}'.format(item) - if not cache_source and not source_is_local: + if not keep_source and not source_is_local: log.debug('Cleaning cached source file %s', cached) result = __states__['file.not_cached'](source_match, saltenv=__env__) if not result['result']: diff --git a/salt/states/file.py b/salt/states/file.py index 389d5896de..1d89feb295 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -1531,7 +1531,7 @@ def managed(name, source=None, source_hash='', source_hash_name=None, - cache_source=True, + keep_source=True, user=None, group=None, mode=None, @@ -1731,7 +1731,7 @@ def managed(name, .. versionadded:: 2016.3.5 - cache_source : True + keep_source : True Set to ``False`` to discard the cached copy of the source file once the state completes. This can be useful for larger files to keep them from taking up space in minion cache. 
However, keep in mind that discarding @@ -2452,7 +2452,7 @@ def managed(name, ret['changes'] = {} log.debug(traceback.format_exc()) salt.utils.files.remove(tmp_filename) - if not cache_source and sfn: + if not keep_source and sfn: salt.utils.files.remove(sfn) return _error(ret, 'Unable to check_cmd file: {0}'.format(exc)) @@ -2524,7 +2524,7 @@ def managed(name, finally: if tmp_filename: salt.utils.files.remove(tmp_filename) - if not cache_source and sfn: + if not keep_source and sfn: salt.utils.files.remove(sfn) @@ -3054,7 +3054,7 @@ def directory(name, def recurse(name, source, - cache_source=True, + keep_source=True, clean=False, require=None, user=None, @@ -3087,7 +3087,7 @@ def recurse(name, located on the master in the directory named spam, and is called eggs, the source string is salt://spam/eggs - cache_source : True + keep_source : True Set to ``False`` to discard the cached copy of the source file once the state completes. This can be useful for larger files to keep them from taking up space in minion cache. 
However, keep in mind that discarding @@ -3376,7 +3376,7 @@ def recurse(name, _ret = managed( path, source=source, - cache_source=cache_source, + keep_source=keep_source, user=user, group=group, mode='keep' if keep_mode else file_mode, From 8b237c129a959c8f504aeb83cbb8281133795366 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 21 Sep 2017 01:02:19 -0500 Subject: [PATCH 629/639] Improve logging when keeping the cached source file --- salt/states/archive.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/salt/states/archive.py b/salt/states/archive.py index a33bc15899..2a1454f99d 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -1501,12 +1501,15 @@ def extracted(name, for item in enforce_failed: ret['comment'] += '\n- {0}'.format(item) - if not keep_source and not source_is_local: - log.debug('Cleaning cached source file %s', cached) - result = __states__['file.not_cached'](source_match, saltenv=__env__) - if not result['result']: - # Don't let failure to delete cached file cause the state itself ot - # fail, just drop it in the warnings. - ret.setdefault('warnings', []).append(result['comment']) + if not source_is_local: + if keep_source: + log.debug('Keeping cached source file %s', cached) + else: + log.debug('Cleaning cached source file %s', cached) + result = __states__['file.not_cached'](source_match, saltenv=__env__) + if not result['result']: + # Don't let failure to delete cached file cause the state + # itself to fail, just drop it in the warnings. 
+ ret.setdefault('warnings', []).append(result['comment']) return ret From 100113726cc4e7c0768aaac77275999f09ddd69d Mon Sep 17 00:00:00 2001 From: rallytime Date: Thu, 21 Sep 2017 13:18:07 -0400 Subject: [PATCH 630/639] Reduce the number of days an issue is stale by 30 --- .github/stale.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/stale.yml b/.github/stale.yml index 3d5da2f3df..35928803a7 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -1,8 +1,8 @@ # Probot Stale configuration file # Number of days of inactivity before an issue becomes stale -# 1030 is approximately 2 years and 10 months -daysUntilStale: 1030 +# 1000 is approximately 2 years and 9 months +daysUntilStale: 1000 # Number of days of inactivity before a stale issue is closed daysUntilClose: 7 From 7dcac8452158fcb36c0587b317d90e1c3347308a Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 21 Sep 2017 14:05:31 -0500 Subject: [PATCH 631/639] Make git_pillar runner return None when no changes are fetched This distinguishes cases in which no changes were fetched from those when there were errors. --- salt/runners/git_pillar.py | 5 +++++ salt/utils/gitfs.py | 6 ++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/salt/runners/git_pillar.py b/salt/runners/git_pillar.py index ca302ae7f8..6826268076 100644 --- a/salt/runners/git_pillar.py +++ b/salt/runners/git_pillar.py @@ -28,6 +28,11 @@ def update(branch=None, repo=None): fetched, and ``False`` if there were errors or no new commits were fetched. + .. versionchanged:: Oxygen + The return for a given git_pillar remote will now be ``None`` when no + changes were fetched. ``False`` now is reserved only for instances in + which there were errors. + Fetch one or all configured git_pillar remotes. .. 
note:: diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py index 471ce74707..a0b0b20ca1 100644 --- a/salt/utils/gitfs.py +++ b/salt/utils/gitfs.py @@ -1145,7 +1145,7 @@ class GitPython(GitProvider): new_objs = True cleaned = self.clean_stale_refs() - return bool(new_objs or cleaned) + return True if (new_objs or cleaned) else None def file_list(self, tgt_env): ''' @@ -1621,7 +1621,9 @@ class Pygit2(GitProvider): log.debug('%s remote \'%s\' is up-to-date', self.role, self.id) refs_post = self.repo.listall_references() cleaned = self.clean_stale_refs(local_refs=refs_post) - return bool(received_objects or refs_pre != refs_post or cleaned) + return True \ + if (received_objects or refs_pre != refs_post or cleaned) \ + else None def file_list(self, tgt_env): ''' From f5188cb886d1d8a40f8dfda637dd2adb9d51ae4e Mon Sep 17 00:00:00 2001 From: Alexander Bergmann Date: Thu, 21 Sep 2017 22:28:31 +0200 Subject: [PATCH 632/639] Extend openscap module command parsing. This commit extends the cmd_pattern with unknown parts of the parsing process. Otherwise it is not possible to customize the openscap XCCDF execution with additional parameters like --remediate. 
--- salt/modules/openscap.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/modules/openscap.py b/salt/modules/openscap.py index 2061550012..0dfb911f4a 100644 --- a/salt/modules/openscap.py +++ b/salt/modules/openscap.py @@ -26,7 +26,7 @@ _XCCDF_MAP = { 'cmd_pattern': ( "oscap xccdf eval " "--oval-results --results results.xml --report report.html " - "--profile {0} {1}" + "--profile {0} {1} {2}" ) } } @@ -73,6 +73,7 @@ def xccdf(params): ''' params = shlex.split(params) policy = params[-1] + del params[-1] success = True error = None @@ -89,7 +90,7 @@ def xccdf(params): error = str(err) if success: - cmd = _XCCDF_MAP[action]['cmd_pattern'].format(args.profile, policy) + cmd = _XCCDF_MAP[action]['cmd_pattern'].format(args.profile, " ".join(argv), policy) tempdir = tempfile.mkdtemp() proc = Popen( shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir) From 022a25e3fc4a6f77de4f381258db35f63791e004 Mon Sep 17 00:00:00 2001 From: Ric Klaren Date: Mon, 11 Sep 2017 12:59:01 -0500 Subject: [PATCH 633/639] Mention Fedora 26 support --- salt/cloud/clouds/libvirt.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/cloud/clouds/libvirt.py b/salt/cloud/clouds/libvirt.py index 53a8c4b659..336e44c82d 100644 --- a/salt/cloud/clouds/libvirt.py +++ b/salt/cloud/clouds/libvirt.py @@ -41,6 +41,7 @@ Example profile: master_port: 5506 Tested on: +- Fedora 26 (libvirt 3.2.1, qemu 2.9.1) - Fedora 25 (libvirt 1.3.3.2, qemu 2.6.1) - Fedora 23 (libvirt 1.2.18, qemu 2.4.1) - Centos 7 (libvirt 1.2.17, qemu 1.5.3) From ab0892c802f3c4a19c65aeef662cf5f1d51958b7 Mon Sep 17 00:00:00 2001 From: Ric Klaren Date: Mon, 11 Sep 2017 15:48:07 -0500 Subject: [PATCH 634/639] Fix fire_event for destroy --- salt/cloud/clouds/libvirt.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/cloud/clouds/libvirt.py b/salt/cloud/clouds/libvirt.py index 336e44c82d..d89b2f215f 100644 --- a/salt/cloud/clouds/libvirt.py +++ b/salt/cloud/clouds/libvirt.py @@ 
-517,7 +517,7 @@ def destroy(name, call=None): 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), - {'name': name}, + args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) @@ -528,7 +528,7 @@ def destroy(name, call=None): 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), - {'name': name}, + args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) From 7595f974886b75bbba4bf4a2e3132f0f031a0c14 Mon Sep 17 00:00:00 2001 From: Ric Klaren Date: Mon, 11 Sep 2017 15:48:41 -0500 Subject: [PATCH 635/639] Ensure libvirt does not write to stderr by itself Install error handler and redirect stderr output to debug log. --- salt/cloud/clouds/libvirt.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/salt/cloud/clouds/libvirt.py b/salt/cloud/clouds/libvirt.py index d89b2f215f..be5a683477 100644 --- a/salt/cloud/clouds/libvirt.py +++ b/salt/cloud/clouds/libvirt.py @@ -83,9 +83,6 @@ from salt.exceptions import ( SaltCloudSystemExit ) -# Get logging started -log = logging.getLogger(__name__) - VIRT_STATE_NAME_MAP = {0: 'running', 1: 'running', 2: 'running', @@ -100,6 +97,18 @@ IP_LEARNING_XML = """ __virtualname__ = 'libvirt' +# Set up logging +log = logging.getLogger(__name__) + +def libvirtErrorHandler(ctx, error): + ''' + Redirect stderr prints from libvirt to salt logging. 
+ ''' + log.debug("libvirt error {0}".format(error)) + + +if HAS_LIBVIRT: + libvirt.registerErrorHandler(f=libvirtErrorHandler, ctx=None) def __virtual__(): ''' From b750c44224f3a39bbaddbf4926bec720cfc74e7b Mon Sep 17 00:00:00 2001 From: Ric Klaren Date: Mon, 11 Sep 2017 16:24:34 -0500 Subject: [PATCH 636/639] Fix some lint warnings --- salt/cloud/clouds/libvirt.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/salt/cloud/clouds/libvirt.py b/salt/cloud/clouds/libvirt.py index be5a683477..c3a1b56c0e 100644 --- a/salt/cloud/clouds/libvirt.py +++ b/salt/cloud/clouds/libvirt.py @@ -100,7 +100,8 @@ __virtualname__ = 'libvirt' # Set up logging log = logging.getLogger(__name__) -def libvirtErrorHandler(ctx, error): + +def libvirt_error_handler(ctx, error): ''' Redirect stderr prints from libvirt to salt logging. ''' @@ -108,7 +109,8 @@ def libvirtErrorHandler(ctx, error): if HAS_LIBVIRT: - libvirt.registerErrorHandler(f=libvirtErrorHandler, ctx=None) + libvirt.registerErrorHandler(f=libvirt_error_handler, ctx=None) + def __virtual__(): ''' @@ -290,7 +292,7 @@ def create(vm_): validate_xml = vm_.get('validate_xml') if vm_.get('validate_xml') is not None else True - log.info("Cloning machine '{0}' with strategy '{1}' validate_xml='{2}'".format(vm_['name'], clone_strategy, validate_xml)) + log.info("Cloning '{0}' with strategy '{1}' validate_xml='{2}'".format(vm_['name'], clone_strategy, validate_xml)) try: # Check for required profile parameters before sending any API calls. 
From 012281dea281a52dfcbb0e9b2e8663c966133af9 Mon Sep 17 00:00:00 2001 From: Damian Myerscough Date: Thu, 21 Sep 2017 22:46:03 -0700 Subject: [PATCH 637/639] Fix docstring code-blocks --- salt/cloud/clouds/digitalocean.py | 12 ++++++------ salt/modules/win_path.py | 2 +- salt/modules/zk_concurrency.py | 8 ++++---- salt/utils/boto.py | 4 ++-- salt/utils/boto3.py | 4 ++-- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/salt/cloud/clouds/digitalocean.py b/salt/cloud/clouds/digitalocean.py index 38516dee45..d5bcb4fb6f 100644 --- a/salt/cloud/clouds/digitalocean.py +++ b/salt/cloud/clouds/digitalocean.py @@ -969,7 +969,7 @@ def list_floating_ips(call=None): CLI Examples: - ... code-block:: bash + .. code-block:: bash salt-cloud -f list_floating_ips my-digitalocean-config ''' @@ -1009,7 +1009,7 @@ def show_floating_ip(kwargs=None, call=None): CLI Examples: - ... code-block:: bash + .. code-block:: bash salt-cloud -f show_floating_ip my-digitalocean-config floating_ip='45.55.96.47' ''' @@ -1042,7 +1042,7 @@ def create_floating_ip(kwargs=None, call=None): CLI Examples: - ... code-block:: bash + .. code-block:: bash salt-cloud -f create_floating_ip my-digitalocean-config region='NYC2' @@ -1084,7 +1084,7 @@ def delete_floating_ip(kwargs=None, call=None): CLI Examples: - ... code-block:: bash + .. code-block:: bash salt-cloud -f delete_floating_ip my-digitalocean-config floating_ip='45.55.96.47' ''' @@ -1119,7 +1119,7 @@ def assign_floating_ip(kwargs=None, call=None): CLI Examples: - ... code-block:: bash + .. code-block:: bash salt-cloud -f assign_floating_ip my-digitalocean-config droplet_id=1234567 floating_ip='45.55.96.47' ''' @@ -1152,7 +1152,7 @@ def unassign_floating_ip(kwargs=None, call=None): CLI Examples: - ... code-block:: bash + .. 
code-block:: bash salt-cloud -f unassign_floating_ip my-digitalocean-config floating_ip='45.55.96.47' ''' diff --git a/salt/modules/win_path.py b/salt/modules/win_path.py index 27bba5f719..fa57f53d18 100644 --- a/salt/modules/win_path.py +++ b/salt/modules/win_path.py @@ -51,7 +51,7 @@ def rehash(): CLI Example: - ... code-block:: bash + .. code-block:: bash salt '*' win_path.rehash ''' diff --git a/salt/modules/zk_concurrency.py b/salt/modules/zk_concurrency.py index 4335a176d8..2dc0a8dbf5 100644 --- a/salt/modules/zk_concurrency.py +++ b/salt/modules/zk_concurrency.py @@ -185,7 +185,7 @@ def lock_holders(path, Example: - ... code-block: bash + .. code-block: bash salt minion zk_concurrency.lock_holders /lock/path host1:1234,host2:1234 ''' @@ -237,7 +237,7 @@ def lock(path, Example: - ... code-block: bash + .. code-block: bash salt minion zk_concurrency.lock /lock/path host1:1234,host2:1234 ''' @@ -298,7 +298,7 @@ def unlock(path, Example: - ... code-block: bash + .. code-block: bash salt minion zk_concurrency.unlock /lock/path host1:1234,host2:1234 ''' @@ -348,7 +348,7 @@ def party_members(path, Example: - ... code-block: bash + .. code-block: bash salt minion zk_concurrency.party_members /lock/path host1:1234,host2:1234 salt minion zk_concurrency.party_members /lock/path host1:1234,host2:1234 min_nodes=3 blocking=True diff --git a/salt/utils/boto.py b/salt/utils/boto.py index ee0a795ee6..86b09a8e7d 100644 --- a/salt/utils/boto.py +++ b/salt/utils/boto.py @@ -160,7 +160,7 @@ def cache_id_func(service): ''' Returns a partial `cache_id` function for the provided service. - ... code-block:: python + .. code-block:: python cache_id = __utils__['boto.cache_id_func']('ec2') cache_id('myinstance', 'i-a1b2c3') @@ -209,7 +209,7 @@ def get_connection_func(service, module=None): ''' Returns a partial `get_connection` function for the provided service. - ... code-block:: python + .. 
code-block:: python get_conn = __utils__['boto.get_connection_func']('ec2') conn = get_conn() diff --git a/salt/utils/boto3.py b/salt/utils/boto3.py index 29d45e0e2d..866ab7ed17 100644 --- a/salt/utils/boto3.py +++ b/salt/utils/boto3.py @@ -182,7 +182,7 @@ def cache_id_func(service): ''' Returns a partial `cache_id` function for the provided service. - ... code-block:: python + .. code-block:: python cache_id = __utils__['boto.cache_id_func']('ec2') cache_id('myinstance', 'i-a1b2c3') @@ -233,7 +233,7 @@ def get_connection_func(service, module=None): ''' Returns a partial `get_connection` function for the provided service. - ... code-block:: python + .. code-block:: python get_conn = __utils__['boto.get_connection_func']('ec2') conn = get_conn() From 43ded5413206ea906430f543ac06fb2c65bacc39 Mon Sep 17 00:00:00 2001 From: Joaquin Veira Date: Fri, 22 Sep 2017 11:43:44 +0200 Subject: [PATCH 638/639] Update zabbix_return.py corrected indentation and other suggestions by jenkins --- salt/returners/zabbix_return.py | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/salt/returners/zabbix_return.py b/salt/returners/zabbix_return.py index a5e79ca8e0..9969e3365c 100644 --- a/salt/returners/zabbix_return.py +++ b/salt/returners/zabbix_return.py @@ -55,22 +55,21 @@ def zbx(): return False -def zabbix_send(key, host, output): - with salt.utils.fopen(zbx()['zabbix_config'],'r') as file_handle: - for line in file_handle: - if "ServerActive" in line: - flag = "true" - server = line.rsplit('=') - server = server[1].rsplit(',') - for s in server: - cmd = zbx()['sender'] + " -z " + s.replace('\n','') + " -s " + host + " -k " + key + " -o \"" + output +"\"" - __salt__['cmd.shell'](cmd) - break - else: - flag = "false" - if flag == 'false': - cmd = zbx()['sender'] + " -c " + zbx()['config'] + " -s " + host + " -k " + key + " -o \"" + output +"\"" - file_handle.close() +def zabbix_send(key, host, output): + with 
salt.utils.fopen(zbx()['zabbix_config'], 'r') as file_handle: + for line in file_handle: + if "ServerActive" in line: + flag = "true" + server = line.rsplit('=') + server = server[1].rsplit(',') + for s in server: + cmd = zbx()['sender'] + " -z " + s.replace('\n', '') + " -s " + host + " -k " + key + " -o \"" + output +"\"" + __salt__['cmd.shell'](cmd) + break + else: + flag = "false" + if flag == 'false': + cmd = zbx()['sender'] + " -c " + zbx()['config'] + " -s " + host + " -k " + key + " -o \"" + output +"\"" def returner(ret): From 25a19583048921d8814634f89f7a6f9639cff51f Mon Sep 17 00:00:00 2001 From: Nathan DELHAYE Date: Fri, 22 Sep 2017 15:49:44 +0200 Subject: [PATCH 639/639] Add timeout option to elasticsearch execution module --- salt/modules/elasticsearch.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/modules/elasticsearch.py b/salt/modules/elasticsearch.py index 9be0ce20f5..9d81164383 100644 --- a/salt/modules/elasticsearch.py +++ b/salt/modules/elasticsearch.py @@ -88,6 +88,7 @@ def _get_instance(hosts=None, profile=None): ca_certs = None verify_certs = True http_auth = None + timeout = 10 if profile is None: profile = 'elasticsearch' @@ -106,6 +107,7 @@ def _get_instance(hosts=None, profile=None): verify_certs = _profile.get('verify_certs', True) username = _profile.get('username', None) password = _profile.get('password', None) + timeout = _profile.get('timeout', 10) if username and password: http_auth = (username, password) @@ -131,6 +133,7 @@ def _get_instance(hosts=None, profile=None): ca_certs=ca_certs, verify_certs=verify_certs, http_auth=http_auth, + timeout=timeout, ) else: es = elasticsearch.Elasticsearch( @@ -139,6 +142,7 @@ def _get_instance(hosts=None, profile=None): ca_certs=ca_certs, verify_certs=verify_certs, http_auth=http_auth, + timeout=timeout, ) # Try the connection