Mirror of https://github.com/valitydev/salt.git, synced 2024-11-07 08:58:59 +00:00
Merge pull request #22952 from basepi/merge-forward-2015.2
[2015.2] Merge forward from 2014.7 to 2015.2
Commit: 1d9230cb8f
@@ -19,17 +19,18 @@ Implemented using ctypes, so no compilation is necessary.
'''
from __future__ import absolute_import

# Import python libs
# Import Python Lobs
from __future__ import absolute_import
from ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof
from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int
from ctypes.util import find_library

# Import Salt libs
# Import Salt Libs
from salt.utils import get_group_list
from salt.ext.six.moves import range

LIBPAM = CDLL(find_library('pam'))
LIBC = CDLL(find_library('c'))
LIBPAM = CDLL(find_library("pam"))
LIBC = CDLL(find_library("c"))

CALLOC = LIBC.calloc
CALLOC.restype = c_void_p
@@ -51,7 +52,7 @@ class PamHandle(Structure):
Wrapper class for pam_handle_t
'''
_fields_ = [
('handle', c_void_p)
("handle", c_void_p)
]

def __init__(self):
@@ -65,11 +66,11 @@ class PamMessage(Structure):
'''
_fields_ = [
("msg_style", c_int),
("msg", c_char_p),
("msg", POINTER(c_char)),
]

def __repr__(self):
return '<PamMessage {0} {1!r}>'.format(self.msg_style, self.msg)
return "<PamMessage {0:d} '{1}'>".format(self.msg_style, self.msg)


class PamResponse(Structure):
@@ -77,13 +78,12 @@ class PamResponse(Structure):
Wrapper class for pam_response structure
'''
_fields_ = [
('resp', c_char_p),
('resp_retcode', c_int),
("resp", POINTER(c_char)),
("resp_retcode", c_int),
]

def __repr__(self):
return '<PamResponse {0} {1!r}>'.format(self.resp_retcode, self.resp)

return "<PamResponse {0:d} '{1}'>".format(self.resp_retcode, self.resp)

CONV_FUNC = CFUNCTYPE(c_int,
c_int, POINTER(POINTER(PamMessage)),
@@ -95,24 +95,35 @@ class PamConv(Structure):
Wrapper class for pam_conv structure
'''
_fields_ = [
('conv', CONV_FUNC),
('appdata_ptr', c_void_p)
("conv", CONV_FUNC),
("appdata_ptr", c_void_p)
]


try:
PAM_START = LIBPAM.pam_start
PAM_START.restype = c_int
PAM_START.argtypes = [c_char_p, c_char_p, POINTER(PamConv),
                      POINTER(PamHandle)]
                      POINTER(PamHandle)]

PAM_END = LIBPAM.pam_end
PAM_END.restpe = c_int
PAM_END.argtypes = [PamHandle, c_int]

PAM_AUTHENTICATE = LIBPAM.pam_authenticate
PAM_AUTHENTICATE.restype = c_int
PAM_AUTHENTICATE.argtypes = [PamHandle, c_int]

PAM_END = LIBPAM.pam_end
PAM_END.restype = c_int
PAM_END.argtypes = [PamHandle, c_int]
PAM_SETCRED = LIBPAM.pam_setcred
PAM_SETCRED.restype = c_int
PAM_SETCRED.argtypes = [PamHandle, c_int]

PAM_OPEN_SESSION = LIBPAM.pam_open_session
PAM_OPEN_SESSION.restype = c_int
PAM_OPEN_SESSION.argtypes = [PamHandle, c_int]

PAM_CLOSE_SESSION = LIBPAM.pam_close_session
PAM_CLOSE_SESSION.restype = c_int
PAM_CLOSE_SESSION.argtypes = [PamHandle, c_int]
except Exception:
HAS_PAM = False
else:
@@ -150,7 +161,7 @@ def authenticate(username, password, service='login'):
for i in range(n_messages):
if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
pw_copy = STRDUP(str(password))
p_response.contents[i].resp = cast(pw_copy, c_char_p)
p_response.contents[i].resp = pw_copy
p_response.contents[i].resp_retcode = 0
return 0

@@ -165,7 +176,26 @@ def authenticate(username, password, service='login'):
return False

retval = PAM_AUTHENTICATE(handle, 0)
PAM_END(handle, 0)
if retval != 0:
PAM_END(handle, retval)
return False

retval = PAM_SETCRED(handle, 0)
if retval != 0:
PAM_END(handle, retval)
return False

retval = PAM_OPEN_SESSION(handle, 0)
if retval != 0:
PAM_END(handle, retval)
return False

retval = PAM_CLOSE_SESSION(handle, 0)
if retval != 0:
PAM_END(handle, retval)
return False

retval = PAM_END(handle, retval)
return retval == 0

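The reworked authenticate() above chains pam_authenticate, pam_setcred, pam_open_session and pam_close_session, ending the PAM transaction with the failing return code as soon as any step is non-zero. A minimal standalone sketch of that control flow, with stub callables standing in for the real libpam bindings (names here are illustrative only, not part of the module):

    def _auth_flow(handle, steps, pam_end):
        # Run each PAM step in order; on the first non-zero return code,
        # end the transaction with that code and report failure.
        for step in steps:
            retval = step(handle)
            if retval != 0:
                pam_end(handle, retval)
                return False
        # Every step succeeded; success now depends only on pam_end itself.
        return pam_end(handle, 0) == 0

    # Stubbed example: authenticate, setcred, open/close session all "succeed".
    print(_auth_flow(object(), [lambda h: 0, lambda h: 0, lambda h: 0],
                     lambda h, code: 0))  # True
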
@@ -33,6 +33,7 @@ import salt.client
import salt.loader
import salt.utils
import salt.utils.cloud
import salt.syspaths
from salt.utils import context
from salt.ext.six import string_types
from salt.template import compile_template
@@ -1327,12 +1328,19 @@ class Cloud(object):
ret = {}
if not vm_overrides:
vm_overrides = {}

with salt.utils.fopen(os.path.join(salt.syspaths.CONFIG_DIR, 'cloud'), 'r') as mcc:
main_cloud_config = yaml.safe_load(mcc)

profile_details = self.opts['profiles'][profile]
alias, driver = profile_details['provider'].split(':')
mapped_providers = self.map_providers_parallel()
alias_data = mapped_providers.setdefault(alias, {})
vms = alias_data.setdefault(driver, {})

provider_details = self.opts['providers'][alias][driver].copy()
del provider_details['profiles']

for name in names:
name_exists = False
if name in vms:
@@ -1349,8 +1357,11 @@ class Cloud(object):
ret[name] = {'Error': msg}
continue

vm_ = profile_details.copy()
vm_ = main_cloud_config.copy()
vm_.update(provider_details)
vm_.update(profile_details)
vm_.update(vm_overrides)

vm_['name'] = name
if self.opts['parallel']:
process = multiprocessing.Process(

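The profile-creation change above builds the VM definition by layering dictionaries, so later sources override earlier ones: the master's main cloud config, then the provider, then the profile, then any per-call overrides. A tiny standalone illustration of that precedence (the keys and values are made up for the example):

    main_cloud_config = {'minion': {'master': 'salt'}, 'image': 'default'}
    provider_details = {'image': 'ami-111', 'location': 'us-east-1'}
    profile_details = {'image': 'ami-222', 'size': 't2.micro'}
    vm_overrides = {'size': 't2.small'}

    vm_ = main_cloud_config.copy()
    vm_.update(provider_details)   # provider settings override the main config
    vm_.update(profile_details)    # profile settings override the provider
    vm_.update(vm_overrides)       # per-call overrides win over everything
    print(vm_['image'], vm_['size'])  # ami-222 t2.small
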
@@ -63,6 +63,14 @@ def __virtual__():
return True


def _is_valid_resource(_type):
if _type in ('A', 'CNAME', 'MX'):
return True
else:
log.error('{0} is an unsupported resource type.'.format(_type))
return False


def get_record(name, zone, record_type, fetch_all=False, region=None, key=None,
keyid=None, profile=None):
'''
@@ -82,36 +90,28 @@ def get_record(name, zone, record_type, fetch_all=False, region=None, key=None,
return None
_type = record_type.upper()
ret = odict.OrderedDict()

if not _is_valid_resource(_type):
return None

if _type == 'A':
_record = _zone.get_a(name, fetch_all)
if _record:
ret['name'] = _record.name
ret['value'] = _record.to_print()
ret['record_type'] = _record.type
ret['ttl'] = _record.ttl
elif _type == 'CNAME':
_record = _zone.get_cname(name, fetch_all)
if _record:
ret['name'] = _record.name
ret['value'] = _record.to_print()
ret['record_type'] = _record.type
ret['ttl'] = _record.ttl
elif _type == 'MX':
_record = _zone.get_mx(name, fetch_all)
if _record:
ret['name'] = _record.name
ret['value'] = _record.to_print()
ret['record_type'] = _record.type
ret['ttl'] = _record.ttl
else:
msg = '{0} is an unsupported resource type.'.format(_type)
log.error(msg)
return None

if _record:
ret['name'] = _record.name
ret['value'] = _record.to_print()
ret['record_type'] = _record.type
ret['ttl'] = _record.ttl

return ret

def add_record(name, value, zone, record_type, identifier=None, ttl=None,
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None, sync_wait=False):
'''
Add a record to a zone.

@@ -128,24 +128,29 @@ def add_record(name, value, zone, record_type, identifier=None, ttl=None,
log.error(msg)
return False
_type = record_type.upper()

if not _is_valid_resource(_type):
return False

if _type == 'A':
status = _zone.add_a(name, value, ttl, identifier)
return _wait_for_sync(status.id, conn)
elif _type == 'CNAME':
status = _zone.add_cname(name, value, ttl, identifier)
return _wait_for_sync(status.id, conn)
elif _type == 'MX':
status = _zone.add_mx(name, value, ttl, identifier)
return _wait_for_sync(status.id, conn)

if sync_wait:
if _wait_for_sync(status.id, conn):
return True
else:
log.error('Failed to add route53 record {0}.'.format(name))
return False
else:
msg = '{0} is an unsupported resource type.'.format(_type)
log.error(msg)
log.error('Failed to add route53 record {0}.'.format(name))
return False
return True

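With the new optional sync_wait flag, add_record() (and the matching update/delete functions below) submits the change and returns immediately by default, only blocking on _wait_for_sync() when asked to. The general shape of that pattern, with placeholder callables rather than the real boto objects:

    def submit_change(apply_change, wait_for_sync, sync_wait=False):
        # Apply the change, then optionally block until it has propagated.
        status = apply_change()
        if not sync_wait:
            return True
        if wait_for_sync(status):
            return True
        # Submitted, but did not reach INSYNC within the polling budget.
        return False

    print(submit_change(lambda: 'status-id', lambda s: True, sync_wait=True))  # True
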
def update_record(name, value, zone, record_type, identifier=None, ttl=None,
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None, sync_wait=False):
'''
Modify a record in a zone.

@@ -162,24 +167,29 @@ def update_record(name, value, zone, record_type, identifier=None, ttl=None,
log.error(msg)
return False
_type = record_type.upper()

if not _is_valid_resource(_type):
return False

if _type == 'A':
status = _zone.update_a(name, value, ttl, identifier)
return _wait_for_sync(status.id, conn)
elif _type == 'CNAME':
status = _zone.update_cname(name, value, ttl, identifier)
return _wait_for_sync(status.id, conn)
elif _type == 'MX':
status = _zone.update_mx(name, value, ttl, identifier)
return _wait_for_sync(status.id, conn)

if sync_wait:
if _wait_for_sync(status.id, conn):
return True
else:
log.error('Failed to update route53 record {0}.'.format(name))
return False
else:
msg = '{0} is an unsupported resource type.'.format(_type)
log.error(msg)
log.error('Failed to update route53 record {0}.'.format(name))
return False
return True

def delete_record(name, zone, record_type, identifier=None, all_records=False,
region=None, key=None, keyid=None, profile=None):
region=None, key=None, keyid=None, profile=None, sync_wait=False):
'''
Modify a record in a zone.

@@ -196,32 +206,38 @@ def delete_record(name, zone, record_type, identifier=None, all_records=False,
log.error(msg)
return False
_type = record_type.upper()

if not _is_valid_resource(_type):
return False

if _type == 'A':
status = _zone.delete_a(name, identifier, all_records)
return _wait_for_sync(status.id, conn)
elif _type == 'CNAME':
status = _zone.delete_cname(name, identifier, all_records)
return _wait_for_sync(status.id, conn)
elif _type == 'MX':
status = _zone.delete_mx(name, identifier, all_records)
return _wait_for_sync(status.id, conn)

if sync_wait:
if _wait_for_sync(status.id, conn):
return True
else:
log.error('Failed to delete route53 record {0}.'.format(name))
return False
else:
msg = '{0} is an unsupported resource type.'.format(_type)
log.error(msg)
log.error('Failed to delete route53 record {0}.'.format(name))
return False
return True

def _wait_for_sync(status, conn):
retry = 30
retry = 10
i = 0
while i < retry:
log.info('Getting route53 status (attempt {0})'.format(i + 1))
change = conn.get_change(status)
log.debug(change.GetChangeResponse.ChangeInfo.Status)
if change.GetChangeResponse.ChangeInfo.Status == 'INSYNC':
return True
i = i + 1
time.sleep(10)
time.sleep(20)
log.error('Timed out waiting for Route53 status update.')
return False

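_wait_for_sync() is a bounded polling loop, retuned here to fewer attempts (10 instead of 30) with a longer pause between them (20 seconds instead of 10). The same shape in a self-contained form, using a fake status check and a short delay so it runs instantly:

    import time

    def wait_for(check, retries=10, delay=0.1):
        # Poll check() up to `retries` times, sleeping `delay` seconds between
        # attempts; report failure once the attempts are exhausted.
        for attempt in range(retries):
            if check():
                return True
            time.sleep(delay)
        return False

    results = iter([False, False, True])    # pretend the third poll sees INSYNC
    print(wait_for(lambda: next(results)))  # True
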
@@ -1767,14 +1767,22 @@ def _run_wrapper(status, container, func, cmd, *args, **kwargs):
container_id = container_info['Id']
if driver.startswith('lxc-'):
full_cmd = 'lxc-attach -n {0} -- {1}'.format(container_id, cmd)
elif driver.startswith('native-') and HAS_NSENTER:
# http://jpetazzo.github.io/2014/03/23/lxc-attach-nsinit-nsenter-docker-0-9/
container_pid = container_info['State']['Pid']
if container_pid == 0:
_invalid(status, id_=container, comment='Container is not running')
return status
full_cmd = ('nsenter --target {pid} --mount --uts --ipc --net --pid'
' -- {cmd}'.format(pid=container_pid, cmd=cmd))
elif driver.startswith('native-'):
if HAS_NSENTER:
# http://jpetazzo.github.io/2014/03/23/lxc-attach-nsinit-nsenter-docker-0-9/
container_pid = container_info['State']['Pid']
if container_pid == 0:
_invalid(status, id_=container,
comment='Container is not running')
return status
full_cmd = (
'nsenter --target {pid} --mount --uts --ipc --net --pid'
' -- {cmd}'.format(pid=container_pid, cmd=cmd)
)
else:
raise CommandExecutionError(
'nsenter is not installed on the minion, cannot run command'
)
else:
raise NotImplementedError(
'Unknown docker ExecutionDriver {0!r}. Or didn\'t find command'

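For the native execution driver, the change above only restructures how the nsenter command line is assembled around the container's PID, and raises a clear error when nsenter is missing. The string construction on its own, with placeholder values:

    container_pid = 12345    # in the module this comes from the container's State.Pid
    cmd = 'ls -l /'          # command to run inside the container

    full_cmd = ('nsenter --target {pid} --mount --uts --ipc --net --pid'
                ' -- {cmd}'.format(pid=container_pid, cmd=cmd))
    print(full_cmd)
    # nsenter --target 12345 --mount --uts --ipc --net --pid -- ls -l /
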
@@ -3686,12 +3686,16 @@ def makedirs_(path,

if os.path.isdir(dirname):
# There's nothing for us to do
return 'Directory {0!r} already exists'.format(dirname)
msg = 'Directory {0!r} already exists'.format(dirname)
log.debug(msg)
return msg

if os.path.exists(dirname):
return 'The path {0!r} already exists and is not a directory'.format(
msg = 'The path {0!r} already exists and is not a directory'.format(
dirname
)
log.debug(msg)
return msg

directories_to_create = []
while True:
@@ -3705,6 +3709,7 @@ def makedirs_(path,
directories_to_create.reverse()
for directory_to_create in directories_to_create:
# all directories have the user, group and mode set!!
log.debug('Creating directory: %s', directory_to_create)
mkdir(directory_to_create, user=user, group=group, mode=mode)

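makedirs_() walks up the requested path collecting every parent that is missing, then reverses the list so creation happens top-down. A minimal standalone sketch of that collection step (standard library only; the real function also applies user, group and mode to each directory it creates):

    import os.path

    def missing_parents(dirname):
        # Record every ancestor that does not exist yet, deepest first,
        # then reverse so creation can start from the top of the tree.
        to_create = []
        while dirname and not os.path.isdir(dirname):
            to_create.append(dirname)
            dirname = os.path.dirname(dirname)
        to_create.reverse()
        return to_create

    print(missing_parents('/tmp/a/b/c'))  # e.g. ['/tmp/a', '/tmp/a/b', '/tmp/a/b/c']
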
@@ -42,7 +42,7 @@ def _check_pkgin():


@decorators.memoize
def _supports_regex():
def _get_version():
'''
Get the pkgin version
'''
@@ -58,7 +58,25 @@ def _supports_regex():
if not version_match:
return False

return tuple([int(i) for i in version_match.group(1).split('.')]) > (0, 5)
return version_match.group(1).split('.')


@decorators.memoize
def _supports_regex():
'''
Check support of regexp
'''

return tuple([int(i) for i in _get_version()]) > (0, 5)


@decorators.memoize
def _supports_parsing():
'''
Check support of parsing
'''

return tuple([int(i) for i in _get_version()]) > (0, 7)


def __virtual__():
@@ -75,7 +93,7 @@ def __virtual__():
def _splitpkg(name):
# name is in the format foobar-1.0nb1, already space-splitted
if name[0].isalnum() and name != 'No': # avoid < > = and 'No result'
return name.rsplit('-', 1)
return name.split(';', 1)[0].rsplit('-', 1)


def search(pkg_name):
@@ -240,7 +258,10 @@ def list_pkgs(versions_as_list=False, **kwargs):
out = __salt__['cmd.run'](pkg_command, output_loglevel='trace')
for line in out.splitlines():
try:
pkg, ver = line.split(' ')[0].rsplit('-', 1)
if _supports_parsing():
pkg, ver = line.split(';', 1)[0].rsplit('-', 1)
else:
pkg, ver = line.split(' ', 1)[0].rsplit('-', 1)
except ValueError:
continue
__salt__['pkg_resource.add_pkg'](ret, pkg, ver)

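The new _get_version()/_supports_regex()/_supports_parsing() helpers compare the pkgin version as a tuple of integers, which orders '0.10' above '0.9' where a plain string comparison would not. A quick standalone check of that idea:

    def supports(version_string, minimum):
        # Compare dotted version strings numerically, component by component.
        return tuple(int(part) for part in version_string.split('.')) > minimum

    print(supports('0.6.4', (0, 5)))   # True  -> regex search available
    print(supports('0.10.0', (0, 7)))  # True  -> machine-readable output available
    print('0.10.0' > '0.7')            # False -> why string comparison is not enough
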
@@ -131,12 +131,15 @@ def add(name,
# /etc/usermgmt.conf not present: defaults will be used
pass

if createhome:
cmd.append('-m')
elif (createhome is False
and __grains__['kernel'] != 'NetBSD'
and __grains__['kernel'] != 'OpenBSD'):
cmd.append('-M')
if isinstance(createhome, bool):
if createhome:
cmd.append('-m')
elif (__grains__['kernel'] != 'NetBSD'
and __grains__['kernel'] != 'OpenBSD'):
cmd.append('-M')
else:
log.error('Value passes to ``createhome`` must be a boolean')
return False

if home is not None:
cmd.extend(['-d', home])
@@ -310,8 +313,8 @@ def chshell(name, shell):

def chhome(name, home, persist=False):
'''
Change the home directory of the user, pass True for persist to copy files
to the new home dir
Change the home directory of the user, pass True for persist to move files
to the new home directory if the old home directory exist.

CLI Example:

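The add() change validates createhome before turning it into a useradd flag, rather than silently accepting non-boolean values. A small standalone version of that branch (the kernel name is a plain parameter here; the real module reads it from grains):

    def home_flags(createhome, kernel='Linux'):
        # Only accept a real boolean; '-m' creates the home directory, '-M'
        # suppresses it on platforms whose useradd supports that flag.
        if not isinstance(createhome, bool):
            raise ValueError('createhome must be a boolean')
        if createhome:
            return ['-m']
        if kernel not in ('NetBSD', 'OpenBSD'):
            return ['-M']
        return []

    print(home_flags(True))              # ['-m']
    print(home_flags(False, 'OpenBSD'))  # []
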
@@ -2,6 +2,8 @@
'''
Clone a remote git repository and use the filesystem as a Pillar source

Currently GitPython is the only supported provider for git Pillars

This external Pillar source can be configured in the master config file like
so:

@@ -128,8 +128,9 @@ def lookup_jid(jid,
else:
ret[minion] = data[minion].get('return')
if missing:
load = mminion.returners['{0}.get_load'.format(returner)](jid)
ckminions = salt.utils.minions.CkMinions(__opts__)
exp = ckminions.check_minions(data['tgt'], data['tgt_type'])
exp = ckminions.check_minions(load['tgt'], load['tgt_type'])
for minion_id in exp:
if minion_id not in data:
ret[minion_id] = 'Minion did not return'

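The lookup_jid change only affects where the target expression comes from (the stored job load instead of the per-minion return data); the missing-minion bookkeeping itself remains a simple membership check. In isolation:

    expected = ['web1', 'web2', 'db1']      # minions matched by the job's target
    returned = {'web1': {'return': True}}   # minions that actually answered

    report = {minion: data['return'] for minion, data in returned.items()}
    for minion_id in expected:
        if minion_id not in returned:
            report[minion_id] = 'Minion did not return'
    print(sorted(report.items()))
    # [('db1', 'Minion did not return'), ('web1', True), ('web2', 'Minion did not return')]
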
@@ -2284,7 +2284,7 @@ class BaseHighState(object):
),
self.state.rend,
self.state.opts['renderer'],
env=self.opts['environment']
saltenv=self.opts['environment']
)
]
else:

@@ -99,7 +99,7 @@ Available Functions

/finish-install.sh:
docker.run:
- container: mysuperdocker
- cid: mysuperdocker
- unless: grep -q something /var/log/foo
- docker_unless: grep -q done /install_log

@@ -2372,16 +2372,64 @@ def replace(name,
backup='.bak',
show_changes=True):
r'''
Maintain an edit in a file
Maintain an edit in a file.

.. versionadded:: 0.17.0

Params are identical to the remote execution function :mod:`file.replace
<salt.modules.file.replace>`.
name
Filesystem path to the file to be edited.

For complex regex patterns it can be useful to avoid the need for complex
quoting and escape sequences by making use of YAML's multiline string
syntax.
pattern
Python's `regular expression search<https://docs.python.org/2/library/re.html>`_.

repl
The replacement text.

count
Maximum number of pattern occurrences to be replaced.

flags
A list of flags defined in the :ref:`re module documentation <contents-of-module-re>`.
Each list item should be a string that will correlate to the human-friendly flag name.
E.g., ``['IGNORECASE', 'MULTILINE']``. Note: multiline searches must specify ``file``
as the ``bufsize`` argument below. Defaults to 0 and can be a list or an int.

bufsize
How much of the file to buffer into memory at once. The default value ``1`` processes
one line at a time. The special value ``file`` may be specified which will read the
entire file into memory before processing. Note: multiline searches must specify ``file``
buffering. Can be an int or a str.

append_if_not_found
If pattern is not found and set to ``True`` then, the content will be appended to the file.

.. versionadded:: 2014.7.0

prepend_if_not_found
If pattern is not found and set to ``True`` then, the content will be prepended to the file.

.. versionadded:: 2014.7.0

not_found_content
Content to use for append/prepend if not found. If ``None`` (default), uses ``repl``. Useful
when ``repl`` uses references to group in pattern.

.. versionadded:: 2014.7.0

backup
The file extension to use for a backup of the file before editing. Set to ``False`` to skip
making a backup.

show_changes
Output a unified diff of the old file and the new file. If ``False`` return a boolean if any
changes were made. Returns a boolean or a string.

.. note:
Using this option will store two copies of the file in-memory (the original version and
the edited version) in order to generate the diff.

For complex regex patterns it can be useful to avoid the need for complex quoting and escape
sequences by making use of YAML's multiline string syntax.

.. code-block:: yaml

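The flags argument documented above takes human-readable names such as ['IGNORECASE', 'MULTILINE']; conceptually those names just get OR-ed into a single re flag value before the search runs. A standalone illustration of that mapping (not the state's actual implementation):

    import re

    def combine_flags(flag_names):
        # Turn ['IGNORECASE', 'MULTILINE'] into re.IGNORECASE | re.MULTILINE.
        combined = 0
        for name in flag_names:
            combined |= getattr(re, name)
        return combined

    flags = combine_flags(['IGNORECASE', 'MULTILINE'])
    print(bool(re.search('^salt', 'first line\nSalt line', flags)))  # True
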
@@ -102,7 +102,16 @@ def user_present(name,
Availability state for this user

roles
The roles the user should have under tenants
The roles the user should have under given tenants.
Passed as a dictionary mapping tenant names to a list
of roles in this tenant, i.e.::

roles:
admin: # tenant
- admin # role
service:
- admin
- Member
'''
ret = {'name': name,
'changes': {},
@@ -173,11 +182,11 @@ def user_present(name,
ret['comment'] = 'User "{0}" has been updated'.format(name)
ret['changes']['Password'] = 'Updated'
if roles:
for tenant_role in roles:
for tenant in roles.keys():
args = dict({'user_name': name, 'tenant_name':
tenant_role, 'profile': profile}, **connection_args)
tenant, 'profile': profile}, **connection_args)
tenant_roles = __salt__['keystone.user_role_list'](**args)
for role in roles[tenant_role]:
for role in roles[tenant]:
if role not in tenant_roles:
if __opts__['test']:
ret['result'] = None
@@ -187,7 +196,7 @@ def user_present(name,
ret['changes']['roles'] = [role]
continue
addargs = dict({'user': name, 'role': role,
'tenant': tenant_role,
'tenant': tenant,
'profile': profile},
**connection_args)
newrole = __salt__['keystone.user_role_add'](**addargs)
@@ -195,7 +204,7 @@ def user_present(name,
ret['changes']['roles'].append(newrole)
else:
ret['changes']['roles'] = [newrole]
roles_to_remove = list(set(tenant_roles) - set(roles[tenant_role]))
roles_to_remove = list(set(tenant_roles) - set(roles[tenant]))
for role in roles_to_remove:
if __opts__['test']:
ret['result'] = None
@@ -205,7 +214,7 @@ def user_present(name,
ret['changes']['roles'] = [role]
continue
addargs = dict({'user': name, 'role': role,
'tenant': tenant_role,
'tenant': tenant,
'profile': profile},
**connection_args)
oldrole = __salt__['keystone.user_role_remove'](**addargs)
@@ -228,11 +237,11 @@ def user_present(name,
profile=profile,
**connection_args)
if roles:
for tenant_role in roles:
for role in roles[tenant_role]:
for tenant in roles.keys():
for role in roles[tenant]:
__salt__['keystone.user_role_add'](user=name,
role=role,
tenant=tenant_role,
tenant=tenant,
profile=profile,
**connection_args)
ret['comment'] = 'Keystone user {0} has been added'.format(name)

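user_present() now documents roles as a mapping of tenant name to role list, and the loop above reconciles the user's current roles against that mapping one tenant at a time. The reconciliation logic on its own, with plain data instead of Keystone calls:

    desired = {'admin': ['admin'], 'service': ['admin', 'Member']}
    current = {'admin': ['admin', 'Member'], 'service': ['admin']}

    for tenant, wanted in desired.items():
        have = current.get(tenant, [])
        to_add = [role for role in wanted if role not in have]
        to_remove = list(set(have) - set(wanted))
        print(tenant, 'add:', to_add, 'remove:', to_remove)
    # admin add: [] remove: ['Member']
    # service add: ['Member'] remove: []
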
@@ -109,8 +109,10 @@ def _changes(name,
if home:
if lusr['home'] != home:
change['home'] = home
if createhome and not os.path.isdir(home):
change['homeDoesNotExist'] = home
if createhome:
newhome = home if home else lusr['home']
if not os.path.isdir(newhome):
change['homeDoesNotExist'] = newhome

if shell:
if lusr['shell'] != shell:
@@ -220,7 +222,10 @@ def present(name,
the state, Default is ``True``.

home
The location of the home directory to manage
The custom login directory of user. Uses default value of underlying
system if not set. Notice that this directory does not have to exists.
This also the location of the home directory to create if createhome is
set to True.

createhome
If False, the home directory will not be created if it doesn't exist.
@@ -390,11 +395,14 @@ def present(name,
if key == 'date':
__salt__['shadow.set_date'](name, date)
continue
if key == 'home' or key == 'homeDoesNotExist':
if createhome:
__salt__['user.chhome'](name, val, True)
else:
__salt__['user.chhome'](name, val, False)
# run chhome once to avoid any possible bad side-effect
if key == 'home' and 'homeDoesNotExist' not in changes:
__salt__['user.chhome'](name, val, False)
continue
if key == 'homeDoesNotExist':
__salt__['user.chhome'](name, val, True)
if not os.path.isdir(val):
__salt__['file.mkdir'](val, pre['uid'], pre['gid'], 0755)
continue
if key == 'mindays':
__salt__['shadow.set_mindays'](name, mindays)

@@ -339,7 +339,7 @@ class PipStateTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
# Let's install a fixed version pip over whatever pip was
# previously installed
ret = self.run_function(
'pip.install', ['pip==1.3.1'], upgrade=True,
'pip.install', ['pip==6.0'], upgrade=True,
ignore_installed=True,
bin_env=venv_dir
)
@@ -354,15 +354,15 @@ class PipStateTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
pprint.pprint(ret)
raise

# Le't make sure we have pip 1.3.1 installed
# Le't make sure we have pip 6.0 installed
self.assertEqual(
self.run_function('pip.list', ['pip'], bin_env=venv_dir),
{'pip': '1.3.1'}
{'pip': '6.0'}
)

# Now the actual pip upgrade pip test
ret = self.run_state(
'pip.installed', name='pip==1.4.1', upgrade=True,
'pip.installed', name='pip==6.0.7', upgrade=True,
bin_env=venv_dir
)
try:
@@ -370,7 +370,7 @@ class PipStateTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
self.assertInSaltReturn(
'Installed',
ret,
['changes', 'pip==1.4.1']
['changes', 'pip==6.0.7']
)
except AssertionError:
import pprint

@@ -61,6 +61,27 @@ class UserTest(integration.ModuleCase,
ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)

@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_when_home_dir_does_not_18843(self):
'''
This is a DESTRUCTIVE TEST it creates a new user on the minion.
And then destroys that user.
Assume that it will break any system you run it on.
'''
HOMEDIR = '/tmp/home_of_salt_test'
ret = self.run_state('user.present', name='salt_test',
home=HOMEDIR)
self.assertSaltTrueReturn(ret)

self.run_function('file.absent', name=HOMEDIR)
ret = self.run_state('user.present', name='salt_test',
home=HOMEDIR)
self.assertSaltTrueReturn(ret)

ret = self.run_state('user.absent', name='salt_test')
self.assertSaltTrueReturn(ret)

@destructiveTest
@skipIf(os.geteuid() != 0, 'you must be root to run this test')
def test_user_present_nondefault(self):