Merge remote-tracking branch 'upstream/develop' into sam_raet_2

Conflicts:
    salt/transport/road/raet/stacking.py
    salt/transport/road/raet/test/test_stacking.py

This commit is contained in commit ebae5afc64.
@@ -213,6 +213,9 @@ Edit the minion config file:

4. Uncomment and change the ``id:`` value to something descriptive like
   "saltdev". This isn't strictly necessary but it will serve as a reminder of
   which Salt installation you are working with.
5. If you changed the ``ret_port`` value in the master config because you are
   also running a non-development version of Salt, then you will have to
   change the ``master_port`` value in the minion config to match.

.. note:: Using `salt-call` with a :doc:`Standalone Minion </topics/tutorials/standalone_minion>`
doc/_themes/saltstack/layout.html (vendored, 20 lines changed)
@@ -233,27 +233,23 @@
        <div class="row-fluid">
          <div class="footerCol">
            <h4>About Us</h4>
            <a href="http://saltstack.com/about.html">SaltStack</a>
            <a href="http://saltstack.com/about.html">Leadership</a>
            <a href="http://saltstack.com/">SaltStack</a>
            <a href="http://saltstack.com/about/">Leadership</a>
          </div>
          <div class="footerCol">
            <h4>Products</h4>
            <a href="http://saltstack.com/products.html">Remote Execution</a>
            <a href="http://saltstack.com/products.html">Config Management</a>
            <a href="http://saltstack.com/products.html">Cloud Management</a>
            <a href="http://saltstack.com/products.html">SaltStack Solutions</a>
            <a href="http://saltstack.com/enterprise/">Enterprise</a>
            <a href="http://saltstack.com/services/">Integration</a>
          </div>
          <div class="footerCol">
            <h4>Services</h4>
            <a href="http://saltstack.com/services.html">Onsite Training</a>
            <a href="http://saltstack.com/services.html">Regional Training</a>
            <a href="http://saltstack.com/services.html">Custom Professional Services</a>
            <a href="http://saltstack.com/services.html">Training Dates and Locations</a>
            <a href="http://saltstack.com/training/">Onsite Training</a>
            <a href="http://saltstack.com/services/">Custom Professional Services</a>
          </div>
          <div class="footerCol">
            <h4>Contact Us</h4>
            <a href="http://saltstack.com/contact.html">Support</a>
            <a href="http://saltstack.com/contact.html">Contact us</a>
            <a href="http://saltstack.com/contact/">Support</a>
            <a href="http://saltstack.com/contact/">Contact us</a>
          </div>
          <div class="footerCol">
            <h4>Community</h4>
@@ -37,7 +37,10 @@ Set up the provider config at ``/etc/salt/cloud.providers.d/azure.conf``:

      minion:
          master: saltmaster.example.com

      provider: gce
      provider: azure

      # Optional
      management_host: management.core.windows.net

The certificate used must be generated by the user. OpenSSL can be used to
create the management certificates. Two certificates are needed: a .cer file,

@@ -58,6 +61,9 @@ To create the .cer file, execute the following command:

After creating these files, the .cer file will need to be uploaded to
Azure via the "Upload" action of the "Settings" tab of the management portal.

Optionally, a ``management_host`` may be configured, if necessary for your
region.


Cloud Profiles
==============
@@ -43,6 +43,8 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or in the

        image: Ubuntu 12.10 x64
        size: 512MB
        location: New York 1
        private_networking: True
        backups_enabled: True

Sizes can be obtained using the ``--list-sizes`` option for the ``salt-cloud``
command:
@@ -27,7 +27,7 @@ Split Package

Salt should always be split in a standard way, with standard dependencies; this lowers
cross-distribution confusion about what components are going to be shipped with
specific packages. These packages can be defined from the Salt source as of
Salt 0.17.0:
Salt 2014.1.0:

Salt Common
-----------

@@ -177,6 +177,7 @@ Files

- `scripts/salt-ssh`
- `man/salt-ssh.1`
- `conf/cloud*`

Depends
~~~~~~~

@@ -185,6 +186,31 @@ Depends

- `sshpass`
- `Python MessagePack` (Messagepack C lib, or msgpack-pure)

Salt Cloud
----------

As of Salt 2014.1.0, Salt Cloud is included in the same repo as Salt. This
can be split out into a separate package or it can be included in the
salt-master package.

Name
~~~~

- `salt-cloud`

Files
~~~~~

- `scripts/salt-cloud`
- `man/salt-cloud.1`

Depends
~~~~~~~

- `Salt Common`
- `sshpass`
- `apache libcloud`

Salt Doc
--------
@@ -267,6 +267,9 @@ Edit the minion config file:

4. Uncomment and change the ``id:`` value to something descriptive like
   "saltdev". This isn't strictly necessary but it will serve as a reminder of
   which Salt installation you are working with.
5. If you changed the ``ret_port`` value in the master config because you are
   also running a non-development version of Salt, then you will have to
   change the ``master_port`` value in the minion config to match.

.. note:: Using `salt-call` with a :doc:`Standalone Minion </topics/tutorials/standalone_minion>`
@@ -2,9 +2,9 @@
Preseed Minion with Accepted Key
================================

In some situations, it is not convenient to wait for a minion to start before
accepting its key on the master. For instance, you may want the minion to
bootstrap itself as soon as it comes online. You may also want to let your
developers provision new development machines on the fly.

There is a general four-step process to do this:

@@ -23,23 +23,22 @@ Pick a name for the key, such as the minion's id.

    root@saltmaster# cp key_name.pub /etc/salt/pki/master/minions/[minion_id]

It is necessary that the public key file has the same name as your minion id.
This is how Salt matches minions with their keys. Also note that the pki folder
could be in a different location, depending on your OS or if specified in the
master config file.

3. Distribute the minion keys.

There is no single method to get the keypair to your minion. If you are
spooling up minions on EC2, you could pass them in using user_data or a
cloud-init script. If you are handing them off to a team of developers for
provisioning dev machines, you will need a secure file transfer.
There is no single method to get the keypair to your minion. The difficulty is
finding a distribution method which is secure.

.. admonition:: Security Warning

    Since the minion key is already accepted on the master, distributing
    the private key poses a potential security risk. A malicious party
    will have access to your entire state tree and other sensitive data.
    Since the minion key is already accepted on the master, distributing
    the private key poses a potential security risk. A malicious party
    will have access to your entire state tree and other sensitive data if they
    gain access to a preseeded minion key.

4. Preseed the Minion with the keys

@@ -50,6 +49,6 @@ You will want to place the minion keys before starting the salt-minion daemon:

    /etc/salt/pki/minion/minion.pem
    /etc/salt/pki/minion/minion.pub

Once in place, you should be able to start salt-minion and run
``salt-call state.highstate`` or any other salt commands that require master
authentication.
@@ -88,7 +88,7 @@ def auth(pem, **kwargs):

    # The signature is a BIT STRING (Type 3)
    # Decode that as well
    der_sig_in = Crypto.util.asn1.DerObject()
    der_sig_in = Crypto.Util.asn1.DerObject()
    der_sig_in.decode(der_sig)

    # Get the payload
@@ -860,7 +860,19 @@ class LocalClient(object):
            # Wait 0 == forever, use a minimum of 1s
            wait = max(1, time_left)
            raw = self.event.get_event(wait, jid)
            if raw is not None:
            if raw is None:
                if len(found.intersection(minions)) >= len(minions):
                    # All minions have returned, break out of the loop
                    log.debug('jid %s found all minions %s', jid, found)
                    if self.opts['order_masters']:
                        if syndic_wait < self.opts.get('syndic_wait', 1):
                            syndic_wait += 1
                            timeout_at = int(time.time()) + 1
                            log.debug('jid %s syndic_wait %s will now timeout at %s',
                                      jid, syndic_wait, datetime.fromtimestamp(timeout_at).time())
                            continue
                    break
            else:
                if 'minions' in raw.get('data', {}):
                    minions.update(raw['data']['minions'])
                    continue

@@ -879,17 +891,7 @@ class LocalClient(object):
                    ret[raw['id']]['out'] = raw['out']
                log.debug('jid %s return from %s', jid, raw['id'])
                yield ret
                if len(found.intersection(minions)) >= len(minions):
                    # All minions have returned, break out of the loop
                    log.debug('jid %s found all minions %s', jid, found)
                    if self.opts['order_masters']:
                        if syndic_wait < self.opts.get('syndic_wait', 1):
                            syndic_wait += 1
                            timeout_at = int(time.time()) + 1
                            log.debug('jid %s syndic_wait %s will now timeout at %s',
                                      jid, syndic_wait, datetime.fromtimestamp(timeout_at).time())
                            continue
                    break

                continue
            # Then event system timeout was reached and nothing was returned
            if len(found.intersection(minions)) >= len(minions):
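The completion test in this hunk is plain set arithmetic over minion ids; a tiny standalone sketch of the same check, with made-up minion names::

    # A job is "complete" once every targeted minion has replied.
    targeted = set(['web1', 'web2', 'db1'])   # minions the command was published to
    returned = set()                          # minions whose return events have arrived

    for event_minion in ('web1', 'db1', 'web2'):
        returned.add(event_minion)
        if len(returned.intersection(targeted)) >= len(targeted):
            print('all minions have returned')
            break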
@@ -286,9 +286,20 @@ def create(vm_):
    )

    private_networking = config.get_cloud_config_value(
        'private_networking', vm_, __opts__, search_global=False, default=None
        'private_networking', vm_, __opts__, search_global=False, default=None,
    )
    kwargs['private_networking'] = 'true' if private_networking else 'false'
    if private_networking is not None:
        if not isinstance(private_networking, bool):
            raise SaltCloudConfigError("'private_networking' should be a boolean value.")
        kwargs['private_networking'] = private_networking

    backups_enabled = config.get_cloud_config_value(
        'backups_enabled', vm_, __opts__, search_global=False, default=None,
    )
    if backups_enabled is not None:
        if not isinstance(backups_enabled, bool):
            raise SaltCloudConfigError("'backups_enabled' should be a boolean value.")
        kwargs['backups_enabled'] = backups_enabled

    salt.utils.cloud.fire_event(
        'event',
@@ -15,6 +15,7 @@ configuration at ``/etc/salt/cloud.providers`` or

      provider: azure
      subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
      certificate_path: /etc/salt/azure.pem
      management_host: management.core.windows.net

Information on creating the pem file to use, and uploading the associated cer
file can be found at:

@@ -76,7 +77,7 @@ def get_configured_provider():
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('subscription_id', 'certificate_path',)
        ('subscription_id', 'certificate_path')
    )

@@ -92,8 +93,15 @@ def get_conn():
        'subscription_id',
        get_configured_provider(), __opts__, search_global=False
    )
    management_host = config.get_cloud_config_value(
        'management_host',
        get_configured_provider(),
        __opts__,
        search_global=False,
        default='management.core.windows.net'
    )
    return azure.servicemanagement.ServiceManagementService(
        subscription_id, certificate_path
        subscription_id, certificate_path, management_host
    )
@@ -60,7 +60,7 @@ class RemoteMaster(ioflo.base.deeding.Deed):
        '''
        Set up required objects
        '''
        self.remote = salt.masterapi.RemoteFuncs(self.opts.value)
        self.remote = salt.daemons.masterapi.RemoteFuncs(self.opts.value)

    def action(self):
        '''

@@ -93,7 +93,7 @@ class LocalMaster(ioflo.base.deeding.Deed):
        '''
        Set up required objects
        '''
        self.remote = salt.masterapi.LocalFuncs(self.opts.value)
        self.remote = salt.daemons.masterapi.LocalFuncs(self.opts.value)

    def action(self):
        '''
@@ -445,7 +445,7 @@ def _stale_refs_pygit2(repo):
    key = ' * [would prune] '
    ret = []
    for line in subprocess.Popen(
            'git remote prune --dry-run origin'.format(remote),
            'git remote prune --dry-run origin',
            shell=True,
            close_fds=True,
            cwd=repo.workdir,

@@ -657,7 +657,7 @@ def update():
                # Prune stale refs
                for ref in repo.get_refs():
                    if ref not in refs_post:
                        del(repo[ref])
                        del repo[ref]
            except Exception as exc:
                log.warning(
                    'Exception caught while fetching: {0}'.format(exc)
@@ -766,7 +766,7 @@ class RaetKey(Key):
    def _get_key_str(self, minion_id, status):
        '''
        Return the key string in the form of:

        pub: <pub>
        verify: <verify>
        '''
@@ -62,6 +62,13 @@ try:
except ImportError:
    HAS_HALITE = False

try:
    import systemd.daemon
    HAS_PYTHON_SYSTEMD = True
except ImportError:
    HAS_PYTHON_SYSTEMD = False


log = logging.getLogger(__name__)


@@ -466,6 +473,13 @@ class ReqServer(object):

        self.workers.bind(self.w_uri)

        try:
            if HAS_PYTHON_SYSTEMD and systemd.daemon.booted():
                systemd.daemon.notify('READY=1')
        except SystemError:
            # Daemon wasn't started by systemd
            pass

        while True:
            try:
                zmq.device(zmq.QUEUE, self.clients, self.workers)
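The added block lets a master started under a systemd ``Type=notify`` unit report readiness once its sockets are bound; a minimal standalone sketch of the same guarded pattern, assuming the python-systemd bindings are available::

    # Guarded systemd readiness notification; a no-op when not run under systemd.
    try:
        import systemd.daemon
        HAS_PYTHON_SYSTEMD = True
    except ImportError:
        HAS_PYTHON_SYSTEMD = False

    def notify_ready():
        '''Tell systemd the daemon has finished starting up, if anyone is listening.'''
        try:
            if HAS_PYTHON_SYSTEMD and systemd.daemon.booted():
                systemd.daemon.notify('READY=1')
        except SystemError:
            # Not started by systemd after all
            pass

    notify_ready()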
@@ -496,7 +496,7 @@ class MultiMinion(object):
                try:
                    if not isinstance(minion, dict):
                        minions[master] = {'minion': minion}
                    t_minion = Minion(minion, 1, False)
                    t_minion = Minion(minion, 5, False)
                    minions[master]['minion'] = t_minion
                    minions[master]['generator'] = t_minion.tune_in_no_block()
                    auth_wait = self.opts['acceptance_wait_time']
salt/modules/blockdev.py (new file, 110 lines)

@@ -0,0 +1,110 @@
# -*- coding: utf-8 -*-
'''
Module for managing block devices
'''

# Import python libs
import logging
import subprocess

# Import salt libs
import salt.utils

log = logging.getLogger(__name__)


def __virtual__():
    '''
    Only work on POSIX-like systems
    '''
    if salt.utils.is_windows():
        return False
    return True


def tune(device, **kwargs):
    '''
    Set attributes for the specified device

    CLI Example:

    .. code-block:: bash

        salt '*' blockdev.tune /dev/sda1 read-ahead=1024 read-write=True

    Valid options are::

        read-ahead:

    See the ``blockdev(8)`` manpage for a more complete description of these
    options.
    '''

    kwarg_map = {'read-ahead': 'setra',
                 'filesystem-read-ahead': 'setfra',
                 'read-only': 'setro',
                 'read-write': 'setrw'}
    opts = ''
    args = []
    for key in kwargs:
        if key in kwarg_map:
            switch = kwarg_map[key]
            if key != 'read-write':
                args.append(switch.replace('set', 'get'))
            if kwargs[key] == 'True':
                opts += '--{0} '.format(key)
            else:
                opts += '--{0} {1} '.format(switch, kwargs[key])
    cmd = 'blockdev {0}{1}'.format(opts, device)
    out = __salt__['cmd.run'](cmd).splitlines()
    return dump(device, args)


def wipe(device):
    '''
    Remove the filesystem information

    CLI Example:

    .. code-block:: bash

        salt '*' blockdev.wipe /dev/sda1
    '''

    cmd = 'wipefs {0}'.format(device)
    try:
        out = __salt__['cmd.run_all'](cmd)
    except subprocess.CalledProcessError as err:
        return False
    if out['retcode'] == 0:
        return True


def dump(device, args=None):
    '''
    Return all contents of dumpe2fs for a specified device

    CLI Example:

    .. code-block:: bash

        salt '*' extfs.dump /dev/sda1
    '''
    cmd = 'blockdev --getro --getsz --getss --getpbsz --getiomin --getioopt --getalignoff --getmaxsect --getsize --getsize64 --getra --getfra {0}'.format(device)
    ret = {}
    opts = [c[2:] for c in cmd.split() if c.startswith('--')]
    out = __salt__['cmd.run_all'](cmd)
    if out['retcode'] == 0:
        lines = [line for line in out['stdout'].splitlines() if line]
        count = 0
        for line in lines:
            ret[opts[count]] = line
            count = count + 1
        if args:
            temp_ret = {}
            for arg in args:
                temp_ret[arg] = ret[arg]
            return temp_ret
        else:
            return ret
    else:
        return False
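For orientation, a rough illustration of the mapping ``dump()`` builds from the ``blockdev --get*`` output; the device values below are invented, not taken from the module::

    # Invented example of the dict dump() returns: one entry per --get* flag,
    # keyed by the flag name without the leading dashes, values as raw strings.
    example = {
        'getro': '0',                    # read-only flag
        'getss': '512',                  # logical sector size
        'getsize64': '365041287168',     # size in bytes
        'getra': '256',                  # read-ahead, in 512-byte sectors
    }
    # tune() collects the matching --get* names and calls dump(device, args) so
    # the caller only sees the settings it just changed.
    print(example['getra'])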
@@ -19,7 +19,7 @@ def __virtual__():
    '''
    if not salt.utils.which('dig'):
        return False
    return 'dig'
    return True


def check_ip(x):
@@ -288,25 +288,25 @@ def list_():
    frozen = []
    running = []

    for c in ctnrs:
        lines = __salt__['cmd.run']('lxc-info -n ' + c).splitlines()
    for container in ctnrs:
        c_infos = __salt__['cmd.run']('lxc-info -n {0}'.format(container))

        for line in lines:
            stat = line.split(':')
        for info in c_infos:
            stat = info.split(':')
            if stat[0] == 'state':
                s = stat[1].strip()
                state = stat[1].strip()
                break

        if not len(s):
        if not len(state):
            continue
        if s == 'STOPPED':
            stopped.append(c)
        if state == 'STOPPED':
            stopped.append(container)
            continue
        if s == 'FROZEN':
            frozen.append(c)
        if state == 'FROZEN':
            frozen.append(container)
            continue
        if s == 'RUNNING':
            running.append(c)
        if state == 'RUNNING':
            running.append(container)
            continue

    return {'running': running,
@@ -63,6 +63,7 @@ verbose : True
# Import python Libs
import json
import os
import urllib2

# Import salt libs
import salt.utils

@@ -222,6 +223,28 @@ def _format_url(handler, host=None, core_name=None, extra=None):
            host, port, baseurl, core_name, handler, "&".join(extra))


def _auth(url):
    '''
    Install an auth handler for urllib2
    '''
    user = __salt__['config.get']('solr.user', False)
    password = __salt__['config.get']('solr.passwd', False)
    realm = __salt__['config.get']('solr.auth_realm', 'Solr')

    if user and password:
        basic = urllib2.HTTPBasicAuthHandler()
        basic.add_password(
            realm=realm, uri=url, user=user, passwd=password
        )
        digest = urllib2.HTTPDigestAuthHandler()
        digest.add_password(
            realm=realm, uri=url, user=user, passwd=password
        )
        urllib2.install_opener(
            urllib2.build_opener(basic, digest)
        )


def _http_request(url, request_timeout=None):
    '''
    PRIVATE METHOD

@@ -237,6 +260,7 @@ def _http_request(url, request_timeout=None):

    {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
    '''
    _auth(url)
    try:

        request_timeout = __salt__['config.option']('solr.request_timeout')
@@ -39,20 +39,22 @@ def _available_commands():
    if not zfs_path:
        return False

    _return = {}
    ret = {}
    # Note that we append '|| :' as a unix hack to force return code to be 0.
    res = salt_cmd.run_all('{0} help || :'.format(zfs_path))
    res = salt_cmd.run_stderr(
        '{0} help || :'.format(zfs_path), output_loglevel='debug'
    )

    # This bit is dependent on specific output from `zfs help` - any major changes
    # in how this works upstream will require a change.
    for line in res['stderr'].splitlines():
    for line in res.splitlines():
        if re.match(' [a-zA-Z]', line):
            cmds = line.split(' ')[0].split('|')
            doc = ' '.join(line.split(' ')[1:])
            for cmd in [cmd.strip() for cmd in cmds]:
                if cmd not in _return:
                    _return[cmd] = doc
    return _return
                if cmd not in ret:
                    ret[cmd] = doc
    return ret


def _exit_status(retcode):
@@ -174,9 +174,9 @@ def print_job(job_id):

def _format_job_instance(job):
    return {'Function': job['fun'],
            'Arguments': list(job['arg']),
            'Arguments': list(job.get('arg', [])),
            'Target': job['tgt'],
            'Target-type': job['tgt_type'],
            'Target-type': job.get('tgt_type', []),
            'User': job.get('user', 'root')}
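Switching to ``dict.get`` simply tolerates job records that are missing these keys; a tiny illustration with a made-up job dict::

    # Hypothetical job record missing 'arg' and 'tgt_type', as can happen with
    # old or partially written job cache entries.
    job = {'fun': 'test.ping', 'tgt': '*'}

    # job['arg'] would raise KeyError; .get() falls back to a default instead.
    formatted = {'Function': job['fun'],
                 'Arguments': list(job.get('arg', [])),
                 'Target': job['tgt'],
                 'Target-type': job.get('tgt_type', []),
                 'User': job.get('user', 'root')}
    print(formatted)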
@@ -1351,7 +1351,9 @@ class State(object):
            # that's not found in cdata, we look for what we're being passed in
            # the original data, namely, the special dunder __env__. If that's
            # not found we default to 'base'
            if cdata['kwargs'].get('env', None) is not None:
            if 'saltenv' in low:
                inject_globals['__env__'] = low['saltenv']
            elif cdata['kwargs'].get('env', None) is not None:
                # User is using a deprecated env setting which was parsed by
                # format_call
                inject_globals['__env__'] = cdata['kwargs']['env']

@@ -1359,8 +1361,6 @@ class State(object):
                # The user is passing an alternative environment using __env__
                # which is also not the appropriate choice, still, handle it
                inject_globals['__env__'] = low['__env__']
            elif 'saltenv' in low:
                inject_globals['__env__'] = low['saltenv']
            else:
                # Let's use the default environment
                inject_globals['__env__'] = 'base'
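The net effect of these two hunks is a fixed precedence for the environment injected into state modules; a small standalone sketch of that ordering (dict contents are illustrative)::

    def pick_env(low, kwargs):
        # After this change: saltenv in the low chunk wins, then the deprecated
        # `env` kwarg, then a literal __env__ key, then the 'base' default.
        if 'saltenv' in low:
            return low['saltenv']
        if kwargs.get('env') is not None:
            return kwargs['env']
        if '__env__' in low:
            return low['__env__']
        return 'base'

    print(pick_env({'saltenv': 'dev'}, {}))   # dev
    print(pick_env({}, {'env': 'qa'}))        # qa
    print(pick_env({}, {}))                   # base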
salt/states/blockdev.py (new file, 78 lines)

@@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
'''
Management of Block Devices
===================================

A state module to manage blockdevices

.. code-block:: yaml

    /dev/sda:
      blockdev.tuned:
        - read-only: True

    master-data:
      blockdev:
        - tuned:
        - name : /dev/vg/master-data
        - read-only: True
        - read-ahead: 1024
'''

# Import salt libs
import salt.utils


def __virtual__():
    '''
    Only work on POSIX-like systems
    '''
    if salt.utils.is_windows():
        return False
    return True


def tuned(name, **kwargs):
    '''
    Manage options of block device

    name
        The name of the block device

    opts:
      - read-ahead
            Read-ahead buffer size

      - filesystem-read-ahead
            Filesystem Read-ahead buffer size

      - read-only
            Set Read-Only

      - read-write
            Set Read-Write
    '''

    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': True}

    if not __salt__['file.is_blkdev']:
        ret['comment'] = 'Changes to {0} cannot be applied. Not a block device '.format(name)
    elif __opts__['test']:
        ret['comment'] = 'Changes to {0} will be applied '.format(name)
        ret['result'] = None
        return ret
    else:
        changes = __salt__['blockdev.tune'](name, **kwargs)
        if changes:
            ret['comment'] = 'Block device {0} successfully modified '.format(name)
            ret['changes'] = changes
        else:
            ret['comment'] = 'Failed to modify block device {0}'.format(name)
            ret['result'] = False
    return ret
@@ -43,6 +43,7 @@ class Signer(object):
        '''
        return self.key.sign(msg).signature


class Verifier(object):
    '''
    Used to verify messages with nacl digital signature

@@ -69,6 +70,7 @@ class Verifier(object):
            return False
        return True


class Publican(object):
    '''
    Container to manage remote nacl public key

@@ -140,6 +142,3 @@ class Privateer(object):
        '''
        box = nacl.public.Box(self.key, pub.key)
        return box.decrypt(cipher, nonce)
@@ -6,7 +6,6 @@ packeting module provides classes for Raet packets
'''

# Import python libs
import socket
from collections import Mapping
try:
    import simplejson as json

@@ -19,6 +18,7 @@ from ioflo.base.aiding import packByte, unpackByte

from . import raeting


class Part(object):
    '''
    Base class for parts of a RAET packet

@@ -29,8 +29,8 @@ class Part(object):
        '''
        Setup Part instance
        '''
        self.packet = packet # Packet this Part belongs too
        self.kind = kind # part kind
        self.packet = packet  # Packet this Part belongs too
        self.kind = kind  # part kind
        self.packed = ''

    def __len__(self):

@@ -41,10 +41,11 @@ class Part(object):

    @property
    def size(self):
        '''
        Property is the length of this Part
        '''
        return (self.__len__())
        '''
        Property is the length of this Part
        '''
        return self.__len__()


class Head(Part):
    '''

@@ -57,6 +58,7 @@ class Head(Part):
        '''
        super(Head, self).__init__(**kwa)


class TxHead(Head):
    '''
    RAET protocl transmit packet header class

@@ -66,8 +68,8 @@ class TxHead(Head):
        Composes and returns .packed, which is the packed form of this part
        '''
        self.packed = ''
        self.kind= self.packet.data['hk']
        data = self.packet.data # for speed
        self.kind = self.packet.data['hk']
        data = self.packet.data  # for speed

        data['pk'] = self.packet.kind
        data['nk'] = self.packet.neck.kind

@@ -81,10 +83,10 @@ class TxHead(Head):

        # kit always includes header kind and length fields
        kit = odict([('hk', self.kind), ('hl', 0)])
        for k, v in raeting.PACKET_DEFAULTS.items():# include if not equal to default
            if ( (k in raeting.HEAD_FIELDS) and
                 (k not in raeting.PACKET_FLAGS ) and
                 (data[k] != v)):
        for k, v in raeting.PACKET_DEFAULTS.items():  # include if not equal to default
            if ((k in raeting.HEAD_FIELDS) and
                    (k not in raeting.PACKET_FLAGS) and
                    (data[k] != v)):
                kit[k] = data[k]

        if self.kind == raeting.headKinds.json:

@@ -109,6 +111,7 @@ class TxHead(Head):
            values.append(1 if self.packet.data.get(field, 0) else 0)
        return packByte(format='11111111', fields=values)


class RxHead(Head):
    '''
    RAET protocl receive packet header class

@@ -121,8 +124,8 @@ class RxHead(Head):
        Returns False and updates .packet.error if failure occurs
        '''
        self.packed = ''
        data = self.packet.data #for speed
        packed = self.packet.packed #for speed
        data = self.packet.data  # for speed
        packed = self.packet.packed  # for speed
        if packed.startswith('{"hk":1,') and raeting.JSON_END in packed: # json header
            self.kind = raeting.headKinds.json
            front, sep, back = packed.partition(raeting.JSON_END)

@@ -171,6 +174,7 @@ class Neck(Part):
        '''
        super(Neck, self).__init__(**kwa)


class TxNeck(Neck):
    '''
    RAET protocol transmit packet neck class

@@ -181,7 +185,7 @@ class TxNeck(Neck):
        Composes and returns .packed, which is the packed form of this part
        '''
        self.packed = ''
        self.kind= self.packet.data['nk']
        self.kind = self.packet.data['nk']

        if self.kind not in raeting.NECK_KIND_NAMES:
            self.kind = raeting.neckKinds.unknown

@@ -196,6 +200,7 @@ class TxNeck(Neck):

        return self.packed


class RxNeck(Neck):
    '''
    RAET protocol receive packet neck class

@@ -235,6 +240,7 @@ class Body(Part):
        super(Body, self).__init__(**kwa)
        self.data = data or odict()


class TxBody(Body):
    '''
    RAET protocol tx packet body class

@@ -244,11 +250,12 @@ class TxBody(Body):
        Composes and returns .packed, which is the packed form of this part
        '''
        self.packed = ''
        self.kind= self.packet.data['bk']
        self.kind = self.packet.data['bk']
        if self.kind == raeting.bodyKinds.json:
            self.packed = json.dumps(self.data, separators=(',', ':'))
        return self.packed


class RxBody(Body):
    '''
    RAET protocol rx packet body class

@@ -291,6 +298,7 @@ class Tail(Part):
        ''' Setup Tail instal'''
        super(Tail, self).__init__(**kwa)


class TxTail(Tail):
    '''
    RAET protocol tx packet tail class

@@ -301,7 +309,7 @@ class TxTail(Tail):
        Composes and returns .packed, which is the packed form of this part
        '''
        self.packed = ''
        self.kind= self.packet.data['tk']
        self.kind = self.packet.data['tk']

        if self.kind == raeting.tailKinds.nacl:
            self.packed = "".rjust(raeting.tailSizes.nacl, '\x00')

@@ -310,6 +318,7 @@ class TxTail(Tail):
            pass
        return self.packed


class RxTail(Tail):
    '''
    RAET protocol rx packet tail class

@@ -345,7 +354,7 @@ class Packet(object):
    def __init__(self, kind=None):
        ''' Setup Packet instance. Meta data for a packet. '''
        self.kind = kind or raeting.PACKET_DEFAULTS['pk']
        self.packed = '' #packed string
        self.packed = ''  # packed string
        self.error = ''
        self.data = odict(raeting.PACKET_DEFAULTS)

@@ -363,7 +372,8 @@ class Packet(object):
        self.data = odict(raeting.PACKET_DEFAULTS)
        if data:
            self.data.update(data)
        return self # so can method chain
        return self  # so can method chain


class TxPacket(Packet):
    '''

@@ -468,7 +478,7 @@ class RxPacket(Packet):
        if not self.unpack():
            return False

        if self.data['vn'] not in raeting.VERSIONS.values():
            self.error = ("Received incompatible version '{0}'"
                          "version '{1}'".format(self.data['vn']))
            return False

@@ -506,4 +516,4 @@ class RxPacket(Packet):
        '''
        Uses tail to validate body such as decrypt
        '''
        return True
@@ -137,7 +137,7 @@ TailKind = namedtuple('TailKind', TAIL_KINDS.keys())
tailKinds = TailKind(**TAIL_KINDS)

# bytes
TAIL_SIZES = odict([('nada', 0), ('nacl', 8), ('crc16', 2), ('crc64', 8),
                    ('unknown', 0)])
TailSize = namedtuple('TailSize', TAIL_SIZES.keys())
tailSizes = TailSize(**TAIL_SIZES)

@@ -189,14 +189,14 @@ PACKET_DEFAULTS = odict([
    ('fg', '00'),
])

PACKET_FIELDS = [ 'sh', 'sp', 'dh', 'dp',
                  'hk', 'hl', 'vn', 'sd', 'dd', 'cf', 'bf', 'si', 'ti', 'sk', 'pk',
                  'sf', 'oi', 'dt', 'sn', 'sc', 'pf', 'af',
                  'nk', 'nl', 'bk', 'bl', 'tk', 'tl', 'fg']
PACKET_FIELDS = ['sh', 'sp', 'dh', 'dp',
                 'hk', 'hl', 'vn', 'sd', 'dd', 'cf', 'bf', 'si', 'ti', 'sk', 'pk',
                 'sf', 'oi', 'dt', 'sn', 'sc', 'pf', 'af',
                 'nk', 'nl', 'bk', 'bl', 'tk', 'tl', 'fg']

HEAD_FIELDS = [ 'hk', 'hl', 'vn', 'sd', 'dd', 'cf', 'bf', 'si', 'ti', 'sk', 'pk',
                'sf', 'oi', 'dt', 'sn', 'sc', 'pf', 'af',
                'nk', 'nl', 'bk', 'bl','tk', 'tl', 'fg']
HEAD_FIELDS = ['hk', 'hl', 'vn', 'sd', 'dd', 'cf', 'bf', 'si', 'ti', 'sk', 'pk',
               'sf', 'oi', 'dt', 'sn', 'sc', 'pf', 'af',
               'nk', 'nl', 'bk', 'bl', 'tk', 'tl', 'fg']

PACKET_FLAGS = ['af', 'pf', 'sf', 'bf', 'cf']
PACKET_FLAG_FIELDS = ['', '', 'af', 'pf', '', 'sf', 'bf', 'cf']

@@ -209,12 +209,12 @@ class RaetError(Exception):
            msg = "Invalid device id '{0}'".format(did)
            raise raeting.RaetError(msg)
    """
    def __init__(self, message = None):
        self.message = message #description of error
        self.args = (message)
    def __init__(self, message=None):
        self.message = message  # description of error
        super(RaetError, self).__init__(message)

    def __str__(self):
        return ("{0}: {1}.\n".format(self.__class__.__name__, self.message))
        return "{0}: {1}.\n".format(self.__class__.__name__, self.message)


def defaultData(data=None):
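Part of the motivation for the ``__init__`` change is that ``self.args = (message)`` assigns a bare string rather than a one-element tuple, so the exception machinery splits it into characters; a quick standalone check with generic class names::

    class OldStyleError(Exception):
        def __init__(self, message=None):
            self.message = message
            self.args = (message)      # parentheses alone do not make a tuple

    class NewStyleError(Exception):
        def __init__(self, message=None):
            self.message = message
            super(NewStyleError, self).__init__(message)

    print(OldStyleError('boom').args)   # ('b', 'o', 'o', 'm') -- the string gets split up
    print(NewStyleError('boom').args)   # ('boom',) -- one argument, as intended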
@@ -28,11 +28,11 @@ class Stack(object):
    '''
    RAET protocol stack object
    '''
    def __init__( self,
                  version=raeting.VERSION,
                  device=None,
                  did=None,
                  ha=("", raeting.RAET_PORT)):
    def __init__(self,
                 version=raeting.VERSION,
                 device=None,
                 did=None,
                 ha=("", raeting.RAET_PORT)):
        '''
        Setup Stack instance
        '''

@@ -41,12 +41,11 @@ class Stack(object):
        # local device for this stack
        self.device = device or LocalDevice(stack=self, did=did, ha=ha)
        self.transactions = odict() #transactions

        self.rxdsUdp = deque()
        self.txdsUdp = deque()
        self.serverUdp = aiding.SocketUdpNb(ha=self.device.ha)
        self.serverUdp.reopen() # open socket
        self.device.ha = self.serverUdp.ha # update device host address after open
        self.device.ha = self.serverUdp.ha  # update device host address after open

    def addRemoteDevice(self, device, did=None):
        '''

@@ -117,14 +116,13 @@ class Stack(object):

        sh, sp = ra
        dh, dp = da
        packet.data.update(sh=sh, sp=sp, dh=dh, dp=dp )
        packet.data.update(sh=sh, sp=sp, dh=dh, dp=dp)

        if not packet.parseBack():
            return None

        return packet

    def txUdp(self, packed, ddid):
        '''
        Queue duple of (packed, da) on stack transmit queue

@@ -143,12 +141,12 @@ class Device(object):
    '''
    Did = 2 # class attribute

    def __init__( self, stack=None, did=None, sid=0, tid=0,
                  host="", port=raeting.RAET_PORT, ha=None, ):
    def __init__(self, stack=None, did=None, sid=0, tid=0,
                 host="", port=raeting.RAET_PORT, ha=None, ):
        '''
        Setup Device instance
        '''
        self.stack = stack # Stack object that manages this device
        self.stack = stack  # Stack object that manages this device
        if did is None:
            if self.stack:
                while Device.Did in self.stack.devices:

@@ -164,7 +162,7 @@ class Device(object):
        self.sid = sid # current session ID
        self.tid = tid # current transaction ID

        if ha: #takes precendence
        if ha:  # takes precendence
            host, port = ha
        self.host = socket.gethostbyname(host)
        self.port = port

@@ -185,8 +183,8 @@ class Device(object):
        Generates next session id number.
        '''
        self.sid += 1
        if (self.sid > 0xffffffffL):
            self.sid = 1 # rollover to 1
        if self.sid > 0xffffffffL:
            self.sid = 1  # rollover to 1
        return self.sid

    def nextTid(self):

@@ -194,10 +192,11 @@ class Device(object):
        Generates next session id number.
        '''
        self.tid += 1
        if (self.tid > 0xffffffffL):
            self.tid = 1 # rollover to 1
        if self.tid > 0xffffffffL:
            self.tid = 1  # rollover to 1
        return self.tid


class LocalDevice(Device):
    '''
    RAET protocol endpoint local device object

@@ -214,6 +213,7 @@ class LocalDevice(Device):
        self.signer = nacling.Signer(signkey)
        self.priver = nacling.Privateer(prikey) # Long term key


class RemoteDevice(Device):
    '''
    RAET protocol endpoint remote device object

@@ -234,6 +234,7 @@ class RemoteDevice(Device):
        self.publee = nacling.Publican() # short term key
        self.privee = nacling.Privateer() # short term key


class Transaction(object):
    '''
    RAET protocol transaction class

@@ -248,7 +249,7 @@ class Transaction(object):
        self.kind = kind or raeting.PACKET_DEFAULTS['sk']

        # local device is the .stack.device
        self.rdid = rdid # remote device did
        self.rdid = rdid  # remote device did

        self.crdr = crdr
        self.bcst = bcst

@@ -258,8 +259,8 @@ class Transaction(object):

        self.rxData = rxData or odict()
        self.txData = txData or odict()
        self.rxPacket = None # last rx packet
        self.txPacket = None # last tx packet
        self.rxPacket = None  # last rx packet
        self.txPacket = None  # last tx packet

    def transmit(self, packet):
        '''

@@ -276,13 +277,14 @@ class Initiator(Transaction):
        '''
        Setup Transaction instance
        '''
        crdr = False # force crdr to False
        crdr = False  # force crdr to False
        super(Initiator, self).__init__(crdr=crdr, **kwa)
        if self.sid is None: # use current session id of local device
        if self.sid is None:  # use current session id of local device
            self.sid = self.stack.device.sid
        if self.tid is None: # use next tid
        if self.tid is None:  # use next tid
            self.tid = self.stack.device.nextTid()


class Corresponder(Transaction):
    '''
    RAET protocol corresponder transaction class

@@ -291,9 +293,10 @@ class Corresponder(Transaction):
        '''
        Setup Transaction instance
        '''
        crdr = True # force crdr to True
        crdr = True  # force crdr to True
        super(Corresponder, self).__init__(crdr=crdr, **kwa)


class Joiner(Initiator):
    '''
    RAET protocol Joiner transaction class Dual of Acceptor
@@ -2,4 +2,4 @@
'''
package

'''
@@ -7,14 +7,14 @@ Tests to try out packeting. Potentially ephemeral
from salt.transport.road.raet import packeting
from ioflo.base.odicting import odict


def test():
    data = odict(hk=1, bk=1, bf=1, cf=1)
    body=odict(msg='Hello Raet World', extra='what is this')
    body = odict(msg='Hello Raet World', extra='what is this')
    packet1 = packeting.Packet(data=data, body=body)
    print packet1.body.data
    print packet1.pack()

    packet2 = packeting.Packet()
    packet2.parse(packet1.packed)
    print packet2.body.data

@@ -27,6 +27,5 @@ def test():
    print packet1.pack()


if __name__ == "__main__":
    test()
@@ -14,20 +14,17 @@ def test():
    privateer = nacling.Privateer()
    masterPriKeyHex = privateer.keyhex

    signer = nacling.Signer()
    minionSignKeyHex = signer.keyhex
    privateer = nacling.Privateer()
    masterPriKeyHex = privateer.keyhex

    # initially
    # master on port 7530 with did of 1
    # minion on port 7531 with did of 0
    # eventually
    # minion did of 2

    #master stack
    device = stacking.LocalDevice( did=1,
                                   signkey=masterSignKeyHex,

@@ -48,6 +45,7 @@ def test():
    data = odict(hk=1, bk=1)
    joiner = stacking.Joiner(stack=stack2, sid=0, txData=data)
    joiner.join()

    stack2.serviceUdp()
    stack1.serviceUdp()

@@ -89,6 +87,5 @@ def test():
    joiner.pend(packet.data)


if __name__ == "__main__":
    test()
@@ -11,8 +11,8 @@ from salt.transport import table

def test_table():

    bob_pub = table.Public()
    print json.dumps(bob_pub.keydata, indent=2)

    #print bob_pub.backend
    #print bob_pub.sec_backend

@@ -30,8 +30,7 @@ def test_table():

    signature = bob_pub.signature("What have we here.")
    print signature
    print signature == signed.signature

    print signature == signed.signature


if __name__ == "__main__":
@@ -710,15 +710,21 @@ def path_join(*parts):
    ))


def pem_finger(path, sum_type='md5'):
def pem_finger(path=None, key=None, sum_type='md5'):
    '''
    Pass in the location of a pem file and the type of cryptographic hash to
    use. The default is md5.
    Pass in either a raw pem string, or the path on disk to the location of a
    pem file, and the type of cryptographic hash to use. The default is md5.
    The fingerprint of the pem will be returned.

    If neither a key nor a path are passed in, a blank string will be returned.
    '''
    if not os.path.isfile(path):
        return ''
    with fopen(path, 'rb') as fp_:
        key = ''.join(fp_.readlines()[1:-1])
    if not key:
        if not os.path.isfile(path):
            return ''

        with fopen(path, 'rb') as fp_:
            key = ''.join(fp_.readlines()[1:-1])

    pre = getattr(hashlib, sum_type)(key).hexdigest()
    finger = ''
    for ind in range(len(pre)):
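A short usage sketch of the two call styles the new signature allows; the key material and the sha256 choice are illustrative::

    import salt.utils

    # Fingerprint a PEM file on disk (the original call style).
    print(salt.utils.pem_finger('/etc/salt/pki/minion/minion.pub'))

    # Fingerprint raw PEM body text without touching the filesystem (new style);
    # the string below is a stand-in, not a real key.
    raw_key = 'MIIBIjANBgkqhkiG9w0BAQEFAAOC...'
    print(salt.utils.pem_finger(key=raw_key, sum_type='sha256'))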
@@ -15,6 +15,7 @@ import subprocess
import multiprocessing
import logging
import pipes
import json
import re

# Let's import pwd and catch the ImportError. We'll raise it if this is not

@@ -1484,7 +1485,135 @@ def list_nodes_select(nodes, selection, call=None):
    return ret


def salt_cloud_force_ascii(exc):
def init_cachedir(base=None):
    '''
    Initialize the cachedir needed for Salt Cloud to keep track of minions
    '''
    if base is None:
        base = os.path.join(syspaths.CACHE_DIR, 'cloud')
    needed_dirs = (base,
                   os.path.join(base, 'requested'),
                   os.path.join(base, 'active'))
    for dir_ in needed_dirs:
        if not os.path.exists(dir_):
            os.makedirs(dir_)
    os.chmod(base, 0755)


def request_minion_cachedir(
        minion_id,
        fingerprint='',
        pubkey=None,
        provider=None,
        base=None,
    ):
    '''
    Creates an entry in the requested/ cachedir. This means that Salt Cloud has
    made a request to a cloud provider to create an instance, but it has not
    yet verified that the instance properly exists.

    If the fingerprint is unknown, a raw pubkey can be passed in, and a
    fingerprint will be calculated. If both are empty, then the fingerprint
    will be set to None.
    '''
    if base is None:
        base = os.path.join(syspaths.CACHE_DIR, 'cloud')

    if not fingerprint:
        if pubkey is not None:
            fingerprint = salt.utils.pem_finger(key=pubkey)

    init_cachedir(base)

    data = {
        'minion_id': minion_id,
        'fingerprint': fingerprint,
        'provider': provider,
    }

    fname = '{0}.json'.format(minion_id)
    path = os.path.join(base, 'requested', fname)
    with salt.utils.fopen(path, 'w') as fh_:
        json.dump(data, fh_)


def change_minion_cachedir(
        minion_id,
        cachedir,
        data=None,
        base=None,
    ):
    '''
    Changes the info inside a minion's cachedir entry. The type of cachedir
    must be specified (i.e., 'requested' or 'active'). A dict is also passed in
    which contains the data to be changed.

    Example:

        change_minion_cachedir(
            'myminion',
            'requested',
            {'fingerprint': '26:5c:8c:de:be:fe:89:c0:02:ed:27:65:0e:bb:be:60'},
        )
    '''
    if not isinstance(data, dict):
        return False

    if base is None:
        base = os.path.join(syspaths.CACHE_DIR, 'cloud')

    fname = '{0}.json'.format(minion_id)
    path = os.path.join(base, cachedir, fname)

    with salt.utils.fopen(path, 'r') as fh_:
        cache_data = json.load(fh_)

    cache_data.update(data)

    with salt.utils.fopen(path, 'w') as fh_:
        json.dump(cache_data, fh_)


def activate_minion_cachedir(minion_id, base=None):
    '''
    Moves a minion from the requested/ cachedir into the active/ cachedir. This
    means that Salt Cloud has verified that a requested instance properly
    exists, and should be expected to exist from here on out.
    '''
    if base is None:
        base = os.path.join(syspaths.CACHE_DIR, 'cloud')

    fname = '{0}.json'.format(minion_id)
    src = os.path.join(base, 'requested', fname)
    dst = os.path.join(base, 'active')
    shutil.move(src, dst)


def delete_minion_cachedir(minion_id, base=None):
    '''
    Deletes a minion's entry from the cloud cachedir. It will search through
    all cachedirs to find the minion's cache file.
    '''
    if base is None:
        base = os.path.join(syspaths.CACHE_DIR, 'cloud')

    fname = '{0}.json'.format(minion_id)
    for cachedir in ('requested', 'active'):
        path = os.path.join(base, cachedir, fname)
        if os.path.exists(path):
            os.remove(path)


def _salt_cloud_force_ascii(exc):
    '''
    Helper method to try its best to convert any Unicode text into ASCII
    without stack tracing since salt internally does not handle Unicode strings

    This method is not supposed to be used directly. Once
    `py:module: salt.utils.cloud` is imported this method register's with
    python's codecs module for proper automatic conversion in case of encoding
    errors.
    '''
    if not isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
        raise TypeError('Can\'t handle {0}'.format(exc))

@@ -1499,4 +1628,4 @@ def salt_cloud_force_ascii(exc):

    # There's nothing else we can do, raise the exception
    raise exc

codecs.register_error('salt-cloud-force-ascii', salt_cloud_force_ascii)
codecs.register_error('salt-cloud-force-ascii', _salt_cloud_force_ascii)
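Taken together, the new helpers give each minion a small JSON record that moves through the cloud cachedir as its instance is confirmed; a rough lifecycle sketch using the functions above, with a made-up minion id and base directory::

    import salt.utils.cloud

    base = '/tmp/salt-cloud-cache-demo'   # stand-in for the default cachedir

    # 1. Instance requested from the provider: a record lands in requested/.
    salt.utils.cloud.init_cachedir(base)
    salt.utils.cloud.request_minion_cachedir(
        'myminion', pubkey='FAKE-PEM-BODY', provider='digital_ocean', base=base)

    # 2. Instance confirmed to exist: the record moves into active/.
    salt.utils.cloud.activate_minion_cachedir('myminion', base=base)

    # 3. Instance destroyed: the record is removed from whichever dir holds it.
    salt.utils.cloud.delete_minion_cachedir('myminion', base=base)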
@@ -171,7 +171,7 @@ def _interfaces_ip(out):
        for line in group.splitlines():
            if not ' ' in line:
                continue
            match = re.match(r'^\d*:\s+([\w.]+)(?:@)?([\w.]+)?:\s+<(.+)>', line)
            match = re.match(r'^\d*:\s+([\w.-]+)(?:@)?([\w.-]+)?:\s+<(.+)>', line)
            if match:
                iface, parent, attrs = match.groups()
                if 'UP' in attrs.split(','):
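Adding ``-`` to the character classes lets the parser accept interface names that contain hyphens (bridges like ``br-ex``); a quick check against a made-up ``ip addr`` line::

    import re

    line = '3: br-ex@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500'

    old = re.match(r'^\d*:\s+([\w.]+)(?:@)?([\w.]+)?:\s+<(.+)>', line)
    new = re.match(r'^\d*:\s+([\w.-]+)(?:@)?([\w.-]+)?:\s+<(.+)>', line)

    print(old)             # None: the hyphen in 'br-ex' stops the old pattern
    print(new.groups())    # ('br-ex', 'eth0', 'BROADCAST,MULTICAST,UP,LOWER_UP')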
@@ -258,7 +258,9 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None):
            unicode_context[key] = unicode(value, 'utf-8')

    try:
        output = jinja_env.from_string(tmplstr).render(**unicode_context)
        template = jinja_env.from_string(tmplstr)
        template.globals.update(unicode_context)
        output = template.render(**unicode_context)
    except jinja2.exceptions.TemplateSyntaxError as exc:
        trace = traceback.extract_tb(sys.exc_info()[2])
        line, out = _get_jinja_error(trace, context=unicode_context)
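The extra ``template.globals.update()`` call matters because variables passed only to ``render()`` are not visible inside templates imported without context, while globals are; a small illustration of that distinction (the template strings are made up, and this sketch uses environment globals rather than reproducing Salt's exact call)::

    import jinja2

    env = jinja2.Environment(loader=jinja2.DictLoader({
        'macros.sls': '{% macro greet() %}Hi {{ user }}{% endmacro %}',
        'page.sls': "{% import 'macros.sls' as m %}{{ m.greet() }}",
    }))

    # Passed only to render(): the imported macro cannot see it.
    print(env.get_template('page.sls').render(user='alice'))   # 'Hi '

    # Available as a global: visible inside the imported macro as well.
    env.globals['user'] = 'alice'
    print(env.get_template('page.sls').render())               # 'Hi alice'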
@@ -6,7 +6,7 @@
# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import patch, call, NO_MOCK, NO_MOCK_REASON
from salttesting.mock import patch, NO_MOCK, NO_MOCK_REASON

ensure_in_syspath('../')
@@ -5,8 +5,8 @@

# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import (ensure_in_syspath, destructiveTest)
from salttesting.mock import patch, call, NO_MOCK, NO_MOCK_REASON
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import patch, NO_MOCK, NO_MOCK_REASON
ensure_in_syspath('../')

# Import salt libs
@@ -19,7 +19,6 @@ except ImportError:
# Import salt libs
import integration
import salt.utils
from salt.exceptions import CommandExecutionError

SUBSALT_DIR = os.path.join(integration.TMP, 'subsalt')
AUTHORIZED_KEYS = os.path.join(SUBSALT_DIR, 'authorized_keys')
tests/unit/modules/blockdev_test.py (new file, 47 lines)

@@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-

# Import Salt Testing Libs
from salttesting.unit import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
ensure_in_syspath('../../')

# Import Salt Libs
import salt.modules.blockdev as blockdev

blockdev.__salt__ = {
    'cmd.has_exec': MagicMock(return_value=True),
    'config.option': MagicMock(return_value=None)
}


@skipIf(NO_MOCK, NO_MOCK_REASON)
class TestBlockdevModule(TestCase):
    def test_dump(self):
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(blockdev.__salt__, {'cmd.run_all': mock}):
            blockdev.dump('/dev/sda')
            mock.assert_called_once_with(
                'blockdev --getro --getsz --getss --getpbsz --getiomin '
                '--getioopt --getalignoff --getmaxsect --getsize '
                '--getsize64 --getra --getfra /dev/sda'
            )

    def test_wipe(self):
        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(blockdev.__salt__, {'cmd.run_all': mock}):
            blockdev.wipe('/dev/sda')
            mock.assert_called_once_with(
                'wipefs /dev/sda'
            )

    def test_tune(self):
        mock = MagicMock(return_value='712971264\n512\n512\n512\n0\n0\n88\n712971264\n365041287168\n512\n512')
        with patch.dict(blockdev.__salt__, {'cmd.run': mock}):
            mock_dump = MagicMock(return_value={'retcode': 0, 'stdout': ''})
            with patch('salt.modules.blockdev.dump', mock_dump):
                kwargs = {'read-ahead': 512, 'filesystem-read-ahead': 512}
                blockdev.tune('/dev/sda', **kwargs)
                mock.assert_called_once_with(
                    'blockdev --setra 512 --setfra 512 /dev/sda'
                )
@@ -1,15 +1,20 @@
# -*- coding: utf-8 -*-
'''
    :codauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
    :codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
'''

# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import MagicMock, patch, call
from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON
from salttesting.helpers import ensure_in_syspath, requires_salt_modules
ensure_in_syspath('../../')

# Import salt libs
from salt.modules import dig

@skipIf(not dig.__virtual__(), 'Dig must be installed')

@skipIf(NO_MOCK, NO_MOCK_REASON)
@requires_salt_modules('dig')
class DigTestCase(TestCase):

    def test_check_ip(self):

@@ -18,13 +23,92 @@ class DigTestCase(TestCase):
    def test_check_ip_ipv6(self):
        self.assertTrue(dig.check_ip('1111:2222:3333:4444:5555:6666:7777:8888'), msg='Not a valid ip address')

    @skipIf(True, 'Waiting for 2014.1 release')
    def test_check_ip_ipv6_valid(self):
        self.assertTrue(dig.check_ip('2607:fa18:0:3::4'))

    def test_check_ip_neg(self):
        self.assertFalse(dig.check_ip('-127.0.0.1'), msg="Did not detect negative value as invalid")

    def test_check_ip_empty(self):
        self.assertFalse(dig.check_ip(''), msg="Did not detect empty value as invalid")

    def test_A(self):
    def test_a(self):
        dig.__salt__ = {}
        with patch.dict(dig.__salt__, {'cmd.run_all': MagicMock(return_value={'pid': 3656, 'retcode': 0, 'stderr': '', 'stdout': '74.125.193.104\n74.125.193.105\n74.125.193.99\n74.125.193.106\n74.125.193.103\n74.125.193.147'})}):
            self.assertEqual(dig.A('www.google.com'), ['74.125.193.104', '74.125.193.105', '74.125.193.99', '74.125.193.106', '74.125.193.103', '74.125.193.147'])
        dig_mock = MagicMock(return_value={
            'pid': 3656, 'retcode': 0, 'stderr': '', 'stdout': '74.125.193.104\n'
                                                               '74.125.193.105\n'
                                                               '74.125.193.99\n'
                                                               '74.125.193.106\n'
                                                               '74.125.193.103\n'
                                                               '74.125.193.147'
        })
        with patch.dict(dig.__salt__, {'cmd.run_all': dig_mock}):
            self.assertEqual(dig.A('www.google.com'), ['74.125.193.104',
                                                       '74.125.193.105',
                                                       '74.125.193.99',
                                                       '74.125.193.106',
                                                       '74.125.193.103',
                                                       '74.125.193.147'])

    @skipIf(True, 'Waiting for 2014.1 release')
    def test_aaaa(self):
        dig.__salt__ = {}
        dig_mock = MagicMock(return_value={
            'pid': 25451, 'retcode': 0, 'stderr': '', 'stdout': '2607:f8b0:400f:801::1014'
        })
        with patch.dict(dig.__salt__, {'cmd.run_all': dig_mock}):
            self.assertEqual(dig.AAAA('www.google.com'), ['2607:f8b0:400f:801::1014'])

    @patch('salt.modules.dig.A', MagicMock(return_value=['ns4.google.com.']))
    def test_ns(self):
        dig.__salt__ = {}
        dig_mock = MagicMock(return_value={
            'pid': 26136, 'retcode': 0, 'stderr': '', 'stdout': 'ns4.google.com.'
        })
        with patch.dict(dig.__salt__, {'cmd.run_all': dig_mock}):
            self.assertEqual(dig.NS('google.com'), ['ns4.google.com.'])

    def test_spf(self):
        dig.__salt__ = {}
        dig_mock = MagicMock(return_value={'pid': 26795,
                                           'retcode': 0,
                                           'stderr': '',
                                           'stdout': 'v=spf1'
                                                     ' include:_spf.google.com '
                                                     'ip4:216.73.93.70/31 '
                                                     'ip4:216.73.93.72/31 ~all'})
        with patch.dict(dig.__salt__, {'cmd.run_all': dig_mock}):
            self.assertEqual(dig.SPF('google.com'),
                             ['216.73.93.70/31', '216.73.93.72/31'])

    @skipIf(True, 'Waiting for 2014.1 release')
    def test_spf_redir(self):
        '''
        Test was written after a bug was found when a domain is redirecting the SPF ipv4 range
        '''
        dig.__salt__ = {}
        dig_mock = MagicMock(return_value={'pid': 27282,
                                           'retcode': 0,
                                           'stderr': '',
                                           'stdout': 'v=spf1 a mx '
                                                     'include:_spf.xmission.com ?all'})
        with patch.dict(dig.__salt__, {'cmd.run_all': dig_mock}):
            self.assertEqual(dig.SPF('xmission.com'), ['198.60.22.0/24', '166.70.13.0/24'])

    def test_mx(self):
        dig.__salt__ = {}
        dig_mock = MagicMock(return_value={'pid': 27780,
                                           'retcode': 0,
                                           'stderr': '',
                                           'stdout': '10 aspmx.l.google.com.\n'
                                                     '20 alt1.aspmx.l.google.com.\n'
                                                     '40 alt3.aspmx.l.google.com.\n'
                                                     '50 alt4.aspmx.l.google.com.\n'
                                                     '30 alt2.aspmx.l.google.com.'})
        with patch.dict(dig.__salt__, {'cmd.run_all': dig_mock}):
            self.assertEqual(dig.MX('google.com'), [['10', 'aspmx.l.google.com.'],
                                                    ['20', 'alt1.aspmx.l.google.com.'],
                                                    ['40', 'alt3.aspmx.l.google.com.'],
                                                    ['50', 'alt4.aspmx.l.google.com.'],
                                                    ['30', 'alt2.aspmx.l.google.com.']])