commit ebae5afc64

Merge remote-tracking branch 'upstream/develop' into sam_raet_2

Conflicts:
    salt/transport/road/raet/stacking.py
    salt/transport/road/raet/test/test_stacking.py
@@ -213,6 +213,9 @@ Edit the minion config file:
 4. Uncomment and change the ``id:`` value to something descriptive like
    "saltdev". This isn't strictly necessary but it will serve as a reminder of
    which Salt installation you are working with.
+5. If you changed the ``ret_port`` value in the master config because you are
+   also running a non-development version of Salt, then you will have to
+   change the ``master_port`` value in the minion config to match.

 .. note:: Using `salt-call` with a :doc:`Standalone Minion </topics/tutorials/standalone_minion>`

doc/_themes/saltstack/layout.html (vendored, 20 lines changed)
@@ -233,27 +233,23 @@
 <div class="row-fluid">
   <div class="footerCol">
     <h4>About Us</h4>
-    <a href="http://saltstack.com/about.html">SaltStack</a>
+    <a href="http://saltstack.com/">SaltStack</a>
-    <a href="http://saltstack.com/about.html">Leadership</a>
+    <a href="http://saltstack.com/about/">Leadership</a>
   </div>
   <div class="footerCol">
     <h4>Products</h4>
-    <a href="http://saltstack.com/products.html">Remote Execution</a>
+    <a href="http://saltstack.com/enterprise/">Enterprise</a>
-    <a href="http://saltstack.com/products.html">Config Management</a>
+    <a href="http://saltstack.com/services/">Integration</a>
-    <a href="http://saltstack.com/products.html">Cloud Management</a>
-    <a href="http://saltstack.com/products.html">SaltStack Solutions</a>
   </div>
   <div class="footerCol">
     <h4>Services</h4>
-    <a href="http://saltstack.com/services.html">Onsite Training</a>
+    <a href="http://saltstack.com/training/">Onsite Training</a>
-    <a href="http://saltstack.com/services.html">Regional Training</a>
+    <a href="http://saltstack.com/services/">Custom Professional Services</a>
-    <a href="http://saltstack.com/services.html">Custom Professional Services</a>
-    <a href="http://saltstack.com/services.html">Training Dates and Locations</a>
   </div>
   <div class="footerCol">
     <h4>Contact Us</h4>
-    <a href="http://saltstack.com/contact.html">Support</a>
+    <a href="http://saltstack.com/contact/">Support</a>
-    <a href="http://saltstack.com/contact.html">Contact us</a>
+    <a href="http://saltstack.com/contact/">Contact us</a>
   </div>
   <div class="footerCol">
     <h4>Community</h4>
@@ -37,7 +37,10 @@ Set up the provider config at ``/etc/salt/cloud.providers.d/azure.conf``:
     minion:
         master: saltmaster.example.com

-    provider: gce
+    provider: azure
+
+    # Optional
+    management_host: management.core.windows.net

 The certificate used must be generated by the user. OpenSSL can be used to
 create the management certificates. Two certificates are needed: a .cer file,
@@ -58,6 +61,9 @@ To create the .cer file, execute the following command:
 After you creating these files, the .cer file will need to be uploaded to
 Azure via the "Upload" action of the "Settings" tab of the management portal.

+Optionally, a ``management_host`` may be configured, if necessary for your
+region.
+

 Cloud Profiles
 ==============
@@ -43,6 +43,8 @@ Set up an initial profile at ``/etc/salt/cloud.profiles`` or in the
     image: Ubuntu 12.10 x64
     size: 512MB
     location: New York 1
+    private_networking: True
+    backups_enabled: True

 Sizes can be obtained using the ``--list-sizes`` option for the ``salt-cloud``
 command:
@@ -27,7 +27,7 @@ Split Package
 Salt Should always be split in a standard way, with standard dependencies, this lowers
 cross distribution confusion about what components are going to be shipped with
 specific packages. These packages can be defined from the Salt Source as of
-Salt 0.17.0:
+Salt 2014.1.0:

 Salt Common
 -----------
@@ -177,6 +177,7 @@ Files

 - `scripts/salt-ssh`
 - `man/salt-ssh.1`
+- `conf/cloud*`

 Depends
 ~~~~~~~
@@ -185,6 +186,31 @@ Depends
 - `sshpass`
 - `Python MessagePack` (Messagepack C lib, or msgpack-pure)

+Salt Cloud
+----------
+
+As of Salt 2014.1.0 Salt Cloud is included in the same repo as Salt. This
+can be split out into a separate package or it can be included in the
+salt-master package.
+
+Name
+~~~~
+
+- `salt-cloud`
+
+Files
+~~~~~
+
+- `scripts/salt-cloud`
+- `man/salt-cloud.1`
+
+Depends
+~~~~~~~
+
+- `Salt Common`
+- `sshpass`
+- `apache libcloud`
+
 Salt Doc
 --------

@@ -267,6 +267,9 @@ Edit the minion config file:
 4. Uncomment and change the ``id:`` value to something descriptive like
    "saltdev". This isn't strictly necessary but it will serve as a reminder of
    which Salt installation you are working with.
+5. If you changed the ``ret_port`` value in the master config because you are
+   also running a non-development version of Salt, then you will have to
+   change the ``master_port`` value in the minion config to match.

 .. note:: Using `salt-call` with a :doc:`Standalone Minion </topics/tutorials/standalone_minion>`

@@ -2,9 +2,9 @@
 Preseed Minion with Accepted Key
 =================================

 In some situations, it is not convenient to wait for a minion to start before
 accepting its key on the master. For instance, you may want the minion to
 bootstrap itself as soon as it comes online. You may also want to to let your
 developers provision new development machines on the fly.

 There is a general four step process to do this:
@@ -23,23 +23,22 @@ Pick a name for the key, such as the minion's id.

    root@saltmaster# cp key_name.pub /etc/salt/pki/master/minions/[minion_id]

 It is necessary that the public key file has the same name as your minion id.
 This is how Salt matches minions with their keys. Also note that the pki folder
 could be in a different location, depending on your OS or if specified in the
 master config file.

 3. Distribute the minion keys.

-There is no single method to get the keypair to your minion. If you are
-spooling up minions on EC2, you could pass them in using user_data or a
-cloud-init script. If you are handing them off to a team of developers for
-provisioning dev machines, you will need a secure file transfer.
+There is no single method to get the keypair to your minion. The difficulty is
+finding a distribution method which is secure.

 .. admonition:: Security Warning

     Since the minion key is already accepted on the master, distributing
     the private key poses a potential security risk. A malicious party
-    will have access to your entire state tree and other sensitive data.
+    will have access to your entire state tree and other sensitive data if they
+    gain access to a preseeded minion key.

 4. Preseed the Minion with the keys

@@ -50,6 +49,6 @@ You will want to place the minion keys before starting the salt-minion daemon:
    /etc/salt/pki/minion/minion.pem
    /etc/salt/pki/minion/minion.pub

 Once in place, you should be able to start salt-minion and run
 ``salt-call state.highstate`` or any other salt commands that require master
 authentication.
@@ -88,7 +88,7 @@ def auth(pem, **kwargs):

     # The signature is a BIT STRING (Type 3)
     # Decode that as well
-    der_sig_in = Crypto.util.asn1.DerObject()
+    der_sig_in = Crypto.Util.asn1.DerObject()
     der_sig_in.decode(der_sig)

     # Get the payload
@@ -860,7 +860,19 @@ class LocalClient(object):
             # Wait 0 == forever, use a minimum of 1s
             wait = max(1, time_left)
             raw = self.event.get_event(wait, jid)
-            if raw is not None:
+            if raw is None:
+                if len(found.intersection(minions)) >= len(minions):
+                    # All minions have returned, break out of the loop
+                    log.debug('jid %s found all minions %s', jid, found)
+                    if self.opts['order_masters']:
+                        if syndic_wait < self.opts.get('syndic_wait', 1):
+                            syndic_wait += 1
+                            timeout_at = int(time.time()) + 1
+                            log.debug('jid %s syndic_wait %s will now timeout at %s',
+                                      jid, syndic_wait, datetime.fromtimestamp(timeout_at).time())
+                            continue
+                    break
+            else:
                 if 'minions' in raw.get('data', {}):
                     minions.update(raw['data']['minions'])
                     continue
@@ -879,17 +891,7 @@ class LocalClient(object):
                     ret[raw['id']]['out'] = raw['out']
                 log.debug('jid %s return from %s', jid, raw['id'])
                 yield ret
-                if len(found.intersection(minions)) >= len(minions):
-                    # All minions have returned, break out of the loop
-                    log.debug('jid %s found all minions %s', jid, found)
-                    if self.opts['order_masters']:
-                        if syndic_wait < self.opts.get('syndic_wait', 1):
-                            syndic_wait += 1
-                            timeout_at = int(time.time()) + 1
-                            log.debug('jid %s syndic_wait %s will now timeout at %s',
-                                      jid, syndic_wait, datetime.fromtimestamp(timeout_at).time())
-                            continue
-                    break
                 continue
             # Then event system timeout was reached and nothing was returned
             if len(found.intersection(minions)) >= len(minions):
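Note: taken together, the two hunks above move the "all expected minions have returned" check, including the syndic_wait grace period, out of the branch that handles a received event and into the branch where the event poll comes back empty. A minimal standalone sketch of that reordered control flow follows; the function, its arguments, and the timing values are illustrative stand-ins, not Salt's actual API.

# Illustrative sketch (not Salt's code): the early-exit/syndic-wait check now
# runs only when a poll returns nothing, mirroring the reordered loop above.
import time

def wait_for_returns(poll, minions, timeout=5, order_masters=False, syndic_wait_max=1):
    '''poll() returns an event dict like {'id': ..., 'return': ...} or None.'''
    found = set()
    returns = {}
    syndic_wait = 0
    timeout_at = time.time() + timeout
    while True:
        raw = poll()
        if raw is None:
            if found >= set(minions):
                if order_masters and syndic_wait < syndic_wait_max:
                    # Give lower-level syndics one more second to report in.
                    syndic_wait += 1
                    timeout_at = time.time() + 1
                    continue
                break
            if time.time() > timeout_at:
                break
            continue
        found.add(raw['id'])
        returns[raw['id']] = raw.get('return')
    return returns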
@@ -286,9 +286,20 @@ def create(vm_):
    )

    private_networking = config.get_cloud_config_value(
-        'private_networking', vm_, __opts__, search_global=False, default=None
+        'private_networking', vm_, __opts__, search_global=False, default=None,
    )
-    kwargs['private_networking'] = 'true' if private_networking else 'false'
+    if private_networking is not None:
+        if not isinstance(private_networking, bool):
+            raise SaltCloudConfigError("'private_networking' should be a boolean value.")
+        kwargs['private_networking'] = private_networking
+
+    backups_enabled = config.get_cloud_config_value(
+        'backups_enabled', vm_, __opts__, search_global=False, default=None,
+    )
+    if backups_enabled is not None:
+        if not isinstance(backups_enabled, bool):
+            raise SaltCloudConfigError("'backups_enabled' should be a boolean value.")
+        kwargs['backups_enabled'] = backups_enabled

    salt.utils.cloud.fire_event(
        'event',
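Note: both new options follow the same opt-in pattern: skip the flag entirely when it is unset and reject anything that is not a real boolean. A small self-contained sketch of that validation; the helper name and the ConfigError class are illustrative, not salt-cloud's API.

# Illustrative sketch of the opt-in boolean validation used above for
# private_networking and backups_enabled.
class ConfigError(ValueError):
    pass

def add_bool_option(kwargs, name, value):
    if value is None:                      # option absent from the profile
        return kwargs
    if not isinstance(value, bool):
        raise ConfigError("'{0}' should be a boolean value.".format(name))
    kwargs[name] = value                   # forwarded to the API call as-is
    return kwargs

print(add_bool_option({}, 'private_networking', True))   # {'private_networking': True}
print(add_bool_option({}, 'backups_enabled', None))      # {}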
@@ -15,6 +15,7 @@ configuration at ``/etc/salt/cloud.providers`` or
     provider: azure
     subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
     certificate_path: /etc/salt/azure.pem
+    management_host: management.core.windows.net

 Information on creating the pem file to use, and uploading the associated cer
 file can be found at:
@@ -76,7 +77,7 @@ def get_configured_provider():
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
-        ('subscription_id', 'certificate_path',)
+        ('subscription_id', 'certificate_path')
    )

@@ -92,8 +93,15 @@ def get_conn():
        'subscription_id',
        get_configured_provider(), __opts__, search_global=False
    )
+    management_host = config.get_cloud_config_value(
+        'management_host',
+        get_configured_provider(),
+        __opts__,
+        search_global=False,
+        default='management.core.windows.net'
+    )
    return azure.servicemanagement.ServiceManagementService(
-        subscription_id, certificate_path
+        subscription_id, certificate_path, management_host
    )

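Note: the effect of the hunk above is that the provider configuration gains an optional ``management_host`` which defaults to ``management.core.windows.net`` and is handed to ``ServiceManagementService`` as its third argument. A hedged, self-contained sketch of the lookup-with-default, with a plain dict standing in for the parsed provider configuration:

# Illustrative sketch: optional management_host with the documented default.
DEFAULT_MANAGEMENT_HOST = 'management.core.windows.net'

def get_management_host(provider_conf):
    return provider_conf.get('management_host', DEFAULT_MANAGEMENT_HOST)

provider_conf = {
    'subscription_id': '3287abc8-f98a-c678-3bde-326766fd3617',
    'certificate_path': '/etc/salt/azure.pem',
}
print(get_management_host(provider_conf))  # management.core.windows.net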
@@ -60,7 +60,7 @@ class RemoteMaster(ioflo.base.deeding.Deed):
        '''
        Set up required objects
        '''
-        self.remote = salt.masterapi.RemoteFuncs(self.opts.value)
+        self.remote = salt.daemons.masterapi.RemoteFuncs(self.opts.value)

    def action(self):
        '''
@@ -93,7 +93,7 @@ class LocalMaster(ioflo.base.deeding.Deed):
        '''
        Set up required objects
        '''
-        self.remote = salt.masterapi.LocalFuncs(self.opts.value)
+        self.remote = salt.daemons.masterapi.LocalFuncs(self.opts.value)

    def action(self):
        '''
@@ -445,7 +445,7 @@ def _stale_refs_pygit2(repo):
    key = ' * [would prune] '
    ret = []
    for line in subprocess.Popen(
-            'git remote prune --dry-run origin'.format(remote),
+            'git remote prune --dry-run origin',
            shell=True,
            close_fds=True,
            cwd=repo.workdir,
@@ -657,7 +657,7 @@ def update():
            # Prune stale refs
            for ref in repo.get_refs():
                if ref not in refs_post:
-                    del(repo[ref])
+                    del repo[ref]
        except Exception as exc:
            log.warning(
                'Exception caught while fetching: {0}'.format(exc)
@@ -766,7 +766,7 @@ class RaetKey(Key):
    def _get_key_str(self, minion_id, status):
        '''
        Return the key string in the form of:

        pub: <pub>
        verify: <verify>
        '''
@@ -62,6 +62,13 @@ try:
 except ImportError:
     HAS_HALITE = False

+try:
+    import systemd.daemon
+    HAS_PYTHON_SYSTEMD = True
+except ImportError:
+    HAS_PYTHON_SYSTEMD = False
+
+
 log = logging.getLogger(__name__)

@@ -466,6 +473,13 @@ class ReqServer(object):

        self.workers.bind(self.w_uri)

+        try:
+            if HAS_PYTHON_SYSTEMD and systemd.daemon.booted():
+                systemd.daemon.notify('READY=1')
+        except SystemError:
+            # Daemon wasn't started by systemd
+            pass
+
        while True:
            try:
                zmq.device(zmq.QUEUE, self.clients, self.workers)
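Note: the added block tells systemd the request server is ready once the worker sockets are bound, and stays silent when the process was not started by systemd. A standalone sketch of that guard, assuming the python-systemd bindings are available:

# Illustrative sketch of the guarded sd_notify readiness call added above.
try:
    import systemd.daemon          # python-systemd bindings
    HAS_PYTHON_SYSTEMD = True
except ImportError:
    HAS_PYTHON_SYSTEMD = False

def notify_ready():
    try:
        if HAS_PYTHON_SYSTEMD and systemd.daemon.booted():
            systemd.daemon.notify('READY=1')
    except SystemError:
        # Not started by systemd; nothing to notify.
        pass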
@@ -496,7 +496,7 @@ class MultiMinion(object):
            try:
                if not isinstance(minion, dict):
                    minions[master] = {'minion': minion}
-                t_minion = Minion(minion, 1, False)
+                t_minion = Minion(minion, 5, False)
                minions[master]['minion'] = t_minion
                minions[master]['generator'] = t_minion.tune_in_no_block()
                auth_wait = self.opts['acceptance_wait_time']
salt/modules/blockdev.py (new file, 110 lines)
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+'''
+Module for managing block devices
+'''
+
+# Import python libs
+import logging
+import subprocess
+
+# Import salt libs
+import salt.utils
+
+log = logging.getLogger(__name__)
+
+
+def __virtual__():
+    '''
+    Only work on POSIX-like systems
+    '''
+    if salt.utils.is_windows():
+        return False
+    return True
+
+
+def tune(device, **kwargs):
+    '''
+    Set attributes for the specified device
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt '*' blockdev.tune /dev/sda1 read-ahead=1024 read-write=True
+
+    Valid options are::
+
+        read-ahead:
+
+    See the ``blockdev(8)`` manpage for a more complete description of these
+    options.
+    '''
+
+    kwarg_map = {'read-ahead': 'setra',
+                 'filesystem-read-ahead': 'setfra',
+                 'read-only': 'setro',
+                 'read-write': 'setrw'}
+    opts = ''
+    args = []
+    for key in kwargs:
+        if key in kwarg_map:
+            switch = kwarg_map[key]
+            if key != 'read-write':
+                args.append(switch.replace('set', 'get'))
+            if kwargs[key] == 'True':
+                opts += '--{0} '.format(key)
+            else:
+                opts += '--{0} {1} '.format(switch, kwargs[key])
+    cmd = 'blockdev {0}{1}'.format(opts, device)
+    out = __salt__['cmd.run'](cmd).splitlines()
+    return dump(device, args)
+
+
+def wipe(device):
+    '''
+    Remove the filesystem information
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt '*' blockdev.wipe /dev/sda1
+    '''
+
+    cmd = 'wipefs {0}'.format(device)
+    try:
+        out = __salt__['cmd.run_all'](cmd)
+    except subprocess.CalledProcessError as err:
+        return False
+    if out['retcode'] == 0:
+        return True
+
+
+def dump(device, args=None):
+    '''
+    Return all contents of dumpe2fs for a specified device
+
+    CLI Example:
+    .. code-block:: bash
+
+        salt '*' extfs.dump /dev/sda1
+    '''
+    cmd = 'blockdev --getro --getsz --getss --getpbsz --getiomin --getioopt --getalignoff --getmaxsect --getsize --getsize64 --getra --getfra {0}'.format(device)
+    ret = {}
+    opts = [c[2:] for c in cmd.split() if c.startswith('--')]
+    out = __salt__['cmd.run_all'](cmd)
+    if out['retcode'] == 0:
+        lines = [line for line in out['stdout'].splitlines() if line]
+        count = 0
+        for line in lines:
+            ret[opts[count]] = line
+            count = count+1
+        if args:
+            temp_ret = {}
+            for arg in args:
+                temp_ret[arg] = ret[arg]
+            return temp_ret
+        else:
+            return ret
+    else:
+        return False
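Note: ``tune()`` above works by translating its keyword options into ``blockdev(8)`` switches before shelling out. A standalone sketch of that mapping; the builder function is illustrative and, unlike the module, always uses the mapped switch for flag options.

# Illustrative sketch of how tune()'s kwargs map onto blockdev(8) switches.
KWARG_MAP = {'read-ahead': 'setra',
             'filesystem-read-ahead': 'setfra',
             'read-only': 'setro',
             'read-write': 'setrw'}

def build_blockdev_cmd(device, **kwargs):
    opts = ''
    for key, val in kwargs.items():
        if key not in KWARG_MAP:
            continue
        switch = KWARG_MAP[key]
        if val is True or val == 'True':       # flag switches take no value
            opts += '--{0} '.format(switch)
        else:
            opts += '--{0} {1} '.format(switch, val)
    return 'blockdev {0}{1}'.format(opts, device)

print(build_blockdev_cmd('/dev/sda1', **{'read-ahead': 1024, 'read-write': True}))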
@@ -19,7 +19,7 @@ def __virtual__():
    '''
    if not salt.utils.which('dig'):
        return False
-    return 'dig'
+    return True


def check_ip(x):
@@ -288,25 +288,25 @@ def list_():
    frozen = []
    running = []

-    for c in ctnrs:
-        lines = __salt__['cmd.run']('lxc-info -n ' + c).splitlines()
+    for container in ctnrs:
+        c_infos = __salt__['cmd.run']('lxc-info -n {0}'.format(container))

-        for line in lines:
-            stat = line.split(':')
+        for info in c_infos:
+            stat = info.split(':')
            if stat[0] == 'state':
-                s = stat[1].strip()
+                state = stat[1].strip()
                break

-        if not len(s):
+        if not len(state):
            continue
-        if s == 'STOPPED':
-            stopped.append(c)
+        if state == 'STOPPED':
+            stopped.append(container)
            continue
-        if s == 'FROZEN':
-            frozen.append(c)
+        if state == 'FROZEN':
+            frozen.append(container)
            continue
-        if s == 'RUNNING':
-            running.append(c)
+        if state == 'RUNNING':
+            running.append(container)
            continue

    return {'running': running,
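Note: the refactor above classifies each container by the ``state:`` line of ``lxc-info -n <name>`` output. A self-contained sketch of that parsing over canned sample strings; no lxc call is made, and the per-container loop here splits on lines, which is what the string returned by ``cmd.run`` ultimately requires.

# Illustrative sketch: bucket containers by the 'state:' field of lxc-info output.
SAMPLE_INFO = {
    'web01': 'state:   RUNNING\npid:     1234',
    'db01':  'state:   STOPPED',
    'old01': 'state:   FROZEN',
}

def classify(infos):
    buckets = {'running': [], 'stopped': [], 'frozen': []}
    for name, info in infos.items():
        state = ''
        for line in info.splitlines():
            field = line.split(':')
            if field[0].strip() == 'state':
                state = field[1].strip()
                break
        if state == 'RUNNING':
            buckets['running'].append(name)
        elif state == 'STOPPED':
            buckets['stopped'].append(name)
        elif state == 'FROZEN':
            buckets['frozen'].append(name)
    return buckets

print(classify(SAMPLE_INFO))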
@@ -63,6 +63,7 @@ verbose : True
 # Import python Libs
 import json
 import os
+import urllib2

 # Import salt libs
 import salt.utils
@@ -222,6 +223,28 @@ def _format_url(handler, host=None, core_name=None, extra=None):
            host, port, baseurl, core_name, handler, "&".join(extra))


+def _auth(url):
+    '''
+    Install an auth handler for urllib2
+    '''
+    user = __salt__['config.get']('solr.user', False)
+    password = __salt__['config.get']('solr.passwd', False)
+    realm = __salt__['config.get']('solr.auth_realm', 'Solr')
+
+    if user and password:
+        basic = urllib2.HTTPBasicAuthHandler()
+        basic.add_password(
+            realm=realm, uri=url, user=user, passwd=password
+        )
+        digest = urllib2.HTTPDigestAuthHandler()
+        digest.add_password(
+            realm=realm, uri=url, user=user, passwd=password
+        )
+        urllib2.install_opener(
+            urllib2.build_opener(basic, digest)
+        )
+
+
 def _http_request(url, request_timeout=None):
    '''
    PRIVATE METHOD
@@ -237,6 +260,7 @@ def _http_request(url, request_timeout=None):

    {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
    '''
+    _auth(url)
    try:

        request_timeout = __salt__['config.option']('solr.request_timeout')
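Note: ``_auth()`` above registers both basic and digest handlers globally through ``urllib2.install_opener``, so the existing ``urllib2`` calls in ``_http_request`` pick up credentials transparently; the one-line call added in the second hunk wires it in. A minimal Python 2 sketch of the same opener setup, with plain variables in place of the ``solr.user`` / ``solr.passwd`` config lookups:

# Illustrative Python 2 sketch of installing basic+digest auth for urllib2.
import urllib2

def install_auth(url, user, password, realm='Solr'):
    if not (user and password):
        return
    basic = urllib2.HTTPBasicAuthHandler()
    basic.add_password(realm=realm, uri=url, user=user, passwd=password)
    digest = urllib2.HTTPDigestAuthHandler()
    digest.add_password(realm=realm, uri=url, user=user, passwd=password)
    # Later urllib2.urlopen() calls will try these handlers automatically.
    urllib2.install_opener(urllib2.build_opener(basic, digest))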
@@ -39,20 +39,22 @@ def _available_commands():
    if not zfs_path:
        return False

-    _return = {}
+    ret = {}
    # Note that we append '|| :' as a unix hack to force return code to be 0.
-    res = salt_cmd.run_all('{0} help || :'.format(zfs_path))
+    res = salt_cmd.run_stderr(
+        '{0} help || :'.format(zfs_path), output_loglevel='debug'
+    )

    # This bit is dependent on specific output from `zfs help` - any major changes
    # in how this works upstream will require a change.
-    for line in res['stderr'].splitlines():
+    for line in res.splitlines():
        if re.match(' [a-zA-Z]', line):
            cmds = line.split(' ')[0].split('|')
            doc = ' '.join(line.split(' ')[1:])
            for cmd in [cmd.strip() for cmd in cmds]:
-                if cmd not in _return:
-                    _return[cmd] = doc
-    return _return
+                if cmd not in ret:
+                    ret[cmd] = doc
+    return ret


def _exit_status(retcode):
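Note: ``zfs help`` prints its usage text on stderr, which is why the hunk above switches to ``cmd.run_stderr`` before building a subcommand-to-description map. A standalone sketch of that parsing over a hand-written usage sample (the sample text and regex are illustrative):

# Illustrative sketch: parse zfs-help style usage lines into {command: doc}.
import re

USAGE = """\
usage: zfs command args ...
where 'command' is one of the following:
	clone [-p] [-o property=value] ... <snapshot> <filesystem|volume>
	list [-rH] [-o property[,...]] [filesystem|volume|snapshot] ...
	mount|unmount [-f] <filesystem|mountpoint>
"""

def available_commands(usage):
    ret = {}
    for line in usage.splitlines():
        if not re.match(r'\s+[a-zA-Z]', line):
            continue
        cmds = line.split(' ')[0].split('|')
        doc = ' '.join(line.split(' ')[1:])
        for cmd in (c.strip() for c in cmds):
            ret.setdefault(cmd, doc)
    return ret

print(sorted(available_commands(USAGE)))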
@@ -174,9 +174,9 @@ def print_job(job_id):

 def _format_job_instance(job):
     return {'Function': job['fun'],
-            'Arguments': list(job['arg']),
+            'Arguments': list(job.get('arg', [])),
             'Target': job['tgt'],
-            'Target-type': job['tgt_type'],
+            'Target-type': job.get('tgt_type', []),
             'User': job.get('user', 'root')}

@@ -1351,7 +1351,9 @@ class State(object):
            # that's not found in cdata, we look for what we're being passed in
            # the original data, namely, the special dunder __env__. If that's
            # not found we default to 'base'
-            if cdata['kwargs'].get('env', None) is not None:
+            if 'saltenv' in low:
+                inject_globals['__env__'] = low['saltenv']
+            elif cdata['kwargs'].get('env', None) is not None:
                # User is using a deprecated env setting which was parsed by
                # format_call
                inject_globals['__env__'] = cdata['kwargs']['env']
@@ -1359,8 +1361,6 @@ class State(object):
                # The user is passing an alternative environment using __env__
                # which is also not the appropriate choice, still, handle it
                inject_globals['__env__'] = low['__env__']
-            elif 'saltenv' in low:
-                inject_globals['__env__'] = low['saltenv']
            else:
                # Let's use the default environment
                inject_globals['__env__'] = 'base'
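Note: taken together, the two hunks above change the ``__env__`` precedence so that an explicit ``saltenv`` in the low data wins, followed by the deprecated ``env`` kwarg, then ``__env__`` in the low data, and finally ``'base'``. A tiny standalone sketch of that ordering (function name and arguments are illustrative):

# Illustrative sketch of the environment precedence after the change:
# low['saltenv'] > deprecated kwargs['env'] > low['__env__'] > 'base'.
def pick_env(low, kwargs):
    if 'saltenv' in low:
        return low['saltenv']
    if kwargs.get('env') is not None:      # deprecated, still honoured
        return kwargs['env']
    if '__env__' in low:
        return low['__env__']
    return 'base'

print(pick_env({'saltenv': 'dev'}, {'env': 'qa'}))   # dev
print(pick_env({}, {}))                              # base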
salt/states/blockdev.py (new file, 78 lines)
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+'''
+Management of Block Devices
+===================================
+
+A state module to manage blockdevices
+
+.. code-block:: yaml
+
+
+    /dev/sda:
+      blockdev.tuned:
+        - read-only: True
+
+    master-data:
+      blockdev:
+        - tuned:
+        - name : /dev/vg/master-data
+        - read-only: True
+        - read-ahead: 1024
+
+
+'''
+
+# Import salt libs
+import salt.utils
+
+
+def __virtual__():
+    '''
+    Only work on POSIX-like systems
+    '''
+    if salt.utils.is_windows():
+        return False
+    return True
+
+
+def tuned(name, **kwargs):
+    '''
+    Manage options of block device
+
+    name
+        The name of the block device
+
+    opts:
+      - read-ahead
+          Read-ahead buffer size
+
+      - filesystem-read-ahead
+          Filesystem Read-ahead buffer size
+
+      - read-only
+          Set Read-Only
+
+      - read-write
+          Set Read-Write
+    '''
+
+    ret = {'changes': {},
+           'comment': '',
+           'name': name,
+           'result': True}
+
+    if not __salt__['file.is_blkdev']:
+        ret['comment'] = 'Changes to {0} cannot be applied. Not a block device '.format(name)
+    elif __opts__['test']:
+        ret['comment'] = 'Changes to {0} will be applied '.format(name)
+        ret['result'] = None
+        return ret
+    else:
+        changes = __salt__['blockdev.tune'](name, **kwargs)
+        if changes:
+            ret['comment'] = 'Block device {0} successfully modified '.format(name)
+            ret['changes'] = changes
+        else:
+            ret['comment'] = 'Failed to modify block device {0}'.format(name)
+            ret['result'] = False
+    return ret
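Note: ``tuned()`` above follows the usual Salt state contract: ``result: None`` in test mode, ``result: True`` with a ``changes`` dict on success, and ``result: False`` otherwise. A generic, self-contained sketch of that three-outcome pattern; the boolean flags and the callable are stand-ins for the ``__opts__`` and ``__salt__`` lookups.

# Illustrative sketch of the state result pattern used by blockdev.tuned.
def tuned_result(name, is_blkdev, test, apply_changes):
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': True}
    if not is_blkdev:
        ret['comment'] = 'Changes to {0} cannot be applied. Not a block device'.format(name)
    elif test:
        ret['comment'] = 'Changes to {0} will be applied'.format(name)
        ret['result'] = None
    else:
        changes = apply_changes(name)
        if changes:
            ret['comment'] = 'Block device {0} successfully modified'.format(name)
            ret['changes'] = changes
        else:
            ret['comment'] = 'Failed to modify block device {0}'.format(name)
            ret['result'] = False
    return ret

print(tuned_result('/dev/sda', True, True, lambda n: {}))   # test mode -> result None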
@@ -43,6 +43,7 @@ class Signer(object):
        '''
        return self.key.sign(msg).signature

+
 class Verifier(object):
    '''
    Used to verify messages with nacl digital signature
@@ -69,6 +70,7 @@ class Verifier(object):
            return False
        return True

+
 class Publican(object):
    '''
    Container to manage remote nacl public key
@@ -140,6 +142,3 @@ class Privateer(object):
        '''
        box = nacl.public.Box(self.key, pub.key)
        return box.decrypt(cipher, nonce)
-
-
-
@@ -6,7 +6,6 @@ packeting module provides classes for Raet packets
 '''

 # Import python libs
-import socket
 from collections import Mapping
 try:
     import simplejson as json
@@ -19,6 +18,7 @@ from ioflo.base.aiding import packByte, unpackByte

 from . import raeting

+
 class Part(object):
     '''
     Base class for parts of a RAET packet
@@ -29,8 +29,8 @@ class Part(object):
        '''
        Setup Part instance
        '''
        self.packet = packet # Packet this Part belongs too
        self.kind = kind # part kind
        self.packed = ''

    def __len__(self):
@@ -41,10 +41,11 @@ class Part(object):

    @property
    def size(self):
        '''
        Property is the length of this Part
        '''
-        return (self.__len__())
+        return self.__len__()

+
 class Head(Part):
    '''
@@ -57,6 +58,7 @@ class Head(Part):
        '''
        super(Head, self).__init__(**kwa)

+
 class TxHead(Head):
    '''
    RAET protocl transmit packet header class
@@ -66,8 +68,8 @@ class TxHead(Head):
        Composes and returns .packed, which is the packed form of this part
        '''
        self.packed = ''
-        self.kind= self.packet.data['hk']
+        self.kind = self.packet.data['hk']
        data = self.packet.data # for speed

        data['pk'] = self.packet.kind
        data['nk'] = self.packet.neck.kind
@@ -81,10 +83,10 @@ class TxHead(Head):

        # kit always includes header kind and length fields
        kit = odict([('hk', self.kind), ('hl', 0)])
-        for k, v in raeting.PACKET_DEFAULTS.items():# include if not equal to default
-            if ( (k in raeting.HEAD_FIELDS) and
-                 (k not in raeting.PACKET_FLAGS ) and
+        for k, v in raeting.PACKET_DEFAULTS.items():  # include if not equal to default
+            if ((k in raeting.HEAD_FIELDS) and
+                (k not in raeting.PACKET_FLAGS) and
                (data[k] != v)):
                kit[k] = data[k]

        if self.kind == raeting.headKinds.json:
@@ -109,6 +111,7 @@ class TxHead(Head):
            values.append(1 if self.packet.data.get(field, 0) else 0)
        return packByte(format='11111111', fields=values)

+
 class RxHead(Head):
    '''
    RAET protocl receive packet header class
@@ -121,8 +124,8 @@ class RxHead(Head):
        Returns False and updates .packet.error if failure occurs
        '''
        self.packed = ''
-        data = self.packet.data #for speed
-        packed = self.packet.packed #for speed
+        data = self.packet.data  # for speed
+        packed = self.packet.packed  # for speed
        if packed.startswith('{"hk":1,') and raeting.JSON_END in packed: # json header
            self.kind = raeting.headKinds.json
            front, sep, back = packed.partition(raeting.JSON_END)
@@ -171,6 +174,7 @@ class Neck(Part):
        '''
        super(Neck, self).__init__(**kwa)

+
 class TxNeck(Neck):
    '''
    RAET protocol transmit packet neck class
@@ -181,7 +185,7 @@ class TxNeck(Neck):
        Composes and returns .packed, which is the packed form of this part
        '''
        self.packed = ''
-        self.kind= self.packet.data['nk']
+        self.kind = self.packet.data['nk']

        if self.kind not in raeting.NECK_KIND_NAMES:
            self.kind = raeting.neckKinds.unknown
@@ -196,6 +200,7 @@ class TxNeck(Neck):

        return self.packed

+
 class RxNeck(Neck):
    '''
    RAET protocol receive packet neck class
@@ -235,6 +240,7 @@ class Body(Part):
        super(Body, self).__init__(**kwa)
        self.data = data or odict()

+
 class TxBody(Body):
    '''
    RAET protocol tx packet body class
@@ -244,11 +250,12 @@ class TxBody(Body):
        Composes and returns .packed, which is the packed form of this part
        '''
        self.packed = ''
-        self.kind= self.packet.data['bk']
+        self.kind = self.packet.data['bk']
        if self.kind == raeting.bodyKinds.json:
            self.packed = json.dumps(self.data, separators=(',', ':'))
        return self.packed

+
 class RxBody(Body):
    '''
    RAET protocol rx packet body class
@@ -291,6 +298,7 @@ class Tail(Part):
        ''' Setup Tail instal'''
        super(Tail, self).__init__(**kwa)

+
 class TxTail(Tail):
    '''
    RAET protocol tx packet tail class
@@ -301,7 +309,7 @@ class TxTail(Tail):
        Composes and returns .packed, which is the packed form of this part
        '''
        self.packed = ''
-        self.kind= self.packet.data['tk']
+        self.kind = self.packet.data['tk']

        if self.kind == raeting.tailKinds.nacl:
            self.packed = "".rjust(raeting.tailSizes.nacl, '\x00')
@@ -310,6 +318,7 @@ class TxTail(Tail):
            pass
        return self.packed

+
 class RxTail(Tail):
    '''
    RAET protocol rx packet tail class
@@ -345,7 +354,7 @@ class Packet(object):
    def __init__(self, kind=None):
        ''' Setup Packet instance. Meta data for a packet. '''
        self.kind = kind or raeting.PACKET_DEFAULTS['pk']
-        self.packed = '' #packed string
+        self.packed = ''  # packed string
        self.error = ''
        self.data = odict(raeting.PACKET_DEFAULTS)

@@ -363,7 +372,8 @@ class Packet(object):
        self.data = odict(raeting.PACKET_DEFAULTS)
        if data:
            self.data.update(data)
        return self # so can method chain

+
 class TxPacket(Packet):
    '''
@@ -468,7 +478,7 @@ class RxPacket(Packet):
        if not self.unpack():
            return False

        if self.data['vn'] not in raeting.VERSIONS.values():
            self.error = ("Received incompatible version '{0}'"
                          "version '{1}'".format(self.data['vn']))
            return False
@@ -506,4 +516,4 @@ class RxPacket(Packet):
        '''
        Uses tail to validate body such as decrypt
        '''
        return True
@@ -137,7 +137,7 @@ TailKind = namedtuple('TailKind', TAIL_KINDS.keys())
 tailKinds = TailKind(**TAIL_KINDS)

 # bytes
 TAIL_SIZES = odict([('nada', 0), ('nacl', 8), ('crc16', 2), ('crc64', 8),
                     ('unknown', 0)])
 TailSize = namedtuple('TailSize', TAIL_SIZES.keys())
 tailSizes = TailSize(**TAIL_SIZES)
@@ -189,14 +189,14 @@ PACKET_DEFAULTS = odict([
    ('fg', '00'),
    ])

-PACKET_FIELDS = [ 'sh', 'sp', 'dh', 'dp',
+PACKET_FIELDS = ['sh', 'sp', 'dh', 'dp',
                 'hk', 'hl', 'vn', 'sd', 'dd', 'cf', 'bf', 'si', 'ti', 'sk', 'pk',
                 'sf', 'oi', 'dt', 'sn', 'sc', 'pf', 'af',
                 'nk', 'nl', 'bk', 'bl', 'tk', 'tl', 'fg']

-HEAD_FIELDS = [ 'hk', 'hl', 'vn', 'sd', 'dd', 'cf', 'bf', 'si', 'ti', 'sk', 'pk',
+HEAD_FIELDS = ['hk', 'hl', 'vn', 'sd', 'dd', 'cf', 'bf', 'si', 'ti', 'sk', 'pk',
               'sf', 'oi', 'dt', 'sn', 'sc', 'pf', 'af',
-               'nk', 'nl', 'bk', 'bl','tk', 'tl', 'fg']
+               'nk', 'nl', 'bk', 'bl', 'tk', 'tl', 'fg']

 PACKET_FLAGS = ['af', 'pf', 'sf', 'bf', 'cf']
 PACKET_FLAG_FIELDS = ['', '', 'af', 'pf', '', 'sf', 'bf', 'cf']
@@ -209,12 +209,12 @@ class RaetError(Exception):
        msg = "Invalid device id '{0}'".format(did)
        raise raeting.RaetError(msg)
    """
-    def __init__(self, message = None):
-        self.message = message #description of error
-        self.args = (message)
+    def __init__(self, message=None):
+        self.message = message  # description of error
+        super(RaetError, self).__init__(message)

    def __str__(self):
-        return ("{0}: {1}.\n".format(self.__class__.__name__, self.message))
+        return "{0}: {1}.\n".format(self.__class__.__name__, self.message)


def defaultData(data=None):
@@ -28,11 +28,11 @@ class Stack(object):
    '''
    RAET protocol stack object
    '''
-    def __init__( self,
+    def __init__(self,
                 version=raeting.VERSION,
                 device=None,
                 did=None,
                 ha=("", raeting.RAET_PORT)):
        '''
        Setup Stack instance
        '''
@@ -41,12 +41,11 @@ class Stack(object):
        # local device for this stack
        self.device = device or LocalDevice(stack=self, did=did, ha=ha)
        self.transactions = odict() #transactions
-
        self.rxdsUdp = deque()
        self.txdsUdp = deque()
        self.serverUdp = aiding.SocketUdpNb(ha=self.device.ha)
        self.serverUdp.reopen()  # open socket
        self.device.ha = self.serverUdp.ha  # update device host address after open

    def addRemoteDevice(self, device, did=None):
        '''
@@ -117,14 +116,13 @@ class Stack(object):

        sh, sp = ra
        dh, dp = da
-        packet.data.update(sh=sh, sp=sp, dh=dh, dp=dp )
+        packet.data.update(sh=sh, sp=sp, dh=dh, dp=dp)

        if not packet.parseBack():
            return None

        return packet

-
    def txUdp(self, packed, ddid):
        '''
        Queue duple of (packed, da) on stack transmit queue
@@ -143,12 +141,12 @@ class Device(object):
    '''
    Did = 2 # class attribute

-    def __init__( self, stack=None, did=None, sid=0, tid=0,
+    def __init__(self, stack=None, did=None, sid=0, tid=0,
                 host="", port=raeting.RAET_PORT, ha=None, ):
        '''
        Setup Device instance
        '''
        self.stack = stack # Stack object that manages this device
        if did is None:
            if self.stack:
                while Device.Did in self.stack.devices:
@@ -164,7 +162,7 @@ class Device(object):
        self.sid = sid # current session ID
        self.tid = tid # current transaction ID

-        if ha: #takes precendence
+        if ha:  # takes precendence
            host, port = ha
        self.host = socket.gethostbyname(host)
        self.port = port
@@ -185,8 +183,8 @@ class Device(object):
        Generates next session id number.
        '''
        self.sid += 1
-        if (self.sid > 0xffffffffL):
+        if self.sid > 0xffffffffL:
            self.sid = 1 # rollover to 1
        return self.sid

    def nextTid(self):
@@ -194,10 +192,11 @@ class Device(object):
        Generates next session id number.
        '''
        self.tid += 1
-        if (self.tid > 0xffffffffL):
+        if self.tid > 0xffffffffL:
            self.tid = 1 # rollover to 1
        return self.tid

+
 class LocalDevice(Device):
    '''
    RAET protocol endpoint local device object
@@ -214,6 +213,7 @@ class LocalDevice(Device):
        self.signer = nacling.Signer(signkey)
        self.priver = nacling.Privateer(prikey) # Long term key

+
 class RemoteDevice(Device):
    '''
    RAET protocol endpoint remote device object
@@ -234,6 +234,7 @@ class RemoteDevice(Device):
        self.publee = nacling.Publican() # short term key
        self.privee = nacling.Privateer() # short term key

+
 class Transaction(object):
    '''
    RAET protocol transaction class
@@ -248,7 +249,7 @@ class Transaction(object):
        self.kind = kind or raeting.PACKET_DEFAULTS['sk']

        # local device is the .stack.device
        self.rdid = rdid # remote device did

        self.crdr = crdr
        self.bcst = bcst
@@ -258,8 +259,8 @@ class Transaction(object):

        self.rxData = rxData or odict()
        self.txData = txData or odict()
        self.rxPacket = None # last rx packet
        self.txPacket = None # last tx packet

    def transmit(self, packet):
        '''
@@ -276,13 +277,14 @@ class Initiator(Transaction):
        '''
        Setup Transaction instance
        '''
        crdr = False # force crdr to False
        super(Initiator, self).__init__(crdr=crdr, **kwa)
        if self.sid is None: # use current session id of local device
            self.sid = self.stack.device.sid
        if self.tid is None: # use next tid
            self.tid = self.stack.device.nextTid()

+
 class Corresponder(Transaction):
    '''
    RAET protocol corresponder transaction class
@@ -291,9 +293,10 @@ class Corresponder(Transaction):
        '''
        Setup Transaction instance
        '''
        crdr = True # force crdr to True
        super(Corresponder, self).__init__(crdr=crdr, **kwa)

+
 class Joiner(Initiator):
    '''
    RAET protocol Joiner transaction class Dual of Acceptor
@@ -2,4 +2,4 @@
 '''
 package

 '''
|
|||||||
from salt.transport.road.raet import packeting
|
from salt.transport.road.raet import packeting
|
||||||
from ioflo.base.odicting import odict
|
from ioflo.base.odicting import odict
|
||||||
|
|
||||||
|
|
||||||
def test():
|
def test():
|
||||||
data = odict(hk=1, bk=1, bf=1, cf=1)
|
data = odict(hk=1, bk=1, bf=1, cf=1)
|
||||||
body=odict(msg='Hello Raet World', extra='what is this')
|
body = odict(msg='Hello Raet World', extra='what is this')
|
||||||
packet1 = packeting.Packet(data=data, body=body)
|
packet1 = packeting.Packet(data=data, body=body)
|
||||||
print packet1.body.data
|
print packet1.body.data
|
||||||
print packet1.pack()
|
print packet1.pack()
|
||||||
|
|
||||||
|
|
||||||
packet2 = packeting.Packet()
|
packet2 = packeting.Packet()
|
||||||
packet2.parse(packet1.packed)
|
packet2.parse(packet1.packed)
|
||||||
print packet2.body.data
|
print packet2.body.data
|
||||||
@ -27,6 +27,5 @@ def test():
|
|||||||
print packet1.pack()
|
print packet1.pack()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
test()
|
test()
|
||||||
|
@ -14,20 +14,17 @@ def test():
|
|||||||
privateer = nacling.Privateer()
|
privateer = nacling.Privateer()
|
||||||
masterPriKeyHex = privateer.keyhex
|
masterPriKeyHex = privateer.keyhex
|
||||||
|
|
||||||
|
|
||||||
signer = nacling.Signer()
|
signer = nacling.Signer()
|
||||||
minionSignKeyHex = signer.keyhex
|
minionSignKeyHex = signer.keyhex
|
||||||
privateer = nacling.Privateer()
|
privateer = nacling.Privateer()
|
||||||
masterPriKeyHex = privateer.keyhex
|
masterPriKeyHex = privateer.keyhex
|
||||||
|
|
||||||
|
|
||||||
# initially
|
# initially
|
||||||
# master on port 7530 with did of 1
|
# master on port 7530 with did of 1
|
||||||
# minion on port 7531 with did of 0
|
# minion on port 7531 with did of 0
|
||||||
# eventually
|
# eventually
|
||||||
# minion did of 2
|
# minion did of 2
|
||||||
|
|
||||||
|
|
||||||
#master stack
|
#master stack
|
||||||
device = stacking.LocalDevice( did=1,
|
device = stacking.LocalDevice( did=1,
|
||||||
signkey=masterSignKeyHex,
|
signkey=masterSignKeyHex,
|
||||||
@ -48,6 +45,7 @@ def test():
|
|||||||
data = odict(hk=1, bk=1)
|
data = odict(hk=1, bk=1)
|
||||||
joiner = stacking.Joiner(stack=stack2, sid=0, txData=data)
|
joiner = stacking.Joiner(stack=stack2, sid=0, txData=data)
|
||||||
joiner.join()
|
joiner.join()
|
||||||
|
|
||||||
stack2.serviceUdp()
|
stack2.serviceUdp()
|
||||||
stack1.serviceUdp()
|
stack1.serviceUdp()
|
||||||
|
|
||||||
@ -89,6 +87,5 @@ def test():
|
|||||||
joiner.pend(packet.data)
|
joiner.pend(packet.data)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
test()
|
test()
|
||||||
@@ -11,8 +11,8 @@ from salt.transport import table

 def test_table():

     bob_pub = table.Public()
     print json.dumps(bob_pub.keydata, indent=2)

     #print bob_pub.backend
     #print bob_pub.sec_backend
@@ -30,8 +30,7 @@ def test_table():

     signature = bob_pub.signature("What have we here.")
     print signature
     print signature == signed.signature



 if __name__ == "__main__":
@@ -710,15 +710,21 @@ def path_join(*parts):
     ))


-def pem_finger(path, sum_type='md5'):
+def pem_finger(path=None, key=None, sum_type='md5'):
     '''
-    Pass in the location of a pem file and the type of cryptographic hash to
-    use. The default is md5.
+    Pass in either a raw pem string, or the path on disk to the location of a
+    pem file, and the type of cryptographic hash to use. The default is md5.
+
+    The fingerprint of the pem will be returned.
+
+    If neither a key nor a path are passed in, a blank string will be returned.
     '''
-    if not os.path.isfile(path):
-        return ''
-    with fopen(path, 'rb') as fp_:
-        key = ''.join(fp_.readlines()[1:-1])
+    if not key:
+        if not os.path.isfile(path):
+            return ''
+
+        with fopen(path, 'rb') as fp_:
+            key = ''.join(fp_.readlines()[1:-1])

     pre = getattr(hashlib, sum_type)(key).hexdigest()
     finger = ''
     for ind in range(len(pre)):
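The reworked `pem_finger` can now fingerprint key material that is already in memory (via the new `key` argument) as well as a pem file on disk. A rough usage sketch, assuming it lives in `salt.utils` as the cloud code below suggests; the key text and file path are placeholders, not real data:

    import salt.utils

    # placeholder pem text; any real public key string would do
    pub = '-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----\n'

    print salt.utils.pem_finger(key=pub, sum_type='sha256')               # hash the raw string
    print salt.utils.pem_finger(path='/etc/salt/pki/minion/minion.pub')   # or read a file, as before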
@@ -15,6 +15,7 @@ import subprocess
 import multiprocessing
 import logging
 import pipes
+import json
 import re

 # Let's import pwd and catch the ImportError. We'll raise it if this is not
@@ -1484,7 +1485,135 @@ def list_nodes_select(nodes, selection, call=None):
     return ret


-def salt_cloud_force_ascii(exc):
+def init_cachedir(base=None):
+    '''
+    Initialize the cachedir needed for Salt Cloud to keep track of minions
+    '''
+    if base is None:
+        base = os.path.join(syspaths.CACHE_DIR, 'cloud')
+    needed_dirs = (base,
+                   os.path.join(base, 'requested'),
+                   os.path.join(base, 'active'))
+    for dir_ in needed_dirs:
+        if not os.path.exists(dir_):
+            os.makedirs(dir_)
+    os.chmod(base, 0755)
+
+
+def request_minion_cachedir(
+        minion_id,
+        fingerprint='',
+        pubkey=None,
+        provider=None,
+        base=None,
+    ):
+    '''
+    Creates an entry in the requested/ cachedir. This means that Salt Cloud has
+    made a request to a cloud provider to create an instance, but it has not
+    yet verified that the instance properly exists.
+
+    If the fingerprint is unknown, a raw pubkey can be passed in, and a
+    fingerprint will be calculated. If both are empty, then the fingerprint
+    will be set to None.
+    '''
+    if base is None:
+        base = os.path.join(syspaths.CACHE_DIR, 'cloud')
+
+    if not fingerprint:
+        if pubkey is not None:
+            fingerprint = salt.utils.pem_finger(key=pubkey)
+
+    init_cachedir(base)
+
+    data = {
+        'minion_id': minion_id,
+        'fingerprint': fingerprint,
+        'provider': provider,
+    }
+
+    fname = '{0}.json'.format(minion_id)
+    path = os.path.join(base, 'requested', fname)
+    with salt.utils.fopen(path, 'w') as fh_:
+        json.dump(data, fh_)
+
+
+def change_minion_cachedir(
+        minion_id,
+        cachedir,
+        data=None,
+        base=None,
+    ):
+    '''
+    Changes the info inside a minion's cachedir entry. The type of cachedir
+    must be specified (i.e., 'requested' or 'active'). A dict is also passed in
+    which contains the data to be changed.
+
+    Example:
+
+        change_minion_cachedir(
+            'myminion',
+            'requested',
+            {'fingerprint': '26:5c:8c:de:be:fe:89:c0:02:ed:27:65:0e:bb:be:60'},
+        )
+    '''
+    if not isinstance(data, dict):
+        return False
+
+    if base is None:
+        base = os.path.join(syspaths.CACHE_DIR, 'cloud')
+
+    fname = '{0}.json'.format(minion_id)
+    path = os.path.join(base, cachedir, fname)
+
+    with salt.utils.fopen(path, 'r') as fh_:
+        cache_data = json.load(fh_)
+
+    cache_data.update(data)
+
+    with salt.utils.fopen(path, 'w') as fh_:
+        json.dump(cache_data, fh_)
+
+
+def activate_minion_cachedir(minion_id, base=None):
+    '''
+    Moves a minion from the requested/ cachedir into the active/ cachedir. This
+    means that Salt Cloud has verified that a requested instance properly
+    exists, and should be expected to exist from here on out.
+    '''
+    if base is None:
+        base = os.path.join(syspaths.CACHE_DIR, 'cloud')
+
+    fname = '{0}.json'.format(minion_id)
+    src = os.path.join(base, 'requested', fname)
+    dst = os.path.join(base, 'active')
+    shutil.move(src, dst)
+
+
+def delete_minion_cachedir(minion_id, base=None):
+    '''
+    Deletes a minion's entry from the cloud cachedir. It will search through
+    all cachedirs to find the minion's cache file.
+    '''
+    if base is None:
+        base = os.path.join(syspaths.CACHE_DIR, 'cloud')
+
+    fname = '{0}.json'.format(minion_id)
+    for cachedir in ('requested', 'active'):
+        path = os.path.join(base, cachedir, fname)
+        if os.path.exists(path):
+            os.remove(path)
+
+
+def _salt_cloud_force_ascii(exc):
+    '''
+    Helper method to try its best to convert any Unicode text into ASCII
+    without stack tracing since salt internally does not handle Unicode strings
+
+    This method is not supposed to be used directly. Once
+    `py:module: salt.utils.cloud` is imported this method register's with
+    python's codecs module for proper automatic conversion in case of encoding
+    errors.
+    '''
     if not isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
         raise TypeError('Can\'t handle {0}'.format(exc))

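Taken together, the new helpers give Salt Cloud a small on-disk lifecycle for the minions it creates: an entry is written under requested/ when the instance is asked for, optionally amended, promoted to active/ once the instance is confirmed, and removed when it is destroyed. A rough sketch of that flow, assuming the functions land in `salt.utils.cloud`; the minion id, provider name, fingerprint and key below are made up:

    import salt.utils.cloud as cloud_utils

    pubkey = '-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----\n'  # placeholder

    cloud_utils.init_cachedir()  # ensure <cache>/cloud/{requested,active}/ exist

    # 1. an instance has been requested from the provider
    cloud_utils.request_minion_cachedir('web01', pubkey=pubkey, provider='my-ec2-config')

    # 2. amend the pending entry while it is still in requested/
    cloud_utils.change_minion_cachedir('web01', 'requested', {'fingerprint': 'aa:bb:cc'})

    # 3. instance verified -> promote it; destroyed -> drop the entry
    cloud_utils.activate_minion_cachedir('web01')
    cloud_utils.delete_minion_cachedir('web01')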
@@ -1499,4 +1628,4 @@ def salt_cloud_force_ascii(exc):
     # There's nothing else we can do, raise the exception
     raise exc

-codecs.register_error('salt-cloud-force-ascii', salt_cloud_force_ascii)
+codecs.register_error('salt-cloud-force-ascii', _salt_cloud_force_ascii)
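`codecs.register_error` only wires the handler up under the name 'salt-cloud-force-ascii'; it fires whenever a caller encodes with that error scheme. A standalone sketch of the mechanism, using a demo handler rather than Salt's:

    import codecs

    def force_ascii_demo(exc):
        if not isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
            raise TypeError('Can\'t handle {0}'.format(exc))
        # replace the offending span with '?' and resume encoding after it
        return (u'?', exc.end)

    codecs.register_error('force-ascii-demo', force_ascii_demo)
    print u'sm\xf8rbr\xf8d'.encode('ascii', 'force-ascii-demo')  # sm?rbr?d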
@@ -171,7 +171,7 @@ def _interfaces_ip(out):
         for line in group.splitlines():
             if not ' ' in line:
                 continue
-            match = re.match(r'^\d*:\s+([\w.]+)(?:@)?([\w.]+)?:\s+<(.+)>', line)
+            match = re.match(r'^\d*:\s+([\w.-]+)(?:@)?([\w.-]+)?:\s+<(.+)>', line)
             if match:
                 iface, parent, attrs = match.groups()
                 if 'UP' in attrs.split(','):
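The only change here widens the interface-name character class from `[\w.]` to `[\w.-]`, so names containing dashes (bridge- and OpenStack-style names) no longer slip past the parser. A quick check with a made-up `ip addr` header line:

    import re

    line = '4: br-mgmt: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP'
    old = re.match(r'^\d*:\s+([\w.]+)(?:@)?([\w.]+)?:\s+<(.+)>', line)
    new = re.match(r'^\d*:\s+([\w.-]+)(?:@)?([\w.-]+)?:\s+<(.+)>', line)
    print old            # None: the dash stops the old pattern before the colon
    print new.group(1)   # br-mgmt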
@@ -258,7 +258,9 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None):
             unicode_context[key] = unicode(value, 'utf-8')

     try:
-        output = jinja_env.from_string(tmplstr).render(**unicode_context)
+        template = jinja_env.from_string(tmplstr)
+        template.globals.update(unicode_context)
+        output = template.render(**unicode_context)
     except jinja2.exceptions.TemplateSyntaxError as exc:
         trace = traceback.extract_tb(sys.exc_info()[2])
         line, out = _get_jinja_error(trace, context=unicode_context)
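Pushing the context into `template.globals` (in addition to passing it to `render()`) presumably makes those values behave like template globals, so they stay visible in places that consult globals rather than the per-render variables. A standalone jinja2 sketch of the mechanism, not of Salt's renderer:

    import jinja2

    env = jinja2.Environment()
    template = env.from_string('id is {{ grains["id"] }}')
    template.globals.update({'grains': {'id': 'saltdev'}})
    print template.render()                        # id is saltdev (no kwargs needed)
    print template.render(grains={'id': 'other'})  # per-render values still win: id is other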
@@ -6,7 +6,7 @@
 # Import Salt Testing libs
 from salttesting import skipIf
 from salttesting.helpers import ensure_in_syspath
-from salttesting.mock import patch, call, NO_MOCK, NO_MOCK_REASON
+from salttesting.mock import patch, NO_MOCK, NO_MOCK_REASON

 ensure_in_syspath('../')

@@ -5,8 +5,8 @@

 # Import Salt Testing libs
 from salttesting import skipIf
-from salttesting.helpers import (ensure_in_syspath, destructiveTest)
-from salttesting.mock import patch, call, NO_MOCK, NO_MOCK_REASON
+from salttesting.helpers import ensure_in_syspath
+from salttesting.mock import patch, NO_MOCK, NO_MOCK_REASON
 ensure_in_syspath('../')

 # Import salt libs
@@ -19,7 +19,6 @@ except ImportError:
 # Import salt libs
 import integration
 import salt.utils
-from salt.exceptions import CommandExecutionError

 SUBSALT_DIR = os.path.join(integration.TMP, 'subsalt')
 AUTHORIZED_KEYS = os.path.join(SUBSALT_DIR, 'authorized_keys')
tests/unit/modules/blockdev_test.py (new file, 47 lines)
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+# Import Salt Testing Libs
+from salttesting.unit import skipIf, TestCase
+from salttesting.helpers import ensure_in_syspath
+from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
+ensure_in_syspath('../../')
+
+# Import Salt Libs
+import salt.modules.blockdev as blockdev
+
+blockdev.__salt__ = {
+    'cmd.has_exec': MagicMock(return_value=True),
+    'config.option': MagicMock(return_value=None)
+}
+
+
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+class TestBlockdevModule(TestCase):
+    def test_dump(self):
+        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
+        with patch.dict(blockdev.__salt__, {'cmd.run_all': mock}):
+            blockdev.dump('/dev/sda')
+            mock.assert_called_once_with(
+                'blockdev --getro --getsz --getss --getpbsz --getiomin '
+                '--getioopt --getalignoff --getmaxsect --getsize '
+                '--getsize64 --getra --getfra /dev/sda'
+            )
+
+    def test_wipe(self):
+        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
+        with patch.dict(blockdev.__salt__, {'cmd.run_all': mock}):
+            blockdev.wipe('/dev/sda')
+            mock.assert_called_once_with(
+                'wipefs /dev/sda'
+            )
+
+    def test_tune(self):
+        mock = MagicMock(return_value='712971264\n512\n512\n512\n0\n0\n88\n712971264\n365041287168\n512\n512')
+        with patch.dict(blockdev.__salt__, {'cmd.run': mock}):
+            mock_dump = MagicMock(return_value={'retcode': 0, 'stdout': ''})
+            with patch('salt.modules.blockdev.dump', mock_dump):
+                kwargs = {'read-ahead': 512, 'filesystem-read-ahead': 512}
+                blockdev.tune('/dev/sda', **kwargs)
+                mock.assert_called_once_with(
+                    'blockdev --setra 512 --setfra 512 /dev/sda'
+                )
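The new test module leans on `patch.dict` to swap entries in the module-level `__salt__` mapping for the duration of each test and restore them afterwards. A tiny standalone sketch of that pattern; the dictionary and command string below are made up:

    from mock import MagicMock, patch

    __salt__ = {'cmd.run_all': lambda cmd: {'retcode': 1, 'stdout': 'real'}}

    fake = MagicMock(return_value={'retcode': 0, 'stdout': ''})
    with patch.dict(__salt__, {'cmd.run_all': fake}):
        __salt__['cmd.run_all']('blockdev --getra /dev/sda')
        fake.assert_called_once_with('blockdev --getra /dev/sda')

    print __salt__['cmd.run_all']('x')['stdout']  # 'real' again once the patch exits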
@@ -1,15 +1,20 @@
 # -*- coding: utf-8 -*-
 '''
-:codauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
+:codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
 '''

 # Import Salt Testing Libs
 from salttesting import TestCase, skipIf
-from salttesting.mock import MagicMock, patch, call
+from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON
+from salttesting.helpers import ensure_in_syspath, requires_salt_modules
+ensure_in_syspath('../../')

+# Import salt libs
 from salt.modules import dig

-@skipIf(not dig.__virtual__(), 'Dig must be installed')
+@skipIf(NO_MOCK, NO_MOCK_REASON)
+@requires_salt_modules('dig')
 class DigTestCase(TestCase):

     def test_check_ip(self):
@@ -18,13 +23,92 @@ class DigTestCase(TestCase):
     def test_check_ip_ipv6(self):
         self.assertTrue(dig.check_ip('1111:2222:3333:4444:5555:6666:7777:8888'), msg='Not a valid ip address')

+    @skipIf(True, 'Waiting for 2014.1 release')
+    def test_check_ip_ipv6_valid(self):
+        self.assertTrue(dig.check_ip('2607:fa18:0:3::4'))
+
     def test_check_ip_neg(self):
         self.assertFalse(dig.check_ip('-127.0.0.1'), msg="Did not detect negative value as invalid")

     def test_check_ip_empty(self):
         self.assertFalse(dig.check_ip(''), msg="Did not detect empty value as invalid")

-    def test_A(self):
+    def test_a(self):
         dig.__salt__ = {}
-        with patch.dict(dig.__salt__, {'cmd.run_all': MagicMock(return_value={'pid': 3656, 'retcode': 0, 'stderr': '', 'stdout': '74.125.193.104\n74.125.193.105\n74.125.193.99\n74.125.193.106\n74.125.193.103\n74.125.193.147'})}):
-            self.assertEqual(dig.A('www.google.com'), ['74.125.193.104', '74.125.193.105', '74.125.193.99', '74.125.193.106', '74.125.193.103', '74.125.193.147'])
+        dig_mock = MagicMock(return_value={
+            'pid': 3656, 'retcode': 0, 'stderr': '', 'stdout': '74.125.193.104\n'
+                                                               '74.125.193.105\n'
+                                                               '74.125.193.99\n'
+                                                               '74.125.193.106\n'
+                                                               '74.125.193.103\n'
+                                                               '74.125.193.147'
+        })
+        with patch.dict(dig.__salt__, {'cmd.run_all': dig_mock}):
+            self.assertEqual(dig.A('www.google.com'), ['74.125.193.104',
+                                                       '74.125.193.105',
+                                                       '74.125.193.99',
+                                                       '74.125.193.106',
+                                                       '74.125.193.103',
+                                                       '74.125.193.147'])
+
+    @skipIf(True, 'Waiting for 2014.1 release')
+    def test_aaaa(self):
+        dig.__salt__ = {}
+        dig_mock = MagicMock(return_value={
+            'pid': 25451, 'retcode': 0, 'stderr': '', 'stdout': '2607:f8b0:400f:801::1014'
+        })
+        with patch.dict(dig.__salt__, {'cmd.run_all': dig_mock}):
+            self.assertEqual(dig.AAAA('www.google.com'), ['2607:f8b0:400f:801::1014'])
+
+    @patch('salt.modules.dig.A', MagicMock(return_value=['ns4.google.com.']))
+    def test_ns(self):
+        dig.__salt__ = {}
+        dig_mock = MagicMock(return_value={
+            'pid': 26136, 'retcode': 0, 'stderr': '', 'stdout': 'ns4.google.com.'
+        })
+        with patch.dict(dig.__salt__, {'cmd.run_all': dig_mock}):
+            self.assertEqual(dig.NS('google.com'), ['ns4.google.com.'])
+
+    def test_spf(self):
+        dig.__salt__ = {}
+        dig_mock = MagicMock(return_value={'pid': 26795,
+                                           'retcode': 0,
+                                           'stderr': '',
+                                           'stdout': 'v=spf1'
+                                                     ' include:_spf.google.com '
+                                                     'ip4:216.73.93.70/31 '
+                                                     'ip4:216.73.93.72/31 ~all'})
+        with patch.dict(dig.__salt__, {'cmd.run_all': dig_mock}):
+            self.assertEqual(dig.SPF('google.com'),
+                             ['216.73.93.70/31', '216.73.93.72/31'])
+
+    @skipIf(True, 'Waiting for 2014.1 release')
+    def test_spf_redir(self):
+        '''
+        Test was written after a bug was found when a domain is redirecting the SPF ipv4 range
+        '''
+        dig.__salt__ = {}
+        dig_mock = MagicMock(return_value={'pid': 27282,
+                                           'retcode': 0,
+                                           'stderr': '',
+                                           'stdout': 'v=spf1 a mx '
+                                                     'include:_spf.xmission.com ?all'})
+        with patch.dict(dig.__salt__, {'cmd.run_all': dig_mock}):
+            self.assertEqual(dig.SPF('xmission.com'), ['198.60.22.0/24', '166.70.13.0/24'])
+
+    def test_mx(self):
+        dig.__salt__ = {}
+        dig_mock = MagicMock(return_value={'pid': 27780,
+                                           'retcode': 0,
+                                           'stderr': '',
+                                           'stdout': '10 aspmx.l.google.com.\n'
+                                                     '20 alt1.aspmx.l.google.com.\n'
+                                                     '40 alt3.aspmx.l.google.com.\n'
+                                                     '50 alt4.aspmx.l.google.com.\n'
+                                                     '30 alt2.aspmx.l.google.com.'})
+        with patch.dict(dig.__salt__, {'cmd.run_all': dig_mock}):
+            self.assertEqual(dig.MX('google.com'), [['10', 'aspmx.l.google.com.'],
+                                                    ['20', 'alt1.aspmx.l.google.com.'],
+                                                    ['40', 'alt3.aspmx.l.google.com.'],
+                                                    ['50', 'alt4.aspmx.l.google.com.'],
+                                                    ['30', 'alt2.aspmx.l.google.com.']])
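The SPF tests expect `dig.SPF` to boil a TXT answer such as `v=spf1 include:_spf.google.com ip4:216.73.93.70/31 ip4:216.73.93.72/31 ~all` down to just the ip4 networks (and, per the skipped redirect test, eventually to follow includes as well). A rough sketch of the simple ip4 extraction only, illustrative and not the module's actual implementation:

    def spf_ip4_networks(record):
        # keep only the ip4: mechanisms of an SPF record
        return [tok.split(':', 1)[1] for tok in record.split() if tok.startswith('ip4:')]

    print spf_ip4_networks('v=spf1 include:_spf.google.com '
                           'ip4:216.73.93.70/31 ip4:216.73.93.72/31 ~all')
    # ['216.73.93.70/31', '216.73.93.72/31']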